repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
billgoo/Rutgers-CS527-Database-Systems-for-Data-Science-Information | [
"13994f4139ba22dd0ed5b84b35d3240d1e1c06f3"
]
| [
"Assignment/4 MongoDB assignment/MongoDB_assignment_Group5/transpose.py"
]
| [
"import pandas as pd\n\nfile=open('C:\\\\Users\\\\95236\\\\Desktop\\\\MongoDB_assignment_Group5\\\\GSE13355_expr_transpose.csv','w')\n\ndf = pd.read_csv('C:\\\\Users\\\\95236\\\\Desktop\\\\MongoDB_assignment_Group5\\\\GSE13355_expr.csv',header= None)\ndata = df.values\n# data = df.as_matrix()\ndata = list(map(list,zip(*data)))\ndata = pd.DataFrame(data)\ndata.to_csv(file, header=0, index=0)\n\n"
]
| [
[
"pandas.read_csv",
"pandas.DataFrame"
]
]
|
cristianopris/tf-models | [
"b7dd462d553d868dfe446b3d6d467935333647d3"
]
| [
"official/transformer/translate.py"
]
| [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Translate text or files using trained transformer model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\n# pylint: disable=g-bad-import-order\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nfrom absl import app as absl_app\nfrom absl import flags\nimport tensorflow as tf\n# pylint: enable=g-bad-import-order\n\nfrom official.transformer.data_download import VOCAB_FILE\nfrom official.transformer.model import model_params\nfrom official.transformer.utils import tokenizer\nfrom official.utils.flags import core as flags_core\n\n_DECODE_BATCH_SIZE = 32\n_EXTRA_DECODE_LENGTH = 100\n_BEAM_SIZE = 4\n_ALPHA = 0.6\n\n\ndef _get_sorted_inputs(filename):\n \"\"\"Read and sort lines from the file sorted by decreasing length.\n\n Args:\n filename: String name of file to read inputs from.\n Returns:\n Sorted list of inputs, and dictionary mapping original index->sorted index\n of each element.\n \"\"\"\n with tf.gfile.Open(filename) as f:\n records = f.read().split(\"\\n\")\n inputs = [record.strip() for record in records]\n if not inputs[-1]:\n inputs.pop()\n\n input_lens = [(i, len(line.split())) for i, line in enumerate(inputs)]\n sorted_input_lens = sorted(input_lens, key=lambda x: x[1], reverse=True)\n\n sorted_inputs = []\n sorted_keys = {}\n for i, (index, _) in enumerate(sorted_input_lens):\n sorted_inputs.append(inputs[index])\n sorted_keys[index] = i\n return sorted_inputs, sorted_keys\n\n\ndef _encode_and_add_eos(line, subtokenizer):\n \"\"\"Encode line with subtokenizer, and add EOS id to the end.\"\"\"\n return subtokenizer.encode(line) + [tokenizer.EOS_ID]\n\n\ndef _trim_and_decode(ids, subtokenizer):\n \"\"\"Trim EOS and PAD tokens from ids, and decode to return a string.\"\"\"\n try:\n index = list(ids).index(tokenizer.EOS_ID)\n return subtokenizer.decode(ids[:index])\n except ValueError: # No EOS found in sequence\n return subtokenizer.decode(ids)\n\n\ndef translate_file(\n estimator, subtokenizer, input_file, output_file=None,\n print_all_translations=True):\n \"\"\"Translate lines in file, and save to output file if specified.\n\n Args:\n estimator: tf.Estimator used to generate the translations.\n subtokenizer: Subtokenizer object for encoding and decoding source and\n translated lines.\n input_file: file containing lines to translate\n output_file: file that stores the generated translations.\n print_all_translations: If true, all translations are printed to stdout.\n\n Raises:\n ValueError: if output file is invalid.\n \"\"\"\n batch_size = _DECODE_BATCH_SIZE\n\n # Read and sort inputs by length. 
Keep dictionary (original index-->new index\n # in sorted list) to write translations in the original order.\n sorted_inputs, sorted_keys = _get_sorted_inputs(input_file)\n num_decode_batches = (len(sorted_inputs) - 1) // batch_size + 1\n\n def input_generator():\n \"\"\"Yield encoded strings from sorted_inputs.\"\"\"\n for i, line in enumerate(sorted_inputs):\n if i % batch_size == 0:\n batch_num = (i // batch_size) + 1\n\n tf.logging.info(\"Decoding batch %d out of %d.\" %\n (batch_num, num_decode_batches))\n yield _encode_and_add_eos(line, subtokenizer)\n\n def input_fn():\n \"\"\"Created batched dataset of encoded inputs.\"\"\"\n ds = tf.data.Dataset.from_generator(\n input_generator, tf.int64, tf.TensorShape([None]))\n ds = ds.padded_batch(batch_size, [None])\n return ds\n\n translations = []\n for i, prediction in enumerate(estimator.predict(input_fn)):\n translation = _trim_and_decode(prediction[\"outputs\"], subtokenizer)\n translations.append(translation)\n\n if print_all_translations:\n tf.logging.info(\"Translating:\\n\\tInput: %s\\n\\tOutput: %s\" %\n (sorted_inputs[i], translation))\n\n # Write translations in the order they appeared in the original file.\n if output_file is not None:\n if tf.gfile.IsDirectory(output_file):\n raise ValueError(\"File output is a directory, will not save outputs to \"\n \"file.\")\n tf.logging.info(\"Writing to file %s\" % output_file)\n with tf.gfile.Open(output_file, \"w\") as f:\n for index, key in enumerate(sorted_keys):\n f.write(\"%s\\n\" % translations[key])\n\n\ndef translate_text(estimator, subtokenizer, txt):\n \"\"\"Translate a single string.\"\"\"\n encoded_txt = _encode_and_add_eos(txt, subtokenizer)\n\n def input_fn():\n ds = tf.data.Dataset.from_tensors(encoded_txt)\n ds = ds.batch(_DECODE_BATCH_SIZE)\n return ds\n\n predictions = estimator.predict(input_fn)\n translation = next(predictions)[\"outputs\"]\n translation = _trim_and_decode(translation, subtokenizer)\n tf.logging.info(\"Translation of \\\"%s\\\": \\\"%s\\\"\" % (txt, translation))\n\n\ndef main(unused_argv):\n from official.transformer import transformer_main\n\n tf.logging.set_verbosity(tf.logging.INFO)\n\n if FLAGS.text is None and FLAGS.file is None:\n tf.logging.warn(\"Nothing to translate. 
Make sure to call this script using \"\n \"flags --text or --file.\")\n return\n\n subtokenizer = tokenizer.Subtokenizer(\n os.path.join(FLAGS.data_dir, FLAGS.vocab_file))\n\n # Set up estimator and params\n params = transformer_main.PARAMS_MAP[FLAGS.param_set]\n params[\"beam_size\"] = _BEAM_SIZE\n params[\"alpha\"] = _ALPHA\n params[\"extra_decode_length\"] = _EXTRA_DECODE_LENGTH\n params[\"batch_size\"] = _DECODE_BATCH_SIZE\n estimator = tf.estimator.Estimator(\n model_fn=transformer_main.model_fn, model_dir=FLAGS.model_dir,\n params=params)\n\n if FLAGS.text is not None:\n tf.logging.info(\"Translating text: %s\" % FLAGS.text)\n translate_text(estimator, subtokenizer, FLAGS.text)\n\n if FLAGS.file is not None:\n input_file = os.path.abspath(FLAGS.file)\n tf.logging.info(\"Translating file: %s\" % input_file)\n if not tf.gfile.Exists(FLAGS.file):\n raise ValueError(\"File does not exist: %s\" % input_file)\n\n output_file = None\n if FLAGS.file_out is not None:\n output_file = os.path.abspath(FLAGS.file_out)\n tf.logging.info(\"File output specified: %s\" % output_file)\n\n translate_file(estimator, subtokenizer, input_file, output_file)\n\n\ndef define_translate_flags():\n \"\"\"Define flags used for translation script.\"\"\"\n # Model and vocab file flags\n flags.DEFINE_string(\n name=\"data_dir\", short_name=\"dd\", default=\"/tmp/translate_ende\",\n help=flags_core.help_wrap(\n \"Directory for where the translate_ende_wmt32k dataset is saved.\"))\n flags.DEFINE_string(\n name=\"vocab_file\", short_name=\"vf\", default=VOCAB_FILE,\n help=flags_core.help_wrap(\n \"Name of vocabulary file containing subtokens for subtokenizing the \"\n \"input text or file. This file is expected to be in the directory \"\n \"defined by --data_dir.\"))\n flags.DEFINE_string(\n name=\"model_dir\", short_name=\"md\", default=\"/tmp/transformer_model\",\n help=flags_core.help_wrap(\n \"Directory containing Transformer model checkpoints.\"))\n flags.DEFINE_enum(\n name=\"param_set\", short_name=\"mp\", default=\"big\",\n enum_values=[\"base\", \"big\"],\n help=flags_core.help_wrap(\n \"Parameter set to use when creating and training the model. The \"\n \"parameters define the input shape (batch size and max length), \"\n \"model configuration (size of embedding, # of hidden layers, etc.), \"\n \"and various other settings. The big parameter set increases the \"\n \"default batch size, embedding/hidden size, and filter size. For a \"\n \"complete list of parameters, please see model/model_params.py.\"))\n\n flags.DEFINE_string(\n name=\"text\", default=None,\n help=flags_core.help_wrap(\n \"Text to translate. Output will be printed to console.\"))\n flags.DEFINE_string(\n name=\"file\", default=None,\n help=flags_core.help_wrap(\n \"File containing text to translate. Translation will be printed to \"\n \"console and, if --file_out is provided, saved to an output file.\"))\n flags.DEFINE_string(\n name=\"file_out\", default=None,\n help=flags_core.help_wrap(\n \"If --file flag is specified, save translation to this file.\"))\n\n\nif __name__ == \"__main__\":\n define_translate_flags()\n FLAGS = flags.FLAGS\n absl_app.run(main)\n"
]
| [
[
"tensorflow.data.Dataset.from_tensors",
"tensorflow.TensorShape",
"tensorflow.estimator.Estimator",
"tensorflow.gfile.Open",
"tensorflow.gfile.Exists",
"tensorflow.logging.warn",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.gfile.IsDirectory"
]
]
|
leelew/AttConvLSTM | [
"62f614efa8654125014e3e31efd309a946f23b6c"
]
| [
"plot/figure12.py"
]
| [
"import sys\nsys.path.append('../')\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nplt.rc('font', family='Times New Roman')\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\nimport matplotlib.colors as mcolors\n\nfrom figure11 import gen_metric\nfrom utils import gen_meshgrid\n\n\ndef figure12():\n\n # load 3H \n target_3H, rmse_3H, r2_3H = gen_metric(\n '/Users/lewlee/Documents/Github/SMNet/output/figures_DAC/figure5.npz')\n\n # load 6H \n target_6H, rmse_6H, r2_6H = gen_metric(\n '/Users/lewlee/Documents/Github/SMNet/output/figures_DAC_6HH/figure5.npz')\n\n # load 12H \n target_12H, rmse_12H, r2_12H = gen_metric(\n '/Users/lewlee/Documents/Github/SMNet/output/figures_DAC_12HH/figure5.npz')\n\n # load spatial mean 6H\n data = np.load(\n '/Users/lewlee/Documents/Github/SMNet/output/figures_DAC_6HH/figure6.npz')\n\n target_mean_6 = data['arr_0']\n pred_mean_6 = data['arr_1']\n\n # load spatial mean 12H\n data = np.load(\n '/Users/lewlee/Documents/Github/SMNet/output/figures_DAC_12HH/figure6.npz')\n\n target_mean_12 = data['arr_0']\n pred_mean_12 = data['arr_1']\n\n data = np.load(\n '/Users/lewlee/Documents/Github/SMNet/output/figures_DAC/figure10.npz')\n\n acf_target_3 = data['arr_0']\n acf_pred_3 = data['arr_1']\n\n data = np.load(\n '/Users/lewlee/Documents/Github/SMNet/output/figures_DAC_6HH/figure10.npz')\n\n acf_target_6 = data['arr_0']\n acf_pred_6 = data['arr_1']\n\n data = np.load(\n '/Users/lewlee/Documents/Github/SMNet/output/figures_DAC_12HH/figure10.npz')\n\n acf_target_12 = data['arr_0']\n acf_pred_12 = data['arr_1']\n\n \n # plot\n fig = plt.figure(figsize=(9, 7))\n\n # colorbar\n #colors = ('white', 'lightcyan', 'cyan', \n # 'deepskyblue', 'dodgerblue', 'lightgreen',\n # 'yellow', 'darkorange', 'fuchsia', 'hotpink')\n colors = ('white', 'lightcyan', 'cyan','darkturquoise',\n 'deepskyblue', 'dodgerblue', 'royalblue',\n 'blue')\n clrmap = mcolors.LinearSegmentedColormap.from_list(\"mycmap\", colors)\n\n # ----------------------------- Figure 12 (b) ------------------------------\n ax1 = plt.subplot2grid((4, 3), (0, 0), colspan=2, rowspan=2)\n\n # project\n m = Basemap(projection='mill', llcrnrlat=27,\n urcrnrlat=50, llcrnrlon=-122.9, urcrnrlon=-70.2,)\n \n # line\n m.drawcoastlines()\n # m.drawcountries()\n \n # meshgrid\n lon, lat = gen_meshgrid()\n x, y = m(lon, lat)\n\n # turn nan\n a = r2_3H - r2_6H\n a[a<0] = np.nan\n a[a>0.5] = 0.5\n\n # contourf\n sc = m.contourf(x, y, a,\n cmap=clrmap,\n vmin=0, vmax=0.5)\n\n d3 = acf_target_3 - acf_target_6\n mm = np.nanpercentile(d3, 75)\n\n kk = np.full((d3.shape[0], d3.shape[1]), np.nan)\n kk[d3 > mm] = 1\n\n m.contourf(x, y,\n kk,\n hatches=['..'],cmap='gray', alpha=0)\n #sc.set_edgecolor('face')\n \n # test\n x, y = m(-123, 50.5)\n plt.text(x, y, '(a) difference between $R^{2}$ of 3HH & 6HH case',\n fontweight='bold', fontsize=14)\n\n # inset colorbar\n axin1 = ax1.inset_axes([0.899, 0.024, 0.02, 0.3])\n plt.colorbar(sc, cax=axin1,drawedges=False)\n\n # ----------------------------- Figure 5 (b) -------------------------------\n ax2 = plt.subplot2grid((4, 3), (2, 0), colspan=2, rowspan=2)\n\n # project\n m = Basemap(projection='mill', llcrnrlat=27,\n urcrnrlat=50, llcrnrlon=-122.9, urcrnrlon=-70.2,)\n \n # line\n m.drawcoastlines()\n # m.drawcountries()\n \n # meshgrid\n lon, lat = gen_meshgrid()\n x, y = m(lon, lat)\n\n # \n\n # turn nan\n b = r2_3H - r2_12H\n b[b<0] = np.nan\n b[b>0.5] = 0.5\n\n # contourf\n sc = m.contourf(x, y, b,\n 
cmap=clrmap,\n vmin=0, vmax=0.5)\n #sc.set_edgecolor('face')\n d3 = acf_target_3 - acf_target_12\n mm = np.nanpercentile(d3, 75)\n\n kk = np.full((d3.shape[0], d3.shape[1]), np.nan)\n kk[d3 > mm] = 1\n\n m.contourf(x, y,\n kk,\n hatches=['..'],cmap='gray', alpha=0)\n # text\n x, y = m(-123, 50.5)\n plt.text(x, y, '(b) difference between $R^{2}$ of 3HH & 12HH case',\n fontweight='bold', fontsize=14)\n\n # inset colorbar\n axin2 = ax2.inset_axes([0.899, 0.024, 0.02, 0.3])\n plt.colorbar(sc, cax=axin2,drawedges=False)\n\n\n # add axes\n axx1 = fig.add_axes([0.629, 0.5324, 0.15,0.329])\n axx1.set_yticks([])\n axx1.spines['right'].set_linewidth(2)\n axx1.spines['top'].set_visible(False)\n axx1.spines['left'].set_visible(False)\n axx1.spines['bottom'].set_linewidth(2)\n axx1.set_xlabel('mean SM')\n\n axxtwin1 = axx1.twinx()\n axxtwin1.plot(target_mean_6, range(len(target_mean_6)), lw=2, c='black')\n axxtwin1.plot(pred_mean_6, range(len(target_mean_6)), lw=2, c='hotpink')\n axxtwin1.set_ylim(0, len(target_mean_6))\n axxtwin1.spines['right'].set_linewidth(2)\n axxtwin1.spines['top'].set_visible(False)\n axxtwin1.spines['left'].set_visible(False)\n axxtwin1.spines['bottom'].set_linewidth(2)\n axxtwin1.text(0.28, 1000, '$R^{2}$ = ' +\n str(round(r2_score(target_mean_6, pred_mean_6), 3)))\n axxtwin1.text(0.28, 940, 'RMSE = ' +\n str(round(np.sqrt(mean_squared_error(target_mean_6, pred_mean_6)), 3)))\n\n #axxtwin1.set_ylabel('time')\n axxtwin1.plot([0.35, 0.38], [500, 500], lw=2, c='black')\n axxtwin1.plot([0.35, 0.38], [450, 450], lw=2, c='hotpink')\n axxtwin1.text(0.39, 490, 'SMAP(12HH)', fontsize=6)\n axxtwin1.text(0.39, 440, 'AttConvLSTM(6H)', fontsize=6, c='hotpink')\n axxtwin1.set_yticks([32, 156, 280, 404, 528, 652, 776,900, 1024, 1148,])\n axxtwin1.set_yticklabels(['2018-04', '2018-05', '2018-06',\n'2018-07','2018-08','2018-09','2018-10', '2018-11','2018-12','2019-01'])\n axxtwin1.scatter(0.53, 640, s=15, marker='<', c='red')\n axxtwin1.scatter(0.53, 100, s=15, marker='<', c='red')\n\n axxtwin1.scatter(0.365, 400, s=15, marker='<', c='red')\n axxtwin1.text(0.39, 390, 'catastrophe point', fontsize=6, c='red')\n\n #axxtwin1.set_xlabel('mean soil moisture')\n\n axx2 = fig.add_axes([0.629, 0.1285, 0.15,0.329])\n axx2.set_yticks([])\n axx2.spines['right'].set_linewidth(2)\n axx2.spines['top'].set_visible(False)\n axx2.spines['left'].set_visible(False)\n axx2.spines['bottom'].set_linewidth(2)\n \n axxtwin2 = axx2.twinx()\n axxtwin2.plot(target_mean_12, range(len(target_mean_12)),lw=2, c='black')\n axxtwin2.plot(pred_mean_12, range(len(target_mean_12)), lw=2, c='hotpink')\n print(target_mean_6.shape)\n #axxtwin2.set_ylabel('time')\n #axxtwin2.set_xlabel('mean soil moisture')\n axxtwin2.set_ylim(0, len(target_mean_12))\n axxtwin2.spines['right'].set_linewidth(2)\n axxtwin2.spines['top'].set_visible(False)\n axxtwin2.spines['left'].set_visible(False)\n axxtwin2.spines['bottom'].set_linewidth(2)\n axxtwin2.text(0.3, 500, '$R^{2}$ = ' +\n str(round(r2_score(target_mean_12, pred_mean_12), 3)))\n axxtwin2.text(0.3, 470, 'RMSE = ' +\n str(round(np.sqrt(mean_squared_error(target_mean_12, pred_mean_12)), 3)))\n axxtwin2.plot([0.36, 0.39], [250, 250], lw=2, c='black')\n axxtwin2.plot([0.36, 0.39], [225, 225], lw=2, c='hotpink')\n axxtwin2.text(0.40, 245, 'SMAP(12HH)',fontsize=6)\n axxtwin2.text(0.40, 220, 'AttConvLSTM(12HH)', fontsize=6, c='hotpink')\n axxtwin2.set_yticks([16, 78, 140, 202, 264,326, 388,450, 512, 574,])\n axxtwin2.set_yticklabels(['2018-04', '2018-05', '2018-06',\n'2018-07', '2018-08', 
'2018-09', '2018-10', '2018-11', '2018-12', '2019-01'])\n axxtwin2.scatter(0.53, 320, s=15, marker='<', c='red')\n axxtwin2.scatter(0.53, 50, s=15, marker='<', c='red')\n\n axxtwin2.scatter(0.375, 200, s=15, marker='<', c='red')\n axxtwin2.text(0.4, 195, 'catastrophe point', fontsize=6, c='red')\n\n plt.subplots_adjust(hspace=0.2)\n \n plt.savefig('/Users/lewlee/Desktop/figure12.pdf')\n\nif __name__ == \"__main__\":\n figure12()\n\n\n "
]
| [
[
"numpy.nanpercentile",
"sklearn.metrics.r2_score",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.savefig",
"numpy.full",
"sklearn.metrics.mean_squared_error",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.subplots_adjust",
"numpy.load",
"matplotlib.pyplot.text",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.figure"
]
]
|
nukui-s/mlens | [
"4133c6f6654dd785399bb068c6a6ac727a88bb92"
]
| [
"mlens/ensemble/tests/test_a_sklearn.py"
]
| [
"\"\"\"\nTest Scikit-learn\n\"\"\"\nimport numpy as np\nfrom mlens.ensemble import SuperLearner, Subsemble, BlendEnsemble\nfrom mlens.testing.dummy import return_pickled\ntry:\n from sklearn.utils.estimator_checks import check_estimator\n from sklearn.linear_model import Lasso, LinearRegression\n from sklearn.neighbors import KNeighborsRegressor\n from sklearn.svm import SVR\n from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n\n from sklearn.decomposition import PCA\n from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n\n from sklearn.datasets import load_boston\n\n has_sklearn = True\n\nexcept ImportError:\n has_sklearn = False\n\n\nif has_sklearn:\n\n X, y = load_boston(True)\n\n estimators = [Lasso(),\n GradientBoostingRegressor(),\n LinearRegression(),\n KNeighborsRegressor(),\n SVR(),\n RandomForestRegressor(),\n ]\n\n est_prep = {'prep1': estimators,\n 'prep2': estimators,\n 'prep3': estimators}\n\n prep_1 = [PCA()]\n prep_2 = [PolynomialFeatures(), StandardScaler()]\n\n prep = {'prep1': prep_1,\n 'prep2': prep_2,\n 'prep3': []}\n\n def get_ensemble(cls, backend, preprocessing):\n \"\"\"Get ensemble.\"\"\"\n if preprocessing:\n est = est_prep\n else:\n est = estimators\n ens = cls(backend=backend)\n ens.add(est, preprocessing)\n ens.add(LinearRegression(), meta=True)\n return ens\n\n def test_super_learner_s_m():\n \"\"\"[SuperLearner] Test scikit-learn comp - mp | np\"\"\"\n ens = get_ensemble(SuperLearner, 'multiprocessing', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n def test_super_learner_f_m():\n \"\"\"[SuperLearner] Test scikit-learn comp - mp | p\"\"\"\n ens = get_ensemble(SuperLearner, 'multiprocessing', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_super_learner_s_t():\n \"\"\"[SuperLearner] Test scikit-learn comp - th | np\"\"\"\n ens = get_ensemble(SuperLearner, 'threading', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_super_learner_f_t():\n \"\"\"[SuperLearner] Test scikit-learn comp - th | p\"\"\"\n ens = get_ensemble(SuperLearner, 'threading', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_subsemble_s_m():\n \"\"\"[Subsemble] Test scikit-learn comp - mp | np\"\"\"\n ens = get_ensemble(Subsemble, 'multiprocessing', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n\n def test_subsemble_f_m():\n \"\"\"[Subsemble] Test scikit-learn comp - mp | p\"\"\"\n ens = get_ensemble(Subsemble, 'multiprocessing', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_subsemble_s_t():\n \"\"\"[Subsemble] Test scikit-learn comp - th | np\"\"\"\n ens = get_ensemble(Subsemble, 
'threading', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_subsemble_f_t():\n \"\"\"[Subsemble] Test scikit-learn comp - th | p\"\"\"\n ens = get_ensemble(Subsemble, 'threading', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_blend_s_m():\n \"\"\"[BlendEnsemble] Test scikit-learn comp - mp | np\"\"\"\n ens = get_ensemble(BlendEnsemble, 'multiprocessing', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_blend_f_m():\n \"\"\"[BlendEnsemble] Test scikit-learn comp - mp | p\"\"\"\n ens = get_ensemble(BlendEnsemble, 'multiprocessing', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_blend_s_m():\n \"\"\"[BlendEnsemble] Test scikit-learn comp - th | np\"\"\"\n ens = get_ensemble(BlendEnsemble, 'threading', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_blend_f_m():\n \"\"\"[BlendEnsemble] Test scikit-learn comp - th | p\"\"\"\n ens = get_ensemble(BlendEnsemble, 'threading', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n"
]
| [
[
"sklearn.ensemble.RandomForestRegressor",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.ensemble.GradientBoostingRegressor",
"sklearn.neighbors.KNeighborsRegressor",
"sklearn.linear_model.Lasso",
"sklearn.svm.SVR",
"numpy.testing.assert_array_equal",
"sklearn.linear_model.LinearRegression",
"sklearn.datasets.load_boston",
"sklearn.preprocessing.StandardScaler",
"sklearn.decomposition.PCA"
]
]
|
guodashun/soft-catching | [
"ffe876ab57eed053427d9719aaf7dbe8f6ad9aa6"
]
| [
"utils.py"
]
| [
"import numpy as np\nfrom scipy.spatial.transform import Rotation as R\n\ndef quat2rotvec(quat):\n r = R.from_quat(quat)\n return r.as_rotvec()\n\ndef quat2matrix(quat):\n r = R.from_quat(quat)\n return r.as_matrix()\n\ndef rotvec2quat(vec):\n r = R.from_rotvec(vec)\n return r.as_quat()\n\n# def quat2euler(quat):\n# r = R.from_quat(quat)\n# return r.as_euler('zyx')\n\n# def pose_absolute_control(tar, cur_pos, cur_vel, kp, kd):\n# tar = np.array(tar)\n# cur_pos = np.array(cur_pos)\n# cur_vel = np.array(cur_vel)\n# action = np.zeros((6,1))\n \n# for i in range(len(tar)):\n# action[i] = (tar[i] - cur_pos[i]) * kp - cur_vel[i] * kd\n\n# return action\n\ndef impedence_control(force, x_p, v, M, K, D, freq):\n acc = np.linalg.inv(M) @ ( K @ x_p - force - D @ v)\n next_v = v + acc / freq\n x = (v + next_v) / 2 / freq\n return x\n\ndef vel_follow():\n return \n"
]
| [
[
"numpy.linalg.inv",
"scipy.spatial.transform.Rotation.from_rotvec",
"scipy.spatial.transform.Rotation.from_quat"
]
]
|
zachlewis/aces-dev | [
"596dfaf71f858a5b1210f7823c83552cee02842f"
]
| [
"transforms/python/ssts_sliderPlot.py"
]
| [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider, Button\nfrom ssts import ssts, TsPoint\nfrom math import log10, log\n\ndef aces2stops( aces):\n return np.log2(aces)-np.log2(0.18)\n\nhgt = 0.04\nwdt = 0.23\nhspc = 0.1\nledge = 0.05\n\nfig, ax = plt.subplots( figsize=(12,7) )\nplt.subplots_adjust(left=ledge, top=0.97, bottom=0.4, right=0.97)\n\naces = 0.18*pow(2.,np.arange(-20,21,0.5))\nY = ssts(aces)\n\nx = aces2stops(aces)\ny = np.log10(Y)\nl, = plt.plot( x, y, lw=2, color='red')\n\nminxRange = [-20.,-15.,-6.5]\nminyRange = [0.0001,0.0001,0.02]\nminsRange = [0.,0.,1.]\nmidxRange = [-5.,-0.,5.]\nmidyRange = [0.,4.8,48.]\nmidsRange = [1.,1.5,2.]\nmaxxRange = [6.5,15.,20.]\nmaxyRange = [48.,10000.,10000.]\nmaxsRange = [0.,0.,1.]\npctLRange = [0.1, 0.25, 0.5]\npctHRange = [0.5, 0.75, 0.9]\n\nminx = 0.18*pow(2.,minxRange[1])\nminy = minyRange[1]\nmidx = 0.18*pow(2.,midxRange[1])\nmidy = midyRange[1]\nmaxx = 0.18*pow(2.,maxxRange[1])\nmaxy = maxyRange[1]\nhmin, = plt.plot( aces2stops(minx), log10(miny), color='red', marker='o')\nhmid, = plt.plot( aces2stops(midx), log10(midy), color='red', marker='o')\nhmax, = plt.plot( aces2stops(maxx), log10(maxy), color='red', marker='o')\n\nhpctL, = plt.plot( midxRange[1]-((midxRange[1]-minxRange[1])/2.), log10(miny)+((log10(midy)-log10(miny))*pctLRange[1]), color='red', marker='x')\nhpctH, = plt.plot( midxRange[1]+((maxxRange[1]-midxRange[1])/2.), log10(midy)+((log10(maxy)-log10(midy))*pctHRange[1]), color='red', marker='x')\n\nplt.axis([-20, 20, -4.5, 4.5])\nplt.grid(b=True,which='major',axis='both')\nplt.xlabel(\"scene exposure - stops relative to 18% mid-gray\")\nplt.ylabel(\"log$_{10}$ luminance ($cd/m^2$)\")\n\n\naxcolor = '#d1d1fa'\n\nax_minX = plt.axes([ledge, 0.175, wdt, hgt], facecolor='white')\ns_minX = Slider(ax_minX, 'X', minxRange[0], minxRange[2], valinit=minxRange[1], valfmt=\"%1.1f\")\nax_minY = plt.axes([ledge, 0.125, wdt, hgt], facecolor='white')\ns_minY = Slider(ax_minY, 'Y', minyRange[0], minyRange[2], valinit=minyRange[1], valfmt=\"%1.4f\")\nax_minS = plt.axes([ledge, 0.075, wdt, hgt], facecolor='white')\ns_minS = Slider(ax_minS, 'slope', minsRange[0], minsRange[2], valinit=minsRange[1], valfmt=\"%1.1f\")\n\nax_midX = plt.axes([ledge+wdt+hspc, 0.175, wdt, hgt], facecolor='white')\ns_midX = Slider(ax_midX, 'X', midxRange[0], midxRange[2], valinit=midxRange[1], valfmt=\"%1.1f\")\nax_midY = plt.axes([ledge+wdt+hspc, 0.125, wdt, hgt], facecolor='white')\ns_midY = Slider(ax_midY, 'Y', midyRange[0], midyRange[2], valinit=midyRange[1], valfmt=\"%1.1f\")\nax_midS = plt.axes([ledge+wdt+hspc, 0.075, wdt, hgt], facecolor='white')\ns_midS = Slider(ax_midS, 'slope', midsRange[0], midsRange[2], valinit=midsRange[1], valfmt=\"%1.2f\")\n\nax_maxX = plt.axes([ledge+2*wdt+2*hspc, 0.175, wdt, hgt], facecolor='white')\ns_maxX = Slider(ax_maxX, 'X', maxxRange[0], maxxRange[2], valinit=maxxRange[1], valfmt=\"%1.1f\")\nax_maxY = plt.axes([ledge+2*wdt+2*hspc, 0.125, wdt, hgt], facecolor='white')\ns_maxY = Slider(ax_maxY, 'Y', maxyRange[0], maxyRange[2], valinit=maxyRange[1], valfmt=\"%i\")\nax_maxS = plt.axes([ledge+2*wdt+2*hspc, 0.075, wdt, hgt], facecolor='white')\ns_maxS = Slider(ax_maxS, 'slope', maxsRange[0], maxsRange[2], valinit=maxsRange[1], valfmt=\"%1.1f\")\n\nax_pctL = plt.axes([ledge+0.5*wdt+0.6*hspc, 0.25, wdt, hgt], facecolor='white')\ns_pctL = Slider(ax_pctL, '% low', pctLRange[0], pctLRange[2], valinit=pctLRange[1], valfmt=\"%1.2f\")\nax_pctH = plt.axes([ledge+1.5*wdt+1.6*hspc, 0.25, wdt, hgt], 
facecolor='white')\ns_pctH = Slider(ax_pctH, '% high', pctHRange[0], pctHRange[2], valinit=pctHRange[1], valfmt=\"%1.2f\")\n\n\ndef update(val):\n minx = 0.18*pow(2.,s_minX.val)\n miny = s_minY.val\n minsl = s_minS.val\n midx = 0.18*pow(2.,s_midX.val)\n midy = s_midY.val\n midsl = s_midS.val\n maxx = 0.18*pow(2.,s_maxX.val)\n maxy = s_maxY.val\n maxsl = s_maxS.val\n pctLow = s_pctL.val\n pctHigh = s_pctH.val\n\n Min = TsPoint( minx, miny, minsl)\n Mid = TsPoint( midx, midy, midsl)\n Max = TsPoint( maxx, maxy, maxsl)\n\n l.set_ydata( np.log10( ssts(aces,Min,Mid,Max,pctLow,pctHigh)) )\n\n hmin.set_xdata( aces2stops(minx))\n hmin.set_ydata( log10(miny))\n hmid.set_xdata( aces2stops(midx))\n hmid.set_ydata( log10(midy))\n hmax.set_xdata( aces2stops(maxx))\n hmax.set_ydata( log10(maxy))\n hpctL.set_xdata( s_midX.val-((s_midX.val-s_minX.val)/2.) )\n hpctL.set_ydata( log10(miny)+((log10(midy)-log10(miny))*pctLow) )\n hpctH.set_xdata( s_midX.val+((s_maxX.val-s_midX.val)/2.) )\n hpctH.set_ydata( log10(midy)+((log10(maxy)-log10(midy))*pctHigh) )\n \n fig.canvas.draw_idle()\ns_minX.on_changed(update)\ns_minY.on_changed(update)\ns_minS.on_changed(update)\ns_midX.on_changed(update)\ns_midY.on_changed(update)\ns_midS.on_changed(update)\ns_maxX.on_changed(update)\ns_maxY.on_changed(update)\ns_maxS.on_changed(update)\ns_pctL.on_changed(update)\ns_pctH.on_changed(update)\n\n# reset button\nresetax = plt.axes([0.8, 0.025, 0.1, 0.03])\nbutton = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')\n\ndef reset(event):\n s_minX.reset()\n s_minY.reset()\n s_minS.reset()\n s_midX.reset()\n s_midY.reset()\n s_midS.reset()\n s_maxX.reset()\n s_maxY.reset()\n s_maxS.reset()\n s_pctL.reset()\n s_pctH.reset()\nbutton.on_clicked(reset)\n\n\n\nplt.show()"
]
| [
[
"numpy.log2",
"matplotlib.widgets.Button",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axes",
"matplotlib.widgets.Slider",
"numpy.log10",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
]
|
ilyakava/sumproduct | [
"4a58e12d8ed1dc9b9706649c1d751b182e5199c9"
]
| [
"sumproduct.py"
]
| [
"import numpy as np\n\n\nclass Node:\n def __init__(self, name):\n self.connections = []\n self.inbox = {} # messages recieved\n self.name = name\n\n def append(self, to_node):\n \"\"\"\n Mutates the to AND from node!\n \"\"\"\n self.connections.append(to_node)\n to_node.connections.append(self)\n\n def deliver(self, step_num, mu):\n \"\"\"\n Ensures that inbox is keyed by a step number\n \"\"\"\n if self.inbox.get(step_num):\n self.inbox[step_num].append(mu)\n else:\n self.inbox[step_num] = [mu]\n\n\nclass Factor(Node):\n \"\"\"\n NOTE: For the Factor nodes in the graph, it will be assumed\n that the connections are created in the same exact order\n as the potentials' dimensions are given\n \"\"\"\n\n def __init__(self, name, potentials):\n self.p = potentials\n Node.__init__(self, name)\n\n def make_message(self, recipient):\n \"\"\"\n Does NOT mutate the Factor node!\n\n NOTE that using the log rule before 5.1.42 in BRML by David\n Barber, that the product is actually computed via a sum of logs.\n\n Steps:\n 1. reformat mus to all be the same dimension as the factor's\n potential and take logs, mus -> lambdas\n 2. find a max_lambda (element wise maximum)\n 3. sum lambdas, and subtract the max_lambda once\n 4. exponentiate the previous result, multiply by exp of max_lambda\n and run summation to sum over all the states not in the recipient\n node\n 5. log previous, add back max_lambda, and exponentiate because we\n will pass around mus rather than lambdas everywhere\n\n Note that max_lambda in 5.1.42 is NOT a element-wise maximum (and\n therefore a matrix), it is a scalar.\n \"\"\"\n if not len(self.connections) == 1:\n unfiltered_mus = self.inbox[max(self.inbox.keys())]\n mus = [mu for mu in unfiltered_mus\n if not mu.from_node == recipient]\n all_mus = [self.reformat_mu(mu) for mu in mus]\n lambdas = np.array([np.log(mu) for mu in all_mus])\n max_lambdas = np.nan_to_num(lambdas.flatten())\n max_lambda = max(max_lambdas)\n result = sum(lambdas) - max_lambda\n product_output = np.multiply(self.p, np.exp(result))\n return np.exp(\n np.log(self.summation(product_output, recipient)) + max_lambda)\n else:\n return self.summation(self.p, recipient)\n\n def reformat_mu(self, mu):\n \"\"\"\n Returns the given mu's val reformatted to be the same\n dimensions as self.p, ensuring that mu's values are\n expanded in the correct axes.\n\n The identity of mu's from_node is used to decide which axis\n the mu's val should be expaned in to fit self.p\n\n Example:\n\n # self.p (dim order: x3, x4, then x2)\n np.array([\n [\n [0.3,0.5,0.2],\n [0.1,0.1,0.8]\n ],\n [\n [0.9,0.05,0.05],\n [0.2,0.7,0.1]\n ]\n ])\n\n # mu\n x3 = np.array([0.2, 0.8])\n which_dim = 0 # the dimension which x3 changes in self.p\n dims = [2, 2, 3]\n\n # desired output\n np.array([\n [\n [0.2, 0.2, 0.2],\n [0.2, 0.2, 0.2]\n ],\n [\n [0.8, 0.8, 0.8],\n [0.8, 0.8, 0.8]\n ]\n ])\n \"\"\"\n dims = self.p.shape\n states = mu.val\n which_dim = self.connections.index(mu.from_node) # raises err\n assert dims[which_dim] is len(states)\n\n acc = np.ones(dims)\n for coord in np.ndindex(dims):\n i = coord[which_dim]\n acc[coord] *= states[i]\n return acc\n\n def summation(self, p, node):\n \"\"\"\n Does NOT mutate the factor node.\n\n Sum over all states not in the node.\n Similar to reformat_mu in strategy.\n \"\"\"\n dims = p.shape\n which_dim = self.connections.index(node)\n out = np.zeros(node.size)\n assert dims[which_dim] is node.size\n for coord in np.ndindex(dims):\n i = coord[which_dim]\n out[i] += p[coord]\n return out\n\n\nclass 
Variable(Node):\n def __init__(self, name, size):\n self.bfmarginal = None\n self.size = size\n Node.__init__(self, name)\n\n def marginal(self):\n \"\"\"\n Life saving normalizations:\n\n sum_logs - max(sum_logs) <- before exponentiating\n and rem_inf\n \"\"\"\n if len(self.inbox):\n mus = self.inbox[max(self.inbox.keys())]\n log_vals = [np.log(mu.val) for mu in mus]\n valid_log_vals = [np.nan_to_num(lv) for lv in log_vals]\n sum_logs = sum(valid_log_vals)\n valid_sum_logs = sum_logs - max(sum_logs) # IMPORANT!\n prod = np.exp(valid_sum_logs)\n return prod / sum(prod) # normalize\n else:\n # first time called: uniform\n return np.ones(self.size) / self.size\n\n def latex_marginal(self):\n \"\"\"\n same as marginal() but returns a nicely formatted latex string\n \"\"\"\n data = self.marginal()\n data_str = ' & '.join([str(d) for d in data])\n tabular = '|' + ' | '.join(['l' for i in range(self.size)]) + '|'\n return (\"$$p(\\mathrm{\" + self.name + \"}) = \\\\begin{tabular}{\" + tabular\n + '} \\hline' + data_str + '\\\\\\\\ \\hline \\end{tabular}$$')\n\n def make_message(self, recipient):\n \"\"\"\n Follows log rule in 5.1.38 in BRML by David Barber\n b/c of numerical issues\n \"\"\"\n if not len(self.connections) == 1:\n unfiltered_mus = self.inbox[max(self.inbox.keys())]\n mus = [mu for mu in unfiltered_mus\n if not mu.from_node == recipient]\n log_vals = [np.log(mu.val) for mu in mus]\n return np.exp(sum(log_vals))\n else:\n return np.ones(self.size)\n\n\nclass Mu:\n \"\"\"\n An object to represent a message being passed\n a to_node attribute isn't needed since that will be clear from\n whose inbox the Mu is sitting in\n \"\"\"\n\n def __init__(self, from_node, val):\n self.from_node = from_node\n # this normalization is necessary\n self.val = val.flatten() / sum(val.flatten())\n\n\nclass FactorGraph:\n def __init__(self, first_node=None, silent=False, debug=False):\n self.nodes = {}\n self.silent = silent\n self.debug = debug\n if first_node:\n self.nodes[first_node.name] = first_node\n\n def add(self, node):\n assert node not in self.nodes\n self.nodes[node.name] = node\n\n def connect(self, name1, name2):\n # no need to assert since dict lookup will raise err\n self.nodes[name1].append(self.nodes[name2])\n\n def append(self, from_node_name, to_node):\n assert from_node_name in self.nodes\n tnn = to_node.name\n # add the to_node to the graph if it is not already there\n if not (self.nodes.get(tnn, 0)):\n self.nodes[tnn] = to_node\n self.nodes[from_node_name].append(self.nodes[tnn])\n return self\n\n def leaf_nodes(self):\n return [node for node in self.nodes.values()\n if len(node.connections) == 1]\n\n def observe(self, name, state):\n \"\"\"\n Mutates the factors connected to Variable with name!\n\n @param state: Ordinal state starting at ONE (1)\n\n As described in Barber 5.1.3. 
But instead of multiplying\n factors with an indicator/delta_function to account for\n an observation, the factor node loses the dimensions for\n unobserved states, and then the connection to the observed\n variable node is severed (although it remains in the graph\n to give a uniform marginal when asked).\n \"\"\"\n node = self.nodes[name]\n assert isinstance(node, Variable)\n assert node.size >= state\n assert state, \"state is obsered on an ordinal scale starting at ONE(1)\"\n for factor in [c for c in node.connections if isinstance(c, Factor)]:\n delete_axis = factor.connections.index(node)\n delete_dims = list(range(node.size)) # Fixed because pop is not a method of the range object\n delete_dims.pop(state - 1)\n sliced = np.delete(factor.p, delete_dims, delete_axis)\n factor.p = np.squeeze(sliced)\n factor.connections.remove(node)\n assert len(factor.p.shape) is len(factor.connections)\n node.connections = [] # so that they don't pass messages\n\n def export_marginals(self):\n return dict([\n (n.name, n.marginal()) for n in self.nodes.values()\n if isinstance(n, Variable)\n ])\n\n @staticmethod\n def compare_marginals(m1, m2):\n \"\"\"\n For testing the difference between marginals across a graph at\n two different iteration states, in order to declare convergence.\n \"\"\"\n assert not len(np.setdiff1d(m1.keys(), m2.keys()))\n return sum([sum(np.absolute(m1[k] - m2[k])) for k in m1.keys()])\n\n def compute_marginals(self, max_iter=500, tolerance=1e-6, error_fun=None):\n \"\"\"\n sum-product algorithm\n\n @param error_fun: a custom error function that takes two arguments\n each of the form of export_marginals' return value\n\n @return epsilons[1:]: if you are using the default error function\n compate_marginals, the first two epsilons are meaningless (first\n entry is arbitrarily 1, and first marginal is arbitrarily uniform\n so the second computed epsilon will also be arbitrary). HOWEVER,\n those using a custom error function might only using the most\n recently computed marginal, and would be interested in epsilons[1].\n\n Mutates nodes by adding in the messages passed into their\n 'inbox' instance variables. It does not change the potentials\n on the Factor nodes.\n\n Using the \"Asynchronous Parallel Schedule\" from Sudderth lec04\n slide 11 after an initialization step of Variable nodes sending\n all 1's messages:\n - At each iteration, all nodes compute all outputs from all\n current inputs. Factors-Variables and then Variables-Factors\n - Iterate until convergence.\n\n This update schedule is best suited for loopy graphs. 
It ends\n up working best as a max sum-product algorithm as high\n probabilities dominate heavily when the tolerance is very small\n \"\"\"\n # for keeping track of state\n epsilons = [1]\n step = 0\n # for inbox clearance\n for node in self.nodes.values():\n node.inbox.clear()\n # for testing convergence\n cur_marginals = self.export_marginals()\n # initialization\n for node in self.nodes.values():\n if isinstance(node, Variable):\n message = Mu(node, np.ones(node.size))\n for recipient in node.connections:\n recipient.deliver(step, message)\n\n # propagation (w/ termination conditions)\n while (step < max_iter) and tolerance < epsilons[-1]:\n last_marginals = cur_marginals\n step += 1\n if not self.silent:\n epsilon = 'epsilon: ' + str(epsilons[-1])\n print(epsilon + ' | ' + str(step) + '-' * 20)\n factors = [n for n in self.nodes.values() if isinstance(n, Factor)]\n variables = [n for n in self.nodes.values()\n if isinstance(n, Variable)]\n senders = factors + variables\n for sender in senders:\n next_recipients = sender.connections\n for recipient in next_recipients:\n if self.debug:\n print(sender.name + ' -> ' + recipient.name)\n val = sender.make_message(recipient)\n message = Mu(sender, val)\n recipient.deliver(step, message)\n cur_marginals = self.export_marginals()\n if error_fun:\n epsilons.append(error_fun(cur_marginals, last_marginals))\n else:\n epsilons.append(\n self.compare_marginals(cur_marginals, last_marginals))\n if not self.silent:\n print('X' * 50)\n print('final epsilon after ' + str(step) + ' iterations = ' + str(\n epsilons[-1]))\n return epsilons[1:] # skip only the first, see docstring above\n\n def brute_force(self):\n \"\"\"\n Main strategy of this code was gleaned from:\n http://cs.brown.edu/courses/cs242/assignments/hw1code.zip\n\n # first compute the full joint table\n - create a joint accumulator for N variables that is N dimensional\n - iterate through factors\n - for each factor expand probabilities into dimensions of the joint\n table\n - create a factor accumulator that is N dimensional\n - for each coord in the joint table, look at the states of the\n vars that are in the factor's potentials, and add in the log of\n that probability\n - exponentiate and normalize\n # then compute the marginals\n - iterate through variables\n - for each variable sum over all other variables\n \"\"\"\n variables = [v for v in self.nodes.values() if isinstance(v, Variable)]\n\n var_dims = [v.size for v in variables]\n N = len(var_dims)\n assert N < 32, \"max number of vars for brute force is 32 (numpy's matrix dim limit)\"\n log_joint_acc = np.zeros(var_dims)\n for factor in [f for f in self.nodes.values()\n if isinstance(f, Factor)]:\n # dimensions that will matter for this factor\n which_dims = [variables.index(v) for v in factor.connections]\n factor_acc = np.ones(var_dims)\n for joint_coord in np.ndindex(tuple(var_dims)):\n factor_coord = tuple([joint_coord[i] for i in which_dims])\n factor_acc[joint_coord] *= factor.p[factor_coord]\n log_joint_acc += np.log(factor_acc)\n log_joint_acc -= np.max(log_joint_acc) # to avoid numerical issues\n joint_acc = np.exp(log_joint_acc) / np.sum(np.exp(log_joint_acc))\n # compute marginals\n for i, variable in enumerate(variables):\n sum_dims = [j for j in range(N) if not j == i]\n sum_dims.sort(reverse=True)\n collapsing_marginal = joint_acc\n for j in sum_dims:\n collapsing_marginal = collapsing_marginal.sum(j) # lose 1 dim\n variable.bfmarginal = collapsing_marginal\n return variables\n"
]
| [
[
"numpy.log",
"numpy.absolute",
"numpy.squeeze",
"numpy.nan_to_num",
"numpy.ones",
"numpy.max",
"numpy.delete",
"numpy.ndindex",
"numpy.exp",
"numpy.zeros"
]
]
|
imsb-uke/ecarenet | [
"ffbf24e740d9154312d02b9f775816ed4b3d4691"
]
| [
"unittests/unittest_helpers.py"
]
| [
"import tensorflow as tf\nimport os\n\n\ndef get_data_directory(examples='color'):\n if 'color' in examples:\n e = 'example_colors'\n elif 'circle' in examples:\n e = 'example_circles'\n directory = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..', 'unittest_data', e))\n return directory\n\n\ndef get_model_directory(run_id='parent'):\n if run_id == 'parent':\n return os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'unittest_data', 'test_models'))\n else:\n return os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'unittest_data', 'test_models', run_id))\n\n\ndef create_image_dataset(number_of_examples, resize=False):\n directory = get_data_directory()\n image = tf.io.read_file(os.path.join(directory, \"img_red_0.png\"))\n image = tf.image.decode_image(image, 3)\n if resize:\n image = tf.image.resize(image, (128, 128))\n image = tf.cast(image, dtype=tf.dtypes.float32)\n\n dataset = tf.data.Dataset.from_tensor_slices(([image for i in range(number_of_examples)]))\n return dataset\n\n\ndef create_image_label_dataset(number_of_examples, label, resize=False):\n directory = get_data_directory()\n image = tf.io.read_file(os.path.join(directory, \"img_red_0.png\"))\n image = tf.image.decode_image(image, 3)\n if resize:\n image = tf.image.resize(image, (128, 128))\n image = tf.cast(image, dtype=tf.dtypes.float32)\n if not isinstance(label, list):\n label = [label for i in range(number_of_examples)]\n dataset = tf.data.Dataset.from_tensor_slices(([image for i in range(number_of_examples)], [label[i] for i in range(number_of_examples)]))\n return dataset"
]
| [
[
"tensorflow.cast",
"tensorflow.image.decode_image",
"tensorflow.image.resize"
]
]
|
kamilc/fiftyone | [
"031de565cfd1a6ce234920515138362884800cf6"
]
| [
"fiftyone/utils/cvat.py"
]
| [
"\"\"\"\nUtilities for working with datasets in\n`CVAT format <https://github.com/opencv/cvat>`_.\n\n| Copyright 2017-2021, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nfrom collections import defaultdict\nfrom copy import copy, deepcopy\nfrom datetime import datetime\nimport itertools\nimport logging\nimport os\nimport warnings\nimport webbrowser\n\nfrom bson import ObjectId\nimport jinja2\nimport numpy as np\nimport requests\nimport urllib3\n\nimport eta.core.data as etad\nimport eta.core.image as etai\nimport eta.core.utils as etau\n\nimport fiftyone.constants as foc\nimport fiftyone.core.fields as fof\nimport fiftyone.core.labels as fol\nimport fiftyone.core.media as fom\nimport fiftyone.core.metadata as fomt\nimport fiftyone.core.utils as fou\nimport fiftyone.utils.annotations as foua\nimport fiftyone.utils.data as foud\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CVATImageDatasetImporter(\n foud.LabeledImageDatasetImporter, foud.ImportPathsMixin\n):\n \"\"\"Importer for CVAT image datasets stored on disk.\n\n See :ref:`this page <CVATImageDataset-import>` for format details.\n\n Args:\n dataset_dir (None): the dataset directory. If omitted, ``data_path``\n and/or ``labels_path`` must be provided\n data_path (None): an optional parameter that enables explicit control\n over the location of the media. Can be any of the following:\n\n - a folder name like ``\"data\"`` or ``\"data/\"`` specifying a\n subfolder of ``dataset_dir`` where the media files reside\n - an absolute directory path where the media files reside. In\n this case, the ``dataset_dir`` has no effect on the location of\n the data\n - a filename like ``\"data.json\"`` specifying the filename of the\n JSON data manifest file in ``dataset_dir``\n - an absolute filepath specifying the location of the JSON data\n manifest. In this case, ``dataset_dir`` has no effect on the\n location of the data\n\n If None, this parameter will default to whichever of ``data/`` or\n ``data.json`` exists in the dataset directory\n labels_path (None): an optional parameter that enables explicit control\n over the location of the labels. Can be any of the following:\n\n - a filename like ``\"labels.xml\"`` specifying the location of the\n labels in ``dataset_dir``\n - an absolute filepath to the labels. In this case,\n ``dataset_dir`` has no effect on the location of the labels\n\n If None, the parameter will default to ``labels.xml``\n include_all_data (False): whether to generate samples for all images in\n the data directory (True) rather than only creating samples for\n images with label entries (False)\n shuffle (False): whether to randomly shuffle the order in which the\n samples are imported\n seed (None): a random seed to use when shuffling\n max_samples (None): a maximum number of samples to import. 
By default,\n all samples are imported\n \"\"\"\n\n def __init__(\n self,\n dataset_dir=None,\n data_path=None,\n labels_path=None,\n include_all_data=False,\n shuffle=False,\n seed=None,\n max_samples=None,\n ):\n if dataset_dir is None and data_path is None and labels_path is None:\n raise ValueError(\n \"At least one of `dataset_dir`, `data_path`, and \"\n \"`labels_path` must be provided\"\n )\n\n data_path = self._parse_data_path(\n dataset_dir=dataset_dir, data_path=data_path, default=\"data/\",\n )\n\n labels_path = self._parse_labels_path(\n dataset_dir=dataset_dir,\n labels_path=labels_path,\n default=\"labels.xml\",\n )\n\n super().__init__(\n dataset_dir=dataset_dir,\n shuffle=shuffle,\n seed=seed,\n max_samples=max_samples,\n )\n\n self.data_path = data_path\n self.labels_path = labels_path\n self.include_all_data = include_all_data\n\n self._info = None\n self._image_paths_map = None\n self._cvat_images_map = None\n self._filenames = None\n self._iter_filenames = None\n self._num_samples = None\n\n def __iter__(self):\n self._iter_filenames = iter(self._filenames)\n return self\n\n def __len__(self):\n return self._num_samples\n\n def __next__(self):\n filename = next(self._iter_filenames)\n\n if os.path.isabs(filename):\n image_path = filename\n else:\n image_path = self._image_paths_map[filename]\n\n cvat_image = self._cvat_images_map.get(filename, None)\n if cvat_image is not None:\n # Labeled image\n image_metadata = cvat_image.get_image_metadata()\n labels = cvat_image.to_labels()\n else:\n # Unlabeled image\n image_metadata = fomt.ImageMetadata.build_for(image_path)\n labels = None\n\n return image_path, image_metadata, labels\n\n @property\n def has_dataset_info(self):\n return True\n\n @property\n def has_image_metadata(self):\n return True\n\n @property\n def label_cls(self):\n return {\n \"detections\": fol.Detections,\n \"polylines\": fol.Polylines,\n \"keypoints\": fol.Keypoints,\n }\n\n def setup(self):\n self._image_paths_map = self._load_data_map(\n self.data_path, recursive=True\n )\n\n if self.labels_path is not None and os.path.isfile(self.labels_path):\n info, _, cvat_images = load_cvat_image_annotations(\n self.labels_path\n )\n else:\n info = {}\n cvat_images = []\n\n self._info = info\n\n # Use subset/name as the key if it exists, else just name\n cvat_images_map = {}\n for i in cvat_images:\n if i.subset:\n key = os.path.join(i.subset, i.name)\n else:\n key = i.name\n\n cvat_images_map[key] = i\n\n self._cvat_images_map = cvat_images_map\n\n filenames = set(self._cvat_images_map.keys())\n\n if self.include_all_data:\n filenames.update(self._image_paths_map.keys())\n\n self._filenames = self._preprocess_list(sorted(filenames))\n self._num_samples = len(self._filenames)\n\n def get_dataset_info(self):\n return self._info\n\n\nclass CVATVideoDatasetImporter(\n foud.LabeledVideoDatasetImporter, foud.ImportPathsMixin\n):\n \"\"\"Importer for CVAT video datasets stored on disk.\n\n See :ref:`this page <CVATVideoDataset-import>` for format details.\n\n Args:\n dataset_dir (None): the dataset directory. If omitted, ``data_path``\n and/or ``labels_path`` must be provided\n data_path (None): an optional parameter that enables explicit control\n over the location of the media. Can be any of the following:\n\n - a folder name like ``\"data\"`` or ``\"data/\"`` specifying a\n subfolder of ``dataset_dir`` where the media files reside\n - an absolute directory path where the media files reside. 
In\n this case, the ``dataset_dir`` has no effect on the location of\n the data\n - a filename like ``\"data.json\"`` specifying the filename of the\n JSON data manifest file in ``dataset_dir``\n - an absolute filepath specifying the location of the JSON data\n manifest. In this case, ``dataset_dir`` has no effect on the\n location of the data\n\n If None, this parameter will default to whichever of ``data/`` or\n ``data.json`` exists in the dataset directory\n labels_path (None): an optional parameter that enables explicit control\n over the location of the labels. Can be any of the following:\n\n - a folder name like ``\"labels\"`` or ``\"labels/\"`` specifying the\n location of the labels in ``dataset_dir``\n - an absolute folder path to the labels. In this case,\n ``dataset_dir`` has no effect on the location of the labels\n\n If None, the parameter will default to ``labels/``\n include_all_data (False): whether to generate samples for all videos in\n the data directory (True) rather than only creating samples for\n videos with label entries (False)\n shuffle (False): whether to randomly shuffle the order in which the\n samples are imported\n seed (None): a random seed to use when shuffling\n max_samples (None): a maximum number of samples to import. By default,\n all samples are imported\n \"\"\"\n\n def __init__(\n self,\n dataset_dir=None,\n data_path=None,\n labels_path=None,\n include_all_data=False,\n shuffle=False,\n seed=None,\n max_samples=None,\n ):\n if dataset_dir is None and data_path is None and labels_path is None:\n raise ValueError(\n \"At least one of `dataset_dir`, `data_path`, and \"\n \"`labels_path` must be provided\"\n )\n\n data_path = self._parse_data_path(\n dataset_dir=dataset_dir, data_path=data_path, default=\"data/\",\n )\n\n labels_path = self._parse_labels_path(\n dataset_dir=dataset_dir,\n labels_path=labels_path,\n default=\"labels/\",\n )\n\n super().__init__(\n dataset_dir=dataset_dir,\n shuffle=shuffle,\n seed=seed,\n max_samples=max_samples,\n )\n\n self.data_path = data_path\n self.labels_path = labels_path\n self.include_all_data = include_all_data\n\n self._info = None\n self._cvat_task_labels = None\n self._video_paths_map = None\n self._labels_paths_map = None\n self._uuids = None\n self._iter_uuids = None\n self._num_samples = None\n\n def __iter__(self):\n self._iter_uuids = iter(self._uuids)\n return self\n\n def __len__(self):\n return self._num_samples\n\n def __next__(self):\n uuid = next(self._iter_uuids)\n\n video_path = self._video_paths_map[uuid]\n\n labels_path = self._labels_paths_map.get(uuid, None)\n if labels_path:\n # Labeled video\n info, cvat_task_labels, cvat_tracks = load_cvat_video_annotations(\n labels_path\n )\n\n if self._info is None:\n self._info = info\n\n self._cvat_task_labels.merge_task_labels(cvat_task_labels)\n self._info[\"task_labels\"] = self._cvat_task_labels.labels\n\n frames = _cvat_tracks_to_frames_dict(cvat_tracks)\n else:\n # Unlabeled video\n frames = None\n\n return video_path, None, None, frames\n\n @property\n def has_dataset_info(self):\n return True\n\n @property\n def has_video_metadata(self):\n return False # has (width, height) but not other important info\n\n @property\n def label_cls(self):\n return None\n\n @property\n def frame_labels_cls(self):\n return {\n \"detections\": fol.Detections,\n \"polylines\": fol.Polylines,\n \"keypoints\": fol.Keypoints,\n }\n\n def setup(self):\n self._video_paths_map = self._load_data_map(\n self.data_path, ignore_exts=True, recursive=True\n )\n\n if 
self.labels_path is not None and os.path.isdir(self.labels_path):\n self._labels_paths_map = {\n os.path.splitext(p)[0]: os.path.join(self.labels_path, p)\n for p in etau.list_files(self.labels_path, recursive=True)\n }\n else:\n self._labels_paths_map = {}\n\n uuids = set(self._labels_paths_map.keys())\n\n if self.include_all_data:\n uuids.update(self._video_paths_map.keys())\n\n self._info = None\n self._uuids = self._preprocess_list(sorted(uuids))\n self._num_samples = len(self._uuids)\n self._cvat_task_labels = CVATTaskLabels()\n\n def get_dataset_info(self):\n return self._info\n\n\nclass CVATImageDatasetExporter(\n foud.LabeledImageDatasetExporter, foud.ExportPathsMixin\n):\n \"\"\"Exporter that writes CVAT image datasets to disk.\n\n See :ref:`this page <CVATImageDataset-export>` for format details.\n\n Args:\n export_dir (None): the directory to write the export. This has no\n effect if ``data_path`` and ``labels_path`` are absolute paths\n data_path (None): an optional parameter that enables explicit control\n over the location of the exported media. Can be any of the\n following:\n\n - a folder name like ``\"data\"`` or ``\"data/\"`` specifying a\n subfolder of ``export_dir`` in which to export the media\n - an absolute directory path in which to export the media. In\n this case, the ``export_dir`` has no effect on the location of\n the data\n - a JSON filename like ``\"data.json\"`` specifying the filename of\n the manifest file in ``export_dir`` generated when\n ``export_media`` is ``\"manifest\"``\n - an absolute filepath specifying the location to write the JSON\n manifest file when ``export_media`` is ``\"manifest\"``. In this\n case, ``export_dir`` has no effect on the location of the data\n\n If None, the default value of this parameter will be chosen based\n on the value of the ``export_media`` parameter\n labels_path (None): an optional parameter that enables explicit control\n over the location of the exported labels. Can be any of the\n following:\n\n - a filename like ``\"labels.xml\"`` specifying the location in\n ``export_dir`` in which to export the labels\n - an absolute filepath to which to export the labels. In this\n case, the ``export_dir`` has no effect on the location of the\n labels\n\n If None, the labels will be exported into ``export_dir`` using the\n default filename\n export_media (None): controls how to export the raw media. The\n supported values are:\n\n - ``True``: copy all media files into the output directory\n - ``False``: don't export media\n - ``\"move\"``: move all media files into the output directory\n - ``\"symlink\"``: create symlinks to the media files in the output\n directory\n - ``\"manifest\"``: create a ``data.json`` in the output directory\n that maps UUIDs used in the labels files to the filepaths of\n the source media, rather than exporting the actual media\n\n If None, the default value of this parameter will be chosen based\n on the value of the ``data_path`` parameter\n image_format (None): the image format to use when writing in-memory\n images to disk. 
By default, ``fiftyone.config.default_image_ext``\n is used\n \"\"\"\n\n def __init__(\n self,\n export_dir=None,\n data_path=None,\n labels_path=None,\n export_media=None,\n image_format=None,\n ):\n data_path, export_media = self._parse_data_path(\n export_dir=export_dir,\n data_path=data_path,\n export_media=export_media,\n default=\"data/\",\n )\n\n labels_path = self._parse_labels_path(\n export_dir=export_dir,\n labels_path=labels_path,\n default=\"labels.xml\",\n )\n\n super().__init__(export_dir=export_dir)\n\n self.data_path = data_path\n self.labels_path = labels_path\n self.export_media = export_media\n self.image_format = image_format\n\n self._name = None\n self._task_labels = None\n self._cvat_images = None\n self._media_exporter = None\n\n @property\n def requires_image_metadata(self):\n return True\n\n @property\n def label_cls(self):\n return {\n \"detections\": fol.Detections,\n \"polylines\": fol.Polylines,\n \"keypoints\": fol.Keypoints,\n }\n\n def setup(self):\n self._cvat_images = []\n self._media_exporter = foud.ImageExporter(\n self.export_media,\n export_path=self.data_path,\n default_ext=self.image_format,\n )\n self._media_exporter.setup()\n\n def log_collection(self, sample_collection):\n self._name = sample_collection.name\n self._task_labels = sample_collection.info.get(\"task_labels\", None)\n\n def export_sample(self, image_or_path, labels, metadata=None):\n _, uuid = self._media_exporter.export(image_or_path)\n\n if labels is None:\n return # unlabeled\n\n if not isinstance(labels, dict):\n labels = {\"labels\": labels}\n\n if all(v is None for v in labels.values()):\n return # unlabeled\n\n if metadata is None:\n metadata = fomt.ImageMetadata.build_for(image_or_path)\n\n cvat_image = CVATImage.from_labels(labels, metadata)\n\n cvat_image.id = len(self._cvat_images)\n cvat_image.name = uuid\n\n self._cvat_images.append(cvat_image)\n\n def close(self, *args):\n # Get task labels\n if self._task_labels is None:\n # Compute task labels from active label schema\n cvat_task_labels = CVATTaskLabels.from_cvat_images(\n self._cvat_images\n )\n else:\n # Use task labels from logged collection info\n cvat_task_labels = CVATTaskLabels(labels=self._task_labels)\n\n # Write annotations\n writer = CVATImageAnnotationWriter()\n writer.write(\n cvat_task_labels,\n self._cvat_images,\n self.labels_path,\n id=0,\n name=self._name,\n )\n\n self._media_exporter.close()\n\n\nclass CVATVideoDatasetExporter(\n foud.LabeledVideoDatasetExporter, foud.ExportPathsMixin\n):\n \"\"\"Exporter that writes CVAT video datasets to disk.\n\n See :ref:`this page <CVATVideoDataset-export>` for format details.\n\n Args:\n export_dir (None): the directory to write the export. This has no\n effect if ``data_path`` and ``labels_path`` are absolute paths\n data_path (None): an optional parameter that enables explicit control\n over the location of the exported media. Can be any of the\n following:\n\n - a folder name like ``\"data\"`` or ``\"data/\"`` specifying a\n subfolder of ``export_dir`` in which to export the media\n - an absolute directory path in which to export the media. In\n this case, the ``export_dir`` has no effect on the location of\n the data\n - a JSON filename like ``\"data.json\"`` specifying the filename of\n the manifest file in ``export_dir`` generated when\n ``export_media`` is ``\"manifest\"``\n - an absolute filepath specifying the location to write the JSON\n manifest file when ``export_media`` is ``\"manifest\"``. 
In this\n case, ``export_dir`` has no effect on the location of the data\n\n If None, the default value of this parameter will be chosen based\n on the value of the ``export_media`` parameter\n labels_path (None): an optional parameter that enables explicit control\n over the location of the exported labels. Can be any of the\n following:\n\n - a folder name like ``\"labels\"`` or ``\"labels/\"`` specifying the\n location in ``export_dir`` in which to export the labels\n - an absolute filepath to which to export the labels. In this\n case, the ``export_dir`` has no effect on the location of the\n labels\n\n If None, the labels will be exported into ``export_dir`` using the\n default folder name\n export_media (None): controls how to export the raw media. The\n supported values are:\n\n - ``True``: copy all media files into the output directory\n - ``False``: don't export media\n - ``\"move\"``: move all media files into the output directory\n - ``\"symlink\"``: create symlinks to the media files in the output\n directory\n - ``\"manifest\"``: create a ``data.json`` in the output directory\n that maps UUIDs used in the labels files to the filepaths of\n the source media, rather than exporting the actual media\n\n If None, the default value of this parameter will be chosen based\n on the value of the ``data_path`` parameter\n \"\"\"\n\n def __init__(\n self,\n export_dir=None,\n data_path=None,\n labels_path=None,\n export_media=None,\n ):\n data_path, export_media = self._parse_data_path(\n export_dir=export_dir,\n data_path=data_path,\n export_media=export_media,\n default=\"data/\",\n )\n\n labels_path = self._parse_labels_path(\n export_dir=export_dir, labels_path=labels_path, default=\"labels/\",\n )\n\n super().__init__(export_dir=export_dir)\n\n self.data_path = data_path\n self.labels_path = labels_path\n self.export_media = export_media\n\n self._task_labels = None\n self._num_samples = 0\n self._writer = None\n self._media_exporter = None\n\n @property\n def requires_video_metadata(self):\n return True\n\n @property\n def label_cls(self):\n return None\n\n @property\n def frame_labels_cls(self):\n return {\n \"detections\": fol.Detections,\n \"polylines\": fol.Polylines,\n \"keypoints\": fol.Keypoints,\n }\n\n def setup(self):\n self._writer = CVATVideoAnnotationWriter()\n self._media_exporter = foud.ImageExporter(\n self.export_media, export_path=self.data_path,\n )\n self._media_exporter.setup()\n\n def log_collection(self, sample_collection):\n self._task_labels = sample_collection.info.get(\"task_labels\", None)\n\n def export_sample(self, video_path, _, frames, metadata=None):\n _, filename = self._media_exporter.export(video_path)\n\n if frames is None:\n return # unlabeled\n\n if metadata is None:\n metadata = fomt.VideoMetadata.build_for(video_path)\n\n out_anno_path = os.path.join(\n self.labels_path, os.path.splitext(filename)[0] + \".xml\"\n )\n\n # Generate object tracks\n frame_size = (metadata.frame_width, metadata.frame_height)\n cvat_tracks = _frames_to_cvat_tracks(frames, frame_size)\n\n if cvat_tracks is None:\n return # unlabeled\n\n # Get task labels\n if self._task_labels is None:\n # Compute task labels from active label schema\n cvat_task_labels = CVATTaskLabels.from_cvat_tracks(cvat_tracks)\n else:\n # Use task labels from logged collection info\n cvat_task_labels = CVATTaskLabels(labels=self._task_labels)\n\n # Write annotations\n self._num_samples += 1\n self._writer.write(\n cvat_task_labels,\n cvat_tracks,\n metadata,\n out_anno_path,\n 
id=self._num_samples - 1,\n name=filename,\n )\n\n def close(self, *args):\n self._media_exporter.close()
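\n\n\n# Example: the exporters above are typically driven via FiftyOne's export\n# API rather than instantiated directly. A minimal sketch (the dataset name\n# and export directory here are hypothetical):\n#\n# import fiftyone as fo\n#\n# dataset = fo.load_dataset(\"my-videos\")\n# dataset.export(\n# export_dir=\"/tmp/cvat-video-export\",\n# dataset_type=fo.types.CVATVideoDataset,\n# )\n\n\nclass CVATTaskLabels(object):\n \"\"\"Description of the labels in a CVAT image annotation task.\n\n Args:\n labels (None): a list of label dicts in the following format::\n\n [\n {\n \"name\": \"car\",\n \"attributes\": [\n {\n \"name\": \"type\",\n \"categories\": [\"coupe\", \"sedan\", \"truck\"]\n },\n ...\n ]\n },\n ...\n ]\n \"\"\"\n\n def __init__(self, labels=None):\n self.labels = labels or []\n\n def merge_task_labels(self, task_labels):\n \"\"\"Merges the given :class:`CVATTaskLabels` into this instance.\n\n Args:\n task_labels: a :class:`CVATTaskLabels`\n \"\"\"\n schema = self.to_schema()\n schema.merge_schema(task_labels.to_schema())\n new_task_labels = CVATTaskLabels.from_schema(schema)\n self.labels = new_task_labels.labels\n\n def to_schema(self):\n \"\"\"Returns an ``eta.core.image.ImageLabelsSchema`` representation of\n the task labels.\n\n Note that CVAT's task labels schema does not distinguish between boxes,\n polylines, and keypoints, so the returned schema stores all annotations\n under the ``\"objects\"`` field.\n\n Returns:\n an ``eta.core.image.ImageLabelsSchema``\n \"\"\"\n schema = etai.ImageLabelsSchema()\n\n for label in self.labels:\n _label = label[\"name\"]\n schema.add_object_label(_label)\n for attribute in label.get(\"attributes\", []):\n _name = attribute[\"name\"]\n _categories = attribute[\"categories\"]\n for _value in _categories:\n _attr = etad.CategoricalAttribute(_name, _value)\n schema.add_object_attribute(_label, _attr)\n\n return schema\n\n @classmethod\n def from_cvat_images(cls, cvat_images):\n \"\"\"Creates a :class:`CVATTaskLabels` instance that describes the active\n schema of the given annotations.\n\n Args:\n cvat_images: a list of :class:`CVATImage` instances\n\n Returns:\n a :class:`CVATTaskLabels`\n \"\"\"\n schema = etai.ImageLabelsSchema()\n for cvat_image in cvat_images:\n for anno in cvat_image.iter_annos():\n _label = anno.label\n schema.add_object_label(_label)\n\n if anno.occluded is not None:\n _attr = etad.BooleanAttribute(\"occluded\", anno.occluded)\n schema.add_object_attribute(_label, _attr)\n\n for attr in anno.attributes:\n _attr = attr.to_eta_attribute()\n schema.add_object_attribute(_label, _attr)\n\n return cls.from_schema(schema)\n\n @classmethod\n def from_cvat_tracks(cls, cvat_tracks):\n \"\"\"Creates a :class:`CVATTaskLabels` instance that describes the active\n schema of the given annotations.\n\n Args:\n cvat_tracks: a list of :class:`CVATTrack` instances\n\n Returns:\n a :class:`CVATTaskLabels`\n \"\"\"\n schema = etai.ImageLabelsSchema()\n for cvat_track in cvat_tracks:\n for anno in cvat_track.iter_annos():\n _label = anno.label\n schema.add_object_label(_label)\n\n if anno.outside is not None:\n _attr = etad.BooleanAttribute(\"outside\", anno.outside)\n schema.add_object_attribute(_label, _attr)\n\n if anno.occluded is not None:\n _attr = etad.BooleanAttribute(\"occluded\", anno.occluded)\n schema.add_object_attribute(_label, _attr)\n\n if anno.keyframe is not None:\n _attr = etad.BooleanAttribute(\"keyframe\", anno.keyframe)\n schema.add_object_attribute(_label, _attr)\n\n for attr in anno.attributes:\n _attr = attr.to_eta_attribute()\n schema.add_object_attribute(_label, _attr)\n\n return cls.from_schema(schema)\n\n @classmethod\n def from_labels_dict(cls, d):\n \"\"\"Creates a :class:`CVATTaskLabels` instance from the ``<labels>``\n tag of a CVAT annotation XML file.\n\n Args:\n d: a dict 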
representation of a ``<labels>`` tag\n\n Returns:\n a :class:`CVATTaskLabels`\n \"\"\"\n labels = _ensure_list(d.get(\"label\", []))\n _labels = []\n for label in labels:\n _tmp = label.get(\"attributes\", None) or {}\n attributes = _ensure_list(_tmp.get(\"attribute\", []))\n _attributes = []\n for attribute in attributes:\n _values = attribute.get(\"values\", None)\n _categories = _values.split(\"\\n\") if _values else []\n _attributes.append(\n {\"name\": attribute[\"name\"], \"categories\": _categories}\n )\n\n _labels.append({\"name\": label[\"name\"], \"attributes\": _attributes})\n\n return cls(labels=_labels)\n\n @classmethod\n def from_schema(cls, schema):\n \"\"\"Creates a :class:`CVATTaskLabels` instance from an\n ``eta.core.image.ImageLabelsSchema``.\n\n Args:\n schema: an ``eta.core.image.ImageLabelsSchema``\n\n Returns:\n a :class:`CVATTaskLabels`\n \"\"\"\n labels = []\n obj_schemas = schema.objects\n for label in sorted(obj_schemas.schema):\n obj_schema = obj_schemas.schema[label]\n obj_attr_schemas = obj_schema.attrs\n attributes = []\n for name in sorted(obj_attr_schemas.schema):\n attr_schema = obj_attr_schemas.schema[name]\n if isinstance(attr_schema, etad.CategoricalAttributeSchema):\n attributes.append(\n {\n \"name\": name,\n \"categories\": sorted(attr_schema.categories),\n }\n )\n\n labels.append({\"name\": label, \"attributes\": attributes})\n\n return cls(labels=labels)\n\n\nclass CVATImage(object):\n \"\"\"An annotated image in CVAT image format.\n\n Args:\n id: the ID of the image\n name: the filename of the image\n width: the width of the image, in pixels\n height: the height of the image, in pixels\n boxes (None): a list of :class:`CVATImageBox` instances\n polygons (None): a list of :class:`CVATImagePolygon` instances\n polylines (None): a list of :class:`CVATImagePolyline` instances\n points (None): a list of :class:`CVATImagePoints` instances\n subset (None): the project subset of the image, if any\n \"\"\"\n\n def __init__(\n self,\n id,\n name,\n width,\n height,\n boxes=None,\n polygons=None,\n polylines=None,\n points=None,\n subset=None,\n ):\n self.id = id\n self.name = name\n self.subset = subset\n self.width = width\n self.height = height\n self.boxes = boxes or []\n self.polygons = polygons or []\n self.polylines = polylines or []\n self.points = points or []\n\n @property\n def has_boxes(self):\n \"\"\"Whether this image has 2D boxes.\"\"\"\n return bool(self.boxes)\n\n @property\n def has_polylines(self):\n \"\"\"Whether this image has polygons or polylines.\"\"\"\n return bool(self.polygons) or bool(self.polylines)\n\n @property\n def has_points(self):\n \"\"\"Whether this image has keypoints.\"\"\"\n return bool(self.points)\n\n def iter_annos(self):\n \"\"\"Returns an iterator over the annotations in the image.\n\n Returns:\n an iterator that emits :class:`CVATImageAnno` instances\n \"\"\"\n return itertools.chain(\n self.boxes, self.polygons, self.polylines, self.points\n )\n\n def get_image_metadata(self):\n \"\"\"Returns a :class:`fiftyone.core.metadata.ImageMetadata` instance for\n the annotations.\n\n Returns:\n a :class:`fiftyone.core.metadata.ImageMetadata`\n \"\"\"\n return fomt.ImageMetadata(width=self.width, height=self.height)\n\n def to_labels(self):\n \"\"\"Returns :class:`fiftyone.core.labels.Label` representations of the\n annotations.\n\n Returns:\n a dict mapping field keys to :class:`fiftyone.core.labels.Label`\n instances\n \"\"\"\n frame_size = (self.width, self.height)\n\n labels = {}\n\n if self.boxes:\n detections = 
[b.to_detection(frame_size) for b in self.boxes]\n labels[\"detections\"] = fol.Detections(detections=detections)\n\n if self.polygons or self.polylines:\n polygons = [p.to_polyline(frame_size) for p in self.polygons]\n polylines = [p.to_polyline(frame_size) for p in self.polylines]\n labels[\"polylines\"] = fol.Polylines(polylines=polygons + polylines)\n\n if self.points:\n keypoints = [k.to_keypoint(frame_size) for k in self.points]\n labels[\"keypoints\"] = fol.Keypoints(keypoints=keypoints)\n\n return labels\n\n @classmethod\n def from_labels(cls, labels, metadata):\n \"\"\"Creates a :class:`CVATImage` from a dictionary of labels.\n\n Args:\n labels: a dict mapping keys to :class:`fiftyone.core.labels.Label`\n instances\n metadata: a :class:`fiftyone.core.metadata.ImageMetadata` for the\n image\n\n Returns:\n a :class:`CVATImage`\n \"\"\"\n width = metadata.width\n height = metadata.height\n\n _detections = []\n _polygons = []\n _polylines = []\n _keypoints = []\n for _labels in labels.values():\n if isinstance(_labels, fol.Detection):\n _detections.append(_labels)\n elif isinstance(_labels, fol.Detections):\n _detections.extend(_labels.detections)\n elif isinstance(_labels, fol.Polyline):\n if _labels.closed:\n _polygons.append(_labels)\n else:\n _polylines.append(_labels)\n elif isinstance(_labels, fol.Polylines):\n for poly in _labels.polylines:\n if poly.closed:\n _polygons.append(poly)\n else:\n _polylines.append(poly)\n elif isinstance(_labels, fol.Keypoint):\n _keypoints.append(_labels)\n elif isinstance(_labels, fol.Keypoints):\n _keypoints.extend(_labels.keypoints)\n elif _labels is not None:\n msg = (\n \"Ignoring unsupported label type '%s'\" % _labels.__class__\n )\n warnings.warn(msg)\n\n boxes = [CVATImageBox.from_detection(d, metadata) for d in _detections]\n\n polygons = []\n for p in _polygons:\n polygons.extend(CVATImagePolygon.from_polyline(p, metadata))\n\n polylines = []\n for p in _polylines:\n polylines.extend(CVATImagePolyline.from_polyline(p, metadata))\n\n points = [\n CVATImagePoints.from_keypoint(k, metadata) for k in _keypoints\n ]\n\n return cls(\n None,\n None,\n width,\n height,\n boxes=boxes,\n polygons=polygons,\n polylines=polylines,\n points=points,\n )\n\n @classmethod\n def from_image_dict(cls, d):\n \"\"\"Creates a :class:`CVATImage` from an ``<image>`` tag of a CVAT image\n annotations XML file.\n\n Args:\n d: a dict representation of an ``<image>`` tag\n\n Returns:\n a :class:`CVATImage`\n \"\"\"\n id = d[\"@id\"]\n name = d[\"@name\"]\n subset = d.get(\"@subset\", None)\n width = int(d[\"@width\"])\n height = int(d[\"@height\"])\n\n boxes = []\n for bd in _ensure_list(d.get(\"box\", [])):\n boxes.append(CVATImageBox.from_box_dict(bd))\n\n polygons = []\n for pd in _ensure_list(d.get(\"polygon\", [])):\n polygons.append(CVATImagePolygon.from_polygon_dict(pd))\n\n polylines = []\n for pd in _ensure_list(d.get(\"polyline\", [])):\n polylines.append(CVATImagePolyline.from_polyline_dict(pd))\n\n points = []\n for pd in _ensure_list(d.get(\"points\", [])):\n points.append(CVATImagePoints.from_points_dict(pd))\n\n return cls(\n id,\n name,\n width,\n height,\n boxes=boxes,\n polygons=polygons,\n polylines=polylines,\n points=points,\n subset=subset,\n )\n\n\nclass HasCVATPoints(object):\n \"\"\"Mixin for CVAT annotations that store a list of ``(x, y)`` pixel\n coordinates.\n\n Attributes:\n points: a list of ``(x, y)`` pixel coordinates defining points\n \"\"\"\n\n def __init__(self, points):\n self.points = points\n\n @property\n def 
points_str(self):\n return self._to_cvat_points_str(self.points)\n\n @staticmethod\n def _to_rel_points(points, frame_size):\n w, h = frame_size\n return [(x / w, y / h) for x, y in points]\n\n @staticmethod\n def _to_abs_points(points, frame_size):\n w, h = frame_size\n return [(int(round(x * w)), int(round(y * h))) for x, y in points]\n\n @staticmethod\n def _to_cvat_points_str(points):\n return \";\".join(\"%g,%g\" % (x, y) for x, y in points)\n\n @staticmethod\n def _parse_cvat_points_str(points_str):\n points = []\n for xy_str in points_str.split(\";\"):\n x, y = xy_str.split(\",\")\n points.append((int(round(float(x))), int(round(float(y)))))\n\n return points\n\n\nclass CVATImageAnno(object):\n \"\"\"Mixin for annotations in CVAT image format.\n\n Args:\n occluded (None): whether the object is occluded\n attributes (None): a list of :class:`CVATAttribute` instances\n \"\"\"\n\n def __init__(self, occluded=None, attributes=None):\n self.occluded = occluded\n self.attributes = attributes or []\n\n def _to_attributes(self):\n attributes = {a.name: a.value for a in self.attributes}\n\n if self.occluded == 1:\n attributes[\"occluded\"] = True\n\n return attributes\n\n @staticmethod\n def _parse_attributes(label):\n attrs = dict(label.iter_attributes())\n\n occluded = _to_int_bool(attrs.pop(\"occluded\", None))\n\n attributes = [\n CVATAttribute(k, v)\n for k, v in attrs.items()\n if _is_supported_attribute_type(v)\n ]\n\n return occluded, attributes\n\n @staticmethod\n def _parse_anno_dict(d):\n occluded = _from_int_bool(d.get(\"@occluded\", None))\n\n attributes = []\n for attr in _ensure_list(d.get(\"attribute\", [])):\n if \"#text\" in attr:\n name = attr[\"@name\"].lstrip(\"@\")\n if name == \"label_id\":\n # We assume that this is a `label_id` exported from a\n # CVAT annotation run created by our annotation API, which\n # should be ignored since we're not using the API here\n continue\n\n value = _parse_value(attr[\"#text\"])\n attributes.append(CVATAttribute(name, value))\n\n return occluded, attributes
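\n\n\n# Example: round-tripping CVAT's \"x1,y1;x2,y2;...\" points strings and\n# converting between relative and absolute coordinates with the helpers\n# above (the values here are hypothetical):\n#\n# HasCVATPoints._parse_cvat_points_str(\"10,20;30,40\") # [(10, 20), (30, 40)]\n# HasCVATPoints._to_cvat_points_str([(10, 20), (30, 40)]) # \"10,20;30,40\"\n# HasCVATPoints._to_abs_points([(0.5, 0.25)], (640, 480)) # [(320, 120)]\n\n\nclass CVATImageBox(CVATImageAnno):\n \"\"\"An object bounding box in CVAT image format.\n\n Args:\n label: the object label string\n xtl: the top-left x-coordinate of the box, in pixels\n ytl: the top-left y-coordinate of the box, in pixels\n xbr: the bottom-right x-coordinate of the box, in pixels\n ybr: the bottom-right y-coordinate of the box, in pixels\n occluded (None): whether the object is occluded\n attributes (None): a list of :class:`CVATAttribute` instances\n \"\"\"\n\n def __init__(\n self, label, xtl, ytl, xbr, ybr, occluded=None, attributes=None\n ):\n self.label = label\n self.xtl = xtl\n self.ytl = ytl\n self.xbr = xbr\n self.ybr = ybr\n CVATImageAnno.__init__(self, occluded=occluded, attributes=attributes)\n\n def to_detection(self, frame_size):\n \"\"\"Returns a :class:`fiftyone.core.labels.Detection` representation of\n the box.\n\n Args:\n frame_size: the ``(width, height)`` of the image\n\n Returns:\n a :class:`fiftyone.core.labels.Detection`\n \"\"\"\n label = self.label\n\n width, height = frame_size\n bounding_box = [\n self.xtl / width,\n self.ytl / height,\n (self.xbr - self.xtl) / width,\n (self.ybr - self.ytl) / height,\n ]\n\n attributes = self._to_attributes()\n\n return fol.Detection(\n label=label, bounding_box=bounding_box, **attributes\n )\n\n @classmethod\n def from_detection(cls, detection, metadata):\n \"\"\"Creates a :class:`CVATImageBox` from a\n :class:`fiftyone.core.labels.Detection`.\n\n Args:\n detection: a 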
:class:`fiftyone.core.labels.Detection`\n metadata: a :class:`fiftyone.core.metadata.ImageMetadata` for the\n image\n\n Returns:\n a :class:`CVATImageBox`\n \"\"\"\n label = detection.label\n\n width = metadata.width\n height = metadata.height\n x, y, w, h = detection.bounding_box\n xtl = int(round(x * width))\n ytl = int(round(y * height))\n xbr = int(round((x + w) * width))\n ybr = int(round((y + h) * height))\n\n occluded, attributes = cls._parse_attributes(detection)\n\n return cls(\n label, xtl, ytl, xbr, ybr, occluded=occluded, attributes=attributes\n )\n\n @classmethod\n def from_box_dict(cls, d):\n \"\"\"Creates a :class:`CVATImageBox` from a ``<box>`` tag of a CVAT image\n annotation XML file.\n\n Args:\n d: a dict representation of a ``<box>`` tag\n\n Returns:\n a :class:`CVATImageBox`\n \"\"\"\n label = d[\"@label\"]\n\n xtl = int(round(float(d[\"@xtl\"])))\n ytl = int(round(float(d[\"@ytl\"])))\n xbr = int(round(float(d[\"@xbr\"])))\n ybr = int(round(float(d[\"@ybr\"])))\n\n occluded, attributes = cls._parse_anno_dict(d)\n\n return cls(\n label, xtl, ytl, xbr, ybr, occluded=occluded, attributes=attributes\n )\n\n\nclass CVATImagePolygon(CVATImageAnno, HasCVATPoints):\n \"\"\"A polygon in CVAT image format.\n\n Args:\n label: the polygon label string\n points: a list of ``(x, y)`` pixel coordinates defining the vertices of\n the polygon\n occluded (None): whether the polygon is occluded\n attributes (None): a list of :class:`CVATAttribute` instances\n \"\"\"\n\n def __init__(self, label, points, occluded=None, attributes=None):\n self.label = label\n HasCVATPoints.__init__(self, points)\n CVATImageAnno.__init__(self, occluded=occluded, attributes=attributes)\n\n def to_polyline(self, frame_size):\n \"\"\"Returns a :class:`fiftyone.core.labels.Polyline` representation of\n the polygon.\n\n Args:\n frame_size: the ``(width, height)`` of the image\n\n Returns:\n a :class:`fiftyone.core.labels.Polyline`\n \"\"\"\n label = self.label\n points = self._to_rel_points(self.points, frame_size)\n attributes = self._to_attributes()\n return fol.Polyline(\n label=label,\n points=[points],\n closed=True,\n filled=True,\n **attributes,\n )\n\n @classmethod\n def from_polyline(cls, polyline, metadata):\n \"\"\"Creates a :class:`CVATImagePolygon` from a\n :class:`fiftyone.core.labels.Polyline`.\n\n If the :class:`fiftyone.core.labels.Polyline` is composed of multiple\n shapes, one :class:`CVATImagePolygon` per shape will be generated.\n\n Args:\n polyline: a :class:`fiftyone.core.labels.Polyline`\n metadata: a :class:`fiftyone.core.metadata.ImageMetadata` for the\n image\n\n Returns:\n a list of :class:`CVATImagePolygon` instances\n \"\"\"\n label = polyline.label\n\n if len(polyline.points) > 1:\n msg = (\n \"Found polyline with more than one shape; generating separate \"\n \"annotations for each shape\"\n )\n warnings.warn(msg)\n\n frame_size = (metadata.width, metadata.height)\n occluded, attributes = cls._parse_attributes(polyline)\n\n polylines = []\n for points in polyline.points:\n abs_points = cls._to_abs_points(points, frame_size)\n polylines.append(\n cls(\n label, abs_points, occluded=occluded, attributes=attributes\n )\n )\n\n return polylines\n\n @classmethod\n def from_polygon_dict(cls, d):\n \"\"\"Creates a :class:`CVATImagePolygon` from a ``<polygon>`` tag of a\n CVAT image annotation XML file.\n\n Args:\n d: a dict representation of a ``<polygon>`` tag\n\n Returns:\n a :class:`CVATImagePolygon`\n \"\"\"\n label = d[\"@label\"]\n points = 
cls._parse_cvat_points_str(d[\"@points\"])\n occluded, attributes = cls._parse_anno_dict(d)\n\n return cls(label, points, occluded=occluded, attributes=attributes)\n\n\nclass CVATImagePolyline(CVATImageAnno, HasCVATPoints):\n \"\"\"A polyline in CVAT image format.\n\n Args:\n label: the polyline label string\n points: a list of ``(x, y)`` pixel coordinates defining the vertices of\n the polyline\n occluded (None): whether the polyline is occluded\n attributes (None): a list of :class:`CVATAttribute` instances\n \"\"\"\n\n def __init__(self, label, points, occluded=None, attributes=None):\n self.label = label\n HasCVATPoints.__init__(self, points)\n CVATImageAnno.__init__(self, occluded=occluded, attributes=attributes)\n\n def to_polyline(self, frame_size):\n \"\"\"Returns a :class:`fiftyone.core.labels.Polyline` representation of\n the polyline.\n\n Args:\n frame_size: the ``(width, height)`` of the image\n\n Returns:\n a :class:`fiftyone.core.labels.Polyline`\n \"\"\"\n label = self.label\n points = self._to_rel_points(self.points, frame_size)\n attributes = self._to_attributes()\n return fol.Polyline(\n label=label,\n points=[points],\n closed=False,\n filled=False,\n **attributes,\n )\n\n @classmethod\n def from_polyline(cls, polyline, metadata):\n \"\"\"Creates a :class:`CVATImagePolyline` from a\n :class:`fiftyone.core.labels.Polyline`.\n\n If the :class:`fiftyone.core.labels.Polyline` is composed of multiple\n shapes, one :class:`CVATImagePolyline` per shape will be generated.\n\n Args:\n polyline: a :class:`fiftyone.core.labels.Polyline`\n metadata: a :class:`fiftyone.core.metadata.ImageMetadata` for the\n image\n\n Returns:\n a list of :class:`CVATImagePolyline` instances\n \"\"\"\n label = polyline.label\n\n if len(polyline.points) > 1:\n msg = (\n \"Found polyline with more than one shape; generating separate \"\n \"annotations for each shape\"\n )\n warnings.warn(msg)\n\n frame_size = (metadata.width, metadata.height)\n occluded, attributes = cls._parse_attributes(polyline)\n\n polylines = []\n for points in polyline.points:\n abs_points = cls._to_abs_points(points, frame_size)\n if abs_points and polyline.closed:\n abs_points.append(copy(abs_points[0]))\n\n polylines.append(\n cls(\n label, abs_points, occluded=occluded, attributes=attributes\n )\n )\n\n return polylines\n\n @classmethod\n def from_polyline_dict(cls, d):\n \"\"\"Creates a :class:`CVATImagePolyline` from a ``<polyline>`` tag of a\n CVAT image annotation XML file.\n\n Args:\n d: a dict representation of a ``<polyline>`` tag\n\n Returns:\n a :class:`CVATImagePolyline`\n \"\"\"\n label = d[\"@label\"]\n points = cls._parse_cvat_points_str(d[\"@points\"])\n occluded, attributes = cls._parse_anno_dict(d)\n\n return cls(label, points, occluded=occluded, attributes=attributes)\n\n\nclass CVATImagePoints(CVATImageAnno, HasCVATPoints):\n \"\"\"A set of keypoints in CVAT image format.\n\n Args:\n label: the keypoints label string\n points: a list of ``(x, y)`` pixel coordinates defining the vertices of\n the keypoints\n occluded (None): whether the keypoints are occluded\n attributes (None): a list of :class:`CVATAttribute` instances\n \"\"\"\n\n def __init__(self, label, points, occluded=None, attributes=None):\n self.label = label\n HasCVATPoints.__init__(self, points)\n CVATImageAnno.__init__(self, occluded=occluded, attributes=attributes)\n\n def to_keypoint(self, frame_size):\n \"\"\"Returns a :class:`fiftyone.core.labels.Keypoint` representation of\n the points.\n\n Args:\n frame_size: the ``(width, height)`` of the 
image\n\n Returns:\n a :class:`fiftyone.core.labels.Keypoint`\n \"\"\"\n label = self.label\n points = self._to_rel_points(self.points, frame_size)\n attributes = self._to_attributes()\n return fol.Keypoint(label=label, points=points, **attributes)\n\n @classmethod\n def from_keypoint(cls, keypoint, metadata):\n \"\"\"Creates a :class:`CVATImagePoints` from a\n :class:`fiftyone.core.labels.Keypoint`.\n\n Args:\n keypoint: a :class:`fiftyone.core.labels.Keypoint`\n metadata: a :class:`fiftyone.core.metadata.ImageMetadata` for the\n image\n\n Returns:\n a :class:`CVATImagePoints`\n \"\"\"\n label = keypoint.label\n\n frame_size = (metadata.width, metadata.height)\n points = cls._to_abs_points(keypoint.points, frame_size)\n\n occluded, attributes = cls._parse_attributes(keypoint)\n\n return cls(label, points, occluded=occluded, attributes=attributes)\n\n @classmethod\n def from_points_dict(cls, d):\n \"\"\"Creates a :class:`CVATImagePoints` from a ``<points>`` tag of a\n CVAT image annotation XML file.\n\n Args:\n d: a dict representation of a ``<points>`` tag\n\n Returns:\n a :class:`CVATImagePoints`\n \"\"\"\n label = d[\"@label\"]\n points = cls._parse_cvat_points_str(d[\"@points\"])\n occluded, attributes = cls._parse_anno_dict(d)\n return cls(label, points, occluded=occluded, attributes=attributes)\n\n\nclass CVATTrack(object):\n \"\"\"An annotation track in CVAT video format.\n\n Args:\n id: the ID of the track\n label: the label for the track\n width: the width of the video frames, in pixels\n height: the height of the video frames, in pixels\n boxes (None): a dict mapping frame numbers to :class:`CVATVideoBox`\n instances\n polygons (None): a dict mapping frame numbers to\n :class:`CVATVideoPolygon` instances\n polylines (None): a dict mapping frame numbers to\n :class:`CVATVideoPolyline` instances\n points (None): a dict mapping frame numbers to :class:`CVATVideoPoints`\n instances\n \"\"\"\n\n def __init__(\n self,\n id,\n label,\n width,\n height,\n boxes=None,\n polygons=None,\n polylines=None,\n points=None,\n ):\n self.id = id\n self.label = label\n self.width = width\n self.height = height\n self.boxes = boxes or {}\n self.polygons = polygons or {}\n self.polylines = polylines or {}\n self.points = points or {}\n\n @property\n def has_boxes(self):\n \"\"\"Whether this track has 2D boxes.\"\"\"\n return bool(self.boxes)\n\n @property\n def has_polylines(self):\n \"\"\"Whether this track has polygons or polylines.\"\"\"\n return bool(self.polygons) or bool(self.polylines)\n\n @property\n def has_points(self):\n \"\"\"Whether this track has keypoints.\"\"\"\n return bool(self.points)\n\n def iter_annos(self):\n \"\"\"Returns an iterator over the annotations in the track.\n\n Returns:\n an iterator that emits :class:`CVATVideoAnno` instances\n \"\"\"\n return itertools.chain(\n self.boxes.values(),\n self.polygons.values(),\n self.polylines.values(),\n self.points.values(),\n )\n\n def to_labels(self):\n \"\"\"Returns :class:`fiftyone.core.labels.Label` representations of the\n annotations.\n\n Returns:\n a dict mapping frame numbers to\n :class:`fiftyone.core.labels.Label` instances\n \"\"\"\n frame_size = (self.width, self.height)\n\n labels = {}\n\n # Only one of these will actually contain labels\n\n for frame_number, box in self.boxes.items():\n if box.outside != 1:\n detection = box.to_detection(frame_size)\n detection.index = self.id\n labels[frame_number + 1] = detection\n\n for frame_number, polygon in self.polygons.items():\n if polygon.outside != 1:\n polyline = 
polygon.to_polyline(frame_size)\n polyline.index = self.id\n labels[frame_number + 1] = polyline\n\n for frame_number, polyline in self.polylines.items():\n if polyline.outside != 1:\n polyline = polyline.to_polyline(frame_size)\n polyline.index = self.id\n labels[frame_number + 1] = polyline\n\n for frame_number, points in self.points.items():\n if points.outside != 1:\n keypoint = points.to_keypoint(frame_size)\n keypoint.index = self.id\n labels[frame_number + 1] = keypoint\n\n return labels\n\n @classmethod\n def from_labels(cls, id, labels, frame_size):\n \"\"\"Creates a :class:`CVATTrack` from a dictionary of labels.\n\n Args:\n id: the ID of the track\n labels: a dict mapping frame numbers to\n :class:`fiftyone.core.labels.Label` instances\n frame_size: the ``(width, height)`` of the video frames\n\n Returns:\n a :class:`CVATTrack`\n \"\"\"\n width, height = frame_size\n\n boxes = {}\n polygons = {}\n polylines = {}\n points = {}\n label = None\n for fn, _label in labels.items():\n label = _label.label\n\n if isinstance(_label, fol.Detection):\n boxes[fn - 1] = CVATVideoBox.from_detection(\n fn, _label, frame_size\n )\n elif isinstance(_label, fol.Polyline):\n if _label.filled:\n polygons[fn - 1] = CVATVideoPolygon.from_polyline(\n fn, _label, frame_size\n )\n else:\n polylines[fn - 1] = CVATVideoPolyline.from_polyline(\n fn, _label, frame_size\n )\n elif isinstance(_label, fol.Keypoint):\n points[fn - 1] = CVATVideoPoints.from_keypoint(\n fn, _label, frame_size\n )\n elif _label is not None:\n msg = \"Ignoring unsupported label type '%s'\" % _label.__class__\n warnings.warn(msg)\n\n # CVAT uses `outside=1` to mark the end of track segments, while\n # FiftyOne implicitly represents this by missing labels. So, we need to\n # convert to CVAT format here\n cls._add_outside_shapes(boxes)\n cls._add_outside_shapes(polygons)\n cls._add_outside_shapes(polylines)\n cls._add_outside_shapes(points)\n\n return cls(\n id,\n label,\n width,\n height,\n boxes=boxes,\n polygons=polygons,\n polylines=polylines,\n points=points,\n )\n\n @classmethod\n def from_track_dict(cls, d, frame_size):\n \"\"\"Creates a :class:`CVATTrack` from a ``<track>`` tag of a CVAT video\n annotation XML file.\n\n Args:\n d: a dict representation of an ``<track>`` tag\n frame_size: the ``(width, height)`` of the video frames\n\n Returns:\n a :class:`CVATTrack`\n \"\"\"\n id = d[\"@id\"]\n label = d[\"@label\"]\n\n width, height = frame_size\n\n boxes = {}\n for bd in _ensure_list(d.get(\"box\", [])):\n box = CVATVideoBox.from_box_dict(label, bd)\n boxes[box.frame] = box\n\n polygons = {}\n for pd in _ensure_list(d.get(\"polygon\", [])):\n polygon = CVATVideoPolygon.from_polygon_dict(label, pd)\n polygons[polygon.frame] = polygon\n\n polylines = {}\n for pd in _ensure_list(d.get(\"polyline\", [])):\n polyline = CVATVideoPolyline.from_polyline_dict(label, pd)\n polylines[polyline.frame] = polyline\n\n points = {}\n for pd in _ensure_list(d.get(\"points\", [])):\n point = CVATVideoPoints.from_points_dict(label, pd)\n points[point.frame] = point\n\n return cls(\n id,\n label,\n width,\n height,\n boxes=boxes,\n polygons=polygons,\n polylines=polylines,\n points=points,\n )\n\n @staticmethod\n def _add_outside_shapes(shapes):\n if not shapes:\n return\n\n use_keyframes = any(s.keyframe for s in shapes.values())\n\n def _make_outside_shape(shape):\n shape = deepcopy(shape)\n shape.outside = 1\n if use_keyframes:\n shape.keyframe = 1\n\n return shape\n\n # Add \"outside\" shapes to represent gaps of >= 1 frame in tracks\n fns = 
sorted(shapes.keys())\n last_fn = fns[0]\n for fn in fns:\n if fn > last_fn + 1:\n shapes[last_fn + 1] = _make_outside_shape(shapes[last_fn])\n\n last_fn = fn\n\n # Always add an \"outside\" shape to the end of each track\n shapes[last_fn + 1] = _make_outside_shape(shapes[last_fn])\n\n\nclass CVATVideoAnno(object):\n \"\"\"Mixin for annotations in CVAT video format.\n\n Args:\n outside (None): whether the object is outside (invisible)\n occluded (None): whether the object is occluded\n keyframe (None): whether the frame is a keyframe\n attributes (None): a list of :class:`CVATAttribute` instances\n \"\"\"\n\n def __init__(\n self, outside=None, occluded=None, keyframe=None, attributes=None\n ):\n self.outside = outside\n self.occluded = occluded\n self.keyframe = keyframe\n self.attributes = attributes or []\n\n def _to_attributes(self):\n attributes = {a.name: a.value for a in self.attributes}\n\n # We don't include `outside` here because shapes marked as `outside`\n # are completely omitted\n\n if self.occluded == 1:\n attributes[\"occluded\"] = True\n\n if self.keyframe == 1:\n attributes[\"keyframe\"] = True\n\n return attributes\n\n @staticmethod\n def _parse_attributes(label):\n attrs = dict(label.iter_attributes())\n\n outside = 0 # any FiftyOne label is implicitly not `outside`\n occluded = _to_int_bool(attrs.pop(\"occluded\", None))\n keyframe = _to_int_bool(attrs.pop(\"keyframe\", None))\n\n attributes = [\n CVATAttribute(k, v)\n for k, v in attrs.items()\n if _is_supported_attribute_type(v)\n ]\n\n return outside, occluded, keyframe, attributes\n\n @staticmethod\n def _parse_anno_dict(d):\n outside = _from_int_bool(d.get(\"@outside\", None))\n occluded = _from_int_bool(d.get(\"@occluded\", None))\n keyframe = _from_int_bool(d.get(\"@keyframe\", None))\n\n attributes = []\n for attr in _ensure_list(d.get(\"attribute\", [])):\n if \"#text\" in attr:\n name = attr[\"@name\"].lstrip(\"@\")\n if name == \"label_id\":\n # We assume that this is a `label_id` exported from an\n # CVAT annotation run created by our annotation API, which\n # should be ignored since we're not using the API here\n continue\n\n value = _parse_value(attr[\"#text\"])\n attributes.append(CVATAttribute(name, value))\n\n return outside, occluded, keyframe, attributes\n\n\nclass CVATVideoBox(CVATVideoAnno):\n \"\"\"An object bounding box in CVAT video format.\n\n Args:\n frame: the 0-based frame number\n label: the object label string\n xtl: the top-left x-coordinate of the box, in pixels\n ytl: the top-left y-coordinate of the box, in pixels\n xbr: the bottom-right x-coordinate of the box, in pixels\n ybr: the bottom-right y-coordinate of the box, in pixels\n outside (None): whether the object is outside (invisible)\n occluded (None): whether the object is occluded\n keyframe (None): whether the frame is a keyframe\n attributes (None): a list of :class:`CVATAttribute` instances\n \"\"\"\n\n def __init__(\n self,\n frame,\n label,\n xtl,\n ytl,\n xbr,\n ybr,\n outside=None,\n occluded=None,\n keyframe=None,\n attributes=None,\n ):\n self.frame = frame\n self.label = label\n self.xtl = xtl\n self.ytl = ytl\n self.xbr = xbr\n self.ybr = ybr\n CVATVideoAnno.__init__(\n self,\n outside=outside,\n occluded=occluded,\n keyframe=keyframe,\n attributes=attributes,\n )\n\n def to_detection(self, frame_size):\n \"\"\"Returns a :class:`fiftyone.core.labels.Detection` representation of\n the box.\n\n Args:\n frame_size: the ``(width, height)`` of the video frames\n\n Returns:\n a :class:`fiftyone.core.labels.Detection`\n 
\"\"\"\n label = self.label\n\n width, height = frame_size\n bounding_box = [\n self.xtl / width,\n self.ytl / height,\n (self.xbr - self.xtl) / width,\n (self.ybr - self.ytl) / height,\n ]\n\n attributes = self._to_attributes()\n\n return fol.Detection(\n label=label, bounding_box=bounding_box, **attributes\n )\n\n @classmethod\n def from_detection(cls, frame_number, detection, frame_size):\n \"\"\"Creates a :class:`CVATVideoBox` from a\n :class:`fiftyone.core.labels.Detection`.\n\n Args:\n frame_number: the frame number\n detection: a :class:`fiftyone.core.labels.Detection`\n frame_size: the ``(width, height)`` of the video frames\n\n Returns:\n a :class:`CVATVideoBox`\n \"\"\"\n frame = frame_number - 1\n label = detection.label\n\n width, height = frame_size\n x, y, w, h = detection.bounding_box\n xtl = int(round(x * width))\n ytl = int(round(y * height))\n xbr = int(round((x + w) * width))\n ybr = int(round((y + h) * height))\n\n outside, occluded, keyframe, attributes = cls._parse_attributes(\n detection\n )\n\n return cls(\n frame,\n label,\n xtl,\n ytl,\n xbr,\n ybr,\n outside=outside,\n occluded=occluded,\n keyframe=keyframe,\n attributes=attributes,\n )\n\n @classmethod\n def from_box_dict(cls, label, d):\n \"\"\"Creates a :class:`CVATVideoBox` from a ``<box>`` tag of a CVAT video\n annotation XML file.\n\n Args:\n label: the object label\n d: a dict representation of a ``<box>`` tag\n\n Returns:\n a :class:`CVATVideoBox`\n \"\"\"\n frame = int(d[\"@frame\"])\n\n xtl = int(round(float(d[\"@xtl\"])))\n ytl = int(round(float(d[\"@ytl\"])))\n xbr = int(round(float(d[\"@xbr\"])))\n ybr = int(round(float(d[\"@ybr\"])))\n\n outside, occluded, keyframe, attributes = cls._parse_anno_dict(d)\n\n return cls(\n frame,\n label,\n xtl,\n ytl,\n xbr,\n ybr,\n outside=outside,\n occluded=occluded,\n keyframe=keyframe,\n attributes=attributes,\n )\n\n\nclass CVATVideoPolygon(CVATVideoAnno, HasCVATPoints):\n \"\"\"A polygon in CVAT video format.\n\n Args:\n frame: the 0-based frame number\n label: the polygon label string\n points: a list of ``(x, y)`` pixel coordinates defining the vertices of\n the polygon\n outside (None): whether the polygon is outside (invisible)\n occluded (None): whether the polygon is occluded\n keyframe (None): whether the frame is a keyframe\n attributes (None): a list of :class:`CVATAttribute` instances\n \"\"\"\n\n def __init__(\n self,\n frame,\n label,\n points,\n outside=None,\n occluded=None,\n keyframe=None,\n attributes=None,\n ):\n self.frame = frame\n self.label = label\n HasCVATPoints.__init__(self, points)\n CVATVideoAnno.__init__(\n self,\n outside=outside,\n occluded=occluded,\n keyframe=keyframe,\n attributes=attributes,\n )\n\n def to_polyline(self, frame_size):\n \"\"\"Returns a :class:`fiftyone.core.labels.Polyline` representation of\n the polygon.\n\n Args:\n frame_size: the ``(width, height)`` of the video frames\n\n Returns:\n a :class:`fiftyone.core.labels.Polyline`\n \"\"\"\n label = self.label\n points = self._to_rel_points(self.points, frame_size)\n attributes = self._to_attributes()\n return fol.Polyline(\n label=label,\n points=[points],\n closed=True,\n filled=True,\n **attributes,\n )\n\n @classmethod\n def from_polyline(cls, frame_number, polyline, frame_size):\n \"\"\"Creates a :class:`CVATVideoPolygon` from a\n :class:`fiftyone.core.labels.Polyline`.\n\n Args:\n frame_number: the frame number\n polyline: a :class:`fiftyone.core.labels.Polyline`\n frame_size: the ``(width, height)`` of the video frames\n\n Returns:\n a 
:class:`CVATVideoPolygon`\n \"\"\"\n frame = frame_number - 1\n label = polyline.label\n\n points = _get_single_polyline_points(polyline)\n points = cls._to_abs_points(points, frame_size)\n\n outside, occluded, keyframe, attributes = cls._parse_attributes(\n polyline\n )\n\n return cls(\n frame,\n label,\n points,\n outside=outside,\n occluded=occluded,\n keyframe=keyframe,\n attributes=attributes,\n )\n\n @classmethod\n def from_polygon_dict(cls, label, d):\n \"\"\"Creates a :class:`CVATVideoPolygon` from a ``<polygon>`` tag of a\n CVAT video annotation XML file.\n\n Args:\n label: the object label\n d: a dict representation of a ``<polygon>`` tag\n\n Returns:\n a :class:`CVATVideoPolygon`\n \"\"\"\n frame = int(d[\"@frame\"])\n points = cls._parse_cvat_points_str(d[\"@points\"])\n outside, occluded, keyframe, attributes = cls._parse_anno_dict(d)\n return cls(\n frame,\n label,\n points,\n outside=outside,\n occluded=occluded,\n keyframe=keyframe,\n attributes=attributes,\n )\n\n\nclass CVATVideoPolyline(CVATVideoAnno, HasCVATPoints):\n \"\"\"A polyline in CVAT video format.\n\n Args:\n frame: the 0-based frame number\n label: the polyline label string\n points: a list of ``(x, y)`` pixel coordinates defining the vertices of\n the polyline\n outside (None): whether the polyline is outside (invisible)\n occluded (None): whether the polyline is occluded\n keyframe (None): whether the frame is a keyframe\n attributes (None): a list of :class:`CVATAttribute` instances\n \"\"\"\n\n def __init__(\n self,\n frame,\n label,\n points,\n outside=None,\n occluded=None,\n keyframe=None,\n attributes=None,\n ):\n self.frame = frame\n self.label = label\n HasCVATPoints.__init__(self, points)\n CVATVideoAnno.__init__(\n self,\n outside=outside,\n occluded=occluded,\n keyframe=keyframe,\n attributes=attributes,\n )\n\n def to_polyline(self, frame_size):\n \"\"\"Returns a :class:`fiftyone.core.labels.Polyline` representation of\n the polyline.\n\n Args:\n frame_size: the ``(width, height)`` of the video frames\n\n Returns:\n a :class:`fiftyone.core.labels.Polyline`\n \"\"\"\n label = self.label\n points = self._to_rel_points(self.points, frame_size)\n attributes = self._to_attributes()\n return fol.Polyline(\n label=label,\n points=[points],\n closed=False,\n filled=False,\n **attributes,\n )\n\n @classmethod\n def from_polyline(cls, frame_number, polyline, frame_size):\n \"\"\"Creates a :class:`CVATVideoPolyline` from a\n :class:`fiftyone.core.labels.Polyline`.\n\n Args:\n frame_number: the frame number\n polyline: a :class:`fiftyone.core.labels.Polyline`\n frame_size: the ``(width, height)`` of the video frames\n\n Returns:\n a :class:`CVATVideoPolyline`\n \"\"\"\n frame = frame_number - 1\n label = polyline.label\n\n points = _get_single_polyline_points(polyline)\n points = cls._to_abs_points(points, frame_size)\n if points and polyline.closed:\n points.append(copy(points[0]))\n\n outside, occluded, keyframe, attributes = cls._parse_attributes(\n polyline\n )\n\n return cls(\n frame,\n label,\n points,\n outside=outside,\n occluded=occluded,\n keyframe=keyframe,\n attributes=attributes,\n )\n\n @classmethod\n def from_polyline_dict(cls, label, d):\n \"\"\"Creates a :class:`CVATVideoPolyline` from a ``<polyline>`` tag of a\n CVAT video annotation XML file.\n\n Args:\n label: the object label\n d: a dict representation of a ``<polyline>`` tag\n\n Returns:\n a :class:`CVATVideoPolyline`\n \"\"\"\n frame = int(d[\"@frame\"])\n points = cls._parse_cvat_points_str(d[\"@points\"])\n outside, occluded, 
keyframe, attributes = cls._parse_anno_dict(d)\n return cls(\n frame,\n label,\n points,\n outside=outside,\n occluded=occluded,\n keyframe=keyframe,\n attributes=attributes,\n )\n\n\nclass CVATVideoPoints(CVATVideoAnno, HasCVATPoints):\n \"\"\"A set of keypoints in CVAT video format.\n\n Args:\n frame: the 0-based frame number\n label: the keypoints label string\n points: a list of ``(x, y)`` pixel coordinates defining the keypoints\n outside (None): whether the keypoints is outside (invisible)\n occluded (None): whether the keypoints are occluded\n keyframe (None): whether the frame is a keyframe\n attributes (None): a list of :class:`CVATAttribute` instances\n \"\"\"\n\n def __init__(\n self,\n frame,\n label,\n points,\n outside=None,\n occluded=None,\n keyframe=None,\n attributes=None,\n ):\n self.frame = frame\n self.label = label\n HasCVATPoints.__init__(self, points)\n CVATVideoAnno.__init__(\n self,\n outside=outside,\n occluded=occluded,\n keyframe=keyframe,\n attributes=attributes,\n )\n\n def to_keypoint(self, frame_size):\n \"\"\"Returns a :class:`fiftyone.core.labels.Keypoint` representation of\n the points.\n\n Args:\n frame_size: the ``(width, height)`` of the video frames\n\n Returns:\n a :class:`fiftyone.core.labels.Keypoint`\n \"\"\"\n label = self.label\n points = self._to_rel_points(self.points, frame_size)\n attributes = self._to_attributes()\n return fol.Keypoint(label=label, points=points, **attributes)\n\n @classmethod\n def from_keypoint(cls, frame_number, keypoint, frame_size):\n \"\"\"Creates a :class:`CVATVideoPoints` from a\n :class:`fiftyone.core.labels.Keypoint`.\n\n Args:\n frame_number: the frame number\n keypoint: a :class:`fiftyone.core.labels.Keypoint`\n frame_size: the ``(width, height)`` of the video frames\n\n Returns:\n a :class:`CVATVideoPoints`\n \"\"\"\n frame = frame_number - 1\n label = keypoint.label\n points = cls._to_abs_points(keypoint.points, frame_size)\n outside, occluded, keyframe, attributes = cls._parse_attributes(\n keypoint\n )\n return cls(\n frame,\n label,\n points,\n outside=outside,\n occluded=occluded,\n keyframe=keyframe,\n attributes=attributes,\n )\n\n @classmethod\n def from_points_dict(cls, label, d):\n \"\"\"Creates a :class:`CVATVideoPoints` from a ``<points>`` tag of a\n CVAT video annotation XML file.\n\n Args:\n label: the object label\n d: a dict representation of a ``<points>`` tag\n\n Returns:\n a :class:`CVATVideoPoints`\n \"\"\"\n frame = int(d[\"@frame\"])\n points = cls._parse_cvat_points_str(d[\"@points\"])\n outside, occluded, keyframe, attributes = cls._parse_anno_dict(d)\n return cls(\n frame,\n label,\n points,\n outside=outside,\n occluded=occluded,\n keyframe=keyframe,\n attributes=attributes,\n )\n\n\nclass CVATAttribute(object):\n \"\"\"An attribute in CVAT image format.\n\n Args:\n name: the attribute name\n value: the attribute value\n \"\"\"\n\n def __init__(self, name, value):\n self.name = name\n self.value = value\n\n def to_eta_attribute(self):\n \"\"\"Returns an ``eta.core.data.Attribute`` representation of the\n attribute.\n\n Returns:\n an ``eta.core.data.Attribute``\n \"\"\"\n if isinstance(self.value, bool):\n return etad.BooleanAttribute(self.name, self.value)\n\n if etau.is_numeric(self.value):\n return etad.NumericAttribute(self.name, self.value)\n\n return etad.CategoricalAttribute(self.name, self.value)\n\n def to_attribute(self):\n \"\"\"Returns a :class:`fiftyone.core.labels.Attribute` representation of\n the attribute.\n Returns:\n a :class:`fiftyone.core.labels.Attribute`\n 
\"\"\"\n if isinstance(self.value, bool):\n return fol.BooleanAttribute(value=self.value)\n\n if etau.is_numeric(self.value):\n return fol.NumericAttribute(value=self.value)\n\n return fol.CategoricalAttribute(value=self.value)\n\n\nclass CVATImageAnnotationWriter(object):\n \"\"\"Class for writing annotations in CVAT image format.\n\n See :ref:`this page <CVATImageDataset-export>` for format details.\n \"\"\"\n\n def __init__(self):\n environment = jinja2.Environment(\n loader=jinja2.FileSystemLoader(foc.RESOURCES_DIR),\n trim_blocks=True,\n lstrip_blocks=True,\n )\n self.template = environment.get_template(\n \"cvat_image_annotation_template.xml\"\n )\n\n def write(\n self, cvat_task_labels, cvat_images, xml_path, id=None, name=None\n ):\n \"\"\"Writes the annotations to disk.\n\n Args:\n cvat_task_labels: a :class:`CVATTaskLabels` instance\n cvat_images: a list of :class:`CVATImage` instances\n xml_path: the path to write the annotations XML file\n id (None): an ID for the task\n name (None): a name for the task\n \"\"\"\n now = datetime.now().isoformat()\n xml_str = self.template.render(\n {\n \"id\": id,\n \"name\": name,\n \"size\": len(cvat_images),\n \"created\": now,\n \"updated\": now,\n \"labels\": cvat_task_labels.labels,\n \"dumped\": now,\n \"images\": cvat_images,\n }\n )\n etau.write_file(xml_str, xml_path)\n\n\nclass CVATVideoAnnotationWriter(object):\n \"\"\"Class for writing annotations in CVAT video format.\n\n See :ref:`this page <CVATVideoDataset-export>` for format details.\n \"\"\"\n\n def __init__(self):\n environment = jinja2.Environment(\n loader=jinja2.FileSystemLoader(foc.RESOURCES_DIR),\n trim_blocks=True,\n lstrip_blocks=True,\n )\n self.template = environment.get_template(\n \"cvat_video_interpolation_template.xml\"\n )\n\n def write(\n self,\n cvat_task_labels,\n cvat_tracks,\n metadata,\n xml_path,\n id=None,\n name=None,\n ):\n \"\"\"Writes the annotations to disk.\n\n Args:\n cvat_task_labels: a :class:`CVATTaskLabels` instance\n cvat_tracks: a list of :class:`CVATTrack` instances\n metadata: the :class:`fiftyone.core.metadata.VideoMetadata`\n instance for the video\n xml_path: the path to write the annotations XML file\n id (None): an ID for the task\n name (None): a name for the task\n \"\"\"\n now = datetime.now().isoformat()\n xml_str = self.template.render(\n {\n \"id\": id,\n \"name\": name,\n \"size\": metadata.total_frame_count,\n \"created\": now,\n \"updated\": now,\n \"width\": metadata.frame_width,\n \"height\": metadata.frame_height,\n \"labels\": cvat_task_labels.labels,\n \"dumped\": now,\n \"tracks\": cvat_tracks,\n }\n )\n etau.write_file(xml_str, xml_path)\n\n\nclass CVATBackendConfig(foua.AnnotationBackendConfig):\n \"\"\"Base class for configuring :class:`CVATBackend` instances.\n\n Args:\n name: the name of the backend\n label_schema: a dictionary containing the description of label fields,\n classes and attribute to annotate\n media_field (\"filepath\"): string field name containing the paths to\n media files on disk to upload\n url (None): the url of the CVAT server\n username (None): the CVAT username\n password (None): the CVAT password\n segment_size (None): maximum number of images per job. Not applicable\n to videos\n image_quality (75): an int in `[0, 100]` determining the image quality\n to upload to CVAT\n use_cache (True): whether to use a cache when uploading data. 
Using a\n cache reduces task creation time as data will be processed\n on-the-fly and stored in the cache when requested\n use_zip_chunks (True): when annotating videos, whether to upload video\n frames in smaller chunks. Setting this option to ``False`` may\n result in reduced video quality in CVAT due to size limitations on\n ZIP files that can be uploaded to CVAT\n chunk_size (None): the number of frames to upload per ZIP chunk\n task_assignee (None): the username(s) to which the task(s) were\n assigned. This argument can be a list of usernames when annotating\n videos as each video is uploaded to a separate task\n job_assignees (None): a list of usernames to which jobs were assigned\n job_reviewers (None): a list of usernames to which job reviews were\n assigned\n project_name (None): an optional project name to which to upload the\n created CVAT task. If a project with this name is found, it will be\n used, otherwise a new project with this name is created. By\n default, no project is used\n project_id (None): an optional ID of an existing CVAT project to which\n to upload the annotation tasks. By default, no project is used\n occluded_attr (None): an optional attribute name containing existing\n occluded values and/or in which to store downloaded occluded values\n for all objects in the annotation run\n \"\"\"\n\n def __init__(\n self,\n name,\n label_schema,\n media_field=\"filepath\",\n url=None,\n username=None,\n password=None,\n segment_size=None,\n image_quality=75,\n use_cache=True,\n use_zip_chunks=True,\n chunk_size=None,\n task_assignee=None,\n job_assignees=None,\n job_reviewers=None,\n project_name=None,\n project_id=None,\n occluded_attr=None,\n **kwargs,\n ):\n super().__init__(name, label_schema, media_field=media_field, **kwargs)\n self.url = url\n self.segment_size = segment_size\n self.image_quality = image_quality\n self.use_cache = use_cache\n self.use_zip_chunks = use_zip_chunks\n self.chunk_size = chunk_size\n self.task_assignee = task_assignee\n self.job_assignees = job_assignees\n self.job_reviewers = job_reviewers\n self.project_name = project_name\n self.project_id = project_id\n self.occluded_attr = occluded_attr\n\n # store privately so these aren't serialized\n self._username = username\n self._password = password\n\n @property\n def username(self):\n return self._username\n\n @username.setter\n def username(self, value):\n self._username = value\n\n @property\n def password(self):\n return self._password\n\n @password.setter\n def password(self, value):\n self._password = value\n\n\nclass CVATBackend(foua.AnnotationBackend):\n \"\"\"Class for interacting with the CVAT annotation backend.\"\"\"\n\n @property\n def supported_label_types(self):\n return [\n \"classification\",\n \"classifications\",\n \"detection\",\n \"detections\",\n \"instance\",\n \"instances\",\n \"polyline\",\n \"polylines\",\n \"polygon\",\n \"polygons\",\n \"keypoint\",\n \"keypoints\",\n \"segmentation\",\n \"scalar\",\n ]\n\n @property\n def supported_scalar_types(self):\n return [\n fof.IntField,\n fof.FloatField,\n fof.StringField,\n fof.BooleanField,\n ]\n\n @property\n def supported_attr_types(self):\n return [\n \"text\",\n \"select\",\n \"radio\",\n \"checkbox\",\n \"occluded\",\n ]\n\n @property\n def supports_keyframes(self):\n return True\n\n @property\n def requires_label_schema(self):\n return False # schemas can be inferred from existing CVAT projects\n\n def recommend_attr_tool(self, name, value):\n if isinstance(value, bool):\n if name == \"occluded\":\n return 
{\"type\": \"occluded\"}\n\n return {\"type\": \"checkbox\"}\n\n return {\"type\": \"text\"}\n\n def requires_attr_values(self, attr_type):\n return attr_type in (\"select\", \"radio\")\n\n def connect_to_api(self):\n return CVATAnnotationAPI(\n self.config.name,\n self.config.url,\n username=self.config.username,\n password=self.config.password,\n )\n\n def upload_annotations(self, samples, launch_editor=False):\n api = self.connect_to_api()\n\n logger.info(\"Uploading samples to CVAT...\")\n results = api.upload_samples(samples, self)\n logger.info(\"Upload complete\")\n\n if launch_editor:\n results.launch_editor()\n\n return results\n\n def download_annotations(self, results):\n api = self.connect_to_api()\n\n logger.info(\"Downloading labels from CVAT...\")\n annotations = api.download_annotations(results)\n logger.info(\"Download complete\")\n\n return annotations
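\n\n\n# Example: a typical CVAT annotation round trip via FiftyOne's annotation\n# API, which produces an instance of the CVATAnnotationResults class defined\n# below (the anno_key and field names here are hypothetical):\n#\n# anno_key = \"cvat_demo\"\n# view.annotate(anno_key, backend=\"cvat\", label_field=\"ground_truth\")\n#\n# # ... annotate in the CVAT editor ...\n#\n# results = view.load_annotation_results(anno_key)\n# results.print_status()\n# view.load_annotations(anno_key)\n# results.cleanup()\n\n\nclass CVATAnnotationResults(foua.AnnotationResults):\n \"\"\"Class that stores all relevant information needed to monitor the\n progress of an annotation run sent to CVAT and download the results.\n \"\"\"\n\n def __init__(\n self,\n samples,\n config,\n id_map,\n server_id_map,\n project_ids,\n task_ids,\n job_ids,\n frame_id_map,\n labels_task_map,\n backend=None,\n ):\n super().__init__(samples, config, id_map, backend=backend)\n\n self.server_id_map = server_id_map\n self.project_ids = project_ids\n self.task_ids = task_ids\n self.job_ids = job_ids\n self.frame_id_map = frame_id_map\n self.labels_task_map = labels_task_map\n\n def load_credentials(self, url=None, username=None, password=None):\n \"\"\"Load the CVAT credentials from the given keyword arguments or the\n FiftyOne annotation config.\n\n Args:\n url (None): the url of the CVAT server\n username (None): the CVAT username\n password (None): the CVAT password\n \"\"\"\n self._load_config_parameters(\n url=url, username=username, password=password\n )\n\n def connect_to_api(self):\n \"\"\"Returns an API instance connected to the CVAT server.\n\n Returns:\n a :class:`CVATAnnotationAPI`\n \"\"\"\n return self._backend.connect_to_api()\n\n def launch_editor(self):\n \"\"\"Launches the CVAT editor and loads the first task for this\n annotation run.\n \"\"\"\n api = self.connect_to_api()\n task_id = self.task_ids[0]\n job_ids = self.job_ids\n\n if job_ids and job_ids[task_id]:\n editor_url = api.base_job_url(task_id, job_ids[task_id][0])\n else:\n editor_url = api.base_task_url(task_id)\n\n logger.info(\"Launching editor at '%s'...\", editor_url)\n api.launch_editor(url=editor_url)\n\n def get_status(self):\n \"\"\"Gets the status of the assigned tasks and jobs.\n\n Returns:\n a dict of status information\n \"\"\"\n return self._get_status()\n\n def print_status(self):\n \"\"\"Print the status of the assigned tasks and jobs.\"\"\"\n self._get_status(log=True)\n\n def cleanup(self):\n \"\"\"Deletes all tasks associated with this annotation run and any created\n projects from the CVAT server.\n \"\"\"\n api = self.connect_to_api()\n\n if self.task_ids:\n logger.info(\"Deleting tasks...\")\n api.delete_tasks(self.task_ids)\n\n if self.project_ids:\n projects_to_delete = api.get_empty_projects(self.project_ids)\n if projects_to_delete:\n logger.info(\"Deleting projects...\")\n api.delete_projects(projects_to_delete)\n\n # @todo save updated results to DB?\n self.project_ids = []\n self.task_ids = []\n self.job_ids = {}\n\n def _get_status(self, log=False):\n api = self.connect_to_api()\n\n status = {}\n for label_field, task_ids in self.labels_task_map.items():\n if 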
log:\n logger.info(\"\\nStatus for label field '%s':\\n\", label_field)\n\n status[label_field] = {}\n\n for task_id in task_ids:\n task_url = api.task_url(task_id)\n\n try:\n task_json = api.get(task_url).json()\n except:\n logger.warning(\n \"\\tFailed to get info for task '%d' at %s\",\n task_id,\n task_url,\n )\n continue\n\n task_name = task_json[\"name\"]\n task_status = task_json[\"status\"]\n task_assignee = task_json[\"assignee\"]\n task_updated = task_json[\"updated_date\"]\n\n if log:\n logger.info(\n \"\\tTask %d (%s):\\n\"\n \"\\t\\tStatus: %s\\n\"\n \"\\t\\tAssignee: %s\\n\"\n \"\\t\\tLast updated: %s\\n\"\n \"\\t\\tURL: %s\\n\",\n task_id,\n task_name,\n task_status,\n task_assignee,\n task_updated,\n api.base_task_url(task_id),\n )\n\n jobs_info = {}\n for job_id in self.job_ids[task_id]:\n job_url = api.taskless_job_url(job_id)\n\n try:\n job_json = api.get(job_url).json()\n except:\n logger.warning(\n \"\\t\\tFailed to get info for job '%d' at %s\",\n job_id,\n job_url,\n )\n continue\n\n jobs_info[job_id] = job_json\n\n if log:\n logger.info(\n \"\\t\\tJob %d:\\n\"\n \"\\t\\t\\tStatus: %s\\n\"\n \"\\t\\t\\tAssignee: %s\\n\"\n \"\\t\\t\\tReviewer: %s\\n\",\n job_id,\n job_json[\"status\"],\n job_json[\"assignee\"],\n job_json[\"reviewer\"],\n )\n\n status[label_field][task_id] = {\n \"name\": task_name,\n \"status\": task_status,\n \"assignee\": task_assignee,\n \"last_updated\": task_updated,\n \"jobs\": jobs_info,\n }\n\n return status\n\n @classmethod\n def _from_dict(cls, d, samples, config):\n # int keys were serialized as strings...\n job_ids = {int(task_id): ids for task_id, ids in d[\"job_ids\"].items()}\n frame_id_map = {\n int(task_id): {\n int(frame_id): frame_data\n for frame_id, frame_data in frame_map.items()\n }\n for task_id, frame_map in d[\"frame_id_map\"].items()\n }\n\n return cls(\n samples,\n config,\n d[\"id_map\"],\n d.get(\"server_id_map\", {}),\n d.get(\"project_ids\", []),\n d[\"task_ids\"],\n job_ids,\n frame_id_map,\n d[\"labels_task_map\"],\n )\n\n\nclass CVATAnnotationAPI(foua.AnnotationAPI):\n \"\"\"A class to facilitate connection to and management of tasks in CVAT.\n\n On initialization, this class constructs a session based on the provided\n server url and credentials.\n\n This API provides methods to easily get, put, post, patch, and delete tasks\n and jobs through the formatted urls specified by the CVAT REST API.\n\n Additionally, samples and label schemas can be uploaded and annotations\n downloaded through this class.\n\n Args:\n name: the name of the backend\n url: url of the CVAT server\n username (None): the CVAT username\n password (None): the CVAT password\n \"\"\"\n\n def __init__(self, name, url, username=None, password=None):\n self._name = name\n self._url = url\n self._username = username\n self._password = password\n\n self._session = None\n self._user_id_map = {}\n self._project_id_map = {}\n\n self._setup()\n\n @property\n def base_url(self):\n return self._url\n\n @property\n def base_api_url(self):\n return \"%s/api/v1\" % self.base_url\n\n @property\n def login_url(self):\n return \"%s/auth/login\" % self.base_api_url\n\n @property\n def users_url(self):\n return \"%s/users\" % self.base_api_url\n\n @property\n def projects_url(self):\n return \"%s/projects\" % self.base_api_url\n\n def project_url(self, project_id):\n return \"%s/%d\" % (self.projects_url, project_id)\n\n @property\n def tasks_url(self):\n return \"%s/tasks\" % self.base_api_url\n\n def task_url(self, task_id):\n return \"%s/%d\" % (self.tasks_url, 
task_id)\n\n def task_data_url(self, task_id):\n return \"%s/data\" % self.task_url(task_id)\n\n def task_data_meta_url(self, task_id):\n return \"%s/data/meta\" % self.task_url(task_id)\n\n def task_annotation_url(self, task_id):\n return \"%s/annotations\" % self.task_url(task_id)\n\n def task_annotation_formatted_url(\n self, task_id, anno_filepath, anno_format=\"CVAT 1.1\",\n ):\n return \"%s/annotations?format=%s&filename=%s\" % (\n self.task_url(task_id),\n anno_format,\n anno_filepath,\n )\n\n def jobs_url(self, task_id):\n return \"%s/jobs\" % self.task_url(task_id)\n\n def job_url(self, task_id, job_id):\n return \"%s/%d\" % (self.jobs_url(task_id), job_id)\n\n def taskless_job_url(self, job_id):\n return \"%s/jobs/%d\" % (self.base_api_url, job_id)\n\n def base_task_url(self, task_id):\n return \"%s/tasks/%d\" % (self.base_url, task_id)\n\n def base_job_url(self, task_id, job_id):\n return \"%s/tasks/%d/jobs/%d\" % (self.base_url, task_id, job_id)\n\n def user_search_url(self, username):\n return \"%s/users?search=%s\" % (self.base_api_url, username)\n\n def project_search_url(self, project_name):\n return \"%s/projects?search=%s\" % (self.base_api_url, project_name)\n\n def project_id_search_url(self, project_id):\n return \"%s/projects?id=%d\" % (self.base_api_url, project_id)\n\n def _setup(self):\n if not self._url:\n raise ValueError(\n \"You must provide/configure the `url` of the CVAT server\"\n )\n\n username = self._username\n password = self._password\n\n if username is None or password is None:\n username, password = self._prompt_username_password(\n self._name, username=username, password=password\n )\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n self._session = requests.Session()\n response = self.post(\n self.login_url, data={\"username\": username, \"password\": password}\n )\n\n if \"csrftoken\" in response.cookies:\n self._session.headers[\"X-CSRFToken\"] = response.cookies[\n \"csrftoken\"\n ]\n\n def get(self, url, **kwargs):\n \"\"\"Sends a GET request to the given CVAT API URL.\n\n Args:\n url: the url\n **kwargs: additional request parameters\n\n Returns:\n the request response\n \"\"\"\n response = self._session.get(url, verify=False, **kwargs)\n self._validate(response, kwargs)\n return response\n\n def patch(self, url, **kwargs):\n \"\"\"Sends a PATCH request to the given CVAT API URL.\n\n Args:\n url: the url\n **kwargs: additional request parameters\n\n Returns:\n the request response\n \"\"\"\n response = self._session.patch(url, verify=False, **kwargs)\n self._validate(response, kwargs)\n return response\n\n def post(self, url, **kwargs):\n \"\"\"Sends a POST request to the given CVAT API URL.\n\n Args:\n url: the url\n **kwargs: additional request parameters\n\n Returns:\n the request response\n \"\"\"\n response = self._session.post(url, verify=False, **kwargs)\n self._validate(response, kwargs)\n return response\n\n def put(self, url, **kwargs):\n \"\"\"Sends a PUT request to the given CVAT API URL.\n\n Args:\n url: the url\n **kwargs: additional request parameters\n\n Returns:\n the request response\n \"\"\"\n response = self._session.put(url, verify=False, **kwargs)\n self._validate(response, kwargs)\n return response\n\n def delete(self, url, **kwargs):\n \"\"\"Sends a DELETE request to the given CVAT API URL.\n\n Args:\n url: the url to send the request to\n **kwargs: additional request parameters\n\n Returns:\n the request response\n \"\"\"\n response = self._session.delete(url, verify=False, **kwargs)\n 
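# A standalone sketch of the login handshake that _setup() performs, re-created
# with plain requests; the server URL and credentials are placeholders, and
# verify=False mirrors the self-signed-certificate tolerance used above.
import requests
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

base_url = "https://cvat.example.com"  # placeholder
session = requests.Session()
resp = session.post(
    "%s/api/v1/auth/login" % base_url,
    data={"username": "user", "password": "pass"},
    verify=False,
)
if "csrftoken" in resp.cookies:
    # Django-backed CVAT expects the CSRF token on subsequent mutating requests
    session.headers["X-CSRFToken"] = resp.cookies["csrftoken"]

task_json = session.get("%s/api/v1/tasks/%d" % (base_url, 42), verify=False).json()
print(task_json["name"], task_json["status"])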
self._validate(response, kwargs)\n return response\n\n def _get_value_from_search(\n self, search_url_fcn, target, target_key, value_key\n ):\n search_url = search_url_fcn(target)\n resp = self.get(search_url).json()\n for info in resp[\"results\"]:\n if info[target_key] == target:\n return info[value_key]\n\n return None\n\n def _get_value_update_map(\n self, name, id_map, result_name, search_url_fcn, name_type\n ):\n if name is None:\n return None\n\n if name in id_map:\n return id_map[name]\n\n _id = self._get_value_from_search(\n search_url_fcn, name, result_name, \"id\"\n )\n\n if _id is not None:\n id_map[name] = _id\n\n return _id\n\n def get_user_id(self, username):\n \"\"\"Retrieves the CVAT user ID for the given username.\n\n Args:\n username: the username\n\n Returns:\n the user ID, or None if the user was not found\n \"\"\"\n user_id = self._get_value_update_map(\n username,\n self._user_id_map,\n \"username\",\n self.user_search_url,\n \"User\",\n )\n\n if username is not None and user_id is None:\n logger.warning(\"User '%s' not found\", username)\n\n return user_id\n\n def get_project_id(self, project_name):\n \"\"\"Retrieves the CVAT project ID for the first instance of the given\n project name.\n\n Args:\n project_name: the name of the project\n\n Returns:\n the project ID, or None if no project with the given name was found\n \"\"\"\n return self._get_value_update_map(\n project_name,\n self._project_id_map,\n \"name\",\n self.project_search_url,\n \"Project\",\n )\n\n def get_project_name(self, project_id):\n \"\"\"Retrieves the CVAT project name for the given project ID.\n\n Args:\n project_id: the ID of the project\n\n Returns:\n the project name, or None if no project with the given ID was found\n \"\"\"\n id_map = {i: n for n, i in self._project_id_map.items()}\n project_name = id_map.get(project_id)\n if project_name:\n return project_name\n\n return self._get_value_from_search(\n self.project_id_search_url, project_id, \"id\", \"name\",\n )\n\n def get_empty_projects(self, project_ids):\n \"\"\"Check all given project ids to determine if they are empty or if\n they contain at least one task.\n\n Args:\n project_ids: a list of project ids to check\n\n Returns:\n a list of empty project ids\n \"\"\"\n return [pid for pid in project_ids if self._is_empty_project(pid)]\n\n def _is_empty_project(self, project_id):\n resp = self.get(self.project_url(project_id)).json()\n return not resp[\"tasks\"]\n\n def create_project(self, name, schema=None):\n \"\"\"Creates a project on the CVAT server using the given label schema.\n\n Args:\n name: a name for the project\n schema (None): the label schema to use for the created project\n\n Returns:\n the ID of the created project in CVAT\n \"\"\"\n if schema is None:\n schema = {}\n\n labels = [\n {\"name\": name, \"attributes\": list(attributes.values())}\n for name, attributes in schema.items()\n ]\n\n project_json = {\n \"name\": name,\n \"labels\": labels,\n }\n\n project_resp = self.post(self.projects_url, json=project_json).json()\n return project_resp[\"id\"]\n\n def create_task(\n self,\n name,\n schema=None,\n segment_size=None,\n image_quality=75,\n task_assignee=None,\n project_id=None,\n ):\n \"\"\"Creates a task on the CVAT server using the given label schema.\n\n Args:\n name: a name for the task\n schema (None): the label schema to use for the created task\n segment_size (None): maximum number of images to load into a job.\n Not applicable to videos\n image_quality (75): an int in `[0, 100]` determining the image\n 
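# A sketch of the JSON payload shape that create_project() posts, built from a
# hypothetical schema; attribute specs follow the CVAT v1 format used in this
# module.
schema = {
    "vehicle": {
        "type": {
            "name": "type",
            "input_type": "select",
            "mutable": True,
            "values": ["car", "truck"],
        },
    },
}
labels = [
    {"name": name, "attributes": list(attributes.values())}
    for name, attributes in schema.items()
]
project_json = {"name": "my_project", "labels": labels}
assert project_json["labels"][0]["name"] == "vehicle"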
quality to upload to CVAT\n task_assignee (None): the username to assign the created task(s)\n project_id (None): the ID of a project to which to upload the task\n\n Returns:\n a tuple of\n\n - **task_id**: the ID of the created task in CVAT\n - **class_id_map**: a dictionary mapping class names to the IDs\n assigned to them by CVAT\n - **attr_id_map**: a dictionary mapping attribute names to the IDs\n assigned to them by CVAT for every class\n \"\"\"\n task_json = {\n \"name\": name,\n \"image_quality\": image_quality,\n }\n\n if project_id is not None:\n task_json.update({\"labels\": [], \"project_id\": project_id})\n else:\n if schema is None:\n schema = {}\n\n labels = [\n {\"name\": name, \"attributes\": list(attributes.values())}\n for name, attributes in schema.items()\n ]\n\n task_json.update({\"labels\": labels})\n\n if segment_size is not None:\n task_json[\"segment_size\"] = segment_size\n\n task_resp = self.post(self.tasks_url, json=task_json).json()\n task_id = task_resp[\"id\"]\n\n class_id_map = {}\n attr_id_map = {}\n for label in task_resp[\"labels\"]:\n class_id = label[\"id\"]\n class_id_map[label[\"name\"]] = class_id\n attr_id_map[class_id] = {}\n for attr in label[\"attributes\"]:\n attr_name = attr[\"name\"]\n attr_id = attr[\"id\"]\n attr_id_map[class_id][attr_name] = attr_id\n\n if task_assignee is not None:\n user_id = self.get_user_id(task_assignee)\n if user_id is not None:\n task_patch = {\"assignee_id\": user_id}\n self.patch(self.task_url(task_id), json=task_patch)\n\n return task_id, class_id_map, attr_id_map\n\n def delete_project(self, project_id):\n \"\"\"Deletes the given project from the CVAT server.\n\n Args:\n project_id: the project ID\n \"\"\"\n self.delete(self.project_url(project_id))\n\n def delete_projects(self, project_ids):\n \"\"\"Deletes the given projects from the CVAT server.\n\n Args:\n project_ids: an iterable of project IDs\n \"\"\"\n with fou.ProgressBar() as pb:\n for project_id in pb(list(project_ids)):\n self.delete_project(project_id)\n\n def delete_task(self, task_id):\n \"\"\"Deletes the given task from the CVAT server.\n\n Args:\n task_id: the task ID\n \"\"\"\n self.delete(self.task_url(task_id))\n\n def delete_tasks(self, task_ids):\n \"\"\"Deletes the given tasks from the CVAT server.\n\n Args:\n task_ids: an iterable of task IDs\n \"\"\"\n with fou.ProgressBar() as pb:\n for task_id in pb(list(task_ids)):\n self.delete_task(task_id)\n\n def launch_editor(self, url=None):\n \"\"\"Launches the CVAT editor in your default web browser.\n\n Args:\n url (None): an optional URL to open. By default, the base URL of\n the server is opened\n \"\"\"\n if url is None:\n url = self.base_url\n\n webbrowser.open(url, new=2)\n\n def upload_data(\n self,\n task_id,\n paths,\n image_quality=75,\n use_cache=True,\n use_zip_chunks=True,\n chunk_size=None,\n job_assignees=None,\n job_reviewers=None,\n ):\n \"\"\"Uploads a list of media to the task with the given ID.\n\n Args:\n task_id: the task ID\n paths: a list of media paths to upload\n image_quality (75): an int in `[0, 100]` determining the image\n quality to upload to CVAT\n use_cache (True): whether to use a cache when uploading data. Using\n a cache reduces task creation time as data will be processed\n on-the-fly and stored in the cache when requested\n use_zip_chunks (True): when annotating videos, whether to upload\n video frames in smaller chunks. 
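# A worked example of how create_task() folds the task response into its ID
# maps; the response dict is hypothetical but mirrors the parsing loop above.
task_resp = {
    "id": 7,
    "labels": [
        {"id": 1, "name": "cat", "attributes": [{"id": 10, "name": "label_id"}]},
        {"id": 2, "name": "dog", "attributes": [{"id": 11, "name": "label_id"}]},
    ],
}
class_id_map = {label["name"]: label["id"] for label in task_resp["labels"]}
attr_id_map = {
    label["id"]: {attr["name"]: attr["id"] for attr in label["attributes"]}
    for label in task_resp["labels"]
}
assert class_id_map == {"cat": 1, "dog": 2}
assert attr_id_map == {1: {"label_id": 10}, 2: {"label_id": 11}}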
Setting this option to\n ``False`` may result in reduced video quality in CVAT due to\n size limitations on ZIP files that can be uploaded to CVAT\n chunk_size (None): the number of frames to upload per ZIP chunk\n job_assignees (None): a list of usernames to assign jobs\n job_reviewers (None): a list of usernames to assign job reviews\n\n Returns:\n a list of the job IDs created for the task\n \"\"\"\n data = {\n \"image_quality\": image_quality,\n \"use_cache\": use_cache,\n \"use_zip_chunks\": use_zip_chunks,\n }\n\n if chunk_size:\n data[\"chunk_size\"] = chunk_size\n\n files = {}\n for idx, path in enumerate(paths):\n # IMPORTANT: CVAT organizes media within a task alphabetically by\n # filename, so we must give CVAT filenames whose alphabetical order\n # matches the order of `paths`\n filename = \"%06d_%s\" % (idx, os.path.basename(path))\n files[\"client_files[%d]\" % idx] = (filename, open(path, \"rb\"))\n\n self.post(self.task_data_url(task_id), data=data, files=files)\n\n # @todo is this loop really needed?\n job_ids = []\n while not job_ids:\n job_resp = self.get(self.jobs_url(task_id))\n job_ids = [j[\"id\"] for j in job_resp.json()]\n\n if job_assignees is not None:\n num_assignees = len(job_assignees)\n for idx, job_id in enumerate(job_ids):\n # Round robin strategy\n assignee = job_assignees[idx % num_assignees]\n\n user_id = self.get_user_id(assignee)\n if assignee is not None and user_id is not None:\n job_patch = {\"assignee_id\": user_id}\n self.patch(self.taskless_job_url(job_id), json=job_patch)\n\n if job_reviewers is not None:\n num_reviewers = len(job_reviewers)\n for idx, job_id in enumerate(job_ids):\n # Round robin strategy\n reviewer = job_reviewers[idx % num_reviewers]\n\n user_id = self.get_user_id(reviewer)\n if reviewer is not None and user_id is not None:\n job_patch = {\"reviewer_id\": user_id}\n self.patch(self.taskless_job_url(job_id), json=job_patch)\n\n return job_ids\n\n def upload_samples(self, samples, backend):\n \"\"\"Uploads the given samples to CVAT according to the given backend's\n annotation and server configuration.\n\n Args:\n samples: a :class:`fiftyone.core.collections.SampleCollection` to\n upload to CVAT\n backend: a :class:`CVATBackend` to use to perform the upload\n\n Returns:\n a :class:`CVATAnnotationResults`\n \"\"\"\n config = backend.config\n label_schema = config.label_schema\n occluded_attr = config.occluded_attr\n project_name, project_id = self._parse_project_details(\n config.project_name, config.project_id\n )\n\n # When using an existing project, we cannot support multiple label\n # fields of the same type, since it would not be clear which field\n # labels should be downloaded into\n if project_id is not None:\n self._ensure_one_field_per_type(label_schema)\n\n id_map = {}\n project_ids = []\n task_ids = []\n job_ids = {}\n frame_id_map = {}\n labels_task_map = {}\n\n num_samples = len(samples)\n batch_size = self._get_batch_size(samples)\n\n (\n cvat_schema,\n assign_scalar_attrs,\n occluded_attrs,\n _,\n ) = self._get_cvat_schema(\n label_schema, project_id=project_id, occluded_attr=occluded_attr\n )\n\n # When adding to an existing project, its label schema is inherited, so\n # we need to store the updated one\n if project_id is not None or occluded_attr is not None:\n config.label_schema = label_schema\n\n for idx, offset in enumerate(range(0, num_samples, batch_size)):\n samples_batch = samples[offset : (offset + batch_size)]\n anno_tags = []\n anno_shapes = []\n anno_tracks = []\n\n for label_field, label_info in 
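# The round-robin assignment strategy used above, in isolation with
# hypothetical job IDs and usernames.
job_ids = [101, 102, 103, 104, 105]
job_assignees = ["alice", "bob"]
assignments = {
    job_id: job_assignees[idx % len(job_assignees)]
    for idx, job_id in enumerate(job_ids)
}
assert assignments == {
    101: "alice",
    102: "bob",
    103: "alice",
    104: "bob",
    105: "alice",
}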
label_schema.items():\n _tags = []\n _shapes = []\n _tracks = []\n\n if label_field not in id_map:\n id_map[label_field] = {}\n\n if label_field not in labels_task_map:\n labels_task_map[label_field] = []\n\n if label_info.get(\"existing_field\", False):\n label_type = label_info[\"type\"]\n only_keyframes = label_info.get(\"only_keyframes\", False)\n\n self._update_shapes_tags_tracks(\n _tags,\n _shapes,\n _tracks,\n id_map,\n label_type,\n samples_batch,\n label_field,\n label_info,\n cvat_schema,\n assign_scalar_attrs,\n only_keyframes,\n occluded_attrs,\n )\n\n anno_tags.extend(_tags)\n anno_shapes.extend(_shapes)\n anno_tracks.extend(_tracks)\n\n # We must do this here because `cvat_schema` may be altered the\n # first time shapes are created\n if project_id is None and project_name is not None:\n project_id = self.create_project(project_name, cvat_schema)\n project_ids.append(project_id)\n\n task_name = (\n \"FiftyOne_%s\"\n % samples_batch._root_dataset.name.replace(\" \", \"_\")\n )\n task_id, class_id_map, attr_id_map = self._create_task_upload_data(\n config,\n idx,\n task_name,\n cvat_schema,\n project_id,\n samples_batch,\n task_ids,\n job_ids,\n frame_id_map,\n )\n\n for label_field in label_schema.keys():\n labels_task_map[label_field].append(task_id)\n\n server_id_map = self._upload_annotations(\n anno_shapes,\n anno_tags,\n anno_tracks,\n class_id_map,\n attr_id_map,\n task_id,\n )\n\n return CVATAnnotationResults(\n samples,\n config,\n id_map,\n server_id_map,\n project_ids,\n task_ids,\n job_ids,\n frame_id_map,\n labels_task_map,\n backend=backend,\n )\n\n def download_annotations(self, results):\n \"\"\"Download the annotations from the CVAT server for the given results\n instance and parses them into the appropriate FiftyOne types.\n\n Args:\n results: a :class:`CVATAnnotationResults`\n\n Returns:\n the annotations dict\n \"\"\"\n label_schema = results.config.label_schema\n occluded_attr = results.config.occluded_attr\n id_map = results.id_map\n server_id_map = results.server_id_map\n task_ids = results.task_ids\n frame_id_map = results.frame_id_map\n labels_task_map = results.labels_task_map\n\n _, project_id = self._parse_project_details(\n results.config.project_name, results.config.project_id\n )\n\n if results.project_ids:\n # This task created the project, so we know that `label_schema` is\n # already complete and we don't need `project_id` to help us here\n project_id = None\n\n (\n _,\n assigned_scalar_attrs,\n occluded_attrs,\n label_field_classes,\n ) = self._get_cvat_schema(\n label_schema, project_id=project_id, occluded_attr=occluded_attr\n )\n\n labels_task_map_rev = defaultdict(list)\n for lf, tasks in labels_task_map.items():\n for task in tasks:\n labels_task_map_rev[task].append(lf)\n\n annotations = {}\n\n for task_id in task_ids:\n # Download task data\n task_json = self.get(self.task_url(task_id)).json()\n attr_id_map = {}\n _class_map = {}\n labels = task_json[\"labels\"]\n for label in labels:\n _class_map[label[\"id\"]] = label[\"name\"]\n attr_id_map[label[\"id\"]] = {\n i[\"name\"]: i[\"id\"] for i in label[\"attributes\"]\n }\n\n _class_map_rev = {n: i for i, n in _class_map.items()}\n\n task_resp = self.get(self.task_annotation_url(task_id)).json()\n all_shapes = task_resp[\"shapes\"]\n all_tags = task_resp[\"tags\"]\n all_tracks = task_resp[\"tracks\"]\n\n data_resp = self.get(self.task_data_meta_url(task_id)).json()\n frames = data_resp[\"frames\"]\n\n label_fields = labels_task_map_rev[task_id]\n label_types = 
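# The batch slicing performed by the upload loop above, with hypothetical
# sizes; per _get_batch_size(), videos use batch_size=1 (one task per video)
# while images are all uploaded in a single batch.
num_samples, batch_size = 10, 3
batches = [
    (idx, offset, min(offset + batch_size, num_samples))
    for idx, offset in enumerate(range(0, num_samples, batch_size))
]
assert batches == [(0, 0, 3), (1, 3, 6), (2, 6, 9), (3, 9, 10)]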
self._get_return_label_types(\n label_schema, label_fields\n )\n\n for lf_ind, label_field in enumerate(label_fields):\n label_info = label_schema[label_field]\n label_type = label_info.get(\"type\", None)\n scalar_attrs = assigned_scalar_attrs.get(label_field, False)\n _occluded_attrs = occluded_attrs.get(label_field, {})\n _id_map = id_map.get(label_field, {})\n\n label_field_results = {}\n\n # Dict mapping class labels to the classes used in CVAT.\n # These are equal unless a class appears in multiple fields\n _classes = label_field_classes[label_field]\n\n # Maps CVAT IDs to FiftyOne labels\n class_map = {\n _class_map_rev[name_lf]: name\n for name, name_lf in _classes.items()\n }\n\n _cvat_classes = class_map.keys()\n tags, shapes, tracks = self._filter_field_classes(\n all_tags, all_shapes, all_tracks, _cvat_classes,\n )\n\n is_last_field = lf_ind == len(label_fields) - 1\n ignore_types = self._get_ignored_types(\n project_id, label_types, label_type, is_last_field\n )\n\n tag_results = self._parse_shapes_tags(\n \"tags\",\n tags,\n frame_id_map[task_id],\n label_type,\n _id_map,\n server_id_map.get(\"tags\", {}),\n class_map,\n attr_id_map,\n frames,\n ignore_types,\n assigned_scalar_attrs=scalar_attrs,\n )\n label_field_results = self._merge_results(\n label_field_results, tag_results\n )\n\n shape_results = self._parse_shapes_tags(\n \"shapes\",\n shapes,\n frame_id_map[task_id],\n label_type,\n _id_map,\n server_id_map.get(\"shapes\", {}),\n class_map,\n attr_id_map,\n frames,\n ignore_types,\n assigned_scalar_attrs=scalar_attrs,\n occluded_attrs=_occluded_attrs,\n )\n label_field_results = self._merge_results(\n label_field_results, shape_results\n )\n\n for track_index, track in enumerate(tracks, 1):\n label_id = track[\"label_id\"]\n shapes = track[\"shapes\"]\n for shape in shapes:\n shape[\"label_id\"] = label_id\n\n immutable_attrs = track[\"attributes\"]\n\n track_shape_results = self._parse_shapes_tags(\n \"track\",\n shapes,\n frame_id_map[task_id],\n label_type,\n _id_map,\n server_id_map.get(\"tracks\", {}),\n class_map,\n attr_id_map,\n frames,\n ignore_types,\n assigned_scalar_attrs=scalar_attrs,\n track_index=track_index,\n immutable_attrs=immutable_attrs,\n occluded_attrs=_occluded_attrs,\n )\n label_field_results = self._merge_results(\n label_field_results, track_shape_results\n )\n\n frames_metadata = {}\n for cvat_frame_id, frame_data in frame_id_map[task_id].items():\n sample_id = frame_data[\"sample_id\"]\n if \"frame_id\" in frame_data and len(frames) == 1:\n frames_metadata[sample_id] = frames[0]\n break\n\n frames_metadata[sample_id] = frames[cvat_frame_id]\n\n # Polyline(s) corresponding to instance/semantic masks need to\n # be converted to their final format\n self._convert_polylines_to_masks(\n label_field_results, label_info, frames_metadata\n )\n\n annotations = self._merge_results(\n annotations, {label_field: label_field_results}\n )\n\n return annotations\n\n def _get_project_labels(self, project_id):\n if self.get_project_name(project_id) is None:\n raise ValueError(\"Project '%s' not found\" % project_id)\n\n return self.get(self.project_url(project_id)).json()[\"labels\"]\n\n def _parse_project_details(self, project_name, project_id):\n if project_id is not None:\n project_name = self.get_project_name(project_id)\n if not project_name:\n raise ValueError(\"Project '%d' not found\" % project_id)\n\n elif project_name is not None:\n project_id = self.get_project_id(project_name)\n\n return project_name, project_id\n\n def _get_cvat_schema(\n 
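# The one-to-many map inversion built above, so each downloaded task knows
# which label fields it serves; values are hypothetical.
from collections import defaultdict

labels_task_map = {"ground_truth": [1, 2], "predictions": [2]}
labels_task_map_rev = defaultdict(list)
for label_field, task_ids in labels_task_map.items():
    for task_id in task_ids:
        labels_task_map_rev[task_id].append(label_field)

assert dict(labels_task_map_rev) == {
    1: ["ground_truth"],
    2: ["ground_truth", "predictions"],
}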
self, label_schema, project_id=None, occluded_attr=None\n ):\n if project_id is not None:\n return self._convert_cvat_schema(\n label_schema, project_id, occluded_attr=occluded_attr\n )\n\n return self._build_cvat_schema(\n label_schema, occluded_attr=occluded_attr\n )\n\n def _convert_cvat_schema(\n self, label_schema, project_id, occluded_attr=None\n ):\n labels = self._get_project_labels(project_id)\n\n cvat_schema = {}\n labels_to_update = []\n occluded_attrs = {}\n assign_scalar_attrs = {}\n classes_and_attrs = []\n for label in labels:\n name = label[\"name\"]\n attrs = label[\"attributes\"]\n cvat_schema[name] = {a[\"name\"]: a for a in attrs}\n\n if \"label_id\" not in cvat_schema[name]:\n labels_to_update.append(label)\n cvat_schema[name][\"label_id\"] = {\n \"name\": \"label_id\",\n \"input_type\": \"text\",\n \"mutable\": True,\n }\n\n label_attrs = {}\n for attr_name, attr in cvat_schema[name].items():\n if attr_name != \"label_id\":\n input_type = attr[\"input_type\"]\n label_attrs[attr_name] = {\"type\": input_type}\n default_value = attr[\"default_value\"]\n values = attr[\"values\"]\n if default_value:\n label_attrs[attr_name][\"default\"] = default_value\n\n if values and values[0] != \"\":\n label_attrs[attr_name][\"values\"] = values\n\n if occluded_attr is not None:\n label_attrs[occluded_attr] = {}\n\n classes_and_attrs.append(\n {\"classes\": [name], \"attributes\": label_attrs,}\n )\n\n label_field_classes = {}\n class_names = {n: n for n in cvat_schema.keys()}\n for label_field, label_info in label_schema.items():\n label_type = label_info.get(\"type\", None)\n classes = label_info.get(\"classes\", [])\n\n if label_type == \"scalar\":\n # True: scalars are annotated as tag attributes\n # False: scalars are annotated as tag labels\n assign_scalar_attrs[label_field] = not bool(classes)\n else:\n if label_type is not None:\n label_schema[label_field][\"attributes\"] = {}\n label_schema[label_field][\"classes\"] = classes_and_attrs\n\n assign_scalar_attrs[label_field] = None\n\n label_field_classes[label_field] = deepcopy(class_names)\n if occluded_attr is not None:\n occluded_attrs[label_field] = {\n c: occluded_attr for c in class_names.keys()\n }\n\n if labels_to_update:\n self._add_project_label_ids(project_id, list(labels_to_update))\n\n return (\n cvat_schema,\n assign_scalar_attrs,\n occluded_attrs,\n label_field_classes,\n )\n\n def _add_project_label_ids(self, project_id, labels):\n labels_patch = {\"labels\": []}\n for label in labels:\n label[\"attributes\"].append(\n {\"name\": \"label_id\", \"input_type\": \"text\", \"mutable\": True}\n )\n labels_patch[\"labels\"].append(label)\n\n self.patch(self.project_url(project_id), json=labels_patch)\n\n def _ensure_one_field_per_type(self, label_schema, verbose=True):\n _seen_label_types = []\n for label_field in list(label_schema.keys()): # list b/c we may edit\n if label_field is None:\n continue\n\n label_type = label_schema[label_field][\"type\"]\n if label_type == \"scalar\":\n _label_type = \"classifications\"\n else:\n _label_type = foua._RETURN_TYPES_MAP[label_type]\n\n if _label_type not in _seen_label_types:\n _seen_label_types.append(_label_type)\n elif verbose:\n label_schema.pop(label_field)\n logger.warning(\n \"A field with label type '%s' is already being annotated. 
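# Both schema paths in this module inject a mutable "label_id" text attribute
# into each CVAT label spec so that FiftyOne label IDs survive the CVAT round
# trip; a sketch of the injected spec:
label_id_attr = {"name": "label_id", "input_type": "text", "mutable": True}
assert label_id_attr["input_type"] == "text"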
\"\n \"Ignoring field '%s'...\",\n _label_type,\n label_field,\n )\n\n def _get_batch_size(self, samples):\n if samples.media_type == fom.VIDEO:\n # The current implementation (both upload and download) requires\n # frame IDs for all frames that might get labels\n samples.ensure_frames()\n\n # CVAT only allows for one video per task\n return 1\n\n samples.compute_metadata()\n\n # Put all image samples in one task\n return len(samples)\n\n def _create_task_upload_data(\n self,\n config,\n idx,\n task_name,\n cvat_schema,\n project_id,\n samples_batch,\n task_ids,\n job_ids,\n frame_id_map,\n ):\n media_field = config.media_field\n segment_size = config.segment_size\n image_quality = config.image_quality\n use_cache = config.use_cache\n use_zip_chunks = config.use_zip_chunks\n chunk_size = config.chunk_size\n task_assignee = config.task_assignee\n job_assignees = config.job_assignees\n job_reviewers = config.job_reviewers\n\n is_video = samples_batch.media_type == fom.VIDEO\n\n _task_assignee = task_assignee\n _job_assignees = job_assignees\n _job_reviewers = job_reviewers\n\n if is_video:\n # Videos are uploaded in multiple tasks with 1 job per task\n # Assign the correct users for the current task\n if job_assignees is not None:\n _job_assignees = [job_assignees[idx % len(job_assignees)]]\n\n if job_reviewers is not None:\n _job_reviewers = [job_reviewers[idx % len(job_reviewers)]]\n\n if task_assignee is not None:\n if isinstance(task_assignee, str):\n _task_assignee = task_assignee\n else:\n _task_assignee = task_assignee[idx % len(task_assignee)]\n\n # Create task\n task_id, class_id_map, attr_id_map = self.create_task(\n task_name,\n schema=cvat_schema,\n segment_size=segment_size,\n image_quality=image_quality,\n task_assignee=_task_assignee,\n project_id=project_id,\n )\n task_ids.append(task_id)\n\n # Upload media\n job_ids[task_id] = self.upload_data(\n task_id,\n samples_batch.values(media_field),\n image_quality=image_quality,\n use_cache=use_cache,\n use_zip_chunks=use_zip_chunks,\n chunk_size=chunk_size,\n job_assignees=_job_assignees,\n job_reviewers=_job_reviewers,\n )\n frame_id_map[task_id] = self._build_frame_id_map(samples_batch)\n\n return task_id, class_id_map, attr_id_map\n\n def _upload_annotations(\n self,\n anno_shapes,\n anno_tags,\n anno_tracks,\n class_id_map,\n attr_id_map,\n task_id,\n ):\n # Remap annotations to use the CVAT class/attribute IDs\n anno_shapes = self._remap_ids(anno_shapes, class_id_map, attr_id_map)\n anno_tags = self._remap_ids(anno_tags, class_id_map, attr_id_map)\n anno_tracks = self._remap_track_ids(\n anno_tracks, class_id_map, attr_id_map\n )\n\n anno_json = {\n \"version\": 0,\n \"shapes\": anno_shapes,\n \"tags\": anno_tags,\n \"tracks\": anno_tracks,\n }\n num_shapes = len(anno_shapes)\n num_tags = len(anno_tags)\n num_tracks = len(anno_tracks)\n\n # @todo is this loop really needed?\n num_uploaded_shapes = 0\n num_uploaded_tags = 0\n num_uploaded_tracks = 0\n anno_resp = {}\n while (\n num_uploaded_shapes != num_shapes\n or num_uploaded_tags != num_tags\n or num_uploaded_tracks != num_tracks\n ):\n anno_resp = self.put(\n self.task_annotation_url(task_id), json=anno_json\n ).json()\n num_uploaded_shapes = len(anno_resp[\"shapes\"])\n num_uploaded_tags = len(anno_resp[\"tags\"])\n num_uploaded_tracks = len(anno_resp[\"tracks\"])\n\n return self._create_server_id_map(anno_resp, attr_id_map)\n\n def _create_server_id_map(self, anno_resp, attr_id_map):\n label_id_map = {}\n for class_id, class_attr_map in attr_id_map.items():\n for 
attr_name, attr_id in class_attr_map.items():\n if attr_name == \"label_id\":\n label_id_map[class_id] = attr_id\n\n server_id_map = {}\n for anno_type, anno_list in anno_resp.items():\n if anno_type not in (\"tags\", \"shapes\", \"tracks\"):\n continue\n\n id_map = {}\n for anno in anno_list:\n server_id = anno[\"id\"]\n label_id = anno[\"label_id\"]\n if label_id in label_id_map:\n label_attr_id = label_id_map[label_id]\n for attr in anno[\"attributes\"]:\n if attr[\"spec_id\"] == label_attr_id:\n id_map[server_id] = attr[\"value\"]\n\n server_id_map[anno_type] = id_map\n\n return server_id_map\n\n def _update_shapes_tags_tracks(\n self,\n tags,\n shapes,\n tracks,\n id_map,\n label_type,\n samples_batch,\n label_field,\n label_info,\n cvat_schema,\n assign_scalar_attrs,\n only_keyframes,\n occluded_attrs,\n ):\n is_video = samples_batch.media_type == fom.VIDEO\n\n anno_tags = []\n anno_shapes = []\n anno_tracks = []\n\n if label_type in (\"classification\", \"classifications\", \"scalar\"):\n # Tag annotations\n _id_map, anno_tags = self._create_shapes_tags_tracks(\n samples_batch,\n label_field,\n label_info,\n cvat_schema,\n assign_scalar_attrs=assign_scalar_attrs,\n )\n elif is_video and label_type != \"segmentation\":\n # Video track annotations\n (\n _id_map,\n anno_shapes,\n anno_tracks,\n ) = self._create_shapes_tags_tracks(\n samples_batch,\n label_field,\n label_info,\n cvat_schema,\n load_tracks=True,\n only_keyframes=only_keyframes,\n occluded_attrs=occluded_attrs,\n )\n else:\n # Shape annotations\n _id_map, anno_shapes = self._create_shapes_tags_tracks(\n samples_batch,\n label_field,\n label_info,\n cvat_schema,\n occluded_attrs=occluded_attrs,\n )\n\n id_map[label_field].update(_id_map)\n tags.extend(anno_tags)\n shapes.extend(anno_shapes)\n tracks.extend(anno_tracks)\n\n def _filter_field_classes(self, tags, shapes, tracks, _cvat_classes):\n _tags = [t for t in tags if t[\"label_id\"] in _cvat_classes]\n _shapes = [s for s in shapes if s[\"label_id\"] in _cvat_classes]\n _tracks = [t for t in tracks if t[\"label_id\"] in _cvat_classes]\n return _tags, _shapes, _tracks\n\n def _get_return_label_types(self, label_schema, label_fields):\n label_types = []\n for label_field in label_fields:\n label_type = label_schema[label_field].get(\"type\", None)\n if label_type:\n label_types.append(foua._RETURN_TYPES_MAP[label_type])\n\n return label_types\n\n def _get_ignored_types(\n self, project_id, label_types, label_type, is_last_field\n ):\n \"\"\"When uploading multiple fields to an existing project, each field\n must have a different type but can have overlapping class names.\n Therefore, when loading annotations, if a field exists for a found\n label type, that label will not be loaded with any other fields.\n \"\"\"\n if not project_id or len(label_types) < 2:\n # Not relevant unless uploading to a project and there are multiple\n # types of labels\n return []\n\n # The last label field being loaded stores all unexpected label types\n # Ignore only the other label types that have been loaded\n label_type = foua._RETURN_TYPES_MAP[label_type]\n if is_last_field:\n ignored_types = set(label_types) - {label_type}\n else:\n # Other fields only load the expected type\n # Ignore all other types\n all_label_types = foua._RETURN_TYPES_MAP.values()\n ignored_types = set(all_label_types) - {label_type}\n\n return ignored_types\n\n def _convert_polylines_to_masks(\n self, results, label_info, frames_metadata\n ):\n for label_type, type_results in results.items():\n if label_type not in 
(\n \"detection\",\n \"detections\",\n \"instance\",\n \"instances\",\n \"segmentation\",\n ):\n continue\n\n for sample_id, sample_results in type_results.items():\n sample_metadata = frames_metadata[sample_id]\n frame_size = (\n sample_metadata[\"width\"],\n sample_metadata[\"height\"],\n )\n for _id, _content in sample_results.items():\n if isinstance(_content, dict):\n frame_id = _id\n frame_results = _content\n for label_id, label in frame_results.items():\n label = self._convert_polylines(\n label_id, label, label_info, frame_size\n )\n results[label_type][sample_id][frame_id][\n label_id\n ] = label\n else:\n label_id = _id\n label = self._convert_polylines(\n label_id, _content, label_info, frame_size\n )\n results[label_type][sample_id][label_id] = label\n\n def _convert_polylines(self, label_id, label, label_info, frame_size):\n # Convert Polyline to instance segmentation\n if isinstance(label, fol.Polyline):\n detection = CVATShape.polyline_to_detection(label, frame_size)\n detection._id = ObjectId(label_id)\n return detection\n\n # Convert Polylines to semantic segmentation\n if isinstance(label, fol.Polylines):\n mask_targets = label_info.get(\"mask_targets\", None)\n segmentation = CVATShape.polylines_to_segmentation(\n label, frame_size, mask_targets\n )\n segmentation._id = ObjectId(label_id)\n return segmentation\n\n return label\n\n def _merge_results(self, results, new_results):\n if isinstance(new_results, dict):\n for key, val in new_results.items():\n if key not in results:\n results[key] = val\n else:\n results[key] = self._merge_results(results[key], val)\n\n return results\n\n def _parse_shapes_tags(\n self,\n anno_type,\n annos,\n frame_id_map,\n label_type,\n id_map,\n server_id_map,\n class_map,\n attr_id_map,\n frames,\n ignore_types,\n assigned_scalar_attrs=False,\n track_index=None,\n immutable_attrs=None,\n occluded_attrs=None,\n ):\n results = {}\n prev_type = None\n\n # For filling in tracked objects\n prev_frame = None\n prev_outside = True\n\n if anno_type == \"track\":\n annos = _get_interpolated_shapes(annos)\n\n for anno in annos:\n frame = anno[\"frame\"]\n prev_anno = anno\n prev_frame = frame\n prev_outside = anno.get(\"outside\", True)\n\n if anno.get(\"outside\", False):\n # If a tracked object is not in the frame\n continue\n\n prev_type = self._parse_annotation(\n anno,\n results,\n anno_type,\n prev_type,\n frame_id_map,\n label_type,\n id_map,\n server_id_map,\n class_map,\n attr_id_map,\n frames,\n ignore_types,\n assigned_scalar_attrs=assigned_scalar_attrs,\n track_index=track_index,\n immutable_attrs=immutable_attrs,\n occluded_attrs=occluded_attrs,\n )\n\n # For non-outside tracked objects, the last track goes to the end of\n # the video, so fill remaining frames with copies of the last instance\n if prev_frame is not None and not prev_outside:\n for frame in range(prev_frame + 1, len(frame_id_map)):\n anno = deepcopy(prev_anno)\n anno[\"frame\"] = frame\n anno[\"keyframe\"] = False\n\n prev_type = self._parse_annotation(\n anno,\n results,\n anno_type,\n prev_type,\n frame_id_map,\n label_type,\n id_map,\n server_id_map,\n class_map,\n attr_id_map,\n frames,\n ignore_types,\n assigned_scalar_attrs=assigned_scalar_attrs,\n track_index=track_index,\n immutable_attrs=immutable_attrs,\n occluded_attrs=occluded_attrs,\n )\n\n return results\n\n def _parse_annotation(\n self,\n anno,\n results,\n anno_type,\n prev_type,\n frame_id_map,\n expected_label_type,\n id_map,\n server_id_map,\n class_map,\n attr_id_map,\n frames,\n ignore_types,\n 
assigned_scalar_attrs=False,\n track_index=None,\n immutable_attrs=None,\n occluded_attrs=None,\n ):\n frame = anno[\"frame\"]\n if len(frames) > frame:\n metadata = frames[frame]\n else:\n metadata = frames[0]\n\n if frame not in frame_id_map:\n return prev_type\n\n frame_data = frame_id_map[frame]\n sample_id = frame_data[\"sample_id\"]\n frame_id = frame_data.get(\"frame_id\", None)\n\n label = None\n\n if anno_type in (\"shapes\", \"track\"):\n shape_type = anno[\"type\"]\n keyframe = anno.get(\"keyframe\", False)\n\n if expected_label_type == \"scalar\" and assigned_scalar_attrs:\n # Shapes created with values, set class to value\n anno_attrs = anno[\"attributes\"]\n if anno_attrs and \"value\" in anno_attrs[0]:\n class_val = anno_attrs[0][\"value\"]\n anno[\"attributes\"] = []\n else:\n class_val = False\n\n cvat_shape = CVATShape(\n anno,\n class_map,\n attr_id_map,\n server_id_map,\n metadata,\n index=track_index,\n immutable_attrs=immutable_attrs,\n occluded_attrs=occluded_attrs,\n )\n\n # Non-keyframe annotations were interpolated from keyframes but\n # should not inherit their label IDs\n if anno_type == \"track\" and not keyframe:\n cvat_shape._id = None\n\n if shape_type == \"rectangle\":\n label_type = \"detections\"\n label = cvat_shape.to_detection()\n elif shape_type == \"polygon\":\n if expected_label_type == \"segmentation\":\n # A piece of a segmentation mask\n label_type = \"segmentation\"\n label = cvat_shape.to_polyline(closed=True, filled=True)\n elif expected_label_type in (\n \"detection\",\n \"detections\",\n \"instance\",\n \"instances\",\n ):\n # A piece of an instance mask\n label_type = \"detections\"\n label = cvat_shape.to_polyline(closed=True, filled=True)\n else:\n # A regular polyline or polygon\n if expected_label_type in (\"polyline\", \"polylines\"):\n filled = False\n else:\n filled = True\n\n label_type = \"polylines\"\n label = cvat_shape.to_polyline(closed=True, filled=filled)\n elif shape_type == \"polyline\":\n label_type = \"polylines\"\n label = cvat_shape.to_polyline()\n elif shape_type == \"points\":\n label_type = \"keypoints\"\n label = cvat_shape.to_keypoint()\n\n if keyframe:\n label[\"keyframe\"] = True\n\n if expected_label_type == \"scalar\" and assigned_scalar_attrs:\n if class_val and label is not None:\n label.label = class_val\n\n if anno_type == \"tags\":\n if expected_label_type == \"scalar\":\n label_type = \"scalar\"\n if assigned_scalar_attrs:\n num_attrs = len(anno[\"attributes\"])\n attr_ind = 0\n while label is None and attr_ind < num_attrs:\n label = _parse_value(\n anno[\"attributes\"][attr_ind][\"value\"]\n )\n attr_ind += 1\n if label is not None:\n if prev_type is str:\n label = str(label)\n\n if prev_type is None:\n prev_type = type(label)\n elif not isinstance(label, prev_type):\n msg = (\n \"Ignoring scalar of type %s that does not \"\n \"match previously inferred scalar type %s\"\n ) % (type(label), prev_type)\n warnings.warn(msg)\n label = None\n else:\n label = class_map[anno[\"label_id\"]]\n else:\n label_type = \"classifications\"\n cvat_tag = CVATTag(anno, class_map, attr_id_map, server_id_map)\n label = cvat_tag.to_classification()\n\n if label is None or label_type in ignore_types:\n return prev_type\n\n if label_type not in results:\n results[label_type] = {}\n\n if sample_id not in results[label_type]:\n results[label_type][sample_id] = {}\n\n if (\n frame_id is not None\n and frame_id not in results[label_type][sample_id]\n ):\n results[label_type][sample_id][frame_id] = {}\n\n if label_type == 
\"segmentation\":\n seg_id = self._get_segmentation_id(id_map, sample_id, frame_id)\n else:\n seg_id = None\n\n if frame_id is not None:\n if label_type == \"scalar\":\n results[label_type][sample_id][frame_id] = label\n else:\n _results = results[label_type][sample_id][frame_id]\n\n self._add_label_to_results(\n _results, label_type, label, seg_id=seg_id\n )\n else:\n if label_type == \"scalar\":\n results[label_type][sample_id] = label\n else:\n _results = results[label_type][sample_id]\n\n self._add_label_to_results(\n _results, label_type, label, seg_id=seg_id\n )\n\n return prev_type\n\n def _get_segmentation_id(self, id_map, sample_id, frame_id):\n _id = id_map.get(sample_id, None)\n\n if frame_id is not None and isinstance(_id, dict):\n _id = _id.get(frame_id, None)\n\n if etau.is_str(_id):\n return _id\n\n if isinstance(_id, list) and len(_id) == 1:\n return _id[0]\n\n return None\n\n def _add_label_to_results(self, results, label_type, label, seg_id=None):\n # Merge polylines representing a semantic segmentation\n if label_type == \"segmentation\":\n if seg_id is None:\n seg_id = str(ObjectId())\n\n if results:\n polylines = next(iter(results.values()))\n else:\n polylines = fol.Polylines()\n results[seg_id] = polylines\n\n found_existing_class = False\n for polyline in polylines.polylines:\n if label.label == polyline.label:\n found_existing_class = True\n polyline.points.extend(label.points)\n\n if not found_existing_class:\n polylines.polylines.append(label)\n\n return\n\n # Merge polylines representing an instance segmentation\n if label_type == \"detections\" and isinstance(label, fol.Polyline):\n if label.id in results:\n results[label.id].points.extend(label.points)\n else:\n results[label.id] = label\n\n return\n\n results[label.id] = label\n\n def _parse_arg(self, arg, config_arg):\n if arg is None:\n return config_arg\n\n return arg\n\n def _build_cvat_schema(self, label_schema, occluded_attr=None):\n cvat_schema = {}\n assign_scalar_attrs = {}\n occluded_attrs = defaultdict(dict)\n label_field_classes = defaultdict(dict)\n\n _class_label_fields = {}\n _duplicate_classes = set()\n _prev_field_classes = set()\n\n for label_field, label_info in label_schema.items():\n _field_classes = set()\n label_type = label_info[\"type\"]\n is_existing_field = label_info[\"existing_field\"]\n classes = label_info[\"classes\"]\n attributes, occluded_attr_name = self._to_cvat_attributes(\n label_info[\"attributes\"]\n )\n if occluded_attr_name is None and occluded_attr is not None:\n occluded_attr_name = occluded_attr\n label_schema[label_field][\"attributes\"][occluded_attr] = {}\n\n # Must track label IDs for existing label fields\n if is_existing_field and label_type != \"scalar\":\n if \"label_id\" in attributes:\n raise ValueError(\n \"Label field '%s' attribute schema cannot use \"\n \"reserved name 'label_id'\" % label_field\n )\n\n attributes[\"label_id\"] = {\n \"name\": \"label_id\",\n \"input_type\": \"text\",\n \"mutable\": True,\n }\n\n if label_type == \"scalar\":\n # True: scalars are annotated as tag attributes\n # False: scalars are annotated as tag labels\n assign_scalar_attrs[label_field] = not bool(classes)\n else:\n assign_scalar_attrs[label_field] = None\n\n if not classes:\n classes = [label_field]\n\n if not attributes:\n attributes[\"value\"] = {\n \"name\": \"value\",\n \"input_type\": \"text\",\n \"mutable\": True,\n }\n\n # Handle class name clashes and global attributes\n for _class in classes:\n if etau.is_str(_class):\n _classes = [_class]\n else:\n 
_classes = _class[\"classes\"]\n\n for name in _classes:\n # If two label fields share a class name, we must append\n # `label_field` to all instances of `name` to disambiguate\n if (\n name in _prev_field_classes\n and name not in _duplicate_classes\n ):\n _duplicate_classes.add(name)\n\n prev_field = _class_label_fields[name]\n\n new_name = \"%s_%s\" % (name, prev_field)\n cvat_schema[new_name] = cvat_schema.pop(name)\n\n label_field_classes[prev_field][name] = new_name\n\n if name in occluded_attrs[label_field]:\n attr_name = occluded_attrs[label_field].pop(name)\n occluded_attrs[label_field][new_name] = attr_name\n\n _field_classes.add(name)\n\n if name in _duplicate_classes:\n new_name = \"%s_%s\" % (name, label_field)\n label_field_classes[label_field][name] = new_name\n name = new_name\n else:\n _class_label_fields[name] = label_field\n label_field_classes[label_field][name] = name\n\n cvat_schema[name] = deepcopy(attributes)\n if occluded_attr_name is not None:\n occluded_attrs[label_field][name] = occluded_attr_name\n\n _prev_field_classes |= _field_classes\n\n # Class-specific attributes\n for _class in classes:\n if etau.is_str(_class):\n continue\n\n _classes = _class[\"classes\"]\n _attrs, _occluded_attr_name = self._to_cvat_attributes(\n _class[\"attributes\"]\n )\n if _occluded_attr_name is None and occluded_attr is not None:\n _occluded_attr_name = occluded_attr\n\n if \"label_id\" in _attrs:\n raise ValueError(\n \"Label field '%s' attribute schema cannot use \"\n \"reserved name 'label_id'\" % label_field\n )\n\n for name in _classes:\n if name in _duplicate_classes:\n name = \"%s_%s\" % (name, label_field)\n\n cvat_schema[name].update(_attrs)\n if _occluded_attr_name is not None:\n occluded_attrs[label_field][name] = _occluded_attr_name\n\n return (\n cvat_schema,\n assign_scalar_attrs,\n dict(occluded_attrs),\n dict(label_field_classes),\n )\n\n def _to_cvat_attributes(self, attributes):\n cvat_attrs = {}\n occluded_attr_name = None\n for attr_name, info in attributes.items():\n cvat_attr = {\"name\": attr_name, \"mutable\": True}\n is_occluded = False\n for attr_key, val in info.items():\n if attr_key == \"type\":\n if val == \"occluded\":\n occluded_attr_name = attr_name\n is_occluded = True\n else:\n cvat_attr[\"input_type\"] = val\n elif attr_key == \"values\":\n cvat_attr[\"values\"] = [_stringify_value(v) for v in val]\n elif attr_key == \"default\":\n cvat_attr[\"default_value\"] = _stringify_value(val)\n elif attr_key == \"mutable\":\n cvat_attr[\"mutable\"] = bool(val)\n\n if not is_occluded:\n cvat_attrs[attr_name] = cvat_attr\n\n return cvat_attrs, occluded_attr_name\n\n def _create_shapes_tags_tracks(\n self,\n samples,\n label_field,\n label_info,\n cvat_schema,\n assign_scalar_attrs=False,\n load_tracks=False,\n only_keyframes=False,\n occluded_attrs=None,\n ):\n label_type = label_info[\"type\"]\n classes = label_info[\"classes\"]\n mask_targets = label_info.get(\"mask_targets\", None)\n\n if occluded_attrs is not None:\n occluded_attrs = occluded_attrs.get(label_field, None)\n\n id_map = {}\n tags_or_shapes = []\n tracks = {}\n\n # Tracks any \"attribute:\" prefixes that need to be prepended to\n # attributes in `cvat_schema` because the corresponding data is found\n # to be in the attributes dict of the FiftyOne labels\n remapped_attrs = {}\n\n is_video = samples.media_type == fom.VIDEO\n\n if is_video:\n field, _ = samples._handle_frame_field(label_field)\n else:\n field = label_field\n\n frame_id = -1\n for sample in samples:\n metadata = 
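# A worked example of the FiftyOne-to-CVAT attribute conversion performed by
# _to_cvat_attributes(); the attribute is hypothetical, and str() stands in
# for this module's _stringify_value() helper.
info = {
    "type": "select",
    "values": ["sunny", "rainy"],
    "default": "sunny",
    "mutable": False,
}
cvat_attr = {"name": "weather", "mutable": True}
for attr_key, val in info.items():
    if attr_key == "type":
        cvat_attr["input_type"] = val
    elif attr_key == "values":
        cvat_attr["values"] = [str(v) for v in val]
    elif attr_key == "default":
        cvat_attr["default_value"] = str(val)
    elif attr_key == "mutable":
        cvat_attr["mutable"] = bool(val)

assert cvat_attr == {
    "name": "weather",
    "mutable": False,
    "input_type": "select",
    "values": ["sunny", "rainy"],
    "default_value": "sunny",
}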
sample.metadata\n\n if is_video:\n images = sample.frames.values()\n frame_size = (metadata.frame_width, metadata.frame_height)\n else:\n images = [sample]\n frame_size = (metadata.width, metadata.height)\n\n for image in images:\n frame_id += 1\n\n label = image[field]\n\n if label is None:\n continue\n\n kwargs = {}\n\n if label_type not in (\n \"scalar\",\n \"classification\",\n \"classifications\",\n \"segmentation\",\n ):\n kwargs[\"load_tracks\"] = load_tracks\n kwargs[\"occluded_attrs\"] = occluded_attrs\n\n if label_type == \"scalar\":\n labels = label\n kwargs[\"assign_scalar_attrs\"] = assign_scalar_attrs\n func = self._create_scalar_tags\n elif label_type == \"classification\":\n labels = [label]\n func = self._create_classification_tags\n elif label_type == \"classifications\":\n labels = label.classifications\n func = self._create_classification_tags\n elif label_type in (\"detection\", \"instance\"):\n labels = [label]\n func = self._create_detection_shapes\n elif label_type in (\"detections\", \"instances\"):\n labels = label.detections\n func = self._create_detection_shapes\n elif label_type in (\"polyline\", \"polygon\"):\n labels = [label]\n func = self._create_polyline_shapes\n elif label_type in (\"polylines\", \"polygons\"):\n labels = label.polylines\n func = self._create_polyline_shapes\n elif label_type == \"keypoint\":\n labels = [label]\n func = self._create_keypoint_shapes\n elif label_type == \"keypoints\":\n labels = label.keypoints\n func = self._create_keypoint_shapes\n elif label_type == \"segmentation\":\n labels = label\n func = self._create_segmentation_shapes\n kwargs[\"mask_targets\"] = mask_targets\n else:\n raise ValueError(\n \"Label type '%s' of field '%s' is not supported\"\n % (label_type, label_field)\n )\n\n ids, _tags_or_shapes, _tracks, _remapped_attrs = func(\n labels,\n cvat_schema,\n label_field,\n frame_id,\n frame_size,\n label_type=label_type,\n **kwargs,\n )\n\n tags_or_shapes.extend(_tags_or_shapes)\n self._merge_tracks(tracks, _tracks)\n remapped_attrs.update(_remapped_attrs)\n\n if ids is not None:\n if is_video:\n if sample.id not in id_map:\n id_map[sample.id] = {}\n\n id_map[sample.id][image.id] = ids\n else:\n id_map[sample.id] = ids\n\n # Record any attribute name changes due to label attributes being\n # stored in attributes dicts rather than as dynamic fields\n for attr_schema in cvat_schema.values():\n for name, attr in attr_schema.items():\n if name in remapped_attrs:\n attr[\"name\"] = remapped_attrs[name]\n\n if load_tracks:\n tracks = self._finalize_tracks(tracks, frame_id, only_keyframes)\n return id_map, tags_or_shapes, tracks\n\n return id_map, tags_or_shapes\n\n def _create_scalar_tags(\n self,\n label,\n cvat_schema,\n label_field,\n frame_id,\n frame_size,\n label_type=None,\n assign_scalar_attrs=False,\n ):\n if label is None:\n label = \"\"\n\n if assign_scalar_attrs[label_field]:\n if label_field not in cvat_schema:\n return False, [], {}, {}\n\n scalar_attr_name = next(iter(cvat_schema[label_field].keys()))\n\n class_name = label_field\n attributes = [\n {\n \"spec_id\": scalar_attr_name,\n \"value\": _stringify_value(label),\n }\n ]\n else:\n class_name = _stringify_value(label)\n if class_name not in cvat_schema:\n return False, [], {}, {}\n\n attributes = []\n\n tags = [\n {\n \"label_id\": class_name,\n \"group\": 0,\n \"frame\": frame_id,\n \"source\": \"manual\",\n \"attributes\": attributes,\n }\n ]\n\n return True, tags, {}, {}\n\n def _create_classification_tags(\n self,\n classifications,\n 
cvat_schema,\n label_field,\n frame_id,\n frame_size,\n label_type=None,\n ):\n ids = []\n tags = []\n remapped_attrs = {}\n\n for cn in classifications:\n (\n class_name,\n attributes,\n _,\n _remapped_attrs,\n _,\n ) = self._parse_label(cn, cvat_schema, label_field)\n\n if class_name is None:\n continue\n\n ids.append(cn.id)\n remapped_attrs.update(_remapped_attrs)\n tags.append(\n {\n \"label_id\": class_name,\n \"group\": 0,\n \"frame\": frame_id,\n \"source\": \"manual\",\n \"attributes\": attributes,\n }\n )\n\n if label_type == \"classification\":\n ids = ids[0] if ids else None\n\n return ids, tags, {}, remapped_attrs\n\n def _create_detection_shapes(\n self,\n detections,\n cvat_schema,\n label_field,\n frame_id,\n frame_size,\n label_type=None,\n label_id=None,\n load_tracks=False,\n occluded_attrs=None,\n ):\n ids = []\n shapes = []\n tracks = {}\n remapped_attrs = {}\n\n for det in detections:\n (\n class_name,\n attributes,\n immutable_attrs,\n _remapped_attrs,\n is_occluded,\n ) = self._parse_label(\n det,\n cvat_schema,\n label_field,\n label_id=label_id,\n occluded_attrs=occluded_attrs,\n )\n\n if class_name is None:\n continue\n\n curr_shapes = []\n\n if label_type in (\"detection\", \"detections\"):\n x, y, w, h = det.bounding_box\n width, height = frame_size\n xtl = float(round(x * width))\n ytl = float(round(y * height))\n xbr = float(round((x + w) * width))\n ybr = float(round((y + h) * height))\n bbox = [xtl, ytl, xbr, ybr]\n\n curr_shapes.append(\n {\n \"type\": \"rectangle\",\n \"occluded\": is_occluded,\n \"z_order\": 0,\n \"points\": bbox,\n \"label_id\": class_name,\n \"group\": 0,\n \"frame\": frame_id,\n \"source\": \"manual\",\n \"attributes\": attributes,\n }\n )\n elif label_type in (\"instance\", \"instances\"):\n if det.mask is None:\n continue\n\n polygon = det.to_polyline()\n for points in polygon.points:\n if len(points) < 3:\n continue # CVAT polygons must contain >= 3 points\n\n abs_points = HasCVATPoints._to_abs_points(\n points, frame_size\n )\n flattened_points = list(\n itertools.chain.from_iterable(abs_points)\n )\n\n curr_shapes.append(\n {\n \"type\": \"polygon\",\n \"occluded\": is_occluded,\n \"z_order\": 0,\n \"points\": flattened_points,\n \"label_id\": class_name,\n \"group\": 0,\n \"frame\": frame_id,\n \"source\": \"manual\",\n \"attributes\": deepcopy(attributes),\n }\n )\n\n if not curr_shapes:\n continue\n\n ids.append(det.id)\n remapped_attrs.update(_remapped_attrs)\n\n if load_tracks and det.index is not None:\n keyframe = det.get_attribute_value(\"keyframe\", False)\n self._add_shapes_to_tracks(\n tracks,\n curr_shapes,\n class_name,\n det.index,\n frame_id,\n immutable_attrs,\n keyframe,\n )\n else:\n shapes.extend(curr_shapes)\n\n return ids, shapes, tracks, remapped_attrs\n\n def _create_keypoint_shapes(\n self,\n keypoints,\n cvat_schema,\n label_field,\n frame_id,\n frame_size,\n label_type=None,\n load_tracks=False,\n occluded_attrs=None,\n ):\n ids = []\n shapes = []\n tracks = {}\n remapped_attrs = {}\n\n for kp in keypoints:\n (\n class_name,\n attributes,\n immutable_attrs,\n _remapped_attrs,\n is_occluded,\n ) = self._parse_label(\n kp, cvat_schema, label_field, occluded_attrs=occluded_attrs\n )\n\n if class_name is None:\n continue\n\n abs_points = HasCVATPoints._to_abs_points(kp.points, frame_size)\n flattened_points = list(itertools.chain.from_iterable(abs_points))\n\n shape = {\n \"type\": \"points\",\n \"occluded\": is_occluded,\n \"z_order\": 0,\n \"points\": flattened_points,\n \"label_id\": class_name,\n 
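# The relative-to-absolute bounding box conversion used above for CVAT
# rectangles, with a hypothetical frame size.
frame_size = (1920, 1080)
x, y, w, h = 0.25, 0.5, 0.1, 0.2  # FiftyOne-style relative [x, y, w, h]
width, height = frame_size
bbox = [
    float(round(x * width)),         # xtl
    float(round(y * height)),        # ytl
    float(round((x + w) * width)),   # xbr
    float(round((y + h) * height)),  # ybr
]
assert bbox == [480.0, 540.0, 672.0, 756.0]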
\"group\": 0,\n \"frame\": frame_id,\n \"source\": \"manual\",\n \"attributes\": attributes,\n }\n\n ids.append(kp.id)\n remapped_attrs.update(_remapped_attrs)\n\n if load_tracks and kp.index is not None:\n keyframe = kp.get_attribute_value(\"keyframe\", False)\n self._add_shapes_to_tracks(\n tracks,\n [shape],\n class_name,\n kp.index,\n frame_id,\n immutable_attrs,\n keyframe,\n )\n else:\n shapes.append(shape)\n\n return ids, shapes, tracks, remapped_attrs\n\n def _create_polyline_shapes(\n self,\n polylines,\n cvat_schema,\n label_field,\n frame_id,\n frame_size,\n label_type=None,\n load_tracks=False,\n occluded_attrs=None,\n ):\n ids = []\n shapes = []\n tracks = {}\n remapped_attrs = {}\n\n for poly in polylines:\n (\n class_name,\n attributes,\n immutable_attrs,\n _remapped_attrs,\n is_occluded,\n ) = self._parse_label(\n poly, cvat_schema, label_field, occluded_attrs=occluded_attrs\n )\n\n if class_name is None:\n continue\n\n curr_shapes = []\n\n for points in poly.points:\n if poly.filled and len(points) < 3:\n continue # CVAT polygons must contain >= 3 points\n\n abs_points = HasCVATPoints._to_abs_points(points, frame_size)\n flattened_points = list(\n itertools.chain.from_iterable(abs_points)\n )\n\n shape = {\n \"type\": \"polygon\" if poly.filled else \"polyline\",\n \"occluded\": is_occluded,\n \"z_order\": 0,\n \"points\": flattened_points,\n \"label_id\": class_name,\n \"group\": 0,\n \"frame\": frame_id,\n \"source\": \"manual\",\n \"attributes\": deepcopy(attributes),\n }\n curr_shapes.append(shape)\n\n if not curr_shapes:\n continue\n\n ids.append(poly.id)\n remapped_attrs.update(_remapped_attrs)\n\n if load_tracks and poly.index is not None:\n keyframe = poly.get_attribute_value(\"keyframe\", False)\n self._add_shapes_to_tracks(\n tracks,\n curr_shapes,\n class_name,\n poly.index,\n frame_id,\n immutable_attrs,\n keyframe,\n )\n else:\n shapes.extend(curr_shapes)\n\n return ids, shapes, tracks, remapped_attrs\n\n def _create_segmentation_shapes(\n self,\n segmentation,\n cvat_schema,\n label_field,\n frame_id,\n frame_size,\n label_type=None,\n mask_targets=None,\n ):\n label_id = segmentation.id\n detections = segmentation.to_detections(mask_targets=mask_targets)\n\n _, shapes, tracks, remapped_attrs = self._create_detection_shapes(\n detections.detections,\n cvat_schema,\n label_field,\n frame_id,\n frame_size,\n label_type=\"instances\",\n label_id=label_id,\n )\n\n return label_id, shapes, tracks, remapped_attrs\n\n def _parse_label(\n self,\n label,\n cvat_schema,\n label_field,\n label_id=None,\n occluded_attrs=None,\n ):\n # If the class is a duplicate, it will have this name\n dup_class_name = \"%s_%s\" % (label.label, label_field)\n\n if label.label in cvat_schema:\n class_name = label.label\n elif dup_class_name in cvat_schema:\n class_name = dup_class_name\n else:\n return None, None, None, None, None\n\n attr_schema = cvat_schema[class_name]\n\n if label_id is None:\n label_id = label.id\n\n label_attrs = [{\"spec_id\": \"label_id\", \"value\": label_id}]\n immutable_attrs = []\n remapped_attrs = {}\n\n for name, attr in attr_schema.items():\n if name.startswith(\"attribute:\"):\n name = name[len(\"attribute:\") :]\n\n value = label.get_attribute_value(name, None)\n if value is None:\n continue\n\n if name not in label:\n # Found attribute stored in the label's attributes dict\n new_name = \"attribute:\" + name\n remapped_attrs[name] = new_name\n name = new_name\n\n attr_dict = {\"spec_id\": name, \"value\": _stringify_value(value)}\n\n if 
attr[\"mutable\"]:\n label_attrs.append(attr_dict)\n else:\n immutable_attrs.append(attr_dict)\n\n is_occluded = False\n if occluded_attrs is not None:\n attr_name = occluded_attrs.get(class_name, None)\n if attr_name is not None:\n is_occluded = _parse_occlusion_value(\n label.get_attribute_value(attr_name, False)\n )\n\n return (\n class_name,\n label_attrs,\n immutable_attrs,\n remapped_attrs,\n is_occluded,\n )\n\n def _add_shapes_to_tracks(\n self,\n tracks,\n shapes,\n class_name,\n index,\n frame_id,\n immutable_attrs,\n keyframe,\n ):\n if class_name not in tracks:\n tracks[class_name] = {}\n\n if index not in tracks[class_name]:\n tracks[class_name][index] = {\n \"label_id\": class_name,\n \"shapes\": [],\n \"frame\": frame_id,\n \"group\": 0,\n \"attributes\": immutable_attrs,\n }\n\n _shapes = tracks[class_name][index][\"shapes\"]\n\n for shape in shapes:\n shape[\"outside\"] = False\n shape[\"keyframe\"] = keyframe\n del shape[\"label_id\"]\n _shapes.append(shape)\n\n def _merge_tracks(self, tracks, new_tracks):\n for class_name, class_tracks in new_tracks.items():\n if class_name not in tracks:\n tracks[class_name] = class_tracks\n continue\n\n for index, track in class_tracks.items():\n if index not in tracks[class_name]:\n tracks[class_name][index] = track\n else:\n _track = tracks[class_name][index]\n _track[\"shapes\"].extend(track[\"shapes\"])\n _track[\"frame\"] = max(track[\"frame\"], _track[\"frame\"])\n\n def _finalize_tracks(self, tracks, frame_count, only_keyframes):\n formatted_tracks = []\n for class_tracks in tracks.values():\n for track in class_tracks.values():\n formatted_track = self._finalize_track(\n track, frame_count, only_keyframes\n )\n formatted_tracks.append(track)\n\n return formatted_tracks\n\n def _finalize_track(self, track, frame_count, only_keyframes):\n shapes = track[\"shapes\"]\n new_shapes = []\n prev_frame_shape_inds = []\n prev_frame = None\n next_is_keyframe = True\n\n for ind, shape in enumerate(shapes):\n frame = shape[\"frame\"]\n if prev_frame is None:\n prev_frame = frame\n\n if frame != prev_frame:\n if only_keyframes and next_is_keyframe:\n # The first frame of a new segment is always a keyframe\n next_is_keyframe = False\n for ind in prev_frame_shape_inds:\n shapes[ind][\"keyframe\"] = True\n\n # If there is a gap between shapes, we must mark the end of the\n # previous segment as \"outside\"\n if frame > prev_frame + 1:\n for prev_ind in prev_frame_shape_inds:\n last_shape = shapes[prev_ind]\n new_shape = deepcopy(last_shape)\n new_shape[\"frame\"] += 1\n new_shape[\"outside\"] = True\n if only_keyframes:\n new_shape[\"keyframe\"] = True\n\n new_shapes.append(\n (max(prev_frame_shape_inds), new_shape)\n )\n next_is_keyframe = True\n\n prev_frame_shape_inds = []\n prev_frame = frame\n\n prev_frame_shape_inds.append(ind)\n\n # The shapes in the last frame in the track must be set to \"outside\"\n last_shape = shapes[-1]\n if last_shape[\"frame\"] < frame_count:\n new_shape = deepcopy(last_shape)\n new_shape[\"frame\"] += 1\n new_shape[\"outside\"] = True\n if only_keyframes:\n new_shape[\"keyframe\"] = True\n\n new_shapes.append((len(shapes), new_shape))\n\n # Insert new shapes into track\n for ind, shape in new_shapes[::-1]:\n shapes.insert(ind, shape)\n\n # Remove non-keyframes if necessary\n if only_keyframes:\n track[\"shapes\"] = [s for s in shapes if s[\"keyframe\"]]\n\n return track\n\n def _build_frame_id_map(self, samples):\n is_video = samples.media_type == fom.VIDEO\n frame_id = -1\n\n frame_id_map = {}\n for sample in 
samples:\n            if is_video:\n                images = sample.frames.values()\n            else:\n                images = [sample]\n\n            for image in images:\n                frame_id += 1\n                frame_id_map[frame_id] = {\"sample_id\": sample.id}\n                if is_video:\n                    frame_id_map[frame_id][\"frame_id\"] = image.id\n\n        return frame_id_map\n\n    def _remap_ids(self, shapes_or_tags, class_id_map, attr_id_map):\n        for obj in shapes_or_tags:\n            label_name = obj[\"label_id\"]\n            class_id = class_id_map[label_name]\n            obj[\"label_id\"] = class_id\n            attr_map = attr_id_map[class_id]\n            attrs = []\n            for attr in obj[\"attributes\"]:\n                attr_name = attr[\"spec_id\"]\n                if attr_name in attr_map:\n                    attr[\"spec_id\"] = attr_map[attr_name]\n                    attrs.append(attr)\n\n            obj[\"attributes\"] = attrs\n\n        return shapes_or_tags\n\n    def _remap_track_ids(self, tracks, class_id_map, attr_id_map):\n        for track in tracks:\n            label_name = track[\"label_id\"]\n            class_id = class_id_map[label_name]\n            track[\"label_id\"] = class_id\n            attr_map = attr_id_map[class_id]\n            for shape in track[\"shapes\"]:\n                attrs = []\n                for attr in shape[\"attributes\"]:\n                    attr_name = attr[\"spec_id\"]\n                    if attr_name in attr_map:\n                        attr[\"spec_id\"] = attr_map[attr_name]\n                        attrs.append(attr)\n\n                shape[\"attributes\"] = attrs\n\n            attrs = []\n            for attr in track[\"attributes\"]:\n                attr_name = attr[\"spec_id\"]\n                if attr_name in attr_map:\n                    attr[\"spec_id\"] = attr_map[attr_name]\n                    attrs.append(attr)\n\n            track[\"attributes\"] = attrs\n\n        return tracks\n\n    def _validate(self, response, kwargs):\n        try:\n            response.raise_for_status()\n        except:\n            d = response.__dict__\n            logger.info(\"Arguments that caused this error were:\")\n            logger.info(kwargs)\n            raise Exception(\n                \"%d error for request %s to url %s with the reason %s. Error \"\n                \"content: %s\"\n                % (\n                    d[\"status_code\"],\n                    d[\"request\"],\n                    d[\"url\"],\n                    d[\"reason\"],\n                    d[\"_content\"],\n                )\n            )\n\n\nclass CVATLabel(object):\n    \"\"\"A label returned by the CVAT API.\n\n    Args:\n        label_dict: the dictionary containing the label information loaded from\n            the CVAT API\n        class_map: a dictionary mapping label IDs to class strings\n        attr_id_map: a dictionary mapping attribute IDs to attribute names for\n            every label\n        server_id_map: a dictionary mapping server IDs to FiftyOne label IDs\n        attributes (None): an optional list of additional attributes\n    \"\"\"\n\n    def __init__(\n        self,\n        label_dict,\n        class_map,\n        attr_id_map,\n        server_id_map,\n        attributes=None,\n    ):\n        cvat_id = label_dict[\"label_id\"]\n        server_id = label_dict[\"id\"]\n        attrs = label_dict[\"attributes\"]\n\n        if attributes is not None:\n            attrs.extend(attributes)\n\n        self._id = None\n        self.label = class_map[cvat_id]\n        self.attributes = {}\n        self.fo_attributes = {}\n\n        # Parse attributes\n        attr_id_map_rev = {v: k for k, v in attr_id_map[cvat_id].items()}\n        for attr in attrs:\n            name = attr_id_map_rev[attr[\"spec_id\"]]\n            value = _parse_value(attr[\"value\"])\n            if value is not None:\n                if name.startswith(\"attribute:\"):\n                    name = name[len(\"attribute:\") :]\n                    fo_attr = CVATAttribute(name, value).to_attribute()\n                    self.fo_attributes[name] = fo_attr\n                else:\n                    self.attributes[name] = value\n\n        # Parse label ID\n        label_id = self.attributes.pop(\"label_id\", None)\n\n        if label_id is not None:\n            self._set_id(label_id)\n\n        if self._id is None:\n            label_id = server_id_map.get(server_id, None)\n            if label_id is not None:\n                self._set_id(label_id)\n\n    def _set_id(self, label_id):\n        try:\n            self._id = ObjectId(label_id)\n        except:\n            pass\n\n    def _set_attributes(self, label):\n        if self._id is not None:\n            label._id = self._id\n\n        for name, value in 
self.attributes.items():\n            label[name] = value\n\n        if self.fo_attributes:\n            label.attributes = self.fo_attributes\n\n\nclass CVATShape(CVATLabel):\n    \"\"\"A shape returned by the CVAT API.\n\n    Args:\n        label_dict: the dictionary containing the label information loaded from\n            the CVAT API\n        class_map: a dictionary mapping label IDs to class strings\n        attr_id_map: a dictionary mapping attribute IDs to attribute names for\n            every label\n        server_id_map: a dictionary mapping server IDs to FiftyOne label IDs\n        metadata: a dictionary containing the width and height of the frame\n        index (None): the tracking index of the shape\n        immutable_attrs (None): immutable attributes inherited by this shape\n            from its track\n        occluded_attrs (None): a dictionary mapping class names to the\n            corresponding attribute linked to the CVAT occlusion widget, if any\n    \"\"\"\n\n    def __init__(\n        self,\n        label_dict,\n        class_map,\n        attr_id_map,\n        server_id_map,\n        metadata,\n        index=None,\n        immutable_attrs=None,\n        occluded_attrs=None,\n    ):\n        super().__init__(\n            label_dict,\n            class_map,\n            attr_id_map,\n            server_id_map,\n            attributes=immutable_attrs,\n        )\n\n        self.frame_size = (metadata[\"width\"], metadata[\"height\"])\n        self.points = label_dict[\"points\"]\n        self.index = index\n\n        # Parse occluded attribute, if necessary\n        if occluded_attrs is not None:\n            occluded_attr_name = occluded_attrs.get(self.label, None)\n            if occluded_attr_name:\n                self.attributes[occluded_attr_name] = label_dict[\"occluded\"]\n\n    def _to_pairs_of_points(self, points):\n        reshaped_points = np.reshape(points, (-1, 2))\n        return reshaped_points.tolist()\n\n    def to_detection(self):\n        \"\"\"Converts this shape to a :class:`fiftyone.core.labels.Detection`.\n\n        Returns:\n            a :class:`fiftyone.core.labels.Detection`\n        \"\"\"\n        xtl, ytl, xbr, ybr = self.points\n        width, height = self.frame_size\n        bbox = [\n            xtl / width,\n            ytl / height,\n            (xbr - xtl) / width,\n            (ybr - ytl) / height,\n        ]\n        label = fol.Detection(\n            label=self.label, bounding_box=bbox, index=self.index\n        )\n        self._set_attributes(label)\n        return label\n\n    def to_polyline(self, closed=False, filled=False):\n        \"\"\"Converts this shape to a :class:`fiftyone.core.labels.Polyline`.\n\n        Returns:\n            a :class:`fiftyone.core.labels.Polyline`\n        \"\"\"\n        points = self._to_pairs_of_points(self.points)\n        rel_points = HasCVATPoints._to_rel_points(points, self.frame_size)\n        label = fol.Polyline(\n            label=self.label,\n            points=[rel_points],\n            index=self.index,\n            closed=closed,\n            filled=filled,\n        )\n        self._set_attributes(label)\n        return label\n\n    def to_polylines(self, closed=False, filled=False):\n        \"\"\"Converts this shape to a :class:`fiftyone.core.labels.Polylines`.\n\n        Returns:\n            a :class:`fiftyone.core.labels.Polylines`\n        \"\"\"\n        points = self._to_pairs_of_points(self.points)\n        rel_points = HasCVATPoints._to_rel_points(points, self.frame_size)\n        polyline = fol.Polyline(\n            label=self.label,\n            points=[rel_points],\n            closed=closed,\n            filled=filled,\n        )\n        label = fol.Polylines(polylines=[polyline])\n        self._set_attributes(label)\n        return label\n\n    def to_keypoint(self):\n        \"\"\"Converts this shape to a :class:`fiftyone.core.labels.Keypoint`.\n\n        Returns:\n            a :class:`fiftyone.core.labels.Keypoint`\n        \"\"\"\n        points = self._to_pairs_of_points(self.points)\n        rel_points = HasCVATPoints._to_rel_points(points, self.frame_size)\n        label = fol.Keypoint(\n            label=self.label, points=rel_points, index=self.index\n        )\n        self._set_attributes(label)\n        return label\n\n    @classmethod\n    def polyline_to_detection(cls, polyline, frame_size):\n        
\"\"\"Converts a :class:`fiftyone.core.labels.Polyline` to a\n :class:`fiftyone.core.labels.Detection` with a segmentation mask.\n\n Args:\n polyline: a :class:`fiftyone.core.labels.Polyline`\n frame_size: the ``(width, height)`` of the frame\n\n Returns:\n a :class:`fiftyone.core.labels.Detection`\n \"\"\"\n detection = polyline.to_detection(frame_size=frame_size)\n detection._id = polyline._id\n return detection\n\n @classmethod\n def polylines_to_segmentation(cls, polylines, frame_size, mask_targets):\n \"\"\"Converts a :class:`fiftyone.core.labels.Polylines` to a\n :class:`fiftyone.core.labels.Segmentation`.\n\n Args:\n polylines: a :class:`fiftyone.core.labels.Polylines`\n mask_targets: a dict mapping integer pixel values to label strings\n frame_size: the ``(width, height)`` of the frame\n\n Returns:\n a :class:`fiftyone.core.labels.Segmentation`\n \"\"\"\n return polylines.to_segmentation(\n frame_size=frame_size, mask_targets=mask_targets\n )\n\n\nclass CVATTag(CVATLabel):\n \"\"\"A tag returned by the CVAT API.\n\n Args:\n label_dict: the dictionary containing the label information loaded from\n the CVAT API\n class_map: a dictionary mapping label IDs to class strings\n attr_id_map: a dictionary mapping attribute IDs attribute names for\n every label\n server_id_map: a dictionary mapping server IDs to FiftyOne label IDs\n attributes (None): an optional list of additional attributes\n \"\"\"\n\n def to_classification(self):\n \"\"\"Converts the tag to a :class:`fiftyone.core.labels.Classification`.\n\n Returns:\n a :class:`fiftyone.core.labels.Classification`\n \"\"\"\n label = fol.Classification(label=self.label)\n self._set_attributes(label)\n return label\n\n\ndef load_cvat_image_annotations(xml_path):\n \"\"\"Loads the CVAT image annotations from the given XML file.\n\n See :ref:`this page <CVATImageDataset-import>` for format details.\n\n Args:\n xml_path: the path to the annotations XML file\n\n Returns:\n a tuple of\n\n - **info**: a dict of dataset info\n - **cvat_task_labels**: a :class:`CVATTaskLabels` instance\n - **cvat_images**: a list of :class:`CVATImage` instances\n \"\"\"\n d = fou.load_xml_as_json_dict(xml_path)\n annotations = d.get(\"annotations\", {})\n\n # Verify version\n version = annotations.get(\"version\", None)\n if version is None:\n logger.warning(\"No version tag found; assuming version 1.1\")\n elif version != \"1.1\":\n logger.warning(\n \"Only version 1.1 is explicitly supported; found %s. 
Trying to \"\n \"load assuming version 1.1 format\",\n version,\n )\n\n # Load meta\n meta = annotations.get(\"meta\", {})\n\n # Load task labels\n task = meta.get(\"task\", {})\n labels_dict = task.get(\"labels\", {})\n cvat_task_labels = CVATTaskLabels.from_labels_dict(labels_dict)\n\n # Load annotations\n image_dicts = _ensure_list(annotations.get(\"image\", []))\n cvat_images = [CVATImage.from_image_dict(id) for id in image_dicts]\n\n # Load dataset info\n info = {\"task_labels\": cvat_task_labels.labels}\n if \"created\" in task:\n info[\"created\"] = task[\"created\"]\n\n if \"updated\" in task:\n info[\"updated\"] = task[\"updated\"]\n\n if \"dumped\" in meta:\n info[\"dumped\"] = meta[\"dumped\"]\n\n return info, cvat_task_labels, cvat_images\n\n\ndef load_cvat_video_annotations(xml_path):\n \"\"\"Loads the CVAT video annotations from the given XML file.\n\n See :ref:`this page <CVATVideoDataset-import>` for format details.\n\n Args:\n xml_path: the path to the annotations XML file\n\n Returns:\n a tuple of\n\n - **info**: a dict of dataset info\n - **cvat_task_labels**: a :class:`CVATTaskLabels` instance\n - **cvat_tracks**: a list of :class:`CVATTrack` instances\n \"\"\"\n d = fou.load_xml_as_json_dict(xml_path)\n annotations = d.get(\"annotations\", {})\n\n # Verify version\n version = annotations.get(\"version\", None)\n if version is None:\n logger.warning(\"No version tag found; assuming version 1.1\")\n elif version != \"1.1\":\n logger.warning(\n \"Only version 1.1 is explicitly supported; found %s. Trying to \"\n \"load assuming version 1.1 format\",\n version,\n )\n\n # Load meta\n meta = annotations.get(\"meta\", {})\n\n # Load task labels\n task = meta.get(\"task\", {})\n labels_dict = task.get(\"labels\", {})\n cvat_task_labels = CVATTaskLabels.from_labels_dict(labels_dict)\n\n # Load annotations\n track_dicts = _ensure_list(annotations.get(\"track\", []))\n if track_dicts:\n original_size = task[\"original_size\"]\n frame_size = (\n int(original_size[\"width\"]),\n int(original_size[\"height\"]),\n )\n cvat_tracks = [\n CVATTrack.from_track_dict(td, frame_size) for td in track_dicts\n ]\n else:\n cvat_tracks = []\n\n # Load dataset info\n info = {\"task_labels\": cvat_task_labels.labels}\n if \"created\" in task:\n info[\"created\"] = task[\"created\"]\n\n if \"updated\" in task:\n info[\"updated\"] = task[\"updated\"]\n\n if \"dumped\" in meta:\n info[\"dumped\"] = meta[\"dumped\"]\n\n return info, cvat_task_labels, cvat_tracks\n\n\ndef _is_supported_attribute_type(value):\n return (\n isinstance(value, bool) or etau.is_str(value) or etau.is_numeric(value)\n )\n\n\ndef _cvat_tracks_to_frames_dict(cvat_tracks):\n frames = defaultdict(dict)\n for cvat_track in cvat_tracks:\n labels = cvat_track.to_labels()\n for frame_number, label in labels.items():\n frame = frames[frame_number]\n\n if isinstance(label, fol.Detection):\n if \"detections\" not in frame:\n frame[\"detections\"] = fol.Detections()\n\n frame[\"detections\"].detections.append(label)\n elif isinstance(label, fol.Polyline):\n if \"polylines\" not in frame:\n frame[\"polylines\"] = fol.Polylines()\n\n frame[\"polylines\"].polylines.append(label)\n elif isinstance(label, fol.Keypoint):\n if \"keypoints\" not in frame:\n frame[\"keypoints\"] = fol.Keypoints()\n\n frame[\"keypoints\"].keypoints.append(label)\n\n return frames\n\n\ndef _frames_to_cvat_tracks(frames, frame_size):\n labels_map = defaultdict(dict)\n no_index_map = defaultdict(list)\n found_label = False\n\n def process_label(label, frame_number):\n 
if label.index is not None:\n            labels_map[label.index][frame_number] = label\n        else:\n            no_index_map[frame_number].append(label)\n\n    # Convert from per-frame to per-object tracks\n    for frame_number, frame_dict in frames.items():\n        for _, value in frame_dict.items():\n            if isinstance(value, (fol.Detection, fol.Polyline, fol.Keypoint)):\n                found_label = True\n                process_label(value, frame_number)\n            elif isinstance(value, fol.Detections):\n                found_label = True\n                for detection in value.detections:\n                    process_label(detection, frame_number)\n            elif isinstance(value, fol.Polylines):\n                found_label = True\n                for polyline in value.polylines:\n                    process_label(polyline, frame_number)\n            elif isinstance(value, fol.Keypoints):\n                found_label = True\n                for keypoint in value.keypoints:\n                    process_label(keypoint, frame_number)\n            elif value is not None:\n                msg = \"Ignoring unsupported label type '%s'\" % value.__class__\n                warnings.warn(msg)\n\n    if not found_label:\n        return None  # unlabeled\n\n    cvat_tracks = []\n\n    # Generate object tracks\n    max_index = -1\n    for index in sorted(labels_map):\n        max_index = max(index, max_index)\n        labels = labels_map[index]\n        cvat_track = CVATTrack.from_labels(index, labels, frame_size)\n        cvat_tracks.append(cvat_track)\n\n    # Generate single tracks for detections with no `index`\n    index = max_index\n    for frame_number, labels in no_index_map.items():\n        for label in labels:\n            index += 1\n            cvat_track = CVATTrack.from_labels(\n                index, {frame_number: label}, frame_size\n            )\n            cvat_tracks.append(cvat_track)\n\n    return cvat_tracks\n\n\ndef _get_single_polyline_points(polyline):\n    num_polylines = len(polyline.points)\n    if num_polylines == 0:\n        return []\n\n    if num_polylines > 1:\n        msg = (\n            \"Found polyline with more than one shape; only the first shape \"\n            \"will be stored in CVAT format\"\n        )\n        warnings.warn(msg)\n\n    return polyline.points[0]\n\n\ndef _ensure_list(value):\n    if value is None:\n        return []\n\n    if isinstance(value, list):\n        return value\n\n    return [value]\n\n\ndef _stringify_value(value):\n    if value is None:\n        return \"\"\n\n    if value is True:\n        return \"true\"\n\n    if value is False:\n        return \"false\"\n\n    return str(value)\n\n\ndef _to_int_bool(value):\n    return int(bool(value))\n\n\ndef _from_int_bool(value):\n    try:\n        return bool(int(value))\n    except:\n        pass\n\n    return None\n\n\ndef _parse_value(value):\n    if value in (None, \"None\", \"\"):\n        return None\n\n    if value in {\"True\", \"true\"}:\n        return True\n\n    if value in {\"False\", \"false\"}:\n        return False\n\n    try:\n        return int(value)\n    except:\n        pass\n\n    try:\n        return float(value)\n    except:\n        pass\n\n    return value\n\n\ndef _parse_occlusion_value(value):\n    if isinstance(value, bool):\n        return value\n\n    if etau.is_str(value):\n        str_value = \"'%s'\" % value\n        bool_value = False if value.lower() == \"false\" else bool(value)\n    else:\n        str_value = str(value)\n        bool_value = bool(value)\n\n    msg = \"Casting occlusion value %s of type %s to boolean %s\" % (\n        str_value,\n        type(value),\n        bool_value,\n    )\n    warnings.warn(msg)\n\n    return bool_value\n\n\n# Track interpolation code sourced from CVAT:\n# https://github.com/openvinotoolkit/cvat/blob/31f6234b0cdc656c9dde4294c1008560611c6978/cvat/apps/dataset_manager/annotation.py#L431-L730\ndef _get_interpolated_shapes(track_shapes):\n    def copy_shape(source, frame, points=None):\n        copied = deepcopy(source)\n        copied[\"keyframe\"] = False\n        copied[\"frame\"] = frame\n        if points is not None:\n            copied[\"points\"] = points\n        return copied\n\n    def simple_interpolation(shape0, shape1):\n        shapes = []\n        
distance = shape1[\"frame\"] - shape0[\"frame\"]\n diff = np.subtract(shape1[\"points\"], shape0[\"points\"])\n\n for frame in range(shape0[\"frame\"] + 1, shape1[\"frame\"]):\n offset = (frame - shape0[\"frame\"]) / distance\n points = shape0[\"points\"] + diff * offset\n\n shapes.append(copy_shape(shape0, frame, points.tolist()))\n\n return shapes\n\n def points_interpolation(shape0, shape1):\n if len(shape0[\"points\"]) == 2 and len(shape1[\"points\"]) == 2:\n return simple_interpolation(shape0, shape1)\n else:\n shapes = []\n for frame in range(shape0[\"frame\"] + 1, shape1[\"frame\"]):\n shapes.append(copy_shape(shape0, frame))\n\n return shapes\n\n def interpolate_position(left_position, right_position, offset):\n def to_array(points):\n return np.asarray(\n list(map(lambda point: [point[\"x\"], point[\"y\"]], points))\n ).flatten()\n\n def to_points(array):\n return list(\n map(\n lambda point: {\"x\": point[0], \"y\": point[1]},\n np.asarray(array).reshape(-1, 2),\n )\n )\n\n def curve_length(points):\n length = 0\n for i in range(1, len(points)):\n dx = points[i][\"x\"] - points[i - 1][\"x\"]\n dy = points[i][\"y\"] - points[i - 1][\"y\"]\n length += np.sqrt(dx ** 2 + dy ** 2)\n return length\n\n def curve_to_offset_vec(points, length):\n offset_vector = [0]\n accumulated_length = 0\n for i in range(1, len(points)):\n dx = points[i][\"x\"] - points[i - 1][\"x\"]\n dy = points[i][\"y\"] - points[i - 1][\"y\"]\n accumulated_length += np.sqrt(dx ** 2 + dy ** 2)\n offset_vector.append(accumulated_length / length)\n\n return offset_vector\n\n def find_nearest_pair(value, curve):\n minimum = [0, abs(value - curve[0])]\n for i in range(1, len(curve)):\n distance = abs(value - curve[i])\n if distance < minimum[1]:\n minimum = [i, distance]\n\n return minimum[0]\n\n def match_left_right(left_curve, right_curve):\n matching = {}\n for i, left_curve_item in enumerate(left_curve):\n matching[i] = [find_nearest_pair(left_curve_item, right_curve)]\n return matching\n\n def match_right_left(left_curve, right_curve, left_right_matching):\n matched_right_points = list(\n itertools.chain.from_iterable(left_right_matching.values())\n )\n unmatched_right_points = filter(\n lambda x: x not in matched_right_points,\n range(len(right_curve)),\n )\n updated_matching = deepcopy(left_right_matching)\n\n for right_point in unmatched_right_points:\n left_point = find_nearest_pair(\n right_curve[right_point], left_curve\n )\n updated_matching[left_point].append(right_point)\n\n for key, value in updated_matching.items():\n updated_matching[key] = sorted(value)\n\n return updated_matching\n\n def reduce_interpolation(\n interpolated_points, matching, left_points, right_points\n ):\n def average_point(points):\n sumX = 0\n sumY = 0\n for point in points:\n sumX += point[\"x\"]\n sumY += point[\"y\"]\n\n return {\"x\": sumX / len(points), \"y\": sumY / len(points)}\n\n def compute_distance(point1, point2):\n return np.sqrt(\n ((point1[\"x\"] - point2[\"x\"])) ** 2\n + ((point1[\"y\"] - point2[\"y\"]) ** 2)\n )\n\n def minimize_segment(\n base_length, N, start_interpolated, stop_interpolated\n ):\n threshold = base_length / (2 * N)\n minimized = [interpolated_points[start_interpolated]]\n latest_pushed = start_interpolated\n for i in range(start_interpolated + 1, stop_interpolated):\n distance = compute_distance(\n interpolated_points[latest_pushed],\n interpolated_points[i],\n )\n\n if distance >= threshold:\n minimized.append(interpolated_points[i])\n latest_pushed = i\n\n 
minimized.append(interpolated_points[stop_interpolated])\n\n if len(minimized) == 2:\n distance = compute_distance(\n interpolated_points[start_interpolated],\n interpolated_points[stop_interpolated],\n )\n\n if distance < threshold:\n return [average_point(minimized)]\n\n return minimized\n\n reduced = []\n interpolated_indexes = {}\n accumulated = 0\n for i in range(len(left_points)):\n interpolated_indexes[i] = []\n for _ in range(len(matching[i])):\n interpolated_indexes[i].append(accumulated)\n accumulated += 1\n\n def left_segment(start, stop):\n start_interpolated = interpolated_indexes[start][0]\n stop_interpolated = interpolated_indexes[stop][0]\n\n if start_interpolated == stop_interpolated:\n reduced.append(interpolated_points[start_interpolated])\n return\n\n base_length = curve_length(left_points[start : stop + 1])\n N = stop - start + 1\n\n reduced.extend(\n minimize_segment(\n base_length, N, start_interpolated, stop_interpolated\n )\n )\n\n def right_segment(left_point):\n start = matching[left_point][0]\n stop = matching[left_point][-1]\n start_interpolated = interpolated_indexes[left_point][0]\n stop_interpolated = interpolated_indexes[left_point][-1]\n base_length = curve_length(right_points[start : stop + 1])\n N = stop - start + 1\n\n reduced.extend(\n minimize_segment(\n base_length, N, start_interpolated, stop_interpolated\n )\n )\n\n previous_opened = None\n for i in range(len(left_points)):\n if len(matching[i]) == 1:\n if previous_opened is not None:\n if matching[i][0] == matching[previous_opened][0]:\n continue\n else:\n start = previous_opened\n stop = i - 1\n left_segment(start, stop)\n previous_opened = i\n else:\n previous_opened = i\n else:\n if previous_opened is not None:\n start = previous_opened\n stop = i - 1\n left_segment(start, stop)\n previous_opened = None\n\n right_segment(i)\n\n if previous_opened is not None:\n left_segment(previous_opened, len(left_points) - 1)\n\n return reduced\n\n left_points = to_points(left_position[\"points\"])\n right_points = to_points(right_position[\"points\"])\n left_offset_vec = curve_to_offset_vec(\n left_points, curve_length(left_points)\n )\n right_offset_vec = curve_to_offset_vec(\n right_points, curve_length(right_points)\n )\n\n matching = match_left_right(left_offset_vec, right_offset_vec)\n completed_matching = match_right_left(\n left_offset_vec, right_offset_vec, matching\n )\n\n interpolated_points = []\n for left_point_index, left_point in enumerate(left_points):\n for right_point_index in completed_matching[left_point_index]:\n right_point = right_points[right_point_index]\n interpolated_points.append(\n {\n \"x\": left_point[\"x\"]\n + (right_point[\"x\"] - left_point[\"x\"]) * offset,\n \"y\": left_point[\"y\"]\n + (right_point[\"y\"] - left_point[\"y\"]) * offset,\n }\n )\n\n reducedPoints = reduce_interpolation(\n interpolated_points, completed_matching, left_points, right_points\n )\n\n return to_array(reducedPoints).tolist()\n\n def polyshape_interpolation(shape0, shape1):\n shapes = []\n is_polygon = shape0[\"type\"] == \"polygon\"\n if is_polygon:\n shape0[\"points\"].extend(shape0[\"points\"][:2])\n shape1[\"points\"].extend(shape1[\"points\"][:2])\n\n distance = shape1[\"frame\"] - shape0[\"frame\"]\n for frame in range(shape0[\"frame\"] + 1, shape1[\"frame\"]):\n offset = (frame - shape0[\"frame\"]) / distance\n points = interpolate_position(shape0, shape1, offset)\n\n shapes.append(copy_shape(shape0, frame, points))\n\n if is_polygon:\n shape0[\"points\"] = shape0[\"points\"][:-2]\n 
shape1[\"points\"] = shape1[\"points\"][:-2]\n for shape in shapes:\n shape[\"points\"] = shape[\"points\"][:-2]\n\n return shapes\n\n def interpolate(shape0, shape1):\n is_same_type = shape0[\"type\"] == shape1[\"type\"]\n is_rectangle = shape0[\"type\"] == \"rectangle\"\n is_cuboid = shape0[\"type\"] == \"cuboid\"\n is_polygon = shape0[\"type\"] == \"polygon\"\n is_polyline = shape0[\"type\"] == \"polyline\"\n is_points = shape0[\"type\"] == \"points\"\n\n if not is_same_type:\n raise NotImplementedError()\n\n shapes = []\n if is_rectangle or is_cuboid:\n shapes = simple_interpolation(shape0, shape1)\n elif is_points:\n shapes = points_interpolation(shape0, shape1)\n elif is_polygon or is_polyline:\n shapes = polyshape_interpolation(shape0, shape1)\n else:\n raise NotImplementedError()\n\n return shapes\n\n if not track_shapes:\n return []\n\n if len(track_shapes) == 1:\n track_shapes[0][\"keyframe\"] = True\n return track_shapes\n\n shapes = []\n curr_frame = track_shapes[0][\"frame\"]\n end_frame = track_shapes[-1][\"frame\"]\n prev_shape = {}\n for shape in track_shapes:\n if prev_shape:\n if shape[\"frame\"] <= curr_frame:\n continue\n\n for attr in prev_shape[\"attributes\"]:\n if attr[\"spec_id\"] not in map(\n lambda el: el[\"spec_id\"], shape[\"attributes\"]\n ):\n shape[\"attributes\"].append(deepcopy(attr))\n\n if not prev_shape[\"outside\"]:\n shapes.extend(interpolate(prev_shape, shape))\n\n shape[\"keyframe\"] = True\n shapes.append(shape)\n\n curr_frame = shape[\"frame\"]\n prev_shape = shape\n\n if end_frame <= curr_frame:\n break\n\n if not prev_shape[\"outside\"]:\n shape = deepcopy(prev_shape)\n shape[\"frame\"] = end_frame\n shapes.extend(interpolate(prev_shape, shape))\n\n return shapes\n"
]
| [
[
"numpy.reshape",
"numpy.subtract",
"numpy.sqrt",
"numpy.asarray"
]
]
|
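A minimal standalone sketch of the box-coordinate arithmetic the CVAT module above performs in both directions: FiftyOne stores relative `[x, y, w, h]` boxes, while CVAT rectangles use absolute `[xtl, ytl, xbr, ybr]` corners (see `_create_detection_shapes` and `CVATShape.to_detection`). The helper names below are ours for illustration, not part of the fiftyone API:

```python
def fo_bbox_to_cvat(bounding_box, frame_size):
    """Relative [x, y, w, h] -> absolute [xtl, ytl, xbr, ybr],
    mirroring the arithmetic in _create_detection_shapes() above."""
    x, y, w, h = bounding_box
    width, height = frame_size
    return [
        float(round(x * width)),
        float(round(y * height)),
        float(round((x + w) * width)),
        float(round((y + h) * height)),
    ]


def cvat_bbox_to_fo(points, frame_size):
    """Absolute [xtl, ytl, xbr, ybr] -> relative [x, y, w, h],
    mirroring CVATShape.to_detection() above."""
    xtl, ytl, xbr, ybr = points
    width, height = frame_size
    return [xtl / width, ytl / height, (xbr - xtl) / width, (ybr - ytl) / height]


if __name__ == "__main__":
    box = [0.25, 0.25, 0.5, 0.5]
    pts = fo_bbox_to_cvat(box, (640, 480))
    print(pts)                               # [160.0, 120.0, 480.0, 360.0]
    print(cvat_bbox_to_fo(pts, (640, 480)))  # round-trips to the input box
```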
lindenmp/NormativeNeuroDev_CrossSec | [
"f5a05dd869b9190e52620db70953e1cdf5150d7d"
]
| [
"code/results_s1.py"
]
| [
"#!/usr/bin/env python\n# coding: utf-8\n\n# # Results, section 1:\n\n# In[1]:\n\n\nimport os, sys\nimport pandas as pd\nimport numpy as np\nimport scipy as sp\nfrom scipy import stats\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\nsys.path.append('/Users/lindenmp/Dropbox/Work/ResProjects/NormativeNeuroDev_CrossSec/code/func/')\nfrom proj_environment import set_proj_env\nfrom func import run_corr, get_fdr_p, get_cmap\n\n\n# In[3]:\n\n\ntrain_test_str = 'squeakycleanExclude' # 'squeakycleanExclude' 'trte_psychopathology'\nexclude_str = 't1Exclude' # 't1Exclude' 'fsFinalExclude'\nparc_str = 'schaefer' # 'schaefer' 'lausanne'\nparc_scale = 400 # 125 400\nprimary_covariate = 'ageAtScan1_Years'\nparcel_names, parcel_loc, drop_parcels, num_parcels, yeo_idx, yeo_labels = set_proj_env(train_test_str = train_test_str, exclude_str = exclude_str,\n parc_str = parc_str, parc_scale = parc_scale)\n\n\n# In[4]:\n\n\nos.environ['MODELDIR']\n\n\n# ## Load data pre-nispat data\n\n# In[5]:\n\n\n# Train\ndf_train = pd.read_csv(os.path.join(os.environ['NORMATIVEDIR'], 'train.csv'))\ndf_train.set_index(['bblid', 'scanid'], inplace = True); print(df_train.shape)\ndf_node_train = pd.read_csv(os.path.join(os.environ['NORMATIVEDIR'], 'resp_train.csv'))\ndf_node_train.set_index(['bblid', 'scanid'], inplace = True)\n\n# Test\ndf_test = pd.read_csv(os.path.join(os.environ['NORMATIVEDIR'], 'test.csv'))\ndf_test.set_index(['bblid', 'scanid'], inplace = True); print(df_test.shape)\ndf_node_test = pd.read_csv(os.path.join(os.environ['NORMATIVEDIR'], 'resp_test.csv'))\ndf_node_test.set_index(['bblid', 'scanid'], inplace = True)\n\n# concat\ndf = pd.concat([df_train, df_test])\ndf_node = pd.concat([df_node_train, df_node_test])\n\n\n# ## Age effects\n\n# In[6]:\n\n\n# age effect on training set\ndf_age_effect = run_corr(df_train[primary_covariate], df_node_train, typ = 'spearmanr'); df_age_effect['p_fdr'] = get_fdr_p(df_age_effect['p'])\nif parc_str == 'lausanne':\n df_age_effect.drop(my_list, axis = 0, inplace = True)\nage_alpha = 0.05\nage_filter = df_age_effect['p_fdr'].values < age_alpha\n\n\n# In[7]:\n\n\nage_filter.sum()\n\n\n# ## Load nispat outputs\n\n# In[8]:\n\n\n# Forward model\nsynth_cov_test = pd.read_csv(os.path.join(os.environ['NORMATIVEDIR'], 'forward/synth_cov_test.txt'),\n delim_whitespace = True, names=[primary_covariate, 'sex_adj'])\n\nyhat_forward = np.loadtxt(os.path.join(os.environ['NORMATIVEDIR'], 'forward/yhat.txt'), delimiter = ' ').transpose()\ndf_yhat_forward = pd.DataFrame(data = yhat_forward, index = synth_cov_test.index, columns = df_node.columns)\n\nys2_forward = np.loadtxt(os.path.join(os.environ['NORMATIVEDIR'], 'forward/ys2.txt'), delimiter = ' ').transpose()\ndf_ys2_forward = pd.DataFrame(data = ys2_forward, index = synth_cov_test.index, columns = df_node.columns)\n\n\n# In[9]:\n\n\nsmse = np.loadtxt(os.path.join(os.environ['NORMATIVEDIR'], 'smse.txt'), delimiter = ' ').transpose()\ndf_smse = pd.DataFrame(data = smse, index = df_node.columns)\n\n\n# In[10]:\n\n\nsmse_thresh = 1\nsmse_filter = df_smse.values < smse_thresh\nsmse_filter = smse_filter.reshape(-1)\n\n\n# In[11]:\n\n\nsmse_filter.sum()\n\n\n# In[12]:\n\n\ndf_yhat_forward_tmp = df_yhat_forward + (df_yhat_forward.abs().max()+1)\nprint((df_yhat_forward_tmp<0).any().any())\n\nannualized = True\nif annualized:\n elapsed_time = synth_cov_test.loc[synth_cov_test['sex_adj'] == 0,'ageAtScan1_Years'].iloc[-1] - synth_cov_test.loc[synth_cov_test['sex_adj'] == 0,'ageAtScan1_Years'].iloc[0]\n 
print(elapsed_time)\n x = (df_yhat_forward_tmp[synth_cov_test['sex_adj'] == 0].iloc[-1,:] / df_yhat_forward_tmp[synth_cov_test['sex_adj'] == 0].iloc[0,:]) - 1\n df_yhat_tmp1 = (np.power(1+x.abs(),1/elapsed_time)-1)*100\n df_yhat_tmp1[x<0] = df_yhat_tmp1[x<0]*-1\n\n x = (df_yhat_forward_tmp[synth_cov_test['sex_adj'] == 1].iloc[-1,:] / df_yhat_forward_tmp[synth_cov_test['sex_adj'] == 1].iloc[0,:]) - 1\n df_yhat_tmp2 = (np.power(1+x.abs(),1/elapsed_time)-1)*100\n df_yhat_tmp2[x<0] = df_yhat_tmp2[x<0]*-1\nelse:\n df_yhat_tmp1 = ((df_yhat_forward_tmp[synth_cov_test['sex_adj'] == 0].iloc[-1,:] / df_yhat_forward_tmp[synth_cov_test['sex_adj'] == 0].iloc[0,:]) - 1) * 100\n df_yhat_tmp2 = ((df_yhat_forward_tmp[synth_cov_test['sex_adj'] == 1].iloc[-1,:] / df_yhat_forward_tmp[synth_cov_test['sex_adj'] == 1].iloc[0,:]) - 1) * 100\n\ndf_yhat_diff = pd.concat((df_yhat_tmp1, df_yhat_tmp2), axis = 1)\ndf_yhat_diff.head()\n\n\n# # Plots\n\n# In[13]:\n\n\nif not os.path.exists(os.environ['FIGDIR']): os.makedirs(os.environ['FIGDIR'])\nos.chdir(os.environ['FIGDIR'])\nsns.set(style='white', context = 'paper', font_scale = 1)\ncmap = get_cmap('pair')\n\nmetrics = ('ct', 'str', 'ac', 'mc')\nmetrics_label_short = ('Thickness', 'Strength', 'Ave. ctrb.', 'Mod. ctrb.')\nmetrics_label = ('Thickness', 'Strength', 'Average controllability', 'Modal controllability')\nprint(metrics)\n\n\n# ## Brain plots nispat\n\n# In[14]:\n\n\nimport matplotlib.image as mpimg\nfrom brain_plot_func import roi_to_vtx, brain_plot\n\n\n# In[15]:\n\n\nif parc_str == 'schaefer':\n subject_id = 'fsaverage'\nelif parc_str == 'lausanne':\n subject_id = 'lausanne125'\n\n\n# In[16]:\n\n\nget_ipython().run_line_magic('pylab', 'qt')\n\n\n# 0 = Male, 1 = Female\n\n# In[17]:\n\n\nfor metric in metrics:\n for hemi in ('lh', 'rh'):\n for sx in ('sex0','sex1'):\n # Plots of age correlation\n fig_str = hemi + '_' + metric + '_age_' + sx + '_frwd'\n if sx == 'sex0':\n roi_data = df_yhat_diff.loc[:,0].filter(regex = metric, axis = 0).values\n elif sx == 'sex1':\n roi_data = df_yhat_diff.loc[:,1].filter(regex = metric, axis = 0).values\n age_filt = df_age_effect.filter(regex = metric, axis = 0)['p_fdr'].values < age_alpha\n smse_filt = df_smse.filter(regex = metric, axis = 0).values < smse_thresh\n smse_filt = smse_filt.reshape(-1)\n region_filt = np.logical_and(age_filt,smse_filt)\n\n roi_data[~region_filt] = -1000\n if metric == 'ct':\n center_anchor = 3\n if metric == 'str':\n center_anchor = 30\n elif metric == 'ac':\n center_anchor = 4\n elif metric == 'mc':\n center_anchor = 1\n\n if region_filt.any():\n if subject_id == 'lausanne125':\n parc_file = os.path.join('/Applications/freesurfer/subjects/', subject_id, 'label', hemi + '.myaparc_' + str(parc_scale) + '.annot')\n elif subject_id == 'fsaverage':\n parc_file = os.path.join('/Users/lindenmp/Dropbox/Work/ResProjects/NeuroDev_NetworkControl/figs/Parcellations/FreeSurfer5.3/fsaverage/label/',\n hemi + '.Schaefer2018_' + str(parc_scale) + 'Parcels_17Networks_order.annot')\n\n if subject_id == 'lausanne125' and metric == 'ct':\n brain_plot(roi_data, parcel_names[parcel_loc == 1], parc_file, fig_str, subject_id = subject_id, hemi = hemi, color = 'coolwarm', center_anchor = center_anchor)\n else:\n brain_plot(roi_data, parcel_names, parc_file, fig_str, subject_id = subject_id, hemi = hemi, color = 'coolwarm', center_anchor = center_anchor)\n else:\n print('Nothing significant')\n\n\n# # Figures\n\n# In[18]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# Figure 2C (top)\n\n# 
In[19]:\n\n\n# Age effects\nf, axes = plt.subplots(2, 4)\n# f.suptitle('age')\nf.set_figwidth(4)\nf.set_figheight(2)\nplt.subplots_adjust(wspace=0, hspace=0)\n\nmy_str = '_sex1_frwd'\n# column 0:\nfig_str = 'lh_ct_age'+my_str+'.png'\ntry:\n# axes[0,0].set_title('Thickness (left)')\n image = mpimg.imread('lat_' + fig_str); axes[0,0].imshow(image); axes[0,0].axis('off')\nexcept FileNotFoundError: axes[0,0].axis('off')\ntry:\n image = mpimg.imread('med_' + fig_str); axes[1,0].imshow(image); axes[1,0].axis('off')\nexcept FileNotFoundError: axes[1,0].axis('off')\n \n# column 1:\nfig_str = 'rh_ct_age'+my_str+'.png'\ntry:\n# axes[0,1].set_title('Thickness (right)')\n image = mpimg.imread('lat_' + fig_str); axes[0,1].imshow(image); axes[0,1].axis('off')\nexcept FileNotFoundError: axes[0,1].axis('off')\ntry:\n image = mpimg.imread('med_' + fig_str); axes[1,1].imshow(image); axes[1,1].axis('off')\nexcept FileNotFoundError: axes[1,1].axis('off')\n \n# column 2:\nfig_str = 'lh_str_age'+my_str+'.png'\ntry:\n# axes[0,2].set_title('Ave. ctrb. (left)')\n image = mpimg.imread('lat_' + fig_str); axes[0,2].imshow(image); axes[0,2].axis('off')\nexcept FileNotFoundError: axes[0,2].axis('off')\ntry:\n image = mpimg.imread('med_' + fig_str); axes[1,2].imshow(image); axes[1,2].axis('off')\nexcept FileNotFoundError: axes[1,2].axis('off')\n \n# column 3:\nfig_str = 'rh_str_age'+my_str+'.png'\ntry:\n# axes[0,3].set_title('Ave. ctrb. (right)')\n image = mpimg.imread('lat_' + fig_str); axes[0,3].imshow(image); axes[0,3].axis('off')\nexcept FileNotFoundError: axes[0,3].axis('off')\ntry:\n image = mpimg.imread('med_' + fig_str); axes[1,3].imshow(image); axes[1,3].axis('off')\nexcept FileNotFoundError: axes[1,3].axis('off')\n\nplt.show()\nf.savefig('brain_age_ct-str'+my_str+'.svg', dpi = 1200, bbox_inches = 'tight', pad_inches = 0)\n\n\n# Figure 2C (bottom)\n\n# In[20]:\n\n\n# Age effects\nf, axes = plt.subplots(2, 4)\n# f.suptitle('age')\nf.set_figwidth(4)\nf.set_figheight(2)\nplt.subplots_adjust(wspace=0, hspace=0)\n\nmy_str = '_sex1_frwd'\n# column 0:\nfig_str = 'lh_ac_age'+my_str+'.png'\ntry:\n# axes[0,0].set_title('Thickness (left)')\n image = mpimg.imread('lat_' + fig_str); axes[0,0].imshow(image); axes[0,0].axis('off')\nexcept FileNotFoundError: axes[0,0].axis('off')\ntry:\n image = mpimg.imread('med_' + fig_str); axes[1,0].imshow(image); axes[1,0].axis('off')\nexcept FileNotFoundError: axes[1,0].axis('off')\n \n# column 1:\nfig_str = 'rh_ac_age'+my_str+'.png'\ntry:\n# axes[0,1].set_title('Thickness (right)')\n image = mpimg.imread('lat_' + fig_str); axes[0,1].imshow(image); axes[0,1].axis('off')\nexcept FileNotFoundError: axes[0,1].axis('off')\ntry:\n image = mpimg.imread('med_' + fig_str); axes[1,1].imshow(image); axes[1,1].axis('off')\nexcept FileNotFoundError: axes[1,1].axis('off')\n \n# column 2:\nfig_str = 'lh_mc_age'+my_str+'.png'\ntry:\n# axes[0,2].set_title('Ave. ctrb. (left)')\n image = mpimg.imread('lat_' + fig_str); axes[0,2].imshow(image); axes[0,2].axis('off')\nexcept FileNotFoundError: axes[0,2].axis('off')\ntry:\n image = mpimg.imread('med_' + fig_str); axes[1,2].imshow(image); axes[1,2].axis('off')\nexcept FileNotFoundError: axes[1,2].axis('off')\n \n# column 3:\nfig_str = 'rh_mc_age'+my_str+'.png'\ntry:\n# axes[0,3].set_title('Ave. ctrb. 
(right)')\n image = mpimg.imread('lat_' + fig_str); axes[0,3].imshow(image); axes[0,3].axis('off')\nexcept FileNotFoundError: axes[0,3].axis('off')\ntry:\n image = mpimg.imread('med_' + fig_str); axes[1,3].imshow(image); axes[1,3].axis('off')\nexcept FileNotFoundError: axes[1,3].axis('off')\n\nplt.show()\nf.savefig('brain_age_ac-mc'+my_str+'.svg', dpi = 1200, bbox_inches = 'tight', pad_inches = 0)\n\n\n# eFigure 1A\n\n# In[21]:\n\n\n# Age effects\nf, axes = plt.subplots(2, 8)\n# f.suptitle('age')\nf.set_figwidth(8)\nf.set_figheight(2)\nplt.subplots_adjust(wspace=0, hspace=0)\n\nmy_str = '_sex0_frwd'\n# column 0:\nfig_str = 'lh_ct_age'+my_str+'.png'\ntry:\n# axes[0,0].set_title('Thickness (left)')\n image = mpimg.imread('lat_' + fig_str); axes[0,0].imshow(image); axes[0,0].axis('off')\nexcept FileNotFoundError: axes[0,0].axis('off')\ntry:\n image = mpimg.imread('med_' + fig_str); axes[1,0].imshow(image); axes[1,0].axis('off')\nexcept FileNotFoundError: axes[1,0].axis('off')\n \n# column 1:\nfig_str = 'rh_ct_age'+my_str+'.png'\ntry:\n# axes[0,1].set_title('Thickness (right)')\n image = mpimg.imread('lat_' + fig_str); axes[0,1].imshow(image); axes[0,1].axis('off')\nexcept FileNotFoundError: axes[0,1].axis('off')\ntry:\n image = mpimg.imread('med_' + fig_str); axes[1,1].imshow(image); axes[1,1].axis('off')\nexcept FileNotFoundError: axes[1,1].axis('off')\n \n# column 2:\nfig_str = 'lh_str_age'+my_str+'.png'\ntry:\n# axes[0,2].set_title('Degree (left)')\n image = mpimg.imread('lat_' + fig_str); axes[0,2].imshow(image); axes[0,2].axis('off')\nexcept FileNotFoundError: axes[0,2].axis('off')\ntry:\n image = mpimg.imread('med_' + fig_str); axes[1,2].imshow(image); axes[1,2].axis('off')\nexcept FileNotFoundError: axes[1,2].axis('off')\n \n# column 3:\nfig_str = 'rh_str_age'+my_str+'.png'\ntry:\n# axes[0,3].set_title('Degree (right)')\n image = mpimg.imread('lat_' + fig_str); axes[0,3].imshow(image); axes[0,3].axis('off')\nexcept FileNotFoundError: axes[0,3].axis('off')\ntry:\n image = mpimg.imread('med_' + fig_str); axes[1,3].imshow(image); axes[1,3].axis('off')\nexcept FileNotFoundError: axes[1,3].axis('off')\n \n# column 4:\nfig_str = 'lh_ac_age'+my_str+'.png'\ntry:\n# axes[0,4].set_title('Ave. ctrb. (left)')\n image = mpimg.imread('lat_' + fig_str); axes[0,4].imshow(image); axes[0,4].axis('off')\nexcept FileNotFoundError: axes[0,4].axis('off')\ntry:\n image = mpimg.imread('med_' + fig_str); axes[1,4].imshow(image); axes[1,4].axis('off')\nexcept FileNotFoundError: axes[1,4].axis('off')\n \n# column 5:\nfig_str = 'rh_ac_age'+my_str+'.png'\ntry:\n# axes[0,5].set_title('Ave. ctrb. (right)')\n image = mpimg.imread('lat_' + fig_str); axes[0,5].imshow(image); axes[0,5].axis('off')\nexcept FileNotFoundError: axes[0,5].axis('off')\ntry:\n image = mpimg.imread('med_' + fig_str); axes[1,5].imshow(image); axes[1,5].axis('off')\nexcept FileNotFoundError: axes[1,5].axis('off')\n \n# column 6:\nfig_str = 'lh_mc_age'+my_str+'.png'\ntry:\n# axes[0,6].set_title('Mod. ctrb. (left)')\n image = mpimg.imread('lat_' + fig_str); axes[0,6].imshow(image); axes[0,6].axis('off')\nexcept FileNotFoundError: axes[0,6].axis('off')\ntry:\n image = mpimg.imread('med_' + fig_str); axes[1,6].imshow(image); axes[1,6].axis('off')\nexcept FileNotFoundError: axes[1,6].axis('off')\n \n# column 7:\nfig_str = 'rh_mc_age'+my_str+'.png'\ntry:\n# axes[0,7].set_title('Mod. ctrb. 
(right)')\n image = mpimg.imread('lat_' + fig_str); axes[0,7].imshow(image); axes[0,7].axis('off')\nexcept FileNotFoundError: axes[0,7].axis('off')\ntry:\n image = mpimg.imread('med_' + fig_str); axes[1,7].imshow(image); axes[1,7].axis('off')\nexcept FileNotFoundError: axes[1,7].axis('off')\n\nplt.show()\nf.savefig('brain_age'+my_str+'.svg', dpi = 1200, bbox_inches = 'tight', pad_inches = 0)\n\n"
]
| [
[
"pandas.concat",
"numpy.logical_and",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"matplotlib.image.imread",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
]
]
|
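The notebook above converts the forward model's endpoint predictions into an annualized rate of change via compound growth. A self-contained sketch of that computation with made-up endpoint values (the real script reads them from the nispat forward-model outputs):

```python
import numpy as np

elapsed_time = 14.0          # years spanned by the synthetic age axis (illustrative)
y_start, y_end = 2.5, 3.1    # made-up model predictions at the two age endpoints

x = (y_end / y_start) - 1                                      # total fractional change
rate = (np.power(1 + np.abs(x), 1 / elapsed_time) - 1) * 100   # compound % change per year
if x < 0:
    rate *= -1                                                 # restore the sign of the change

print(round(rate, 2))        # ~1.55 (% per year)
```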
RuslanAgishev/crazyflie_ros | [
"892426ce70cdb4708dacd1933877a6e8f66c9286",
"892426ce70cdb4708dacd1933877a6e8f66c9286"
]
| [
"crazyflie_demo/scripts/adaptive_formation_control/gradient_interactive.py",
"crazyflie_demo/scripts/imp_cargo.py"
]
| [
"#!/usr/bin/env python\n\nimport numpy as np\nfrom numpy.linalg import norm\nimport matplotlib.pyplot as plt\nfrom matplotlib import collections\nfrom scipy.ndimage.morphology import distance_transform_edt as bwdist\nfrom math import *\nimport random\nfrom impedance_modeles import *\nimport time\n\nfrom progress.bar import FillingCirclesBar\nfrom tasks import *\nfrom threading import Thread\nfrom multiprocessing import Process\nimport os\nimport swarmlib\n\n\"\"\" ROS \"\"\"\nimport rospy\nfrom geometry_msgs.msg import TransformStamped\n\ndef poly_area(x,y):\n # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates\n # https://en.wikipedia.org/wiki/Shoelace_formula\n return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))\n\ndef meters2grid(pose_m, nrows=500, ncols=500):\n # [0, 0](m) -> [250, 250]\n # [1, 0](m) -> [250+100, 250]\n # [0,-1](m) -> [250, 250-100]\n pose_on_grid = np.array(pose_m)*100 + np.array([ncols/2, nrows/2])\n return np.array( pose_on_grid, dtype=int)\ndef grid2meters(pose_grid, nrows=500, ncols=500):\n # [250, 250] -> [0, 0](m)\n # [250+100, 250] -> [1, 0](m)\n # [250, 250-100] -> [0,-1](m)\n pose_meters = ( np.array(pose_grid) - np.array([ncols/2, nrows/2]) ) / 100.0\n return pose_meters\n\ndef gradient_planner(f, current_point, ncols=500, nrows=500):\n \"\"\"\n GradientBasedPlanner : This function computes the next_point\n given current location, goal location and potential map, f.\n It also returns mean velocity, V, of the gradient map in current point.\n \"\"\"\n [gy, gx] = np.gradient(-f);\n iy, ix = np.array( meters2grid(current_point), dtype=int )\n w = 10 # smoothing window size for gradient-velocity\n vx = np.mean(gx[ix-int(w/2) : ix+int(w/2), iy-int(w/2) : iy+int(w/2)])\n vy = np.mean(gy[ix-int(w/2) : ix+int(w/2), iy-int(w/2) : iy+int(w/2)])\n V = np.array([vx, vy])\n dt = 0.1 / norm(V);\n next_point = current_point + dt*V;\n\n return next_point, V\n\ndef combined_potential(obstacles_poses, goal, repulsive_coef=200, attractive_coef=1./700, nrows=500, ncols=500):\n \"\"\" Repulsive potential \"\"\"\n obstacles_map = map(obstacles_poses)\n goal = meters2grid(goal)\n d = bwdist(obstacles_map==0);\n d2 = (d/100.) 
+ 1; # Rescale and transform distances\n    d0 = 2;\n    nu = repulsive_coef;\n    repulsive = nu*((1./d2 - 1./d0)**2);\n    repulsive [d2 > d0] = 0;\n    \"\"\" Attractive potential \"\"\"\n    [x, y] = np.meshgrid(np.arange(ncols), np.arange(nrows))\n    xi = attractive_coef;\n    attractive = xi * ( (x - goal[0])**2 + (y - goal[1])**2 );\n    \"\"\" Combine terms \"\"\"\n    f = attractive + repulsive;\n    return f\n\ndef map(obstacles_poses, nrows=500, ncols=500):\n    \"\"\" Obstacles map \"\"\"\n    obstacles_map = np.zeros((nrows, ncols));\n    [x, y] = np.meshgrid(np.arange(ncols), np.arange(nrows))\n    for pose in obstacles_poses:\n        pose = meters2grid(pose)\n        x0 = pose[0]; y0 = pose[1]\n        # cylindrical obstacles\n        t = ((x - x0)**2 + (y - y0)**2) < (100*R_obstacles)**2\n        obstacles_map[t] = 1;\n    # rectangular obstacles\n    obstacles_map[400:, 130:150] = 1;\n    obstacles_map[130:150, :200] = 1;\n    obstacles_map[330:380, 300:] = 1;\n    return obstacles_map\n\ndef move_obstacles(obstacles_poses, obstacles_goal_poses):\n    \"\"\" All of the obstacles tend to go to the origin, (0,0) - point \"\"\"\n    # for pose in obstacles_poses:\n    #   dx = random.uniform(0, 0.03); dy = random.uniform(0,0.03);\n    #   pose[0] -= np.sign(pose[0])*dx;    pose[1] -= np.sign(pose[1])*dy;\n\n    \"\"\" Each obstacle tends to go to its selected goal point with random speed \"\"\"\n    for p in range(len(obstacles_poses)):\n        pose = obstacles_poses[p]; goal = obstacles_goal_poses[p]\n        dx, dy = (goal - pose) / norm(goal-pose) * 0.05#random.uniform(0,0.05)\n        pose[0] += dx; pose[1] += dy;\n\n    return obstacles_poses\n\n\ndef formation(num_robots, leader_des, v, R_swarm):\n    if num_robots<=1: return []\n    u = np.array([-v[1], v[0]])\n    des4 = leader_des - v*R_swarm*sqrt(3)                 # follower\n    if num_robots==2: return [des4]\n    des2 = leader_des - v*R_swarm*sqrt(3)/2 + u*R_swarm/2 # follower\n    des3 = leader_des - v*R_swarm*sqrt(3)/2 - u*R_swarm/2 # follower\n    if num_robots==3: return [des2, des3]\n    \n    return [des2, des3, des4]\n\n\"\"\" initialization \"\"\"\nanimate              = 1   # show 1-each frame or 0-just final configuration\nrandom_obstacles     = 1   # randomly distributed obstacles on the map\nnum_random_obstacles = 8   # number of random circular obstacles on the map\nnum_robots           = 4   # number of drones in formation\nmoving_obstacles     = 1   # 0-static or 1-dynamic obstacles\nimpedance            = 1   # impedance links between the leader and followers (leader's velocity)\nformation_gradient   = 1   # followers are attracted to their formation positions and repelled from obstacles\ndraw_gradients       = 1   # 1-gradients plot, 0-grid\npostprocessing       = 1\n\"\"\" human guided swarm params \"\"\"\ninteractive          = 0   # 1-human guided swarm, 0-potential fields as a planner to goal pose\nhuman_name           = 'palm' # vicon mocap object\npos_coef             = 3.0 # scale of the leader's movement relative to the human operator\ninitialized          = False # always starts as False: used for relative position control\nmax_its              = 1000 if interactive else 100 # max number of allowed iters for formation to reach the goal\n# movie writer\nprogress_bar = FillingCirclesBar('Number of Iterations', max=max_its)\nshould_write_movie = 0; movie_file_name = os.getcwd()+'/videos/output.avi'\nmovie_writer = get_movie_writer(should_write_movie, 'Simulation Potential Fields', movie_fps=10., plot_pause_len=0.01)\n\nR_obstacles = 0.05 # [m]\nrepulsive_coef = 200\nattractive_coef = 1./700\nR_swarm = 0.3 # [m]\nstart = np.array([-1.8, 1.8]); goal = np.array([1.8, -1.8])\nV0 = (goal - start) / norm(goal-start)    # initial movement direction, |V0| = 1\nU0 = np.array([-V0[1], V0[0]]) / norm(V0) 
# perpendicular to initial movement direction, |U0|=1\nimp_pose_prev = np.array( [0,0] )\nimp_vel_prev = np.array( [0,0] )\nimp_time_prev = time.time()\n\nif random_obstacles:\n    obstacles_poses      = np.random.uniform(low=-2.5, high=2.5, size=(num_random_obstacles,2)) # randomly located obstacles\n    obstacles_goal_poses = np.random.uniform(low=-1.3, high=1.3, size=(num_random_obstacles,2)) # randomly located obstacle goal poses\nelse:\n    obstacles_poses      = np.array([[-2, 1], [1.5, 0.5], [-1.0, 1.5], [0.1, 0.1], [1, -2], [-1.8, -1.8]]) # 2D - coordinates [m]\n    obstacles_goal_poses = np.array([[-0, 0], [0.0, 0.0], [ 0.0, 0.0], [0.0, 0.0], [0, 0], [ 0.0, 0.0]])\n\nif interactive:\n    def human_pos_callback(data):\n        global human_pose\n        global human_yaw\n        human_pose = np.array( [data.transform.translation.x, data.transform.translation.y, data.transform.translation.z] )\n        # human_yaw = np.array( [data.transform.rotation.x, data.transform.rotation.y, data.transform.rotation.z, data.transform.rotation.w] )\n\n    pos_sub = rospy.Subscriber('/vicon/' + human_name + '/' + human_name, TransformStamped, human_pos_callback)\n    time.sleep(1)\n\n\n\"\"\" Main loop \"\"\"\nrospy.init_node('gradient_interactive', anonymous=True)\n# drones polygonal formation\nroute1 = start # leader\ncurrent_point1 = start\nrobots_poses = [start] + formation(num_robots, start, V0, R_swarm)\nroutes = [route1] + robots_poses[1:]\ncentroid_route = [ sum([p[0] for p in robots_poses])/len(robots_poses), sum([p[1] for p in robots_poses])/len(robots_poses) ]\ndes_poses = robots_poses\nvels = [];\nfor r in range(num_robots): vels.append([])\nnorm_vels = [];\nfor r in range(num_robots): norm_vels.append([])\n\n# variables for postprocessing and performance estimation\narea_array = []\nstart_time = time.time()\n\nfig = plt.figure(figsize=(10, 10))\nwith movie_writer.saving(fig, movie_file_name, max_its) if should_write_movie else get_dummy_context_mgr():\n    for i in range(max_its):\n        if moving_obstacles: obstacles_poses = move_obstacles(obstacles_poses, obstacles_goal_poses)\n\n        \"\"\" Leader's pose update \"\"\"\n        if interactive:\n            f1 = combined_potential(obstacles_poses, current_point1, repulsive_coef=repulsive_coef, attractive_coef=attractive_coef)\n            # human palm pose and velocity using Vicon motion capture\n            if not initialized:\n                human_pose_init = human_pose[:2]\n                drone1_pose_init = start\n                initialized = True\n            dx, dy = human_pose[:2] - human_pose_init\n            des_poses[0] = np.array([ drone1_pose_init[0] + pos_coef*dx, drone1_pose_init[1] + pos_coef*dy ])\n            vels[0] = hum_vel(human_pose)\n            # TODO: implement rotation of the swarm relative to human orientation: change direction variable\n            # for instance: direction=[cos(hum_yaw), sin(hum_yaw)]\n            # direction = np.array( des_poses[0] - human_pose_init ) / norm(des_poses[0] - human_pose_init)\n        else:\n            f1 = combined_potential(obstacles_poses, goal)\n            des_poses[0], vels[0] = gradient_planner(f1, current_point1)\n        norm_vels[0].append(norm(vels[0]))\n\n        # drones polygonal formation\n        direction = ( goal - des_poses[0] ) / norm(goal - des_poses[0])\n        des_poses[1:] = formation(num_robots, des_poses[0], direction, R_swarm)\n        v = direction; u = np.array([-v[1], v[0]])\n\n        if impedance:\n            # drone positions are corrected according to the impedance model\n            # based on the leader's velocity\n            imp_pose, imp_vel, imp_time_prev = velocity_imp(vels[0], imp_pose_prev, imp_vel_prev, imp_time_prev, mode='critically_damped')\n            imp_pose_prev = imp_pose\n            imp_vel_prev = imp_vel\n\n            imp_scale = 0.1 if interactive else 0.012\n            # 
des_poses[0] += 0.1*imp_scale * imp_pose\n if num_robots>=2:\n des_poses[1] -= 2*np.dot(imp_scale*imp_pose, u)/norm(u) # impedance correction term is projected in u-vector direction\n if num_robots>=3:\n des_poses[2] += 2*np.dot(imp_scale*imp_pose, u)/norm(u) # u-vector direction\n if num_robots>=4:\n des_poses[3] -= np.dot(imp_scale*imp_pose, v)/norm(v) # v-vector direction\n\n if formation_gradient:\n # following drones are attracting to desired points - vertices of the polygonal formation\n for p in range(1,num_robots):\n f = combined_potential(obstacles_poses, des_poses[p])\n des_poses[p], vels[p] = gradient_planner(f, des_poses[p])\n norm_vels[p].append(norm(vels[p]))\n\n for r in range(num_robots):\n routes[r] = np.vstack([routes[r], des_poses[r]])\n\n current_point1 = des_poses[0] # update current point of the leader\n\n pp = des_poses\n centroid = [ sum([p[0] for p in pp])/len(pp), sum([p[1] for p in pp])/len(pp) ]\n centroid_route = np.vstack([centroid_route, centroid])\n dist_to_goal = norm(centroid - goal)\n if dist_to_goal < 1.5*R_swarm:\n print('\\nReached the goal')\n break\n\n progress_bar.next()\n plt.cla()\n\n draw_map(start, goal, obstacles_poses, R_obstacles, f1, draw_gradients=draw_gradients)\n draw_robots(current_point1, routes, num_robots, robots_poses, centroid, vels[0])\n if animate:\n plt.draw()\n plt.pause(0.01)\n\n if should_write_movie:\n movie_writer.grab_frame()\n # print('Current simulation time: ', time.time()-start_time)\n print('\\nDone')\n progress_bar.finish()\n end_time = time.time()\n print('Simulation execution time: ', round(end_time-start_time,2))\n plt.show()\n\nif postprocessing:\n plt.figure()\n plt.title(\"Centroid's trajectory\")\n plt.plot(centroid_route[:,0], centroid_route[:,1])\n for route in routes:\n plt.plot(route[:,0], route[:,1], '--')\n plt.grid()\n\n plt.figure()\n plt.title(\"1st Drone velocity, <V>=\"+str(np.mean(np.array(norm_vels[0])))+\" m/s\")\n for r in range(num_robots):\n plt.plot(norm_vels[r])\n plt.xlabel('time')\n plt.ylabel('vel1, [m/s]')\n plt.grid()\n\n for i in range(len(routes[0])):\n X = np.array([]); Y = np.array([])\n for r in range(num_robots):\n X = np.append( X, routes[r][i,0] )\n Y = np.append( Y, routes[r][i,1] )\n area_array.append(poly_area(X,Y))\n\n plt.figure()\n plt.title(\"Area of robots' formation\")\n plt.plot(area_array)\n plt.xlabel('time')\n plt.ylabel('Formation area, [m^2]')\n plt.grid()\n plt.draw()\n plt.pause(1)\n input(\"Hit Enter To Close\")\n plt.close('all')\n\n\n# TODO:\n# local minimum problem (FM2 - algorithm: https://pythonhosted.org/scikit-fmm/)\n# impedance controlled shape of the formation: area(velocity)\n# postprocessing: trajectories smoothness, etc. compare imp modeles:\n# oscillation, underdamped, critically damped, overdamped\n# velocity plot for all drones, acc, jerk ?\n",
"#!/usr/bin/env python\n\nfrom __future__ import division\nimport rospy\nimport tf\nfrom geometry_msgs.msg import PoseStamped, TransformStamped\nfrom math import *\nimport time\nfrom std_srvs.srv import Empty\nfrom tf2_msgs.msg import TFMessage\nfrom time import sleep\nimport message_filters\nimport matplotlib.pyplot as plt\n\nfrom message_filters import TimeSynchronizer, Subscriber\nimport numpy as np\n\nimport swarmlib\n\nfrom crazyflie_driver.msg import FullState\nimport geometry_msgs\nimport tf_conversions\n\nimport crazyflie\nfrom sensor_msgs.msg import Joy\n\nnp.set_printoptions(formatter={'float': '{: 0.2f}'.format})\n\nimp_pose_prev = np.array( [0,0,0] )\nimp_vel_prev = np.array( [0,0,0] )\nimp_time_prev = time.time()\nrotated = False\nimpedance = False\nDIST = 0.5 # start distance between drones\n\n\ndef joy_cb(joystick):\n\tglobal raw\n\tglobal column\n\tglobal width\n\tglobal imp_on\n\tglobal imp_off\n\traw = joystick.buttons[3]\n\tcolumn = joystick.buttons[2]\n\twidth = joystick.axes[1]\n\timp_on = joystick.buttons[0]\n\timp_off = joystick.buttons[1]\n\n\n# def tag_game(human, cf1):\ndef tag_game(human, cf1, cf2):\n\thuman_pose = swarmlib.get_coord(human)\n\tdrone1_pose = swarmlib.get_coord(cf1)\n\tdrone2_pose = swarmlib.get_coord(cf2)\n\n\thum_vel = swarmlib.hum_vel(human_pose)\n\tl = 1.0\n\t# HUMAN IMPEDANCE\n\tglobal imp_pose_prev\n\tglobal imp_vel_prev\n\tglobal imp_time_prev\n\tglobal rotated\n\tglobal impedance\n\tglobal DIST\n\n\t# define joystick buttons state\n\ttry:\n\t\tif raw==1:\n\t\t\trotated = True\n\t\telif column==1:\n\t\t\trotated = False\n\t\tif width == 1:\n\t\t\tDIST += 0.01\n\t\telif width == -1:\n\t\t\tDIST -= 0.01\n\t\telif imp_on == 1:\n\t\t\timpedance = 1\n\t\telif imp_off == 1:\n\t\t\timpedance = 0\n\texcept:\n\t\tpass\n\n\tif impedance:\n\t\tprint('Impedance control')\n\t\timp_pose, imp_vel, imp_time_prev = swarmlib.impedance_human(hum_vel, imp_pose_prev, imp_vel_prev, imp_time_prev)\n\t\timp_pose_prev = imp_pose\n\t\timp_vel_prev = imp_vel\n\telse:\n\t\tprint('No impedance control')\n\t\timp_pose = np.array([0,0,0])\n\n\tif rotated:\n\t\tdrone1_pose_goal = np.array([ human_pose[0] - l,\n\t\t\t\t\t\t\t\t \t human_pose[1],\n\t\t\t\t\t\t\t\t human_pose[2] + 0.1])\n\t\tdrone2_pose_goal = np.array([ drone1_pose_goal[0] - DIST,\n\t\t\t\t\t\t\t\t\t drone1_pose_goal[1],\n\t\t\t\t\t\t\t\t\t human_pose[2] + 0.1])\n\t\tdrone1_pose_goal[0] += abs( imp_pose[1]*0.5 )\n\t\tdrone2_pose_goal[0] -= abs( imp_pose[1]*0.5 )\n\n\telse:\n\t\tdrone1_pose_goal = np.array([ human_pose[0] - l,\n\t\t\t\t\t\t\t\t\t human_pose[1] - DIST/2,\n\t\t\t\t\t\t\t\t\t human_pose[2] + 0.1])\n\t\tdrone2_pose_goal = np.array([ drone1_pose_goal[0],\n\t\t\t\t\t\t\t\t\t drone1_pose_goal[1] + DIST,\n\t\t\t\t\t\t\t\t\t human_pose[2] + 0.1])\n\t\tdrone1_pose_goal[1] -= abs( imp_pose[0]*0.4 )\n\t\tdrone2_pose_goal[1] += abs( imp_pose[0]*0.4 )\n\n\t# TO FLY\n\tswarmlib.publish_goal_pos(drone1_pose_goal, 0, \"/crazyflie13\")\n\tswarmlib.publish_goal_pos(drone2_pose_goal, 0, \"/crazyflie15\")\n\n\t# TO VISUALIZE\n\tswarmlib.publish_pose(drone1_pose_goal, 0, \"drone1_pose_goal\")\n\tswarmlib.publish_pose(drone2_pose_goal, 0, \"drone2_pose_goal\")\n\tswarmlib.publish_pose(human_pose, 0, \"human_pose\")\n\n\ndef follower():\n\tstart_time = time.time()\n\n\thuman_sub = message_filters.Subscriber('/vicon/human/human', TransformStamped)\n\tcf1_sub = message_filters.Subscriber('/vicon/crazyflie13/crazyflie13', TransformStamped)\n\tcf2_sub = message_filters.Subscriber('/vicon/crazyflie15/crazyflie15', 
TransformStamped)\n\tjoy_sub = rospy.Subscriber(\"/joy\", Joy, joy_cb)\n\t\n\tts = message_filters.ApproximateTimeSynchronizer([human_sub, cf1_sub, cf2_sub], 10, 5)\n\n\tts.registerCallback(tag_game)\n\trospy.spin()\n\n\nif __name__ == '__main__':\n\n\trospy.init_node('follow_multiple', anonymous=True)\n\n\n\trospy.loginfo(\"Takeoff\")\n\tHEIGHT = 0.3\n\tcf1 = crazyflie.Crazyflie(\"crazyflie13\", \"/vicon/crazyflie13/crazyflie13\")\n\tcf1.setParam(\"commander/enHighLevel\", 1)\n\tcf1.takeoff(targetHeight = HEIGHT, duration = 1.0)\n\tcf2 = crazyflie.Crazyflie(\"crazyflie15\", \"/vicon/crazyflie15/crazyflie15\")\n\tcf2.setParam(\"commander/enHighLevel\", 1)\n\tcf2.takeoff(targetHeight = HEIGHT, duration = 1.0)\n\ttime.sleep(3.0)\n\n\n\trospy.loginfo(\"Following human!\")\n\ttry:\n\t\tfollower()\n\texcept KeyboardInterrupt:\n\t\tpass\n\n\n\trospy.loginfo(\"Try to land\")\n\n\ttry:\n\t\tcf1.stop()\n\texcept:\n\t\tpass\n\ttry:\n\t\tcf2.stop()\n\texcept:\n\t\tpass\n\n"
]
| [
[
"numpy.dot",
"matplotlib.pyplot.plot",
"numpy.roll",
"numpy.arange",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.append",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"scipy.ndimage.morphology.distance_transform_edt",
"numpy.gradient",
"matplotlib.pyplot.cla",
"numpy.linalg.norm",
"matplotlib.pyplot.draw",
"numpy.random.uniform",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.pause",
"numpy.vstack"
],
[
"numpy.set_printoptions",
"numpy.array"
]
]
|
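Both scripts in the record above consume an impedance correction (`imp_pose`, `imp_vel`, `imp_time_prev`) produced by `swarmlib.impedance_human`, which is imported but not included in the record. Below is a minimal sketch of the discrete mass-spring-damper update such a helper typically performs; the function name `impedance_step` and the gains M, D, K are illustrative assumptions, not the repository's actual values:

    import time
    import numpy as np

    def impedance_step(force, pose_prev, vel_prev, t_prev, M=1.0, D=2.0, K=4.0):
        # One explicit Euler step of M*x'' + D*x' + K*x = F, vectorized over axes.
        force, pose_prev, vel_prev = map(np.asarray, (force, pose_prev, vel_prev))
        t_now = time.time()
        dt = t_now - t_prev
        acc = (force - D * vel_prev - K * pose_prev) / M
        vel = vel_prev + acc * dt
        pose = pose_prev + vel * dt
        return pose, vel, t_now

Under this model the returned pose plays the role of `imp_pose` above: it decays back to zero once the driving force stops, with oscillatory, underdamped, or overdamped behavior depending on D and K.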
Huzzi-Aliaas/Internship_HB2021 | [
"f33bbc1fcee22b54b55d94eba166c9e409e103f9"
]
| [
"results/numExp00_NSE/main_NSE_test.py"
]
| [
"import sys; sys.path.append('../../')\nimport numpy as np\nfrom gnse.solver import SolverBaseClass, SimpleSplitStepSolver\nfrom gnse.tools import plot_evolution\nfrom gnse.config import FTFREQ\n\n\ndef main():\n # -- SET PARAMETERS FOR COMPUTATIONAL DOMAIN\n tMax = 40.0 # (fs) bound for time mesh\n Nt = 2 ** 14 # (-) number of sample points: t-axis\n zMax = np.pi # (micron) upper limit for propagation routine\n Nz = 1000 # (-) number of sample points: z-axis\n nSkip = 2 # (-) keep only every nskip-th system state\n\n # -- SET FIBER PARAMETERS\n b2 = -1.0 # (fs^2/micron)\n beta = lambda w: 0.5 * b2 * w * w # (1/micron)\n beta1 = lambda w: b2 * w # (fs/micron)\n beta2 = lambda w: b2 # (fs^2/micron)\n gamma = 1e-8 # (W/micron)\n\n # -- SET PULSE PARAMETERS\n c0 = 0.29979 # (fs/micron) free space speed of light\n t0 = 1.0 # (fs) pulse duration\n\n P0 = np.abs(beta2(0)) / t0 / t0 / gamma\n u_sol = (\n lambda t, z: 2 * np.sqrt(P0) * np.exp(0.5j * gamma * P0 * z) / np.cosh(t / t0)\n )\n\n # -- INITIALIZE COMPUTATIONAL DOMAIN\n t = np.linspace(-tMax, tMax, Nt, endpoint=False)\n w = FTFREQ(t.size, d=t[1] - t[0]) * 2 * np.pi\n z = np.linspace(0, zMax, Nz + 1)\n\n # -- INITIALIZE SOLVER\n my_solver = SimpleSplitStepSolver(z, t, beta(w), gamma, nSkip=nSkip)\n\n # -- SET INITIAL CONDITION AND RUN\n A0_t = u_sol(t, 0)\n my_solver.solve(A0_t)\n\n # -- SHOW RESULTS\n plot_evolution(\n my_solver.z, my_solver.t, my_solver.utz, tLim=(-10, 10), wLim=(-20, 20)\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"numpy.sqrt",
"numpy.exp",
"numpy.cosh",
"numpy.linspace"
]
]
|
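The record above drives `SimpleSplitStepSolver` from `gnse.solver`, which is not included here. Below is a minimal sketch of one symmetric split-step Fourier step for the nonlinear Schrödinger equation the script tests: half a dispersion step applied in the frequency domain, a full Kerr step in the time domain, then another half dispersion step. The sign conventions and the name `split_step` are assumptions, not the package's actual implementation:

    import numpy as np

    def split_step(u, dz, beta_w, gamma):
        # beta_w: dispersion beta(w) sampled on the FFT frequency grid.
        half_lin = np.exp(1j * beta_w * dz / 2)            # half linear step
        u = np.fft.ifft(half_lin * np.fft.fft(u))
        u = u * np.exp(1j * gamma * np.abs(u) ** 2 * dz)   # full nonlinear step
        u = np.fft.ifft(half_lin * np.fft.fft(u))          # second half linear step
        return u

The symmetric (Strang) splitting makes each step second-order accurate in dz, which is why the scheme tolerates the fairly coarse Nz = 1000 grid used in the script.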
robotics-4-all/tektrain-robot-sw | [
"3a420f1c47e1cdcca76361c0a921a678f31e1ec1"
]
| [
"pidevices/sensors/sharp_gp20axxx0f.py"
]
| [
"from ..exceptions import OutOfRange\nfrom .distance_sensor import DistanceSensor\nfrom scipy import interpolate\nimport numpy\nimport time\n\n\nclass GP2Y0AxxxK0F(DistanceSensor):\n \"\"\"Sharp gp2y0axxxk0f family of ir distance sensors extends \n :class:`DistanceSensor`.\n \n Args:\n datasheet_data: A 2d numpy array with the measurements from the \n datasheet. The array goes from max voltage to min voltage and \n every entry is of type voltage, cm.\n adc: Instance of any adc class. \n interval (float): Interval between consecutive read.\n \"\"\"\n\n def __init__(self, datasheet_data, \n adc, interval, \n name='', max_data_length=0):\n \"\"\"Constructor\"\"\"\n\n super(GP2Y0AxxxK0F, self).__init__(name, max_data_length)\n self._min_volt = datasheet_data[len(datasheet_data) - 1, 0]\n self._max_volt = datasheet_data[0, 0]\n self.adc = adc\n self._interpol(datasheet_data)\n self._interval = interval\n\n self.start()\n\n @property\n def adc(self):\n \"\"\"Adc instance of any adc class.\"\"\"\n return self._adc\n\n @adc.setter\n def adc(self, value):\n \"\"\"Set adc\"\"\"\n self._adc = value\n\n def set_channel(self, channel):\n \"\"\"Set the adc channel\"\"\"\n if 0 <= channel and channel < 4:\n self._channel = channel\n else:\n self._channel = 0\n\n def _interpol(self, data):\n self._f_int = interpolate.interp1d(data[:, 0],\n data[:, 1])\n\n def start(self):\n \"\"\"Init hardware and os resources.\"\"\"\n\n if not len(self.adc.hardware_interfaces):\n self.adc.start()\n\n def read(self, n=1):\n \"\"\"Read a measurment.\n \n The result comes from the average of n measurments\n\n Args:\n n (int): The number of consecutive measurments. Defaults to 1.\n\n Returns:\n The measured distance.\n\n Raises:\n OutOfRange: If the measurment is out of min or max distance.\n \"\"\"\n\n adc_val = self.adc.read(channel=self._channel)\n\n # Check thresholds\n if adc_val < self._min_volt:\n adc_val = self._min_volt\n if adc_val > self._max_volt:\n adc_val = self._max_volt\n #raise OutOfRange(\"Out of min distance.\")\n\n adc_val = max(adc_val, self._min_volt)\n adc_val = min(adc_val, self._max_volt)\n\n return round(self._f_int(adc_val).item(0), 4)\n\n def stop(self):\n \"\"\"Free hardware and os resources.\"\"\"\n\n self.adc.stop()\n\n\nclass GP2Y0A21YK0F(GP2Y0AxxxK0F):\n \"\"\"Sharp gp2y0a21yk0f ir distance sensor extends :class:`GP2Y0AxxxK0F`\n \n Args:\n adc: Instance of any adc class. \n \"\"\"\n\n # Distance measuring characteristics from datasheet. (voltage, cm)\n INTER_DATA = numpy.array([[3.3, 7],\n [2.4, 10],\n [1.4, 20],\n [0.95, 30],\n [0.8, 40],\n [0.6, 50],\n [0.52, 60],\n [0.475, 70],\n [0.46, 80]])\n\n _INTERVAL = 0.040\n\n def __init__(self, adc, name='', max_data_length=0):\n \"\"\"Constructor.\"\"\"\n\n super(GP2Y0A21YK0F, self).__init__(self.INTER_DATA, adc,\n self._INTERVAL, name,\n max_data_length)\n\n\nclass GP2Y0A41SK0F(GP2Y0AxxxK0F):\n \"\"\"Shaprt gp2y0a21yk0f ir distance sensor extends :class:`GP2Y0AxxxK0F`\n\n Args:\n adc: Instance of any adc class. \n \"\"\"\n\n # Distance measuring characteristics from datasheet. 
(voltage, cm)\n INTER_DATA = numpy.array([[3.00, 3],\n [2.70, 4],\n [2.35, 5],\n [2.02, 6],\n [1.78, 7],\n [1.58, 8],\n [1.40, 9],\n [1.28, 10],\n [1.09, 12],\n [0.75, 14],\n [0.80, 16],\n [0.75, 18],\n [0.66, 20],\n [0.55, 25],\n [0.42, 30],\n [0.38, 35],\n [0.31, 40]])\n\n _INTERVAL = 0.018\n\n def __init__(self, adc, name='', max_data_length=0):\n \"\"\"Constructor.\"\"\"\n\n super(GP2Y0A41SK0F, self).__init__(self.INTER_DATA, adc,\n self._INTERVAL, name,\n max_data_length)\n"
]
| [
[
"numpy.array",
"scipy.interpolate.interp1d"
]
]
|
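A usage sketch for the sensor classes in the record above, with a stand-in ADC so the snippet runs without hardware. `MockADC` and its fixed 1.28 V reading are illustrative assumptions; the import path follows the record's file layout and assumes the `pidevices` package (which provides `DistanceSensor`) is installed:

    from pidevices.sensors.sharp_gp20axxx0f import GP2Y0A21YK0F

    class MockADC:
        # Pretends to be a started adc: one hardware interface, constant reading.
        hardware_interfaces = [object()]

        def read(self, channel=0):
            return 1.28   # volts

        def start(self):
            pass

        def stop(self):
            pass

    sensor = GP2Y0A21YK0F(adc=MockADC())
    sensor.set_channel(0)   # read() uses self._channel, so set it first
    print(sensor.read())    # distance in cm interpolated from INTER_DATA
    sensor.stop()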
qhduan/zhtts | [
"25d707a8496920055b21e0514e138b728f34d3be"
]
| [
"app.py"
]
| [
"\nfrom zhtts import TTS\n\ntts = TTS()\n\nimport io\nimport time\nfrom pathlib import Path\nimport scipy\nfrom scipy.io import wavfile\n\nfrom flask import Flask, Response, render_template, request\n# from flask_cors import CORS\n\napp = Flask(\"zhtts\")\n# CORS(app)\n\[email protected](\"/api/tts\")\ndef api_tts():\n text = request.args.get(\"text\", \"\").strip()\n audio = tts.synthesis(text)\n\n with io.BytesIO() as out:\n wavfile.write(out, 24000, audio)\n return Response(out.getvalue(), mimetype=\"audio/wav\")\n\[email protected](\"/\")\ndef index():\n return render_template(\"index.html\")\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000)\n"
]
| [
[
"scipy.io.wavfile.write"
]
]
|
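With the Flask app from the record above running locally, the `/api/tts` endpoint returns a WAV payload. A client sketch, assuming the default host and port from `app.run`:

    import urllib.parse
    import urllib.request

    # URL-encode the text and save the synthesized audio to disk.
    text = urllib.parse.quote("你好,世界")
    url = "http://localhost:5000/api/tts?text=" + text
    with urllib.request.urlopen(url) as resp, open("out.wav", "wb") as f:
        f.write(resp.read())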
SiyanZhou97/VAE | [
"6e19f9dc865155f45554a462f4599f7ebc6a40db"
]
| [
"autoencodercmp/model_eval.py"
]
| [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.decomposition import FactorAnalysis, PCA\nimport tensorly as tl\nfrom tensorly import unfold as tl_unfold\nfrom tensorly.decomposition import parafac,non_negative_parafac\nfrom sklearn.preprocessing import scale\nfrom sklearn.preprocessing import normalize\nimport copy\nimport os\nimport pickle\nfrom sklearn.feature_selection import mutual_info_classif\nfrom tqdm import tqdm_notebook as tqdm\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold, StratifiedKFold\n\nimport keras\nfrom IPython.display import clear_output\nimport pydot\nimport tensorflow as tf\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical, np_utils,plot_model\nfrom keras import backend as K\nfrom keras.models import Model, Sequential\nfrom keras.layers import Embedding, Dense, TimeDistributed, LSTM, Activation, Flatten\nfrom keras.layers import Dropout, Lambda, RepeatVector,Masking,Input,Bidirectional\nfrom keras.optimizers import SGD, RMSprop, Adam\nfrom keras import objectives\n\n\nfrom models import create_lstm_ae, create_binned_lstm_vae, create_lstm_vae\nfrom align_maze import align_maze\nfrom data_generator import DataGenerator\n\n\n#============================================\n# evaluate models by reconstruction mse loss\n#============================================\n\n\ndef ae_eval(bin_training_data,bin_validation_data,validation,latent_dim,latent_fac,epochs,batch_size):\n #epochs: int or str(early_stop)\n _, n_bin, n_neuron = bin_training_data.shape\n ae, _, _ = create_lstm_ae(input_dim=n_neuron, timesteps=n_bin, latent_dim=latent_dim,\n latent_fac=latent_fac)\n if validation == True:\n ae.fit(bin_training_data, bin_training_data, epochs=epochs, batch_size=batch_size, verbose=0,\n validation_data=(bin_validation_data, bin_validation_data))\n else:\n ae.fit(bin_training_data, bin_training_data, epochs=epochs, batch_size=batch_size, verbose=0)\n\n val_reconstruction = ae.predict(bin_validation_data, verbose=0)\n mse = tf.keras.losses.MeanSquaredError()\n mse_val = mse(bin_validation_data, val_reconstruction).numpy()\n\n train_reconstruction = ae.predict(bin_training_data, verbose=0)\n mse = tf.keras.losses.MeanSquaredError()\n mse_train = mse(bin_training_data, train_reconstruction).numpy()\n\n return mse_val,mse_train\n\n\ndef vae_binned_eval(bin_training_data,bin_validation_data,validation,latent_dim,latent_fac,epochs,batch_size):\n _, n_bin, n_neuron = bin_training_data.shape\n vae_binned, _, _ = create_binned_lstm_vae(input_dim=n_neuron, timesteps=n_bin, batch_size=batch_size,\n intermediate_dim=latent_dim, latent_dim=latent_dim,\n latent_fac=latent_fac, epsilon_std=1.)\n if validation == True:\n vae_binned.fit(bin_training_data, bin_training_data, epochs=epochs, batch_size=batch_size, verbose=0,\n validation_data=(bin_validation_data, bin_validation_data))\n else:\n vae_binned.fit(bin_training_data, bin_training_data, epochs=epochs, batch_size=batch_size, verbose=0)\n\n val_reconstruction = vae_binned.predict(bin_validation_data, batch_size=batch_size, verbose=0)\n mse = tf.keras.losses.MeanSquaredError()\n mse_val = mse(bin_validation_data, val_reconstruction).numpy()\n\n train_reconstruction = vae_binned.predict(bin_training_data, batch_size=batch_size, verbose=0)\n mse = tf.keras.losses.MeanSquaredError()\n mse_train = mse(bin_training_data, train_reconstruction).numpy()\n\n return mse_val, mse_train\n\n\ndef 
vae_eval(train_indexes, val_indexes,frame_trial, maze_position,\n nobin_training_data,nobin_validation_data,validation,latent_dim,latent_fac,epochs,batch_size=1):\n n_neuron=nobin_training_data[0].shape[-1]\n training_generator = DataGenerator(nobin_training_data, nobin_training_data, batch_size=batch_size)\n validation_generator = DataGenerator(nobin_validation_data, nobin_validation_data, batch_size=batch_size)\n vae, _, _ = create_lstm_vae(input_dim=n_neuron, timesteps=None, batch_size=batch_size,\n intermediate_dim=latent_dim, latent_dim=latent_dim,\n latent_fac=latent_fac, epsilon_std=1.)\n if validation == True:\n vae.fit_generator(generator=training_generator,\n validation_data=validation_generator,\n epochs=epochs, verbose=0)\n else:\n vae.fit_generator(generator=training_generator,\n epochs=epochs, verbose=0)\n reconstruct_train = []\n for i in range(len(nobin_training_data)):\n shape1, shape2 = nobin_training_data[i].shape\n reconstruct_train.append(vae.predict(nobin_training_data[i].reshape(1, shape1, shape2), verbose=0))\n\n reconstruct_val = []\n for i in range(len(nobin_validation_data)):\n shape1, shape2 = nobin_validation_data[i].shape\n reconstruct_val.append(vae.predict(nobin_validation_data[i].reshape(1, shape1, shape2), verbose=0))\n\n aligned_train_data = align_maze(train_indexes, nobin_training_data,\n frame_trial, maze_position)\n aligned_train_reconstruct = align_maze(train_indexes,reconstruct_train,\n frame_trial, maze_position, reshape=True)\n aligned_train_data[np.isnan(aligned_train_data)] = 0\n aligned_train_reconstruct[np.isnan(aligned_train_reconstruct)] = 0\n mse = tf.keras.losses.MeanSquaredError()\n mse_train = mse(aligned_train_data, aligned_train_reconstruct).numpy()\n\n aligned_val_data = align_maze(val_indexes, nobin_validation_data,\n frame_trial, maze_position)\n aligned_val_reconstruct = align_maze(val_indexes,reconstruct_val,\n frame_trial, maze_position, reshape=True)\n aligned_val_data[np.isnan(aligned_val_data)] = 0\n aligned_val_reconstruct[np.isnan(aligned_val_reconstruct)] = 0\n mse = tf.keras.losses.MeanSquaredError()\n mse_val = mse(aligned_val_data, aligned_val_reconstruct).numpy()\n\n return mse_val, mse_train\n"
]
| [
[
"numpy.isnan",
"tensorflow.keras.losses.MeanSquaredError"
]
]
|
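Each helper in the record above returns `(mse_val, mse_train)` for a single train/validation split. Below is a hypothetical k-fold driver around `ae_eval` (assumed to be in scope); the data shape `(n_trials, n_bin, n_neuron)` and the hyperparameter defaults are assumptions, not values from the repository:

    import numpy as np
    from sklearn.model_selection import KFold

    def crossval_ae(data, latent_dim=8, latent_fac=4, epochs=50,
                    batch_size=16, n_splits=5):
        # Mean validation reconstruction MSE over k folds of the trials axis.
        scores = []
        for train_idx, val_idx in KFold(n_splits=n_splits, shuffle=True).split(data):
            mse_val, _ = ae_eval(data[train_idx], data[val_idx], validation=True,
                                 latent_dim=latent_dim, latent_fac=latent_fac,
                                 epochs=epochs, batch_size=batch_size)
            scores.append(mse_val)
        return float(np.mean(scores))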
drawsky/tensorflow1 | [
"152a180c64a9142294ca634a0a4c241e0e436fb0"
]
| [
"tensorflow/contrib/autograph/impl/api.py"
]
| [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Public API.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom functools import wraps\n\nfrom enum import Enum\n\n# pylint:disable=g-bad-import-order\nimport gast\nimport six\n# pylint:enable=g-bad-import-order\n\nfrom tensorflow.contrib.autograph.impl import config\nfrom tensorflow.contrib.autograph.impl import conversion\nfrom tensorflow.contrib.autograph.pyct import compiler\nfrom tensorflow.contrib.autograph.pyct import inspect_utils\nfrom tensorflow.contrib.autograph.pyct import parser\nfrom tensorflow.contrib.autograph.utils import builtins\nfrom tensorflow.contrib.autograph.utils import py_func\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import tf_inspect\n\n# TODO(mdan): Properly document the type hints.\n# TODO(mdan): Reduce the type hint information to (module, type).\n# (currently we require (module + class name, type))\n\n\ndef convert(recursive=False, verbose=False, arg_types=None):\n \"\"\"Decorator that compiles a function to graph mode.\n\n The decorator is dynamic - invoking compilation whenever the decorated\n function is called. This means the parameter values are known at compilation.\n\n Args:\n recursive: Whether to recusrively convert any functions that the decorator\n function may call.\n verbose: Whether to output the compiled code in the logs.\n arg_types: See to_graph.\n\n Returns:\n A decorator that compiles the given function to graph mode.\n\n Raises:\n ValueError: If any of the arguments are illegal.\n \"\"\"\n if arg_types is None:\n arg_types = {}\n\n def decorator(f):\n \"\"\"Decorator implementation.\"\"\"\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n return converted_call(f, recursive, verbose, arg_types, *args, **kwargs)\n\n # Sometimes the decorator is just desugared, making it impossible to detect.\n # This attribute makes detection easier.\n setattr(wrapper, '__pyct_is_compile_decorator', True)\n return wrapper\n\n return decorator\n\n\nclass RunMode(Enum):\n GRAPH = 1\n PY_FUNC = 2\n\n\ndef do_not_convert(run_as=RunMode.GRAPH, return_dtypes=None):\n \"\"\"Decorator that suppresses compilation of a function.\n\n Args:\n run_as: RunMode value. Whether to run the function as-is, or wrap it into\n a py_func.\n return_dtypes: See autograph.utils.py_func.wrap_py_func. 
Setting to None or\n empty list or tuple will create a dummy return value that can be used\n to set control dependencies.\n\n Returns:\n A decorator that wraps the original function.\n \"\"\"\n def decorator(f):\n \"\"\"Decorator implementation.\"\"\"\n\n @wraps(f)\n def graph_wrapper(*args, **kwargs):\n return f(*args, **kwargs)\n\n @wraps(f)\n def py_func_wrapper(*args, **kwargs):\n if kwargs:\n raise NotImplementedError(\n 'RunMode.PY_FUNC does not yet support kwargs')\n # TODO(mdan): Add support for kwargs.\n return py_func.wrap_py_func(\n f, return_dtypes, args, kwargs, use_dummy_return=not return_dtypes)\n\n if run_as == RunMode.GRAPH:\n wrapper = graph_wrapper\n elif run_as == RunMode.PY_FUNC:\n wrapper = py_func_wrapper\n else:\n raise ValueError('unknown value for run_as: %s' % run_as)\n\n # Sometimes the decorator is just desugared, making it impossible to detect.\n # This attribute makes detection easier.\n setattr(wrapper, '__pyct_is_compile_decorator', True)\n return wrapper\n\n return decorator\n\n\ndef converted_call(f, recursive, verbose, arg_types, *args, **kwargs):\n \"\"\"Compiles a function call inline.\"\"\"\n # TODO(mdan): This needs cleanup.\n # In particular, we may want to avoid renaming functions altogether.\n\n if conversion.is_whitelisted_for_graph(f):\n return f(*args, **kwargs)\n\n unknown_arg_value = object() # Sentinel for arguments of unknown value\n\n if tf_inspect.isbuiltin(f):\n return builtins.dynamic_builtin(f, *args, **kwargs)\n\n if tf_inspect.isfunction(f) or tf_inspect.ismethod(f):\n # Regular functions\n target_entity = f\n arg_map_target = f\n effective_args = args\n f_class = inspect_utils.getmethodclass(f)\n\n if f_class is not None:\n partial_types = (f_class,)\n else:\n partial_types = ()\n\n elif tf_inspect.isclass(f):\n # Constructors\n target_entity = f\n arg_map_target = f.__init__\n effective_args = (unknown_arg_value,) + args\n partial_types = ()\n\n elif hasattr(f, '__call__') and hasattr(f, '__class__'):\n # Callable objects\n target_entity = f.__call__\n arg_map_target = f.__call__\n effective_args = (f,) + args\n partial_types = (f.__class__,)\n\n else:\n raise NotImplementedError('unknown callable type \"%s\"' % type(f))\n\n arg_values = tf_inspect.getcallargs(arg_map_target, *args, **kwargs)\n for name, arg in arg_values.items():\n if arg is unknown_arg_value:\n continue\n arg_class = arg.__class__\n # If arg_value_hints specifies any name, use that instead.\n if name not in arg_types:\n arg_types[name] = (arg_class.__name__, arg_class)\n\n # When called from within a decorator, this is the only indication that\n # the function is a method - it appears that the decorator is applied\n # before the method is bound.\n if not partial_types:\n if 'self' in arg_values:\n if tf_inspect.isclass(arg_values['self'].__class__):\n partial_types = (arg_values['self'].__class__,)\n elif 'cls' in arg_values:\n if tf_inspect.isclass(arg_values['cls']):\n partial_types = (arg_values['cls'],)\n\n converted_f = to_graph(\n target_entity,\n recursive=recursive,\n verbose=verbose,\n arg_values=arg_values,\n arg_types=arg_types,\n partial_types=partial_types)\n return converted_f(*effective_args, **kwargs)\n\n\ndef to_graph(e,\n recursive=True,\n verbose=False,\n arg_values=None,\n arg_types=None,\n partial_types=None):\n \"\"\"Compile a Python entity into equivalent TensorFlow code.\n\n Currently supported entities:\n * functions\n * classes\n\n Classes are handled by converting all their methods into a new class.\n\n Args:\n e: A Python entity.\n 
recursive: Whether to recursively convert any functions that the converted\n function may call.\n verbose: Whether to output the compiled code in the logs.\n arg_values: A dict containing value hints for symbols like function\n parameters.\n arg_types: A dict containing type hints for symbols like function\n parameters.\n partial_types: A set of types (e.g. classes) that will not be converted\n entirely. Calls to member functions for these types will be renamed\n independently.\n\n Returns:\n A function with a signature identical to `e`, but which, when executed,\n creates a TF graph that has the same functionality as the original entity.\n \"\"\"\n conversion_map = conversion.ConversionMap(\n recursive=recursive,\n nocompile_decorators=(convert, do_not_convert, converted_call),\n partial_types=partial_types,\n api_module=tf_inspect.getmodule(to_graph))\n _, name = conversion.entity_to_graph(e, conversion_map, arg_values, arg_types)\n\n module = gast.Module([])\n for import_line in config.COMPILED_IMPORT_STATEMENTS:\n module.body.extend(parser.parse_str(import_line).body)\n for dep in conversion_map.dependency_cache.values():\n module.body.append(dep)\n compiled_node, compiled_src = compiler.ast_to_object(module)\n\n # The compiled code should see everything the entry function saw.\n # TODO(mdan): This might not work well if the call tree spans modules?\n if tf_inspect.isfunction(e):\n compiled_node.__dict__.update(inspect_utils.getnamespace(e))\n compiled_fn = getattr(compiled_node, name)\n\n if verbose:\n logging.info('Compiled output of %s:\\n\\n%s\\n', e, compiled_src)\n\n return compiled_fn\n\n\ndef to_code(e,\n recursive=True,\n arg_values=None,\n arg_types=None,\n partial_types=None,\n indentation='  '):\n \"\"\"Return the equivalent of an entity in TensorFlow code.\n\n See `to_graph` for more details.\n\n Args:\n e: A Python entity.\n recursive: See to_graph.\n arg_values: See to_graph.\n arg_types: See to_graph.\n partial_types: See to_graph.\n indentation: String, what to use for each level of indentation.\n\n Returns:\n String.\n \"\"\"\n conversion_map = conversion.ConversionMap(\n recursive=recursive,\n nocompile_decorators=(convert, do_not_convert, converted_call),\n partial_types=partial_types,\n api_module=tf_inspect.getmodule(to_graph))\n conversion.entity_to_graph(e, conversion_map, arg_values, arg_types)\n\n imports = '\\n'.join(config.COMPILED_IMPORT_STATEMENTS)\n code = '\\n'.join(\n compiler.ast_to_source(dep, indentation)\n for dep in reversed(tuple(\n six.itervalues(conversion_map.dependency_cache))))\n\n return imports + '\\n\\n' + code\n"
]
| [
[
"tensorflow.contrib.autograph.pyct.compiler.ast_to_object",
"tensorflow.python.util.tf_inspect.getcallargs",
"tensorflow.python.util.tf_inspect.ismethod",
"tensorflow.python.util.tf_inspect.isclass",
"tensorflow.contrib.autograph.pyct.inspect_utils.getnamespace",
"tensorflow.contrib.autograph.utils.builtins.dynamic_builtin",
"tensorflow.python.util.tf_inspect.getmodule",
"tensorflow.python.util.tf_inspect.isfunction",
"tensorflow.python.util.tf_inspect.isbuiltin",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.contrib.autograph.pyct.compiler.ast_to_source",
"tensorflow.contrib.autograph.impl.conversion.entity_to_graph",
"tensorflow.contrib.autograph.utils.py_func.wrap_py_func",
"tensorflow.contrib.autograph.pyct.inspect_utils.getmethodclass",
"tensorflow.contrib.autograph.pyct.parser.parse_str",
"tensorflow.contrib.autograph.impl.conversion.is_whitelisted_for_graph"
]
]
|
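A usage sketch for the `convert` decorator and `to_code` defined in the record above, assuming the legacy TF 1.x build that still ships `tensorflow.contrib.autograph`; the example function `collatz_steps` is illustrative:

    from tensorflow.contrib.autograph.impl.api import convert, to_code

    def collatz_steps(n):
      # Data-dependent control flow: the kind of code autograph rewrites.
      steps = 0
      while n > 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        steps += 1
      return steps

    # Inspect the generated graph-mode source (exact output depends on the build).
    print(to_code(collatz_steps))

    # Or compile lazily at call time, inside a graph context:
    graph_fn = convert(recursive=True)(collatz_steps)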
rastringer/trax | [
"80f80f0e5c99966942e72942463880731dc9fbf2"
]
| [
"trax/data/text_encoder.py"
]
| [
"# coding=utf-8\n# Copyright 2020 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Encoders for text data.\n\n* TextEncoder: base class\n* ByteTextEncoder: for ascii text\n* TokenTextEncoder: with user-supplied vocabulary file\n* SubwordTextEncoder: invertible\n* BertEncoder: for compatible tokenizers with original bert\n\"\"\"\n\nimport collections\nimport itertools\nimport math\nimport re\nimport tempfile\nimport time\nimport unicodedata\n\nfrom absl import logging\nimport numpy as np\nimport six\nimport tensorflow as tf\nfrom trax.data import tokenizer\n\n# Reserved tokens for things like padding and EOS symbols.\nPAD = \"<pad>\"\nEOS = \"<EOS>\"\nRESERVED_TOKENS = [PAD, EOS]\nNUM_RESERVED_TOKENS = len(RESERVED_TOKENS)\nPAD_ID = RESERVED_TOKENS.index(PAD) # Normally 0\nEOS_ID = RESERVED_TOKENS.index(EOS) # Normally 1\nRESERVED_TOKENS_BYTES = [bytes(PAD, \"ascii\"), bytes(EOS, \"ascii\")]\n\n# Regular expression for unescaping token strings.\n# '\\u' is converted to '_'\n# '\\\\' is converted to '\\'\n# '\\213;' is converted to unichr(213)\n_UNESCAPE_REGEX = re.compile(r\"\\\\u|\\\\\\\\|\\\\([0-9]+);\")\n_ESCAPE_CHARS = set(u\"\\\\_u;0123456789\")\n\n\n# Unicode utility functions that work with Python 2 and 3\ndef native_to_unicode(s):\n if is_unicode(s):\n return s\n try:\n return to_unicode(s)\n except UnicodeDecodeError:\n res = to_unicode(s, ignore_errors=True)\n logging.info(\"Ignoring Unicode error, outputting: %s\", res)\n return res\n\n\ndef is_unicode(s):\n return isinstance(s, six.text_type)\n\n\ndef to_unicode(s, ignore_errors=False):\n if is_unicode(s):\n return s\n error_mode = \"ignore\" if ignore_errors else \"strict\"\n return s.decode(\"utf-8\", errors=error_mode)\n\n\ndef to_unicode_ignore_errors(s):\n return to_unicode(s, ignore_errors=True)\n\n\ndef to_unicode_utf8(s):\n return s.decode(\"utf-8\")\n\n\ndef strip_ids(ids, ids_to_strip):\n \"\"\"Strip ids_to_strip from the end IDs.\"\"\"\n ids = list(ids)\n while ids and ids[-1] in ids_to_strip:\n ids.pop()\n return ids\n\n\nclass TextEncoder:\n \"\"\"Base class for converting from ints to/from human readable strings.\"\"\"\n\n def __init__(self, num_reserved_ids=NUM_RESERVED_TOKENS):\n self._num_reserved_ids = num_reserved_ids\n\n @property\n def num_reserved_ids(self):\n return self._num_reserved_ids\n\n def encode(self, s):\n \"\"\"Transform a human-readable string into a sequence of int IDs.\n\n The IDs should be in the range [num_reserved_ids, vocab_size). 
IDs [0,\n num_reserved_ids) are reserved.\n\n EOS is not appended.\n\n Args:\n s: human-readable string to be converted.\n\n Returns:\n ids: list of integers\n \"\"\"\n return [int(w) + self._num_reserved_ids for w in s.split()]\n\n def decode(self, ids, strip_extraneous=False):\n \"\"\"Transform a sequence of int IDs into a human-readable string.\n\n EOS is not expected in IDs.\n\n Args:\n ids: list of integers to be converted.\n strip_extraneous: bool, whether to strip off extraneous tokens (EOS and\n PAD).\n\n Returns:\n s: human-readable string.\n \"\"\"\n if strip_extraneous:\n ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))\n return \" \".join(self.decode_list(ids))\n\n def decode_list(self, ids):\n \"\"\"Transform a sequence of int IDs into their string versions.\n\n This method supports transforming individual input/output IDs to their\n string versions so that sequence to/from text conversions can be visualized\n in a human readable format.\n\n Args:\n ids: list of integers to be converted.\n\n Returns:\n strs: list of human-readable strings.\n \"\"\"\n decoded_ids = []\n for id_ in ids:\n if 0 <= id_ < self._num_reserved_ids:\n decoded_ids.append(RESERVED_TOKENS[int(id_)])\n else:\n decoded_ids.append(id_ - self._num_reserved_ids)\n return [str(d) for d in decoded_ids]\n\n @property\n def vocab_size(self):\n raise NotImplementedError()\n\n\nclass ByteTextEncoder(TextEncoder):\n \"\"\"Encodes each byte to an id. For 8-bit strings only.\"\"\"\n\n def encode(self, s):\n numres = self._num_reserved_ids\n # Python3: explicitly convert to UTF-8\n return [c + numres for c in s.encode(\"utf-8\")]\n\n def decode(self, ids, strip_extraneous=False):\n if strip_extraneous:\n ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))\n numres = self._num_reserved_ids\n decoded_ids = []\n int2byte = six.int2byte\n for id_ in ids:\n if 0 <= id_ < numres:\n decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)])\n else:\n decoded_ids.append(int2byte(id_ - numres))\n # Python3: join byte arrays and then decode string\n return b\"\".join(decoded_ids).decode(\"utf-8\", \"replace\")\n\n def decode_list(self, ids):\n numres = self._num_reserved_ids\n decoded_ids = []\n int2byte = six.int2byte\n for id_ in ids:\n if 0 <= id_ < numres:\n decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)])\n else:\n decoded_ids.append(int2byte(id_ - numres))\n # Python3: join byte arrays and then decode string\n return decoded_ids\n\n @property\n def vocab_size(self):\n return 2**8 + self._num_reserved_ids\n\n\nclass ClassLabelEncoder(TextEncoder):\n \"\"\"Encoder for class labels.\"\"\"\n\n def __init__(self, class_labels=None, class_labels_fname=None):\n super(ClassLabelEncoder, self).__init__(num_reserved_ids=0)\n\n if class_labels_fname:\n with tf.io.gfile.GFile(class_labels_fname) as f:\n class_labels = [label.strip() for label in f.readlines()]\n\n assert class_labels\n self._class_labels = class_labels\n\n def encode(self, s):\n label_str = s\n return self._class_labels.index(label_str)\n\n def decode(self, ids, strip_extraneous=False):\n del strip_extraneous\n label_id = ids\n if isinstance(label_id, list):\n assert len(label_id) == 1\n label_id, = label_id\n if isinstance(label_id, np.ndarray):\n label_id = np.squeeze(label_id)\n return self._class_labels[label_id]\n\n def decode_list(self, ids):\n return [self._class_labels[i] for i in ids]\n\n @property\n def vocab_size(self):\n return len(self._class_labels)\n\n\nclass OneHotClassLabelEncoder(ClassLabelEncoder):\n \"\"\"One-hot encoder for 
class labels.\"\"\"\n\n def encode(self, label_str, on_value=1, off_value=0): # pylint: disable=arguments-differ\n e = np.full(self.vocab_size, off_value, dtype=np.int32)\n e[self._class_labels.index(label_str)] = on_value\n return e.tolist()\n\n def decode(self, ids, strip_extraneous=False):\n del strip_extraneous\n label_id = ids\n if isinstance(label_id, np.ndarray):\n label_id = np.squeeze(label_id).astype(np.int8).tolist()\n assert isinstance(label_id, list)\n assert len(label_id) == self.vocab_size\n return self._class_labels[label_id.index(1)]\n\n @property\n def vocab_size(self):\n return len(self._class_labels)\n\n\nclass TokenTextEncoder(TextEncoder):\n \"\"\"Encoder based on a user-supplied vocabulary (file or list).\"\"\"\n\n def __init__(self,\n vocab_filename,\n reverse=False,\n vocab_list=None,\n replace_oov=None,\n num_reserved_ids=NUM_RESERVED_TOKENS):\n \"\"\"Initialize from a file or list, one token per line.\n\n Handling of reserved tokens works as follows:\n - When initializing from a list, we add reserved tokens to the vocab.\n - When initializing from a file, we do not add reserved tokens to the vocab.\n - When saving vocab files, we save reserved tokens to the file.\n\n Args:\n vocab_filename: If not None, the full filename to read vocab from. If this\n is not None, then vocab_list should be None.\n reverse: Boolean indicating if tokens should be reversed during encoding\n and decoding.\n vocab_list: If not None, a list of elements of the vocabulary. If this is\n not None, then vocab_filename should be None.\n replace_oov: If not None, every out-of-vocabulary token seen when encoding\n will be replaced by this string (which must be in vocab).\n num_reserved_ids: Number of IDs to save for reserved tokens like <EOS>.\n \"\"\"\n super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids)\n self._reverse = reverse\n self._replace_oov = replace_oov\n if vocab_filename:\n self._init_vocab_from_file(vocab_filename)\n else:\n assert vocab_list is not None\n self._init_vocab_from_list(vocab_list)\n\n def encode(self, s):\n \"\"\"Converts a space-separated string of tokens to a list of ids.\"\"\"\n sentence = s\n tokens = sentence.strip().split()\n if self._replace_oov is not None:\n tokens = [\n t if t in self._token_to_id else self._replace_oov for t in tokens\n ]\n ret = [self._token_to_id[tok] for tok in tokens]\n return ret[::-1] if self._reverse else ret\n\n def decode(self, ids, strip_extraneous=False):\n return \" \".join(self.decode_list(ids))\n\n def decode_list(self, ids):\n seq = reversed(ids) if self._reverse else ids\n return [self._safe_id_to_token(i) for i in seq]\n\n @property\n def vocab_size(self):\n return len(self._id_to_token)\n\n def _safe_id_to_token(self, idx):\n return self._id_to_token.get(idx, \"ID_%d\" % idx)\n\n def _init_vocab_from_file(self, filename):\n \"\"\"Load vocab from a file.\n\n Args:\n filename: The file to load vocabulary from.\n \"\"\"\n with tf.io.gfile.GFile(filename) as f:\n tokens = [token.strip() for token in f.readlines()]\n\n def token_gen():\n for token in tokens:\n yield token\n\n self._init_vocab(token_gen(), add_reserved_tokens=False)\n\n def _init_vocab_from_list(self, vocab_list):\n \"\"\"Initialize tokens from a list of tokens.\n\n It is ok if reserved tokens appear in the vocab list. They will be\n removed. 
The set of tokens in vocab_list should be unique.\n\n Args:\n vocab_list: A list of tokens.\n \"\"\"\n\n def token_gen():\n for token in vocab_list:\n if token not in RESERVED_TOKENS:\n yield token\n\n self._init_vocab(token_gen())\n\n def _init_vocab(self, token_generator, add_reserved_tokens=True):\n \"\"\"Initialize vocabulary with tokens from token_generator.\"\"\"\n\n self._id_to_token = {}\n non_reserved_start_index = 0\n\n if add_reserved_tokens:\n self._id_to_token.update(enumerate(RESERVED_TOKENS))\n non_reserved_start_index = len(RESERVED_TOKENS)\n\n self._id_to_token.update(\n enumerate(token_generator, start=non_reserved_start_index))\n\n # _token_to_id is the reverse of _id_to_token\n self._token_to_id = dict(\n (v, k) for k, v in six.iteritems(self._id_to_token))\n\n def store_to_file(self, filename):\n \"\"\"Write vocab file to disk.\n\n Vocab files have one token per line. The file ends in a newline. Reserved\n tokens are written to the vocab file as well.\n\n Args:\n filename: Full path of the file to store the vocab to.\n \"\"\"\n with tf.io.gfile.GFile(filename, \"w\") as f:\n for i in range(len(self._id_to_token)):\n f.write(self._id_to_token[i] + \"\\n\")\n\n\ndef _escape_token(token, alphabet):\n \"\"\"Escape away underscores and OOV characters and append '_'.\n\n This allows the token to be expressed as the concatenation of a list\n of subtokens from the vocabulary. The underscore acts as a sentinel\n which allows us to invertibly concatenate multiple such lists.\n\n Args:\n token: A unicode string to be escaped.\n alphabet: A set of all characters in the vocabulary's alphabet.\n\n Returns:\n escaped_token: An escaped unicode string.\n\n Raises:\n ValueError: If the provided token is not unicode.\n \"\"\"\n if not isinstance(token, six.text_type):\n raise ValueError(\"Expected string type for token, got %s\" % type(token))\n\n token = token.replace(u\"\\\\\", u\"\\\\\\\\\").replace(u\"_\", u\"\\\\u\")\n ret = [c if c in alphabet and c != u\"\\n\" else r\"\\%d;\" % ord(c) for c in token]\n return u\"\".join(ret) + \"_\"\n\n\ndef _unescape_token(escaped_token):\n \"\"\"Inverse of _escape_token().\n\n Args:\n escaped_token: a unicode string\n\n Returns:\n token: a unicode string\n \"\"\"\n\n def match(m):\n if m.group(1) is None:\n return u\"_\" if m.group(0) == u\"\\\\u\" else u\"\\\\\"\n\n try:\n return six.unichr(int(m.group(1)))\n except (ValueError, OverflowError) as _:\n return u\"\\u3013\" # Unicode for undefined character.\n\n trimmed = escaped_token[:-1] if escaped_token.endswith(\"_\") else escaped_token\n return _UNESCAPE_REGEX.sub(match, trimmed)\n\n\nclass SubwordTextEncoder(TextEncoder):\n \"\"\"Class for invertibly encoding text using a limited vocabulary.\n\n Invertibly encodes a native string as a sequence of subtokens from a limited\n vocabulary.\n\n A SubwordTextEncoder is built from a corpus (so it is tailored to the text in\n the corpus), and stored to a file. See text_encoder_build_subword.py.\n\n It can then be loaded and used to encode/decode any text.\n\n Encoding has four phases:\n\n 1. Tokenize into a list of tokens. Each token is a unicode string of either\n all alphanumeric characters or all non-alphanumeric characters. We drop\n tokens consisting of a single space that are between two alphanumeric\n tokens.\n\n 2. Escape each token. This escapes away special and out-of-vocabulary\n characters, and makes sure that each token ends with an underscore, and\n has no other underscores.\n\n 3. 
Represent each escaped token as the concatenation of a list of subtokens\n from the limited vocabulary. Subtoken selection is done greedily from\n beginning to end. That is, we construct the list in order, always picking\n the longest subtoken in our vocabulary that matches a prefix of the\n remaining portion of the encoded token.\n\n 4. Concatenate these lists. This concatenation is invertible due to the\n fact that the trailing underscores indicate when one list is finished.\n\n \"\"\"\n\n def __init__(self, filename=None):\n \"\"\"Initialize and read from a file, if provided.\n\n Args:\n filename: filename from which to read vocab. If None, do not load a vocab\n \"\"\"\n self._alphabet = set()\n self.filename = filename\n if filename is not None:\n self._load_from_file(filename)\n super(SubwordTextEncoder, self).__init__()\n\n def encode(self, s):\n \"\"\"Converts a native string to a list of subtoken IDs.\n\n Args:\n s: a native string.\n\n Returns:\n a list of integers in the range [0, vocab_size)\n \"\"\"\n return self._tokens_to_subtoken_ids(tokenizer.encode(native_to_unicode(s)))\n\n def encode_without_tokenizing(self, token_text):\n \"\"\"Converts string to list of subtoken IDs without calling tokenizer.\n\n This treats `token_text` as a single token and directly converts it\n to subtoken IDs. This may be useful when the default tokenizer doesn't\n do what we want (e.g., when encoding text with tokens composed of lots of\n nonalphanumeric characters). It is then up to the caller to make sure that\n raw text is consistently converted into tokens. Only use this if you are\n sure that `encode` doesn't suit your needs.\n\n Args:\n token_text: A native string representation of a single token.\n\n Returns:\n A list of subword token IDs; i.e., integers in the range [0, vocab_size).\n \"\"\"\n return self._tokens_to_subtoken_ids([native_to_unicode(token_text)])\n\n def decode(self, ids, strip_extraneous=False):\n \"\"\"Converts a sequence of subtoken IDs to a native string.\n\n Args:\n ids: a list of integers in the range [0, vocab_size)\n strip_extraneous: bool, whether to strip off extraneous tokens (EOS and\n PAD).\n\n Returns:\n a native string\n \"\"\"\n if strip_extraneous:\n ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))\n return tokenizer.decode(self._subtoken_ids_to_tokens(ids))\n\n def decode_list(self, ids):\n return [self._subtoken_id_to_subtoken_string(s) for s in ids]\n\n @property\n def vocab_size(self):\n \"\"\"The subtoken vocabulary size.\"\"\"\n return len(self._all_subtoken_strings)\n\n def _tokens_to_subtoken_ids(self, tokens):\n \"\"\"Converts a list of tokens to a list of subtoken IDs.\n\n Args:\n tokens: a list of strings.\n\n Returns:\n a list of integers in the range [0, vocab_size)\n \"\"\"\n ret = []\n for token in tokens:\n ret.extend(self._token_to_subtoken_ids(token))\n return ret\n\n def _token_to_subtoken_ids(self, token):\n \"\"\"Converts token to a list of subtoken IDs.\n\n Args:\n token: a string.\n\n Returns:\n a list of integers in the range [0, vocab_size)\n \"\"\"\n cache_location = hash(token) % self._cache_size\n cache_key, cache_value = self._cache[cache_location]\n if cache_key == token:\n return cache_value\n ret = self._escaped_token_to_subtoken_ids(\n _escape_token(token, self._alphabet))\n self._cache[cache_location] = (token, ret)\n return ret\n\n def _subtoken_ids_to_tokens(self, subtokens):\n \"\"\"Converts a list of subtoken IDs to a list of tokens.\n\n Args:\n subtokens: a list of integers in the range [0, 
vocab_size)\n\n Returns:\n a list of strings.\n \"\"\"\n concatenated = \"\".join(\n [self._subtoken_id_to_subtoken_string(s) for s in subtokens])\n split = concatenated.split(\"_\")\n ret = []\n for t in split:\n if t:\n unescaped = _unescape_token(t + \"_\")\n if unescaped:\n ret.append(unescaped)\n return ret\n\n def _subtoken_id_to_subtoken_string(self, subtoken):\n \"\"\"Converts a subtoken integer ID to a subtoken string.\"\"\"\n if 0 <= subtoken < self.vocab_size:\n return self._all_subtoken_strings[subtoken]\n return u\"\"\n\n def _escaped_token_to_subtoken_strings(self, escaped_token):\n \"\"\"Converts an escaped token string to a list of subtoken strings.\n\n Args:\n escaped_token: An escaped token as a unicode string.\n\n Returns:\n A list of subtokens as unicode strings.\n \"\"\"\n # NOTE: This algorithm is greedy; it won't necessarily produce the \"best\"\n # list of subtokens.\n ret = []\n start = 0\n token_len = len(escaped_token)\n while start < token_len:\n for end in range(\n min(token_len, start + self._max_subtoken_len), start, -1):\n subtoken = escaped_token[start:end]\n if subtoken in self._subtoken_string_to_id:\n ret.append(subtoken)\n start = end\n break\n\n else: # Did not break\n # If there is no possible encoding of the escaped token then one of the\n # characters in the token is not in the alphabet. This should be\n # impossible and would be indicative of a bug.\n assert False, \"Token substring not found in subtoken vocabulary.\"\n\n return ret\n\n def _escaped_token_to_subtoken_ids(self, escaped_token):\n \"\"\"Converts an escaped token string to a list of subtoken IDs.\n\n Args:\n escaped_token: An escaped token as a unicode string.\n\n Returns:\n A list of subtoken IDs as integers.\n \"\"\"\n return [\n self._subtoken_string_to_id[subtoken]\n for subtoken in self._escaped_token_to_subtoken_strings(escaped_token)\n ]\n\n @classmethod\n def build_from_generator(cls,\n generator,\n target_size,\n max_subtoken_length=None,\n reserved_tokens=None):\n \"\"\"Builds a SubwordTextEncoder from the generated text.\n\n Args:\n generator: yields text.\n target_size: int, approximate vocabulary size to create.\n max_subtoken_length: Maximum length of a subtoken. If this is not set,\n then the runtime and memory use of creating the vocab is quadratic in\n the length of the longest token. If this is set, then it is instead\n O(max_subtoken_length * length of longest token).\n reserved_tokens: List of reserved tokens. The global variable\n `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. 
If this\n argument is `None`, it will use `RESERVED_TOKENS`.\n\n Returns:\n SubwordTextEncoder with `vocab_size` approximately `target_size`.\n \"\"\"\n token_counts = collections.defaultdict(int)\n for item in generator:\n for tok in tokenizer.encode(native_to_unicode(item)):\n token_counts[tok] += 1\n encoder = cls.build_to_target_size(\n target_size,\n token_counts,\n 1,\n 1e3,\n max_subtoken_length=max_subtoken_length,\n reserved_tokens=reserved_tokens)\n return encoder\n\n @classmethod\n def build_to_target_size(cls,\n target_size,\n token_counts,\n min_val,\n max_val,\n max_subtoken_length=None,\n reserved_tokens=None,\n num_iterations=4):\n \"\"\"Builds a SubwordTextEncoder that has `vocab_size` near `target_size`.\n\n Uses simple recursive binary search to find a minimum token count that most\n closely matches the `target_size`.\n\n Args:\n target_size: Desired vocab_size to approximate.\n token_counts: A dictionary of token counts, mapping string to int.\n min_val: An integer; lower bound for the minimum token count.\n max_val: An integer; upper bound for the minimum token count.\n max_subtoken_length: Maximum length of a subtoken. If this is not set,\n then the runtime and memory use of creating the vocab is quadratic in\n the length of the longest token. If this is set, then it is instead\n O(max_subtoken_length * length of longest token).\n reserved_tokens: List of reserved tokens. The global variable\n `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this\n argument is `None`, it will use `RESERVED_TOKENS`.\n num_iterations: An integer; how many iterations of refinement.\n\n Returns:\n A SubwordTextEncoder instance.\n\n Raises:\n ValueError: If `min_val` is greater than `max_val`.\n \"\"\"\n if min_val > max_val:\n raise ValueError(\"Lower bound for the minimum token count \"\n \"is greater than the upper bound.\")\n if target_size < 1:\n raise ValueError(\"Target size must be positive.\")\n\n if reserved_tokens is None:\n reserved_tokens = RESERVED_TOKENS\n\n def bisect(min_val, max_val):\n \"\"\"Bisection to find the right size.\"\"\"\n present_count = (max_val + min_val) // 2\n logging.info(\"Trying min_count %d\", present_count)\n subtokenizer = cls()\n subtokenizer.build_from_token_counts(\n token_counts,\n present_count,\n num_iterations,\n max_subtoken_length=max_subtoken_length,\n reserved_tokens=reserved_tokens)\n\n # Being within 1% of the target size is ok.\n is_ok = abs(subtokenizer.vocab_size - target_size) * 100 < target_size\n # If min_val == max_val, we can't do any better than this.\n if is_ok or min_val >= max_val or present_count < 2:\n return subtokenizer\n\n if subtokenizer.vocab_size > target_size:\n other_subtokenizer = bisect(present_count + 1, max_val)\n else:\n other_subtokenizer = bisect(min_val, present_count - 1)\n\n if other_subtokenizer is None:\n return subtokenizer\n\n if (abs(other_subtokenizer.vocab_size - target_size) <\n abs(subtokenizer.vocab_size - target_size)):\n return other_subtokenizer\n return subtokenizer\n\n return bisect(min_val, max_val)\n\n def build_from_token_counts(self,\n token_counts,\n min_count,\n num_iterations=4,\n reserved_tokens=None,\n max_subtoken_length=None):\n \"\"\"Train a SubwordTextEncoder based on a dictionary of word counts.\n\n Args:\n token_counts: a dictionary of Unicode strings to int.\n min_count: an integer - discard subtokens with lower counts.\n num_iterations: an integer. how many iterations of refinement.\n reserved_tokens: List of reserved tokens. 
The global variable\n `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this\n argument is `None`, it will use `RESERVED_TOKENS`.\n max_subtoken_length: Maximum length of a subtoken. If this is not set,\n then the runtime and memory use of creating the vocab is quadratic in\n the length of the longest token. If this is set, then it is instead\n O(max_subtoken_length * length of longest token).\n\n Raises:\n ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it\n is not clear what the space is being reserved for, or when it will be\n filled in.\n \"\"\"\n if reserved_tokens is None:\n reserved_tokens = RESERVED_TOKENS\n else:\n # There is not complete freedom in replacing RESERVED_TOKENS.\n for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):\n if default != proposed:\n raise ValueError(\"RESERVED_TOKENS must be a prefix of \"\n \"reserved_tokens.\")\n\n # Initialize the alphabet. Note, this must include reserved tokens or it can\n # result in encoding failures.\n alphabet_tokens = itertools.chain(\n six.iterkeys(token_counts),\n [native_to_unicode(t) for t in reserved_tokens])\n\n self._init_alphabet_from_tokens(alphabet_tokens)\n\n # Bootstrap the initial list of subtokens with the characters from the\n # alphabet plus the escaping characters.\n self._init_subtokens_from_list(\n list(self._alphabet), reserved_tokens=reserved_tokens)\n\n # We build iteratively. On each iteration, we segment all the words,\n # then count the resulting potential subtokens, keeping the ones\n # with high enough counts for our new vocabulary.\n if min_count < 1:\n min_count = 1\n for i in range(num_iterations):\n logging.info(\"Iteration %d\", i)\n\n # Collect all substrings of the encoded token that break along current\n # subtoken boundaries.\n subtoken_counts = collections.defaultdict(int)\n for token, count in six.iteritems(token_counts):\n iter_start_time = time.time()\n escaped_token = _escape_token(token, self._alphabet)\n subtokens = self._escaped_token_to_subtoken_strings(escaped_token)\n start = 0\n for subtoken in subtokens:\n last_position = len(escaped_token) + 1\n if max_subtoken_length is not None:\n last_position = min(last_position, start + max_subtoken_length)\n\n for end in range(start + 1, last_position):\n new_subtoken = escaped_token[start:end]\n subtoken_counts[new_subtoken] += count\n start += len(subtoken)\n iter_time_secs = time.time() - iter_start_time\n if iter_time_secs > 0.1:\n logging.info(\n \"Processing token [%s] took {%d} seconds, consider \"\n \"setting Text2TextProblem.max_subtoken_length to a \"\n \"smaller value.\", token, iter_time_secs)\n\n # Array of sets of candidate subtoken strings, by length.\n len_to_subtoken_strings = []\n for subtoken_string, count in six.iteritems(subtoken_counts):\n lsub = len(subtoken_string)\n if count >= min_count:\n while len(len_to_subtoken_strings) <= lsub:\n len_to_subtoken_strings.append(set())\n len_to_subtoken_strings[lsub].add(subtoken_string)\n\n # Consider the candidates longest to shortest, so that if we accept\n # a longer subtoken string, we can decrement the counts of its prefixes.\n new_subtoken_strings = []\n for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1):\n subtoken_strings = len_to_subtoken_strings[lsub]\n for subtoken_string in subtoken_strings:\n count = subtoken_counts[subtoken_string]\n if count >= min_count:\n # Exclude alphabet tokens here, as they must be included later,\n # explicitly, regardless of count.\n if subtoken_string not in self._alphabet:\n 
new_subtoken_strings.append((count, subtoken_string))\n for l in range(1, lsub):\n subtoken_counts[subtoken_string[:l]] -= count\n\n # Include the alphabet explicitly to guarantee all strings are encodable.\n new_subtoken_strings.extend(\n (subtoken_counts.get(a, 0), a) for a in self._alphabet)\n new_subtoken_strings.sort(reverse=True)\n\n # Reinitialize to the candidate vocabulary.\n new_subtoken_strings = [subtoken for _, subtoken in new_subtoken_strings]\n if reserved_tokens:\n escaped_reserved_tokens = [\n _escape_token(native_to_unicode(t), self._alphabet)\n for t in reserved_tokens\n ]\n new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings\n\n self._init_subtokens_from_list(new_subtoken_strings)\n logging.info(\"vocab_size = %d\", self.vocab_size)\n\n @property\n def all_subtoken_strings(self):\n return tuple(self._all_subtoken_strings)\n\n def dump(self):\n \"\"\"Debugging dump of the current subtoken vocabulary.\"\"\"\n subtoken_strings = [\n (i, s) for s, i in six.iteritems(self._subtoken_string_to_id)\n ]\n print(u\", \".join(\n u\"{0} : '{1}'\".format(i, s) for i, s in sorted(subtoken_strings)))\n\n def _init_subtokens_from_list(self, subtoken_strings, reserved_tokens=None):\n \"\"\"Initialize token information from a list of subtoken strings.\n\n Args:\n subtoken_strings: a list of subtokens\n reserved_tokens: List of reserved tokens. We must have `reserved_tokens`\n as None or the empty list, or else the global variable `RESERVED_TOKENS`\n must be a prefix of `reserved_tokens`.\n\n Raises:\n ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it\n is not clear what the space is being reserved for, or when it will be\n filled in.\n \"\"\"\n if reserved_tokens is None:\n reserved_tokens = []\n\n if reserved_tokens:\n self._all_subtoken_strings = reserved_tokens + subtoken_strings\n else:\n self._all_subtoken_strings = subtoken_strings\n\n # we remember the maximum length of any subtoken to avoid having to\n # check arbitrarily long strings.\n self._max_subtoken_len = max([len(s) for s in subtoken_strings])\n self._subtoken_string_to_id = {\n s: i + len(reserved_tokens) for i, s in enumerate(subtoken_strings) if s\n }\n # Initialize the cache to empty.\n self._cache_size = 2**20\n self._cache = [(None, None)] * self._cache_size\n\n def _init_alphabet_from_tokens(self, tokens):\n \"\"\"Initialize alphabet from an iterable of token or subtoken strings.\"\"\"\n # Include all characters from all tokens in the alphabet to guarantee that\n # any token can be encoded. 
Additionally, include all escaping characters.\n self._alphabet = {c for token in tokens for c in token} # pylint: disable=g-complex-comprehension\n self._alphabet |= _ESCAPE_CHARS\n\n def _load_from_file_object(self, f):\n \"\"\"Load from a file object.\n\n Args:\n f: File object to load vocabulary from\n \"\"\"\n subtoken_strings = []\n for line in f:\n s = line.rstrip()\n # Some vocab files wrap words in single quotes, but others don't\n if ((s.startswith(\"'\") and s.endswith(\"'\")) or\n (s.startswith(\"\\\"\") and s.endswith(\"\\\"\"))):\n s = s[1:-1]\n subtoken_strings.append(native_to_unicode(s))\n self._init_subtokens_from_list(subtoken_strings)\n self._init_alphabet_from_tokens(subtoken_strings)\n\n def _load_from_file(self, filename):\n \"\"\"Load from a vocab file.\"\"\"\n if not tf.io.gfile.exists(filename):\n raise ValueError(\"File %s not found\" % filename)\n with tf.io.gfile.GFile(filename) as f:\n self._load_from_file_object(f)\n\n def store_to_file(self, filename, add_single_quotes=True):\n with tf.io.gfile.GFile(filename, \"w\") as f:\n for subtoken_string in self._all_subtoken_strings:\n if add_single_quotes:\n f.write(\"'\" + subtoken_string + \"'\\n\")\n else:\n f.write(subtoken_string + \"\\n\")\n\n\nclass ImageEncoder:\n \"\"\"Encoder class for saving and loading images.\"\"\"\n\n def __init__(self, num_reserved_ids=0, height=None, width=None, channels=3):\n assert num_reserved_ids == 0\n self._height = height\n self._width = width\n self._channels = channels\n\n @property\n def num_reserved_ids(self):\n return 0\n\n def encode(self, s):\n \"\"\"Transform a string with a filename into a list of RGB integers.\n\n Args:\n s: path to the file with an image.\n\n Returns:\n ids: list of integers\n \"\"\"\n try:\n import matplotlib.image as im # pylint: disable=g-import-not-at-top\n except ImportError as e:\n logging.warning(\n \"Reading an image requires matplotlib to be installed: %s\", e)\n raise NotImplementedError(\"Image reading not implemented.\")\n return im.imread(s)\n\n def decode(self, ids, strip_extraneous=False):\n \"\"\"Transform a sequence of int IDs into an image file.\n\n Args:\n ids: list of integers to be converted.\n strip_extraneous: unused\n\n Returns:\n Path to the temporary file where the image was saved.\n\n Raises:\n ValueError: if the IDs are not of the appropriate size.\n \"\"\"\n del strip_extraneous\n _, tmp_file_path = tempfile.mkstemp(\"_decode.png\")\n if self._height is None or self._width is None:\n size = int(math.sqrt(len(ids) / self._channels))\n length = size * size * self._channels\n else:\n size = None\n length = self._height * self._width * self._channels\n if len(ids) != length:\n raise ValueError(\"Length of ids (%d) must be height (%d) x width (%d) x \"\n \"channels (%d); %d != %d.\\n Ids: %s\" %\n (len(ids), self._height, self._width, self._channels,\n len(ids), length, \" \".join([str(i) for i in ids])))\n with tf.Graph().as_default():\n raw = tf.constant(ids, dtype=tf.uint8)\n if size is None:\n img = tf.reshape(raw, [self._height, self._width, self._channels])\n else:\n img = tf.reshape(raw, [size, size, self._channels])\n png = tf.image.encode_png(img)\n op = tf.write_file(tmp_file_path, png)\n with tf.Session() as sess:\n sess.run(op)\n return tmp_file_path\n\n def decode_list(self, ids):\n \"\"\"Transform a sequence of int IDs into an image file.\n\n Args:\n ids: list of integers to be converted.\n\n Returns:\n Singleton list: path to the temporary file where the image was saved.\n \"\"\"\n return [self.decode(ids)]\n\n 
@property\n def vocab_size(self):\n return 256\n\n\nclass RealEncoder:\n \"\"\"Encoder class for saving and loading float values.\"\"\"\n\n def encode(self, s):\n \"\"\"Transform a string (space separated float values) into a float array.\n\n Args:\n s: space separated float values.\n\n Returns:\n Array of float values.\n \"\"\"\n return [float(w) for w in s.split()]\n\n def decode(self, ids, strip_extraneous=False):\n \"\"\"Transform sequence of float values into string (float values).\n\n Args:\n ids: array of floats to be converted.\n strip_extraneous: unused\n\n Returns:\n String having space separated float values.\n \"\"\"\n del strip_extraneous\n return \" \".join([str(i) for i in ids])\n\n\nclass BertEncoder:\n \"\"\"Encoder class that is compatible with models trained with the original BERT library.\"\"\"\n\n def __init__(self, vocab_file, do_lower_case=True):\n self._vocab = self.load_vocab(vocab_file)\n self._inv_vocab = {v: k for k, v in self._vocab.items()}\n self._basic_tokenizer = BertBasicEncoder(do_lower_case=do_lower_case)\n self._wordpiece_tokenizer = BertWordpieceTokenizer(vocab=self._vocab)\n\n def load_vocab(self, vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = collections.OrderedDict()\n index = 0\n with tf.io.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = native_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab\n\n def encode(self, text):\n return self._convert_tokens_to_ids(self.tokenize(text))\n\n # Note: Because encoding by BertEncoder is not unique, text decoded\n # from token ids is not unique either.\n def decode(self, ids):\n \"\"\"Returns a text that, when encoded, would yield the provided ids.\"\"\"\n tokens = self._convert_ids_to_tokens(ids)\n if not tokens:\n return \"\"\n retarr = [tokens[0]]\n for token in tokens[1:]:\n if token.startswith(\"##\"):\n retarr.append(token.lstrip(\"#\"))\n else:\n retarr.append(\" \")\n retarr.append(token)\n return \"\".join(retarr)\n\n @property\n def vocab_size(self):\n return len(self._vocab)\n\n def tokenize(self, text):\n split_tokens = []\n for token in self._basic_tokenizer.tokenize(text):\n for sub_token in self._wordpiece_tokenizer.tokenize(token):\n split_tokens.append(sub_token)\n\n return split_tokens\n\n def _convert_tokens_to_ids(self, tokens):\n return [self._vocab[token] for token in tokens]\n\n def _convert_ids_to_tokens(self, ids):\n return [self._inv_vocab[token_id] for token_id in ids]\n\n\nclass BertBasicEncoder:\n \"\"\"Part of BertEncoder; tokenization (punctuation splitting, lower casing).\"\"\"\n\n def __init__(self, do_lower_case=True):\n \"\"\"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n \"\"\"\n self.do_lower_case = do_lower_case\n\n def tokenize(self, text):\n \"\"\"Tokenizes a piece of text.\"\"\"\n text = native_to_unicode(text)\n text = self._clean_text(text)\n\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens\n\n def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)\n\n def _run_split_on_punc(self, text):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _bert_is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in output]\n\n def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)\n\n def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"Chinese character\" as anything in the CJK Unicode block:\n # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. Those alphabets are used to write\n # space-separated words, so they are not treated specially and are handled\n # like all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False\n\n def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _bert_is_control(char):\n continue\n if _bert_is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)\n\n\nclass BertWordpieceTokenizer:\n \"\"\"Runs WordPiece tokenization.\"\"\"\n\n def __init__(self, vocab, unk_token=\"[UNK]\", max_input_chars_per_word=200):\n self.vocab = vocab\n self.unk_token = unk_token\n self.max_input_chars_per_word = max_input_chars_per_word\n\n def tokenize(self, text):\n \"\"\"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer`.\n\n Returns:\n A list of wordpiece tokens.\n \"\"\"\n\n text = native_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens\n\n\ndef _bert_is_whitespace(char):\n \"\"\"Checks whether `char` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically control characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False\n\n\ndef _bert_is_control(char):\n \"\"\"Checks whether `char` is a control character.\"\"\"\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True\n return False\n\n\ndef _bert_is_punctuation(char):\n \"\"\"Checks whether `char` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False\n\n\ndef whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens\n"
]
| [
[
"tensorflow.write_file",
"tensorflow.Graph",
"tensorflow.constant",
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.GFile",
"numpy.squeeze",
"tensorflow.reshape",
"numpy.full",
"tensorflow.image.encode_png",
"matplotlib.image.imread",
"tensorflow.Session"
]
]
|
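The `BertWordpieceTokenizer` docstring in the record above describes a greedy longest-match-first algorithm. A minimal, dependency-free sketch of that loop (the toy vocabulary is invented for illustration, not taken from any real BERT vocab file):

```python
# Greedy longest-match-first WordPiece, mirroring BertWordpieceTokenizer.tokenize above.
def wordpiece(token, vocab, unk="[UNK]"):
    chars = list(token)
    start, pieces = 0, []
    while start < len(chars):
        end = len(chars)
        cur = None
        while start < end:                 # shrink the window until a vocab entry matches
            sub = "".join(chars[start:end])
            if start > 0:
                sub = "##" + sub           # continuation pieces carry the '##' prefix
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:
            return [unk]                   # no prefix matched: the whole token is unknown
        pieces.append(cur)
        start = end
    return pieces

vocab = {"un", "##aff", "##able"}          # toy vocabulary for illustration
print(wordpiece("unaffable", vocab))       # ['un', '##aff', '##able']
```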
alowet/spikeextractors | [
"3e5a175a5523a3eb3578f96b0fffebee54108d9f"
]
| [
"spikeextractors/extractors/intanrecordingextractor/intanrecordingextractor.py"
]
| [
"from spikeextractors import RecordingExtractor\nfrom spikeextractors.extraction_tools import check_get_traces_args, check_get_ttl_args\nimport numpy as np\nfrom pathlib import Path\nfrom distutils.version import StrictVersion\n\ntry:\n import pyintan\n if StrictVersion(pyintan.__version__) >= '0.2.1':\n HAVE_INTAN = True\n else:\n print(\"pyintan version requires an update (>=0.2.1). Please upgrade with 'pip install --upgrade pyintan'\")\n HAVE_INTAN = False\nexcept ImportError:\n HAVE_INTAN = False\n\n\nclass IntanRecordingExtractor(RecordingExtractor):\n extractor_name = 'IntanRecording'\n has_default_locations = False\n is_writable = False\n mode = 'file'\n installed = HAVE_INTAN # check at class level if installed or not\n installation_mesg = \"To use the Intan extractor, install pyintan: \\n\\n pip install pyintan\\n\\n\" # error message when not installed\n\n def __init__(self, file_path: str, dtype: str = 'float', verbose: bool = False):\n assert HAVE_INTAN, self.installation_mesg\n RecordingExtractor.__init__(self)\n assert Path(file_path).suffix == '.rhs' or Path(file_path).suffix == '.rhd', \\\n \"Only '.rhd' and '.rhs' files are supported\"\n self._recording_file = file_path\n self._recording = pyintan.File(file_path, verbose)\n self._num_frames = len(self._recording.times)\n self._analog_channels = np.array([ch for ch in self._recording._anas_chan if all([other_ch not in ch['name']\n for other_ch in\n ['ADC', 'VDD', 'AUX']])])\n self._num_channels = len(self._analog_channels)\n self._channel_ids = list(range(self._num_channels))\n self._fs = float(self._recording.sample_rate.rescale('Hz').magnitude)\n\n assert dtype in ['float', 'uint16'], \"'dtype' can be either 'float' or 'uint16'\"\n self._dtype = dtype\n\n if self._dtype == 'uint16':\n for i, ch in enumerate(self._analog_channels):\n self.set_channel_property(i, 'gain', ch['gain'])\n self.set_channel_property(i, 'offset', ch['offset'])\n\n self._kwargs = {'file_path': str(Path(file_path).absolute()), 'verbose': verbose}\n\n def get_channel_ids(self):\n return self._channel_ids\n\n def get_num_frames(self):\n return self._num_frames\n\n def get_sampling_frequency(self):\n return self._fs\n\n @check_get_traces_args\n def get_traces(self, channel_ids=None, start_frame=None, end_frame=None, dtype=None):\n channel_idxs = np.array([self._channel_ids.index(ch) for ch in channel_ids])\n analog_chans = self._analog_channels[channel_idxs]\n if dtype is None:\n return self._recording._read_analog(channels=analog_chans, i_start=start_frame, i_stop=end_frame,\n dtype=self._dtype).T\n else:\n assert dtype in ['float', 'uint16'], \"'dtype' can be either 'float' or 'uint16'\"\n return self._recording._read_analog(channels=analog_chans, i_start=start_frame, i_stop=end_frame,\n dtype=dtype).T\n\n @check_get_ttl_args\n def get_ttl_events(self, start_frame=None, end_frame=None, channel_id=0):\n channels = [np.unique(ev.channels)[0] for ev in self._recording.digital_in_events]\n assert channel_id in channels, f\"Specified 'channel' not found. Available channels are {channels}\"\n ev = self._recording.events[channels.index(channel_id)]\n\n ttl_frames = (ev.times.rescale(\"s\") * self.get_sampling_frequency()).magnitude.astype(int)\n ttl_states = np.sign(ev.channel_states)\n ttl_valid_idxs = np.where((ttl_frames >= start_frame) & (ttl_frames < end_frame))[0]\n return ttl_frames[ttl_valid_idxs], ttl_states[ttl_valid_idxs]\n"
]
| [
[
"numpy.sign",
"numpy.where",
"numpy.unique"
]
]
|
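A hedged usage sketch for the `IntanRecordingExtractor` above; `recording.rhd` is a hypothetical file path, and pyintan (>= 0.2.1) is assumed to be installed. The methods called are the ones defined in the record itself:

```python
from spikeextractors import IntanRecordingExtractor

# 'recording.rhd' is a placeholder; point this at a real Intan .rhd/.rhs file
rec = IntanRecordingExtractor(file_path='recording.rhd', dtype='float')
print(rec.get_sampling_frequency())              # sampling rate in Hz
print(rec.get_num_frames())                      # total number of samples
traces = rec.get_traces(channel_ids=rec.get_channel_ids()[:4],
                        start_frame=0, end_frame=1000)
print(traces.shape)                              # (n_channels, n_frames)
```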
Attolab/pymodaq_femto | [
"2f63132c0e9097c92c33db9bce6e2f168009c744"
]
| [
"src/pymodaq_femto/simulation.py"
]
| [
"from PyQt5.QtCore import QObject\nfrom PyQt5 import QtWidgets\n\nfrom pathlib import Path\nfrom pyqtgraph.parametertree import Parameter, ParameterTree\nfrom pymodaq.daq_utils.parameter import pymodaq_ptypes\nfrom pypret.frequencies import om2wl, wl2om, convert\nfrom pypret import FourierTransform, Pulse, PNPS, lib, MeshData\n\nimport numpy as np\nfrom pymodaq.daq_utils.daq_utils import gauss1D, my_moment, l2w, linspace_step, Axis, normalize\nfrom pymodaq.daq_utils.array_manipulation import linspace_this_image, crop_vector_to_axis, crop_array_to_axis,\\\n linspace_this_vect\nfrom pypret.material import BK7\nfrom pymodaq_femto.materials import FS\nfrom pymodaq_femto.graphics import MplCanvas, NavigationToolbar, MeshDataPlot, PulsePlot\nfrom collections import OrderedDict\nfrom pymodaq_femto import _PNPS_CLASSES\n\n\n\nmethods_tmp = list(_PNPS_CLASSES.keys())\nmethods_tmp.sort()\nmethods = ['frog']\nmethods.extend(methods_tmp)\nnlprocesses = list(_PNPS_CLASSES[methods[0]].keys())\nmaterials = OrderedDict(FS=FS, BK7=BK7)\n\n\n\nclass Simulator(QObject):\n params = [\n {'title': 'Show Pulse', 'name': 'show_pulse', 'type': 'action', 'visible': False},\n {'title': 'Show Trace', 'name': 'show_trace', 'type': 'action', 'visible': False},\n {'title': 'Show both', 'name': 'show_plots', 'type': 'action', 'visible': False},\n {'title': 'Pulse Source:', 'name': 'pulse_source', 'type': 'list', 'values': ['Simulated', 'From File'],\n },\n\n {'title': 'Pulse Settings:', 'name': 'pulse_settings', 'type': 'group', 'children': [\n {'title': 'FWHM (fs):', 'name': 'fwhm_time', 'type': 'float', 'value': 5,\n 'tip': 'Fourier Limited Pulse duration in femtoseconds'},\n {'title': 'Shaping type:', 'name': 'shaping_type', 'type': 'list', 'values': ['Taylor', 'Gaussian'],\n },\n {'title': 'Npulses:', 'name': 'npulses', 'type': 'int', 'value': 1,\n 'tip': 'Number of pulse in a sequence'},\n {'title': 'Pulses separation:', 'name': 'delay_pulses', 'type': 'float', 'value': 100,\n 'tip': 'Delay between pulses in femtosecond', 'visible': False},\n {'title': 'Taylor Phase:', 'name': 'taylor_phase', 'type': 'group', 'children': [\n {'title': 'Delay (fs):', 'name': 'GD', 'type': 'float', 'value': 0,\n 'tip': 'Group Delay in femtosecond'},\n {'title': 'GDD (fs2):', 'name': 'GDD', 'type': 'float', 'value': 50,\n 'tip': 'Group Delay Dispersion in femtosecond square'},\n {'title': 'TOD (fs3):', 'name': 'TOD', 'type': 'float', 'value': 500,\n 'tip': 'Third Order Dispersion in femtosecond cube'},\n ]},\n {'title': 'Gaussian Phase:', 'name': 'gaussian_phase', 'type': 'group', 'visible': False, 'children': [\n {'title': 'Amplitude (rad):', 'name': 'gauss_amp', 'type': 'float', 'value': 6,\n 'tip': 'Amplitude of the gaussian phase in radian'},\n {'title': 'dt (fs):', 'name': 'dtime', 'type': 'float', 'value': 10,\n 'tip': 'FWHM (in fs) of the gaussian temporal phase'},\n ]},\n\n {'title': 'Data File:', 'name': 'data_file_path', 'type': 'browsepath', 'filetype': True,\n 'visible': False,\n 'value': str(Path(__file__).parent.parent.parent.joinpath('data/spectral_data.csv')),\n 'tip': 'Path to a CSV file containing in columns: wavelength(nm), Normalized Sprectral Intensity and phase'\n ' in radians'},\n ]},\n {'title': 'Algorithm Options:', 'name': 'algo', 'type': 'group', 'children': [\n {'title': 'Method:', 'name': 'method', 'type': 'list',\n 'values': methods,\n 'tip': 'Characterization Method'},\n {'title': 'NL process:', 'name': 'nlprocess', 'type': 'list',\n 'values': nlprocesses,\n 'tip': 'Non Linear process used in the 
experiment'},\n {'title': 'Alpha (rad):', 'name': 'alpha', 'type': 'float', 'value': 1,\n 'tip': 'amplitude of the phase pattern (in rad)', 'visible': False},\n {'title': 'Gamma (Hz):', 'name': 'gamma', 'type': 'float', 'value': 10,\n 'tip': 'frequency of the phase pattern (in Hz)', 'visible': False},\n {'title': 'Material:', 'name': 'material', 'type': 'list',\n 'values': list(materials.keys()), 'visible': False,\n 'tip': 'Material used for the Dscan measurement'},\n {'title': 'Dscan Parameter Scan:', 'name': 'dscan_parameter', 'type': 'group', 'visible': False,\n 'children': [\n {'title': 'Insertion min (mm):', 'name': 'min', 'type': 'float', 'value': -10.,\n 'tip': 'Minimum of the scanned parameter in mm'},\n {'title': 'Insertion max (mm):', 'name': 'max', 'type': 'float', 'value': 10.,\n 'tip': 'Minimum of the scanned parameter in mm'},\n {'title': 'Insertion step (mm):', 'name': 'step', 'type': 'float', 'value': 0.025,\n 'tip': 'Step size of the scanned parameter in mm'},\n ]},\n {'title': 'MIIPS Parameter Scan:', 'name': 'miips_parameter', 'type': 'group', 'visible': False,\n 'children': [\n {'title': 'Phase min (rad):', 'name': 'min', 'type': 'float', 'value': 0,\n 'tip': 'Minimum of the scanned parameter in radians'},\n {'title': 'Phase max (rad):', 'name': 'max', 'type': 'float', 'value': 2 * np.pi,\n 'tip': 'Minimum of the scanned parameter in radian'},\n {'title': 'Phase setp (rad):', 'name': 'step', 'type': 'float', 'value': 2 * np.pi / 100,\n 'tip': 'Step size of the scanned parameter in radians'},\n ]},\n ]},\n {'title': 'Grid settings:', 'name': 'grid_settings', 'type': 'group', 'children': [\n {'title': 'lambda0 (nm):', 'name': 'wl0', 'type': 'float', 'value': 750,\n 'tip': 'Central Wavelength of the Pulse spectrum and frequency grid'},\n {'title': 'Npoints:', 'name': 'npoints', 'type': 'list', 'values': [2 ** n for n in range(8, 16)],\n 'value': 1024,\n 'tip': 'Number of points for the temporal and Fourier Transform Grid'},\n {'title': 'Time resolution (fs):', 'name': 'time_resolution', 'type': 'float', 'value': 0.5,\n 'tip': 'Time spacing between 2 points in the time grid'},\n ]},\n {'title': 'Plot settings:', 'name': 'plot_settings', 'type': 'group', 'children': [\n {'title': 'Units:', 'name': 'units', 'type': 'list', 'values': ['nm', 'Hz'],\n 'tip': 'Plot ad a function of the wavelength (in nm) or as a function of the angular frequency (in Hz)'},\n {'title': 'Autolimits?:', 'name': 'autolimits', 'type': 'bool', 'value': True,\n 'tip': 'Restrict the data plot to limits given from marginals and threshold'},\n {'title': 'Set Limits?:', 'name': 'setlimits', 'type': 'bool', 'value': False,\n 'tip': 'Restrict the data plot to limits given from marginals and threshold'},\n {'title': 'Autolimits Threshold:', 'name': 'autolim_thresh', 'type': 'float', 'value': 1e-2,\n 'tip': 'Threshold for the determination of the plotting limits'},\n {'title': 'Limit min:', 'name': 'limit_min', 'type': 'float', 'value': 500,\n 'tip': 'Min value of the frequency axis for plotting (Hz or nm)', 'visible': False},\n {'title': 'Limit max:', 'name': 'limit_max', 'type': 'float', 'value': 1100,\n 'tip': 'Max value of the frequency axis for plotting (Hz or nm)', 'visible': False},\n {'title': 'Npts:', 'name': 'Npts', 'type': 'list',\n 'values': [2 ** n for n in range(8, 16)], 'value': 512,\n 'tip': 'Number of points to display the frequency axis'},\n ]},\n ]\n\n def __init__(self, parent=None, show_ui=True):\n super().__init__()\n\n if parent is None:\n parent = QtWidgets.QWidget()\n\n self.parent = 
parent\n self.figs = []\n self.pnps = None\n self.max_pnps = 1\n self.pulse = None\n\n self.settings = Parameter.create(name='dataIN_settings', type='group', children=self.params)\n self.settings.sigTreeStateChanged.connect(self.settings_changed)\n\n if show_ui:\n self.setupUI()\n self.settings.child('show_plots').sigActivated.connect(self.show_pulse)\n self.settings.child('show_plots').show()\n self.settings.child('show_pulse').show()\n self.settings.child('show_trace').show()\n self.settings.child('show_plots').sigActivated.connect(self.show_trace)\n self.settings.child('show_trace').sigActivated.connect(self.show_trace)\n self.settings.child('show_pulse').sigActivated.connect(self.show_pulse)\n else:\n self.settings.child('show_plots').hide()\n self.settings.child('show_pulse').hide()\n self.settings.child('show_trace').hide()\n\n self.update_pulse()\n self.update_pnps()\n\n\n\n\n @property\n def trace(self):\n return self.pnps.trace\n\n @property\n def parameter(self):\n return self.pnps.parameter\n\n\n def setupUI(self):\n self.settings_tree = ParameterTree()\n self.settings_tree.setParameters(self.settings, showTop=False)\n self.settings_tree.setMaximumWidth(300)\n\n mplotlib_widget = QtWidgets.QWidget()\n self.pulse_canvas = MplCanvas(mplotlib_widget, width=5, height=4, dpi=100)\n # Create toolbar, passing canvas as first parament, parent (self, the MainWindow) as second.\n toolbar_pulse = NavigationToolbar(self.pulse_canvas, mplotlib_widget)\n\n self.trace_canvas = MplCanvas(mplotlib_widget, width=5, height=4, dpi=100)\n # Create toolbar, passing canvas as first parament, parent (self, the MainWindow) as second.\n toolbar_trace = NavigationToolbar(self.trace_canvas, mplotlib_widget)\n\n self.parent.setLayout(QtWidgets.QHBoxLayout())\n self.parent.layout().addWidget(self.settings_tree)\n\n mplotlib_widget.setLayout(QtWidgets.QVBoxLayout())\n mplotlib_widget.layout().addWidget(toolbar_pulse)\n mplotlib_widget.layout().addWidget(self.pulse_canvas)\n mplotlib_widget.layout().addWidget(toolbar_trace)\n mplotlib_widget.layout().addWidget(self.trace_canvas)\n self.parent.layout().addWidget(mplotlib_widget)\n\n self.set_tight_layout(True)\n\n def settings_changed(self, param, changes):\n for param, change, data in changes:\n path = self.settings.childPath(param)\n if change == 'childAdded':\n pass\n elif change == 'parent':\n pass\n elif change == 'value':\n if param.name() == 'pulse_source':\n for child in self.settings.child('pulse_settings').children():\n if child.name() == 'data_file_path':\n child.show(param.value() == 'From File')\n else:\n child.show(param.value() != 'From File')\n\n elif param.name() == 'autolimits':\n if param.value():\n self.settings.child('plot_settings', 'autolim_thresh').show()\n self.settings.child('plot_settings', 'limit_min').hide()\n self.settings.child('plot_settings', 'limit_max').hide()\n self.settings.child('plot_settings', 'setlimits').setValue(False)\n\n elif param.name() == 'setlimits':\n if param.value():\n self.settings.child('plot_settings', 'autolim_thresh').hide()\n self.settings.child('plot_settings', 'limit_min').show()\n self.settings.child('plot_settings', 'limit_max').show()\n self.settings.child('plot_settings', 'autolimits').setValue(False)\n\n elif param.name() == 'method':\n self.settings.child('algo', 'nlprocess').setLimits(list(_PNPS_CLASSES[param.value()].keys()))\n\n if param.value() == 'miips':\n self.settings.child('algo', 'alpha').show()\n self.settings.child('algo', 'gamma').show()\n self.settings.child('algo', 
'miips_parameter').show()\n else:\n self.settings.child('algo', 'alpha').hide()\n self.settings.child('algo', 'gamma').hide()\n self.settings.child('algo', 'miips_parameter').hide()\n\n if param.value() == 'dscan':\n self.settings.child('algo', 'material').show()\n self.settings.child('algo', 'dscan_parameter').show()\n else:\n self.settings.child('algo', 'material').hide()\n self.settings.child('algo', 'dscan_parameter').hide()\n\n elif param.name() == 'shaping_type':\n if param.value() == 'Taylor':\n self.settings.child('pulse_settings', 'taylor_phase').show()\n self.settings.child('pulse_settings', 'gaussian_phase').hide()\n elif param.value() == 'Gaussian':\n self.settings.child('pulse_settings', 'gaussian_phase').show()\n self.settings.child('pulse_settings', 'taylor_phase').hide()\n\n elif param.name() == 'npulses':\n self.settings.child('pulse_settings', 'delay_pulses').show(param.value() > 1)\n\n\n def set_tight_layout(self, tight=True):\n self.pulse_canvas.figure.set_tight_layout(tight)\n self.trace_canvas.figure.set_tight_layout(tight)\n\n def show_pulse(self):\n self.update_pulse()\n self.pulse_canvas.figure.clf()\n if self.settings.child('plot_settings', 'units').value() == 'nm':\n PulsePlot(self.pulse, self.pulse_canvas.figure, xaxis='wavelength',\n limit=self.settings.child('plot_settings', 'autolimits').value())\n else:\n PulsePlot(self.pulse, self.pulse_canvas.figure, xaxis='frequency',\n limit=self.settings.child('plot_settings', 'autolimits').value())\n self.pulse_canvas.draw()\n\n def spectrum_exp(self, Npts=512, wl_lim=None):\n spectrum = normalize(lib.abs2(self.pulse.spectrum))\n wl = self.pulse.wl\n if wl_lim is not None:\n wl, spectrum = crop_vector_to_axis(wl, spectrum, wl_lim)\n wl_lin, spectrum_lin = linspace_this_vect(wl[::-1], spectrum[::-1], Npts)\n\n return Axis(data=wl_lin, label='Wavelength', units='m'), spectrum_lin\n\n def trace_exp(self, threshold=None, Npts=512, wl_lim=None):\n \"\"\" Experimental trace on linear wavelength grid of the simulated trace\n Parameters\n ----------\n threshold: (None or float)\n Npts: (int)\n wl_lim: (None or list of 2 floats)\n\n Returns\n -------\n meshdata: (MeshData)\n \"\"\"\n md = self.pnps.trace.copy()\n md.normalize()\n md = self.get_trace_wl(md, Npts)\n md.axes[0] = md.axes[0][::-1]\n md.data = md.data[::-1, :]\n\n if threshold is not None:\n md.autolimit(threshold=threshold)\n elif wl_lim is not None:\n delay_c, wlc, trace_croped = crop_array_to_axis(md.axes[0], md.axes[1], md.data.T,\n (np.min(md.axes[0]), np.max(md.axes[0]), wl_lim[0], wl_lim[1]))\n wl_lin, data_wl = linspace_this_image(wlc, trace_croped.T, axis=1,\n Npts=Npts)\n md.data = data_wl\n md.axes[1] = wl_lin\n # md.data = trace_croped.T\n # md.axes[1] = wlc\n return md.data, Axis(data=md.axes[1], label=md.labels[1], units=md.units[1]),\\\n Axis(data=md.axes[0], label=md.labels[0], units=md.units[0])\n\n def get_trace_wl(self, md, Npts=512):\n wl = l2w(md.axes[1] * 1e-15) * 1e-9\n wl = wl[::-1]\n md.data = md.data[:, ::-1]\n md.scale(1/wl**2) # conversion has to be scaled by the Jacobian\n wl_lin, data_wl = linspace_this_image(wl, md.data, axis=1,\n Npts=Npts)\n\n md = MeshData(data_wl, *[md.axes[0], wl_lin], uncertainty=md.uncertainty,\n labels=[md.labels[0], 'Wavelength'], units=[md.units[0], 'm'])\n md.normalize()\n return md\n\n def show_trace(self):\n self.update_pnps()\n self.trace_canvas.figure.clf()\n md = self.pnps.trace.copy()\n md.normalize()\n Npts = self.settings.child('plot_settings', 'Npts').value()\n if 
self.settings.child('plot_settings', 'units').value() == 'nm':\n md = self.get_trace_wl(md, Npts=Npts)\n\n if self.settings.child('plot_settings', 'autolimits').value():\n md.autolimit(threshold=self.settings.child('plot_settings', 'autolim_thresh').value())\n\n if self.settings.child('plot_settings', 'setlimits').value():\n lims = np.array([self.settings.child('plot_settings', 'limit_min').value(),\n self.settings.child('plot_settings', 'limit_max').value()])\n if self.settings.child('plot_settings', 'units').value() == 'nm':\n lims *= 1e-9\n else:\n lims *= 1e15\n delay_c, xc, trace_croped = crop_array_to_axis(md.axes[0], md.axes[1], md.data.T,\n (np.min(md.axes[0]), np.max(md.axes[0]), lims[0],\n lims[1]))\n xlin, data_line = linspace_this_image(xc, trace_croped.T, axis=1, Npts=Npts)\n md.data = data_line\n md.axes[1] = xlin\n\n MeshDataPlot(md, self.trace_canvas.figure)\n self.trace_canvas.draw()\n\n def update_grid(self):\n Nt = self.settings.child('grid_settings', 'npoints').value()\n dt = self.settings.child('grid_settings', 'time_resolution').value() * 1e-15\n wl0 = self.settings.child('grid_settings', 'wl0').value() * 1e-9\n self.ft = FourierTransform(Nt, dt=dt, w0=wl2om(-wl0 - 300e-9))\n\n def update_pnps(self):\n\n pulse = self.update_pulse()\n method = self.settings.child('algo', 'method').value()\n process = self.settings.child('algo', 'nlprocess').value()\n\n if method == 'dscan':\n material = materials[self.settings.child('algo', 'material').value()]\n self.pnps = PNPS(pulse, method, process, material=material)\n parameter = linspace_step(self.settings.child('algo', 'dscan_parameter', 'min').value(),\n self.settings.child('algo', 'dscan_parameter', 'max').value(),\n self.settings.child('algo', 'dscan_parameter', 'step').value())\n parameter *= 1e-3\n elif method == 'miips':\n alpha = self.settings.child('algo', 'alpha').value()\n gamma = self.settings.child('algo', 'gamma').value()\n self.pnps = PNPS(pulse, method, process, alpha=alpha, gamma=gamma)\n parameter = linspace_step(self.settings.child('algo', 'miips_parameter', 'min').value(),\n self.settings.child('algo', 'miips_parameter', 'max').value(),\n self.settings.child('algo', 'miips_parameter', 'step').value())\n else:\n self.pnps = PNPS(pulse, method, process)\n parameter = np.linspace(self.ft.t[-1], self.ft.t[0], len(self.ft.t))\n self.pnps.calculate(pulse.spectrum, parameter)\n self.max_pnps = np.max(self.pnps.Tmn)\n return self.pnps\n\n def update_pulse(self):\n self.update_grid()\n wl0 = self.settings.child('grid_settings', 'wl0').value() * 1e-9\n w0 = convert(wl0, 'wl', 'om')\n pulse = Pulse(self.ft, wl0)\n\n if self.settings.child('pulse_source').value() == 'Simulated':\n fwhm = self.settings.child('pulse_settings', 'fwhm_time').value()\n domega = 4 * np.log(2) / fwhm\n pulse.spectrum = gauss1D(pulse.w, x0=0., dx=domega * 1e15) # x0=0 because the frequency axis is already\n\n if self.settings.child('pulse_settings', 'shaping_type').value() == 'Taylor':\n GD = self.settings.child('pulse_settings', 'taylor_phase','GD').value()\n GDD = self.settings.child('pulse_settings', 'taylor_phase', 'GDD').value()\n TOD = self.settings.child('pulse_settings', 'taylor_phase', 'TOD').value()\n phase = GD * 1e-15 * pulse.w +\\\n GDD * 1e-30 * pulse.w ** 2 / 2 +\\\n TOD * 1e-45 * pulse.w ** 3 / 6\n pulse.spectrum = pulse.spectrum * np.exp(1j * phase)\n elif self.settings.child('pulse_settings', 'shaping_type').value() == 'Gaussian':\n amp = self.settings.child('pulse_settings', 'gaussian_phase', 'gauss_amp').value()\n dtime = 
self.settings.child('pulse_settings', 'gaussian_phase', 'dtime').value() *1e-15\n phase = amp * gauss1D(pulse.t, 0, dtime)\n pulse.field = pulse.field * np.exp(1j * phase)\n\n Npulses = self.settings.child('pulse_settings', 'npulses').value()\n if Npulses > 1:\n delta_t = self.settings.child('pulse_settings', 'delay_pulses').value()\n spectrum = np.zeros_like(pulse.spectrum)\n for ind in range(Npulses):\n spectrum += 1 / Npulses * pulse.spectrum * np.exp(1j * pulse.w * (-Npulses/2+ind) * delta_t * 1e-15)\n pulse.spectrum = spectrum\n\n # # recenter pulse in time domain\n # idx = np.argmax(pulse.intensity)\n # pulse.spectrum = pulse.spectrum * np.exp(-1j * pulse.t[idx] * (pulse.w - pulse.w0))\n\n else:\n data_path = self.settings.child('pulse_settings', 'data_file_path').value()\n data = np.genfromtxt(data_path, delimiter=',', skip_header=1)\n in_wl, in_int, in_phase = (data[:, i] for i in range(3))\n\n in_int = np.interp(pulse.wl, in_wl * 1e-9, np.maximum(0, in_int), left=0, right=0)\n in_phase = np.interp(pulse.wl, in_wl * 1e-9, in_phase, left=0, right=0)\n pulse.spectrum = in_int * np.exp(1j * in_phase)\n\n self.pulse = pulse\n return pulse\n\n\ndef main():\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n win = QtWidgets.QWidget()\n win.setWindowTitle('PyMoDAQ Femto Simulator')\n prog = Simulator(win, show_ui=True)\n\n win.show()\n\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"numpy.log",
"numpy.maximum",
"numpy.min",
"numpy.genfromtxt",
"numpy.max",
"numpy.zeros_like",
"numpy.interp",
"numpy.exp"
]
]
|
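The Taylor spectral phase applied in `update_pulse` above is phi(w) = GD·w + GDD·w²/2 + TOD·w³/6, with fs-based coefficients scaled to SI units via 1e-15, 1e-30 and 1e-45. A standalone numerical check (the coefficient values below are illustrative, not defaults taken from any measurement):

```python
import numpy as np

w = np.linspace(-0.5e15, 0.5e15, 5)    # angular frequency offsets (rad/s)
GD, GDD, TOD = 0.0, 50.0, 500.0        # fs, fs^2, fs^3 (illustrative values)
phase = GD * 1e-15 * w + GDD * 1e-30 * w**2 / 2 + TOD * 1e-45 * w**3 / 6
print(phase)                           # spectral phase in radians
```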
DarioHett/entsoe-client | [
"bb424fa54966d3be49daa1edb9e0fd40ed00ac15"
]
| [
"entsoe_client/Parsers/Parser.py"
]
| [
"from io import BytesIO\nfrom typing import List\nfrom zipfile import ZipFile\n\nimport pandas as pd\nimport requests\nfrom lxml import etree, objectify\n\nfrom entsoe_client.Parsers.Balacing_MarketDocument_Parser import (\n Balancing_MarketDocument_Parser,\n Balancing_MarketDocument_FinancialExpensesAndIncomeForBalancing_Parser,\n)\nfrom entsoe_client.Parsers.GL_MarketDocument_Parser import GL_MarketDocument_Parser\nfrom entsoe_client.Parsers.Publication_MarketDocument_Parser import (\n Publication_MarketDocument_Parser,\n)\nfrom entsoe_client.Parsers.TransmissionNetwork_MarketDocument_Parser import (\n TransmissionNetwork_MarketDocument_Parser,\n)\n\n\nclass Parser:\n @staticmethod\n def parse(response: requests.Response):\n response_type = response.headers[\"Content-Type\"]\n content = response.content\n if response_type == \"text/xml\":\n parser = XMLParser()\n elif response_type == \"application/zip\":\n parser = ZipParser()\n else:\n raise NotImplementedError\n df = parser.parse(content)\n return df\n\n def __call__(self, response: requests.Response):\n return self.parse(response)\n\n\nclass ZipParser:\n @staticmethod\n def unpack_archive(response_content: bytes) -> List[bytes]:\n archive = ZipFile(BytesIO(response_content), \"r\")\n xml_document_list = [archive.read(file) for file in archive.infolist()]\n return xml_document_list\n\n def parse(self, zip_archive: bytes):\n parser = XMLParser()\n xml_documents = self.unpack_archive(zip_archive)\n dfs = [parser.parse(xml_document) for xml_document in xml_documents]\n df = pd.concat(dfs, axis=0)\n return df\n\n\nclass XMLParser:\n @staticmethod\n def deserialize_xml(response_content: bytes) -> objectify.ObjectifiedElement:\n objectified_xml = objectify.fromstring(response_content)\n for elem in objectified_xml.getiterator():\n elem.tag = etree.QName(elem).localname\n etree.cleanup_namespaces(objectified_xml)\n return objectified_xml\n\n def parse(self, xml_document: bytes):\n object_content = self.deserialize_xml(xml_document)\n parser = factory.get_parser(object_content.tag, object_content.type.text)\n parser.set_objectified_input_xml(object_content)\n return parser.parse()\n\n\nclass ParserFactory:\n @staticmethod\n def get_parser(tag: str, document_type: str):\n if tag in [\"GL_MarketDocument\"]:\n if document_type in [\"A65\", \"A70\"]: # Load\n return GL_MarketDocument_Parser()\n elif document_type in [\n \"A71\",\n \"A72\",\n \"A73\",\n \"A68\",\n \"A69\",\n \"A74\",\n \"A75\",\n ]: # Generation\n return GL_MarketDocument_Parser()\n else:\n raise ValueError(document_type)\n elif tag in [\"TransmissionNetwork_MarketDocument\"]:\n if document_type in [\"A90\", \"A63\", \"A91\", \"A92\"]:\n return TransmissionNetwork_MarketDocument_Parser()\n else:\n raise ValueError(document_type)\n elif tag in [\"Publication_MarketDocument\"]:\n if document_type in [\n \"A61\",\n \"A31\",\n \"A93\",\n \"A25\",\n \"A26\",\n \"A44\",\n \"A09\",\n \"A11\",\n \"A94\",\n ]:\n return Publication_MarketDocument_Parser()\n else:\n raise ValueError(document_type)\n elif tag in [\"Balancing_MarketDocument\"]:\n if document_type in [\n \"A81\",\n \"A82\",\n \"A83\",\n \"A84\",\n \"A88\",\n \"A89\",\n ]: # XML Responses\n return Balancing_MarketDocument_Parser()\n elif document_type in [\"A85\", \"A86\"]: # Zip Responses\n return Balancing_MarketDocument_Parser()\n elif document_type in [\"A87\"]: # Special \"Point\" Type.\n return (\n Balancing_MarketDocument_FinancialExpensesAndIncomeForBalancing_Parser()\n )\n else:\n raise ValueError(document_type)\n 
else:\n raise NotImplementedError(tag)\n\n\nfactory = ParserFactory()\n"
]
| [
[
"pandas.concat"
]
]
|
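A standalone sketch of the namespace-stripping step performed by `XMLParser.deserialize_xml` above, assuming lxml is installed; the tiny XML payload is invented for illustration:

```python
from lxml import etree, objectify

xml = b'<GL_MarketDocument xmlns="urn:example"><type>A65</type></GL_MarketDocument>'
root = objectify.fromstring(xml)
for elem in root.getiterator():              # newer lxml prefers root.iter()
    elem.tag = etree.QName(elem).localname   # drop the namespace from every tag
etree.cleanup_namespaces(root)
print(root.tag, root.type.text)              # GL_MarketDocument A65
```

After this step, `ParserFactory.get_parser` can dispatch on the bare tag and `type` text without namespace prefixes.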
ProtikAcharjay/Python-ML | [
"27b4a010c463c13e42ac8109bfa587fc5fd15374"
]
| [
"Mean_Median_Mode.py"
]
| [
"import numpy\r\nimport statistics\r\n\r\ndataset1= [1,3,8,9,11,15,17]\r\n\r\n#Mean\r\nmean1= numpy.mean(dataset1)\r\nprint(\"Mean using numpy: \", mean1)\r\nmean2= statistics.mean(dataset1)\r\nprint(\"Mean using statistics: \", mean2)\r\n\r\n#Median\r\nmedian1= numpy.median(dataset1)\r\nprint(\"Median using numpy: \", median1)\r\nmedian2= statistics.median(dataset1)\r\nprint(\"Median using statistics: \", median2)\r\n\r\n#We can't get mode using numpy\r\n#There are statistics and scipy.stats library for doing that\r\n#Mode\r\nimport scipy.stats\r\n#statistics is already imported before\r\ndataset2= [1,5,7,4,6,9,8,7,2,4,6,22,7,7,8,3,7]\r\nmode1= statistics.mode(dataset2)\r\nprint(\"Mode using statistics: \", mode1)\r\nmode2= scipy.stats.mode(dataset2)\r\nprint(\"Mode using Scipy.stats: \", mode2)\r\n#using scipy.stats will also show how much frequency the mode have\r\n"
]
| [
[
"numpy.median",
"numpy.mean"
]
]
|
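A short sketch contrasting the two mode calls used above: `statistics.mode` returns the bare value, while `scipy.stats.mode` also reports the count (the exact result structure varies across SciPy versions, so treat the printed form as indicative):

```python
import statistics
import scipy.stats

data = [1, 5, 7, 4, 6, 9, 8, 7, 2, 4, 6, 22, 7, 7, 8, 3, 7]
print(statistics.mode(data))       # 7 (the value only)
result = scipy.stats.mode(data)
print(result)                      # mode 7 together with its count (5 occurrences)
```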
frzfrsfra4/phylanx | [
"001fe7081f3a24e56157cdb21b2d126b8953ff5d"
]
| [
"tests/unit/python/execution_tree/parallel.py"
]
| [
"# Copyright (c) 2017-2018 Hartmut Kaiser\n# Copyright (c) 2018 Steven R. Brandt\n# Copyright (c) 2018 R. Tohid\n#\n# Distributed under the Boost Software License, Version 1.0. (See accompanying\n# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\nimport numpy as np\nfrom phylanx import Phylanx, parallel\n\n\ndef fib0(n):\n if n < 2:\n return n\n return fib0(n - 1) + fib0(n - 2)\n\n\n@Phylanx\ndef fib_(n, a, b):\n if n < 2:\n return n\n elif n < 12:\n a[n] = fib_(n - 1, a, b)\n b[n] = fib_(n - 2, a, b)\n return a[n] + b[n]\n else:\n with parallel:\n a[n] = fib_(n - 1, a, b)\n b[n] = fib_(n - 2, a, b)\n return a[n] + b[n]\n\n\ndef fib(n):\n return fib_(n, np.zeros(n + 1), np.zeros(n + 1))\n\n\nn = 15\nassert fib(n) == fib0(n)\n"
]
| [
[
"numpy.zeros"
]
]
|
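A library-free sketch of the same cutoff pattern `fib_` expresses above: recurse serially below a threshold, then run the two subcalls concurrently above it. Plain Python threads stand in for Phylanx's `with parallel:` block here; this illustrates the structure, not Phylanx's actual execution model:

```python
import threading

def fib(n, cutoff=12):
    if n < 2:
        return n
    if n < cutoff:                       # serial recursion below the cutoff
        return fib(n - 1, cutoff) + fib(n - 2, cutoff)
    results = [0, 0]
    def worker(slot, m):
        results[slot] = fib(m, cutoff)
    threads = [threading.Thread(target=worker, args=(0, n - 1)),
               threading.Thread(target=worker, args=(1, n - 2))]
    for t in threads:
        t.start()
    for t in threads:
        t.join()                         # both subresults ready before summing
    return results[0] + results[1]

assert fib(15) == 610                    # matches the serial fib0 reference
```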
Mnsy-Syl/new_20201103 | [
"9ee39f1c69a4cba896b30f007560fcbe8ac89c02"
]
| [
"networks/deeplab_xception.py"
]
| [
"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as model_zoo\n\n\n\nclass SeparableConv2d(nn.Module):\n def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=0, dilation=1, bias=False):\n super(SeparableConv2d, self).__init__()\n\n self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation,\n groups=inplanes, bias=bias)\n self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.pointwise(x)\n return x\n\n\ndef fixed_padding(inputs, kernel_size, rate):\n kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))\n return padded_inputs\n\n\nclass SeparableConv2d_same(nn.Module):\n def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False):\n super(SeparableConv2d_same, self).__init__()\n\n self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0, dilation,\n groups=inplanes, bias=bias)\n self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)\n\n def forward(self, x):\n x = fixed_padding(x, self.conv1.kernel_size[0], rate=self.conv1.dilation[0])\n x = self.conv1(x)\n x = self.pointwise(x)\n return x\n\n\nclass Block(nn.Module):\n def __init__(self, inplanes, planes, reps, stride=1, dilation=1, start_with_relu=True, grow_first=True, is_last=False):\n super(Block, self).__init__()\n\n if planes != inplanes or stride != 1:\n self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)\n self.skipbn = nn.BatchNorm2d(planes)\n else:\n self.skip = None\n\n self.relu = nn.ReLU(inplace=True)\n rep = []\n\n filters = inplanes\n if grow_first:\n rep.append(self.relu)\n rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))\n rep.append(nn.BatchNorm2d(planes))\n filters = planes\n\n for i in range(reps - 1):\n rep.append(self.relu)\n rep.append(SeparableConv2d_same(filters, filters, 3, stride=1, dilation=dilation))\n rep.append(nn.BatchNorm2d(filters))\n\n if not grow_first:\n rep.append(self.relu)\n rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))\n rep.append(nn.BatchNorm2d(planes))\n\n if not start_with_relu:\n rep = rep[1:]\n\n if stride != 1:\n rep.append(SeparableConv2d_same(planes, planes, 3, stride=2))\n\n if stride == 1 and is_last:\n rep.append(SeparableConv2d_same(planes, planes, 3, stride=1))\n\n\n self.rep = nn.Sequential(*rep)\n\n def forward(self, inp):\n x = self.rep(inp)\n\n if self.skip is not None:\n skip = self.skip(inp)\n skip = self.skipbn(skip)\n else:\n skip = inp\n\n x += skip\n\n return x\n\n\nclass Xception(nn.Module):\n \"\"\"\n Modified Alighed Xception\n \"\"\"\n def __init__(self, inplanes=3, os=16, pretrained=False):\n super(Xception, self).__init__()\n\n if os == 16:\n entry_block3_stride = 2\n middle_block_rate = 1\n exit_block_rates = (1, 2)\n elif os == 8:\n entry_block3_stride = 1\n middle_block_rate = 2\n exit_block_rates = (2, 4)\n else:\n raise NotImplementedError\n\n\n # Entry flow\n self.conv1 = nn.Conv2d(inplanes, 32, 3, stride=2, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(32)\n self.relu = nn.ReLU(inplace=True)\n\n self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(64)\n\n self.block1 = Block(64, 128, reps=2, stride=2, 
start_with_relu=False)\n self.block2 = Block(128, 256, reps=2, stride=2, start_with_relu=True, grow_first=True)\n self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, start_with_relu=True, grow_first=True,\n is_last=True)\n\n # Middle flow\n self.block4 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)\n self.block5 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)\n self.block6 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)\n self.block7 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)\n self.block8 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)\n self.block9 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)\n self.block10 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)\n self.block11 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)\n self.block12 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)\n self.block13 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)\n self.block14 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)\n self.block15 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)\n self.block16 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)\n self.block17 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)\n self.block18 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)\n self.block19 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)\n\n # Exit flow\n self.block20 = Block(728, 1024, reps=2, stride=1, dilation=exit_block_rates[0],\n start_with_relu=True, grow_first=False, is_last=True)\n\n self.conv3 = SeparableConv2d_same(1024, 1536, 3, stride=1, dilation=exit_block_rates[1])\n self.bn3 = nn.BatchNorm2d(1536)\n\n self.conv4 = SeparableConv2d_same(1536, 1536, 3, stride=1, dilation=exit_block_rates[1])\n self.bn4 = nn.BatchNorm2d(1536)\n\n self.conv5 = SeparableConv2d_same(1536, 2048, 3, stride=1, dilation=exit_block_rates[1])\n self.bn5 = nn.BatchNorm2d(2048)\n\n # Init weights\n self.__init_weight()\n\n # Load pretrained model\n if pretrained:\n self.__load_xception_pretrained()\n\n def forward(self, x):\n # Entry flow\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n\n x = self.block1(x)\n low_level_feat = x\n x = self.block2(x)\n x = self.block3(x)\n\n # Middle flow\n x = self.block4(x)\n x = self.block5(x)\n x = self.block6(x)\n x = self.block7(x)\n x = self.block8(x)\n x = self.block9(x)\n x = self.block10(x)\n x = self.block11(x)\n x = self.block12(x)\n x = self.block13(x)\n x = self.block14(x)\n x = self.block15(x)\n x = self.block16(x)\n x = self.block17(x)\n x = self.block18(x)\n x = self.block19(x)\n\n # Exit flow\n x = self.block20(x)\n x = self.conv3(x)\n x = self.bn3(x)\n x = self.relu(x)\n\n x = self.conv4(x)\n x = self.bn4(x)\n x = 
self.relu(x)\n\n x = self.conv5(x)\n x = self.bn5(x)\n x = self.relu(x)\n\n return x, low_level_feat\n\n def __init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n # m.weight.data.normal_(0, math.sqrt(2. / n))\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def __load_xception_pretrained(self):\n pretrain_dict = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth')\n model_dict = {}\n state_dict = self.state_dict()\n\n for k, v in pretrain_dict.items():\n print(k)\n if k in state_dict:\n if 'pointwise' in k:\n v = v.unsqueeze(-1).unsqueeze(-1)\n if k.startswith('block12'):\n model_dict[k.replace('block12', 'block20')] = v\n elif k.startswith('block11'):\n model_dict[k.replace('block11', 'block12')] = v\n elif k.startswith('conv3'):\n model_dict[k] = v\n elif k.startswith('bn3'):\n model_dict[k] = v\n model_dict[k.replace('bn3', 'bn4')] = v\n elif k.startswith('conv4'):\n model_dict[k.replace('conv4', 'conv5')] = v\n elif k.startswith('bn4'):\n model_dict[k.replace('bn4', 'bn5')] = v\n else:\n model_dict[k] = v\n state_dict.update(model_dict)\n self.load_state_dict(state_dict)\n\nclass ASPP_module(nn.Module):\n def __init__(self, inplanes, planes, rate):\n super(ASPP_module, self).__init__()\n if rate == 1:\n kernel_size = 1\n padding = 0\n else:\n kernel_size = 3\n padding = rate\n self.atrous_convolution = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,\n stride=1, padding=padding, dilation=rate, bias=False)\n self.bn = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU()\n\n self.__init_weight()\n\n def forward(self, x):\n x = self.atrous_convolution(x)\n x = self.bn(x)\n\n return self.relu(x)\n\n def __init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n # m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n\nclass DeepLabv3_plus(nn.Module):\n def __init__(self, nInputChannels=3, n_classes=21, os=16, pretrained=False, _print=True):\n if _print:\n print(\"Constructing DeepLabv3+ model...\")\n print(\"Number of classes: {}\".format(n_classes))\n print(\"Output stride: {}\".format(os))\n print(\"Number of Input Channels: {}\".format(nInputChannels))\n super(DeepLabv3_plus, self).__init__()\n\n # Atrous Conv\n self.xception_features = Xception(nInputChannels, os, pretrained)\n\n # ASPP\n if os == 16:\n rates = [1, 6, 12, 18]\n elif os == 8:\n rates = [1, 12, 24, 36]\n else:\n raise NotImplementedError\n\n self.aspp1 = ASPP_module(2048, 256, rate=rates[0])\n self.aspp2 = ASPP_module(2048, 256, rate=rates[1])\n self.aspp3 = ASPP_module(2048, 256, rate=rates[2])\n self.aspp4 = ASPP_module(2048, 256, rate=rates[3])\n\n self.relu = nn.ReLU()\n\n self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),\n nn.Conv2d(2048, 256, 1, stride=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU())\n\n self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)\n self.bn1 = nn.BatchNorm2d(256)\n\n # adopt [1x1, 48] for channel reduction.\n self.conv2 = nn.Conv2d(128, 48, 1, bias=False)\n self.bn2 = nn.BatchNorm2d(48)\n\n self.last_conv = nn.Sequential(nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.Conv2d(256, n_classes, kernel_size=1, stride=1))\n\n def forward(self, input):\n x, low_level_features = self.xception_features(input)\n x1 = self.aspp1(x)\n x2 = self.aspp2(x)\n x3 = self.aspp3(x)\n x4 = self.aspp4(x)\n x5 = self.global_avg_pool(x)\n x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)\n\n x = torch.cat((x1, x2, x3, x4, x5), dim=1)\n\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = F.upsample(x, size=(int(math.ceil(input.size()[-2]/4)),\n int(math.ceil(input.size()[-1]/4))), mode='bilinear', align_corners=True)\n\n low_level_features = self.conv2(low_level_features)\n low_level_features = self.bn2(low_level_features)\n low_level_features = self.relu(low_level_features)\n\n\n x = torch.cat((x, low_level_features), dim=1)\n x = self.last_conv(x)\n x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)\n\n return x\n\n def freeze_bn(self):\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n\n def __init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n # torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\ndef get_1x_lr_params(model):\n \"\"\"\n This generator returns all the parameters of the net except for\n the last classification layer. 
Note that for each batchnorm layer,\n requires_grad is set to False in deeplab_resnet.py, therefore this function does not return\n any batchnorm parameter\n \"\"\"\n b = [model.xception_features]\n for i in range(len(b)):\n for k in b[i].parameters():\n if k.requires_grad:\n yield k\n\n\ndef get_10x_lr_params(model):\n \"\"\"\n This generator returns all the parameters for the last layer of the net,\n which does the classification of pixel into classes\n \"\"\"\n b = [model.aspp1, model.aspp2, model.aspp3, model.aspp4, model.conv1, model.conv2, model.last_conv]\n for j in range(len(b)):\n for k in b[j].parameters():\n if k.requires_grad:\n yield k\n\n\nif __name__ == \"__main__\":\n model = DeepLabv3_plus(nInputChannels=3, n_classes=21, os=16, pretrained=True, _print=True)\n model.eval()\n image = torch.randn(1, 3, 512, 512)\n with torch.no_grad():\n output = model.forward(image)\n print(output.size())\n\n\n\n"
]
| [
[
"torch.nn.Sequential",
"torch.cat",
"torch.randn",
"torch.nn.Conv2d",
"torch.no_grad",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url",
"torch.nn.functional.pad",
"torch.nn.init.kaiming_normal_"
]
]
|
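A worked check of the 'same'-padding arithmetic in `fixed_padding` above: the effective kernel size is k + (k-1)(rate-1), and the total padding of effective-1 is split so that stride-1 convolutions preserve spatial size:

```python
# Pure-Python mirror of the padding computation in fixed_padding above.
def same_pad(kernel_size, rate):
    k_eff = kernel_size + (kernel_size - 1) * (rate - 1)   # dilated kernel extent
    pad_total = k_eff - 1
    pad_beg = pad_total // 2
    return pad_beg, pad_total - pad_beg

print(same_pad(3, 1))   # (1, 1)
print(same_pad(3, 2))   # (2, 2) -> a dilated 3x3 kernel spans 5 pixels
print(same_pad(4, 1))   # (1, 2) -> even kernels pad asymmetrically
```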
Sleepychord/cogdata | [
"529892512b11bac3e765490e46876bea88c14179"
]
| [
"cogdata/utils/cogview/vqvae_zc.py"
]
| [
"import warnings\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n# import distributed as dist_fn\n\n# Copyright 2018 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\n# Borrowed from https://github.com/deepmind/sonnet and ported it to PyTorch\n\n\nclass Quantize(nn.Module):\n def __init__(self, dim, n_embed, decay=0.99, eps=1e-5):\n super().__init__()\n\n self.dim = dim\n self.n_embed = n_embed\n self.decay = decay\n self.eps = eps\n\n embed = torch.randn(dim, n_embed)\n torch.nn.init.xavier_uniform_(\n embed, gain=torch.nn.init.calculate_gain('tanh'))\n self.register_buffer(\"embed\", embed)\n self.register_buffer(\"cluster_size\", torch.zeros(n_embed))\n self.register_buffer(\"embed_avg\", embed.clone())\n\n def forward_(self, input, continuous_relax=False, temperature=1., hard=False):\n flatten = input.reshape(-1, self.dim)\n dist = (\n flatten.pow(2).sum(1, keepdim=True)\n - 2 * flatten @ self.embed\n + self.embed.pow(2).sum(0, keepdim=True)\n ) # dist map, shape=[*, n_embed]\n\n if not continuous_relax:\n # argmax + lookup\n _, embed_ind = (-dist).max(1)\n embed_onehot = F.one_hot(\n embed_ind, self.n_embed).type(flatten.dtype)\n embed_ind = embed_ind.view(*input.shape[:-1])\n quantize = self.embed_code(embed_ind)\n elif not hard:\n # gumbel softmax weighted sum\n embed_soft, embed_ind = gumbel_softmax(\n -dist, tau=temperature, hard=False)\n embed_ind = embed_ind.view(*input.shape[:-1])\n embed_soft = embed_soft.view(*input.shape[:-1], self.n_embed)\n quantize = embed_soft @ self.embed.transpose(0, 1)\n else:\n # gumbel softmax hard lookup\n embed_onehot, embed_ind = gumbel_softmax(\n -dist, tau=temperature, hard=True)\n embed_ind = embed_ind.view(*input.shape[:-1])\n quantize = self.embed_code(embed_ind)\n\n if self.training and ((continuous_relax and hard) or (not continuous_relax)):\n embed_onehot_sum = embed_onehot.sum(0)\n embed_sum = flatten.transpose(0, 1) @ embed_onehot\n\n dist_fn.all_reduce(embed_onehot_sum)\n dist_fn.all_reduce(embed_sum)\n\n self.cluster_size.data.mul_(self.decay).add_(\n embed_onehot_sum, alpha=1 - self.decay\n )\n self.embed_avg.data.mul_(self.decay).add_(\n embed_sum, alpha=1 - self.decay)\n n = self.cluster_size.sum()\n cluster_size = (\n (self.cluster_size + self.eps) /\n (n + self.n_embed * self.eps) * n\n )\n embed_normalized = self.embed_avg / cluster_size.unsqueeze(0)\n self.embed.data.copy_(embed_normalized)\n if not continuous_relax:\n diff = (quantize.detach() - input).pow(2).mean()\n quantize = input + (quantize - input).detach()\n else:\n # maybe need replace a KL term here\n qy = (-dist).softmax(-1)\n diff = torch.sum(\n qy * torch.log(qy * self.n_embed + 1e-20), dim=-1).mean() # KL\n # diff = (quantize - input).pow(2).mean().detach() # gumbel softmax do not need diff\n quantize = quantize.to(memory_format=torch.channels_last)\n return quantize, diff, embed_ind\n\n def embed_code(self, 
embed_id):\n return F.embedding(embed_id, self.embed.transpose(0, 1))\n\n\nclass ResBlock(nn.Module):\n def __init__(self, in_channel, channel):\n super().__init__()\n\n self.conv = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channel, channel, 3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel, in_channel, 1),\n )\n\n def forward(self, input):\n out = self.conv(input)\n out += input\n\n return out\n\n\nclass Encoder(nn.Module):\n def __init__(self, in_channel, channel, n_res_block, n_res_channel, stride, embed_dim, n_embed, simple):\n super().__init__()\n\n if stride == 6:\n if simple:\n blocks = [\n nn.Conv2d(in_channel, channel, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel, channel, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel, channel, 4, stride=2, padding=1),\n ]\n else:\n blocks = [\n nn.Conv2d(in_channel, channel // 4,\n 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel // 4, channel //\n 2, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel // 2, channel, 4, stride=2, padding=1),\n ]\n\n elif stride == 4:\n blocks = [\n nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel // 2, channel, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel, channel, 3, padding=1),\n ]\n\n elif stride == 2:\n blocks = [\n nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel // 2, channel, 3, padding=1),\n ]\n\n for i in range(n_res_block):\n blocks.append(ResBlock(channel, n_res_channel))\n\n blocks.append(nn.ReLU(inplace=True))\n blocks.append(nn.Conv2d(channel, embed_dim, 1))\n self.blocks = nn.Sequential(*blocks)\n\n def forward(self, input):\n return self.blocks(input).permute(0, 2, 3, 1)\n\n\nclass Decoder(nn.Module):\n def __init__(\n self, in_channel, out_channel, channel, n_res_block, n_res_channel, stride, simple\n ):\n super().__init__()\n blocks = [\n nn.ConvTranspose2d(in_channel, channel, 4, stride=2, padding=1),\n ]\n\n for i in range(n_res_block):\n blocks.append(ResBlock(channel, n_res_channel))\n\n blocks.append(nn.ReLU(inplace=True))\n\n if stride == 4 and simple:\n blocks.extend(\n [\n nn.ConvTranspose2d(channel, channel, 4,\n stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(\n channel, channel, 4, stride=2, padding=1\n ),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel, out_channel, 1)\n ]\n )\n elif stride == 4:\n blocks.extend(\n [\n nn.ConvTranspose2d(channel, channel, 4,\n stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(channel, channel // 2, 1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(\n channel // 2, out_channel, 4, stride=2, padding=1\n ),\n ]\n )\n\n elif stride == 2:\n blocks.append(\n nn.ConvTranspose2d(channel, out_channel,\n 4, stride=2, padding=1)\n )\n\n self.blocks = nn.Sequential(*blocks)\n\n def forward(self, input):\n return self.blocks(input)\n\n\nclass VQVAE(nn.Module):\n def __init__(\n self,\n in_channel=3,\n channel=128,\n n_res_block=2,\n n_res_channel=32,\n embed_dim=64,\n n_embed=1024,\n stride=4,\n simple=True,\n decay=0.99\n ):\n super().__init__()\n if channel == 2048:\n n_res_block = 0\n self.enc_b = Encoder(in_channel, channel, n_res_block,\n n_res_channel, stride, embed_dim, n_embed, simple)\n self.quantize_t = Quantize(embed_dim, n_embed)\n self.dec = Decoder(\n in_channel=embed_dim,\n out_channel=in_channel,\n channel=channel,\n n_res_block=n_res_block,\n 
n_res_channel=n_res_channel,\n stride=stride-2,\n simple=simple\n )\n\n def forward(self, input, continuous_relax=False, temperature=1., hard=False, KL=False):\n quant_t, diff, _, = self.encode(\n input, continuous_relax, temperature, hard, KL)\n dec = self.dec(quant_t)\n\n return dec, diff\n\n def encode(self, input, continuous_relax=False, temperature=1., hard=False, KL=False):\n logits = self.enc_b(input)\n quant_t, diff_t, id_t = self.quantize_t.forward_(\n logits, continuous_relax, temperature, hard)\n quant_t = quant_t.permute(0, 3, 1, 2)\n if not continuous_relax or KL:\n diff_t = diff_t.unsqueeze(0)\n else:\n diff_t = torch.zeros_like(diff_t).unsqueeze(\n 0) # placeholder to return right shape\n return quant_t, diff_t, id_t\n\n def decode(self, code):\n return self.dec(code)\n\n def decode_code(self, code_t):\n quant_t = self.quantize_t.embed_code(code_t)\n quant_t = quant_t.permute(0, 3, 1, 2)\n dec = self.dec(quant_t)\n\n return dec\n\n\ntry:\n from torch.overrides import has_torch_function, handle_torch_function\nexcept ImportError as e:\n from torch._overrides import has_torch_function, handle_torch_function\n\n\nTensor = torch.Tensor\n\n\ndef gumbel_softmax(logits, tau=1, hard=False, eps=1e-10, dim=-1):\n # type: (Tensor, float, bool, float, int) -> Tensor\n r\"\"\"\n Samples from the Gumbel-Softmax distribution (`Link 1`_ `Link 2`_) and optionally discretizes.\n\n Args:\n logits: `[..., num_features]` unnormalized log probabilities\n tau: non-negative scalar temperature\n hard: if ``True``, the returned samples will be discretized as one-hot vectors,\n but will be differentiated as if it is the soft sample in autograd\n dim (int): A dimension along which softmax will be computed. Default: -1.\n\n Returns:\n Sampled tensor of same shape as `logits` from the Gumbel-Softmax distribution.\n If ``hard=True``, the returned samples will be one-hot, otherwise they will\n be probability distributions that sum to 1 across `dim`.\n\n .. note::\n This function is here for legacy reasons, may be removed from nn.Functional in the future.\n\n .. note::\n The main trick for `hard` is to do `y_hard - y_soft.detach() + y_soft`\n\n It achieves two things:\n - makes the output value exactly one-hot\n (since we add then subtract y_soft value)\n - makes the gradient equal to y_soft gradient\n (since we strip all other gradients)\n\n Examples::\n >>> logits = torch.randn(20, 32)\n >>> # Sample soft categorical using reparametrization trick:\n >>> F.gumbel_softmax(logits, tau=1, hard=False)\n >>> # Sample hard categorical using \"Straight-through\" trick:\n >>> F.gumbel_softmax(logits, tau=1, hard=True)\n\n .. _Link 1:\n https://arxiv.org/abs/1611.00712\n .. 
_Link 2:\n https://arxiv.org/abs/1611.01144\n \"\"\"\n if not torch.jit.is_scripting():\n if type(logits) is not Tensor and has_torch_function((logits,)):\n return handle_torch_function(\n gumbel_softmax, (logits,), logits, tau=tau, hard=hard, eps=eps, dim=dim)\n if eps != 1e-10:\n warnings.warn(\"`eps` parameter is deprecated and has no effect.\")\n\n # ~Gumbel(0,1)\n gumbels = - \\\n torch.empty_like(\n logits, memory_format=torch.legacy_contiguous_format).exponential_().log()\n gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau)\n y_soft = gumbels.softmax(dim)\n\n if hard:\n # Straight through.\n index = y_soft.max(dim, keepdim=True)[1]\n y_hard = torch.zeros_like(\n logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)\n ret = y_hard - y_soft.detach() + y_soft\n return ret, index\n else:\n # Reparametrization trick.\n ret = y_soft\n index = y_soft.max(dim, keepdim=True)[1]\n return ret, index\n"
]
| [
[
"torch.nn.Sequential",
"torch.nn.init.calculate_gain",
"torch.empty_like",
"torch.nn.ConvTranspose2d",
"torch.zeros",
"torch.randn",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch._overrides.handle_torch_function",
"torch._overrides.has_torch_function",
"torch.log",
"torch.jit.is_scripting",
"torch.nn.functional.one_hot",
"torch.nn.ReLU"
]
]
|
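A note on the row above: the local gumbel_softmax differs from torch.nn.functional.gumbel_softmax in that it also returns the argmax index, so callers can recover discrete code ids alongside the straight-through sample. A minimal usage sketch (the shapes and the loss are illustrative, not from the original repo):

    import torch

    logits = torch.randn(4, 10, requires_grad=True)    # 4 items, 10 codebook entries
    sample, index = gumbel_softmax(logits, tau=1.0, hard=True)
    # sample: one-hot rows, shape (4, 10); index: argmax ids, shape (4, 1)
    loss = (sample * torch.randn(4, 10)).sum()         # any differentiable use of the sample
    loss.backward()                                    # grads reach logits via the soft relaxation
    print(logits.grad.shape)                           # torch.Size([4, 10])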
x1155665/VGG16-Visualizer | [
"d2febaefb7cb5c0bf759880cf20a97141a8c5070"
]
| [
"CNN_Vis_Demo_Model.py"
]
| [
"import numpy as np\nfrom PyQt5.QtCore import Qt, pyqtSignal, QObject\nfrom PyQt5.QtGui import QPixmap\nimport os, sys\nimport cv2\nfrom enum import Enum\nimport time\nfrom Settings import Settings\n\n\n# TODO: Improve the notification mechanism between model and view\nclass CNN_Vis_Demo_Model(QObject):\n # These indices are used to notify the view about the changes\n data_idx_model_names = 0\n data_idx_layer_names = 1\n data_idx_layer_output_sizes = 2\n data_idx_layer_activation = 3\n data_idx_probs = 4\n data_idx_input_image_names = 5\n data_idx_input_image = 6\n data_idx_labels = 7\n data_idx_new_input = 128\n data_idx_input_image_path = 8\n\n dataChanged = pyqtSignal(int)\n\n settings = None\n\n class BackpropModeOption(Enum):\n GRADIENT = 'Gradient'\n ZF = 'ZF Deconv'\n GUIDED = 'Guided Backprop'\n\n def __init__(self):\n super(QObject, self).__init__()\n self.settings = Settings() # Read settings from files\n\n self.caffevis_caffe_root = self.settings.caffevis_caffe_root\n sys.path.insert(0, os.path.join(self.caffevis_caffe_root, 'python'))\n import caffe\n if self.settings.use_GPU:\n caffe.set_mode_gpu()\n caffe.set_device(self.settings.gpu_id)\n print('Loaded caffe in GPU mode, using device', self.settings.gpu_id)\n else:\n caffe.set_mode_cpu()\n print('Loaded caffe in CPU mode')\n self.camera_id = self.settings.camera_id\n self.cap = cv2.VideoCapture(self.camera_id)\n\n self._layer_list = [] # to be read from prototxt\n self._layer_output_sizes = {} # to be read from prototxt\n\n self.online = False # indicates if the network has finished classifying an image\n\n def set_model(self, model_name):\n \"\"\"\n set the network model\n :param model_name:\n :return:\n \"\"\"\n if self.settings.model_names.__contains__(model_name):\n self.settings.load_settings(model_name)\n self.online = False\n self.load_net(model_name)\n\n def load_net(self, model_name):\n sys.path.insert(0, os.path.join(self.caffevis_caffe_root, 'python'))\n import caffe\n\n self._model_name = model_name\n self._model_def = self.settings.prototxt\n self._model_weights = self.settings.network_weights\n self._labels = np.loadtxt(self.settings.label_file, str, delimiter='\\n')\n\n processed_prototxt = self._process_network_proto(self._model_def) # enable deconvolution\n self._net = caffe.Classifier(processed_prototxt, self._model_weights, mean=self.settings.mean, raw_scale=255.0,\n channel_swap=self.settings.channel_swap)\n current_input_shape = self._net.blobs[self._net.inputs[0]].shape\n current_input_shape[0] = 1\n self._net.blobs[self._net.inputs[0]].reshape(*current_input_shape)\n self._net.reshape()\n self._get_layers_info()\n self.dataChanged.emit(self.data_idx_layer_names)\n\n # get the names of demo-images\n self._input_image_names = [image_name for image_name in os.listdir(self.settings.input_image_path)]\n self.dataChanged.emit(self.data_idx_input_image_names)\n\n self._transformer = caffe.io.Transformer(\n {self._data_blob_name: self._net.blobs[self._data_blob_name].data.shape})\n self._transformer.set_transpose(self._data_blob_name, (2, 0, 1)) # move image channels to outermost dimension\n self._transformer.set_mean(self._data_blob_name,\n self.settings.mean) # subtract the dataset-mean value in each channel\n self._transformer.set_raw_scale(self._data_blob_name, 255) # rescale from [0, 1] to [0, 255]\n self._transformer.set_channel_swap(self._data_blob_name, self.settings.channel_swap)\n\n def set_input_and_forward(self, input_image_name, video=False):\n \"\"\"\n use static image file or camera as input to 
forward the network.\n View will be informed to refresh the content.\n If video is set, input_image_name will be ignored.\n :param input_image_name: The file name of the local image file\n :param video: set True to use camera as input\n \"\"\"\n sys.path.insert(0, os.path.join(self.caffevis_caffe_root, 'python'))\n import caffe\n\n def _forward_image(_image):\n input_image = caffe.io.resize(_image, self._input_dims, mode='constant', cval=0)\n self._input_image = (input_image * 255).astype(np.uint8)\n transformed_image = self._transformer.preprocess(self._data_blob_name, input_image)\n self._net.blobs[self._data_blob_name].data[...] = transformed_image\n self._net.forward()\n self.online = True\n self.dataChanged.emit(self.data_idx_new_input)\n\n def _square(_image):\n \"\"\"\n pad the image to a square along its larger side (padding order: top, bottom, left, right)\n :param _image: image to be processed\n :return: processed image\n \"\"\"\n [height, width, _] = _image.shape\n # icon portrait mode\n if width < height:\n pad_size = height - width\n if pad_size % 2 == 0:\n icon_squared = cv2.copyMakeBorder(_image, 0, 0, pad_size // 2, pad_size // 2, cv2.BORDER_CONSTANT,\n value=[0, 0, 0])\n else:\n icon_squared = cv2.copyMakeBorder(_image, 0, 0, pad_size // 2 + 1, pad_size // 2, cv2.BORDER_CONSTANT,\n value=[0, 0, 0])\n return icon_squared\n # icon landscape mode\n elif height < width:\n pad_size = width - height\n if pad_size % 2 == 0:\n # top, bottom, left, right\n icon_squared = cv2.copyMakeBorder(_image, pad_size // 2, pad_size // 2, 0, 0, cv2.BORDER_CONSTANT,\n value=[0, 0, 0])\n else:\n icon_squared = cv2.copyMakeBorder(_image, pad_size // 2 + 1, pad_size // 2, 0, 0, cv2.BORDER_CONSTANT,\n value=[0, 0, 0])\n return icon_squared\n elif height == width:\n return _image\n\n def _crop_max_square(_image):\n \"\"\"\n crop the biggest square at the center of an image\n :param _image: image to be processed\n :return: processed image\n \"\"\"\n h, w, c = _image.shape\n l = min(h, w)\n if (h + l) % 2 != 0:\n _image = _image[(h - l + 1) // 2:(h + l + 1) // 2, :, :]\n elif (w + l) % 2 != 0:\n _image = _image[:, (w - l + 1) // 2:(w + l + 1) // 2, :]\n else:\n _image = _image[(h - l) // 2:(h + l) // 2, (w - l) // 2:(w + l) // 2, :]\n return _image\n\n if video:\n ret, frame = self.cap.read()\n squared_image = _crop_max_square(frame)\n _forward_image(cv2.flip(squared_image[:, :, (2, 1, 0)], 1)) # RGB\n else:\n if self._input_image_names.__contains__(input_image_name):\n self._input_image_path = os.path.join(self.settings.input_image_path, input_image_name)\n image = caffe.io.load_image(self._input_image_path) # RGB\n image = _square(image)\n _forward_image(image)\n\n def get_data(self, data_idx):\n \"\"\"\n Use the data index to get the data.\n The intent was to add access-control logic. 
But this seems to be unnecessary.\n :param data_idx:\n :return: Desired data\n \"\"\"\n if data_idx == self.data_idx_model_names:\n return self.settings.model_names\n elif data_idx == self.data_idx_layer_names:\n return self._layer_list\n elif data_idx == self.data_idx_layer_output_sizes:\n return self._layer_output_sizes\n elif data_idx == self.data_idx_probs:\n return self._net.blobs[self._props_blob_name].data.flatten()\n elif data_idx == self.data_idx_input_image_names:\n return self._input_image_names\n elif data_idx == self.data_idx_labels:\n return self._labels\n elif data_idx == self.data_idx_input_image_path:\n return self._input_image_path\n elif data_idx == self.data_idx_input_image:\n return self._input_image\n\n def get_activations(self, layer_name):\n \"\"\"\n Get all the activations of one layer\n :param layer_name:\n :return: activations (N, H, W)\n \"\"\"\n if self.online and self._layer_list.__contains__(layer_name):\n activations = self._net.blobs[layer_name].data[0]\n return activations\n\n def get_activation(self, layer_name, unit_index):\n \"\"\"\n Get the activation of a neuron\n :param layer_name:\n :param unit_index:\n :return: activations (H, W)\n \"\"\"\n if self.online and self._layer_list.__contains__(layer_name) and unit_index < \\\n self._layer_output_sizes[layer_name][0]:\n activation = self._net.blobs[layer_name].data[0][unit_index]\n return activation\n\n def get_top_k_images_of_unit(self, layer_name, unit_index, k, get_deconv):\n \"\"\"\n Get the k images with the highest activation for one certain neuron.\n :param layer_name:\n :param unit_index:\n :param k:\n :param get_deconv: Get the deconv results of the top images\n :return: Desired top k images\n \"\"\"\n if self.online and self.settings.deepvis_outputs_path and self._layer_list.__contains__(layer_name) \\\n and unit_index < self._layer_output_sizes[layer_name][0]:\n unit_dir = os.path.join(self.settings.deepvis_outputs_path, layer_name, 'unit_%04d' % unit_index)\n assert k <= 9\n if get_deconv:\n type = 'deconvnorm'\n else:\n type = 'maxim'\n pixmaps = []\n for i in range(k):\n file_name = '%s_%03d.png' % (type, i)\n file_path = os.path.join(unit_dir, file_name)\n if os.path.exists(file_path):\n pixmaps.append(QPixmap(file_path))\n else:\n print(file_path + \" does not exist.\")\n return pixmaps\n\n def get_top_1_images_of_layer(self, layer_name):\n \"\"\"\n Get the top 1 images of all units in one layer\n :param layer_name:\n :return: The images with the highest activations for the units\n \"\"\"\n if self.online and self.settings.deepvis_outputs_path and self._layer_list.__contains__(layer_name):\n channel_number = self._layer_output_sizes[layer_name][0]\n pixmaps = []\n for unit_index in range(channel_number):\n unit_dir = os.path.join(self.settings.deepvis_outputs_path, layer_name, 'unit_%04d' % unit_index)\n file_name = 'maxim_000.png'\n file_path = os.path.join(unit_dir, file_name)\n pixmaps.append(QPixmap(file_path))\n return pixmaps\n\n def get_deconv(self, layer_name, unit_index, backprop_mode):\n \"\"\"\n Compute the backprop/deconv of one unit\n :param layer_name:\n :param unit_index:\n :param backprop_mode: Available options: self.BackpropModeOption\n :return: result\n \"\"\"\n diffs = self._net.blobs[layer_name].diff[0]\n diffs = diffs * 0\n data = self._net.blobs[layer_name].data[0]\n diffs[unit_index] = data[unit_index]\n diffs = np.expand_dims(diffs, 0) # add batch dimension\n layer_name = str(layer_name)\n\n if backprop_mode == self.BackpropModeOption.GRADIENT.value:\n result = self._net.backward_from_layer(layer_name, 
diffs, zero_higher=True)\n elif backprop_mode == self.BackpropModeOption.ZF.value:\n result = self._net.deconv_from_layer(layer_name, diffs, zero_higher=True, deconv_type='Zeiler & Fergus')\n elif backprop_mode == self.BackpropModeOption.GUIDED.value:\n result = self._net.deconv_from_layer(layer_name, diffs, zero_higher=True, deconv_type='Guided Backprop')\n else:\n result = None\n if result is not None:\n result = np.transpose(result[self._net.inputs[0]][0], (1, 2, 0))\n return result\n\n def _process_network_proto(self, prototxt):\n processed_prototxt = prototxt + \".processed_by_deepvis\"\n\n # check if force_backwards is missing\n found_force_backwards = False\n with open(prototxt, 'r') as proto_file:\n for line in proto_file:\n fields = line.strip().split()\n if len(fields) == 2 and fields[0] == 'force_backward:' and fields[1] == 'true':\n found_force_backwards = True\n break\n\n # write file, adding force_backward if needed\n with open(prototxt, 'r') as proto_file:\n with open(processed_prototxt, 'w') as new_proto_file:\n if not found_force_backwards:\n new_proto_file.write('force_backward: true\\n')\n for line in proto_file:\n new_proto_file.write(line)\n\n # run upgrade tool on new file name (same output file)\n upgrade_tool_command_line = self.caffevis_caffe_root + '/build/tools/upgrade_net_proto_text.bin ' + processed_prototxt + ' ' + processed_prototxt\n os.system(upgrade_tool_command_line)\n\n return processed_prototxt\n\n def switch_camera(self, on):\n if on:\n self.cap.open(self.camera_id)\n else:\n self.cap.release()\n\n def _get_layers_info(self):\n \"\"\"\n Get the layer names / output sizes / 'data' blob name / input dimension\n :return:\n \"\"\"\n self._layer_list = []\n self._layer_output_sizes = {}\n # go over layers\n all_layer_list = list(self._net._layer_names)\n total_layer_number = len(all_layer_list)\n for idx in range(total_layer_number):\n layer_name = all_layer_list[idx]\n # skip input, output and inplace layers. eg. relu\n if idx == 0 or idx == total_layer_number - 1 or (\n len(self._net.top_names[layer_name]) == 1 and len(self._net.bottom_names[layer_name]) == 1 and\n self._net.top_names[layer_name][0] == self._net.bottom_names[layer_name][0]):\n continue\n\n self._layer_list.append(layer_name)\n\n # get layer output size\n top_shape = self._net.blobs[layer_name].data[0].shape\n\n self._layer_output_sizes.update({layer_name: top_shape})\n\n # get data blob name\n self._data_blob_name = self._net.top_names[all_layer_list[0]][0]\n # get input dims\n self._input_dims = self._net.blobs[self._data_blob_name].data.shape[2:4]\n # get prob blob name\n self._props_blob_name = self._net.top_names[all_layer_list[-1]][0]\n"
]
| [
[
"numpy.expand_dims",
"numpy.loadtxt",
"numpy.transpose"
]
]
|
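A note on the row above: the heart of get_deconv is seeding the backward pass with a single unit's activation while zeroing every other channel of the diff blob. A standalone sketch of that masking step (the array shape is hypothetical):

    import numpy as np

    data = np.random.rand(256, 14, 14).astype(np.float32)  # fake conv-layer activations
    unit_index = 42
    diffs = np.zeros_like(data)
    diffs[unit_index] = data[unit_index]  # keep only the chosen unit's activation
    diffs = np.expand_dims(diffs, 0)      # add the batch dimension, as in get_deconv
    print(diffs.shape)                    # (1, 256, 14, 14)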
hendrycks/error-detection | [
"276d605bfa9a9bd7701bd88937c537c3fcab94cf"
]
| [
"ASR/CTC/CTC_eval.py"
]
| [
"from tensorflow.python.ops import ctc_ops as ctc\n# from tensorflow.contrib.ctc import ctc_ops as ctc # depreciated in future\nimport tensorflow as tf\nimport numpy as np\nfrom utils import load_batched_data, target_list_to_sparse_tensor\nimport pickle\nimport sklearn.metrics as sk\nimport gc\n\n####Learning Parameters\nnEpochs = 60\nbatchSize = 100\n\n####Network Parameters\nnFeatures = 39 # MFCC coefficients, energy, delta, delta delta\nnHidden = 256\nnClasses = 40 # 40 because of 39 phones, plus the \"blank\" for CTC\n\n####Load data\nprint('Loading data')\ndata = pickle.load(open(\"TIMIT_data_prepared_for_CTC_clean.pkl\", 'rb'), encoding='latin1')\n\n# 6300 x 776 x 39\n\n# we will the last 1300 examples from the 6300\ndata_list = []\nfor i in range(1300//batchSize):\n offset = 5000 + batchSize * i\n target_list = []\n for j in range(batchSize):\n target_list.append(data['y_phones'][offset+j])\n data_list.append(\n (data['x'][offset:offset+batchSize,:,:],\n target_list_to_sparse_tensor(target_list),\n data['mask'][offset:offset+batchSize]))\n\ndel data\n\nbatchedData, maxTimeSteps, totalN = data_list, 776, 13\n\n\ndef clipped_gelu(x):\n return tf.minimum(0.5 * x * (1 + tf.tanh(x)), 6)\n\n####Define graph\nprint('Defining graph')\ngraph = tf.Graph()\nwith graph.as_default():\n\n ####NOTE: try variable-steps inputs and dynamic bidirectional rnn, when it's implemented in tensorflow\n\n ####Graph input\n inputX = tf.placeholder(tf.float32, shape=(batchSize, maxTimeSteps, nFeatures))\n\n #Prep input data to fit requirements of rnn.bidirectional_rnn\n # Reshape to 2-D tensor (nTimeSteps*batchSize, nfeatures)\n inputXrs = tf.reshape(tf.transpose(inputX, [1, 0, 2]), [-1, nFeatures])\n # Split to get a list of 'n_steps' tensors of shape (batch_size, n_hidden)\n inputList = tf.split(0, maxTimeSteps, inputXrs)\n targetIxs = tf.placeholder(tf.int64)\n targetVals = tf.placeholder(tf.int32)\n targetShape = tf.placeholder(tf.int64)\n targetY = tf.SparseTensor(targetIxs, targetVals, targetShape)\n seqLengths = tf.placeholder(tf.int32, shape=(batchSize))\n # print(inputX, targetIxs, targetVals, targetShape, seqLengths)\n\n ####Weights & biases\n weightsOutH1 = tf.Variable(tf.truncated_normal([2, nHidden],\n stddev=np.sqrt(2.0 / (2*nHidden))))\n biasesOutH1 = tf.Variable(tf.zeros([nHidden]))\n weightsOutH2 = tf.Variable(tf.truncated_normal([2, nHidden],\n stddev=np.sqrt(2.0 / (2*nHidden))))\n biasesOutH2 = tf.Variable(tf.zeros([nHidden]))\n weightsClasses = tf.Variable(tf.truncated_normal([nHidden, nClasses],\n stddev=np.sqrt(2.0 / nHidden)))\n biasesClasses = tf.Variable(tf.zeros([nClasses]))\n\n ####Network\n lstm_cell = tf.nn.rnn_cell.LSTMCell(nHidden, state_is_tuple=True, activation=clipped_gelu)\n\n cell_fw = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2, state_is_tuple=True)\n cell_bw = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2, state_is_tuple=True)\n\n fbH1, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, inputList, dtype=tf.float32,\n scope='BDLSTM_H1')\n fbH1rs = [tf.reshape(t, [batchSize, 2, nHidden]) for t in fbH1]\n outH1 = [tf.reduce_sum(tf.mul(t, weightsOutH1), reduction_indices=1) + biasesOutH1 for t in fbH1rs]\n\n logits = [tf.matmul(t, weightsClasses) + biasesClasses for t in outH1]\n\n ####Optimizing\n logits3d = tf.pack(logits)\n loss = tf.reduce_mean(ctc.ctc_loss(logits3d, targetY, seqLengths))\n\n lr = tf.Variable(0.005, trainable=False)\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), 5)\n opt = tf.train.RMSPropOptimizer(lr)\n 
optimizer = opt.apply_gradients(zip(grads, tvars))\n\n    ####Evaluating\n    predictions = tf.to_int32(ctc.ctc_beam_search_decoder(logits3d, seqLengths)[0][0])\n    errorRate = tf.reduce_sum(tf.edit_distance(predictions, targetY, normalize=False)) / \\\n                tf.to_float(tf.size(targetY.values))\n\nsession = tf.InteractiveSession(graph=graph)\ntf.initialize_all_variables().run()\nsaver = tf.train.Saver(max_to_keep=1)\nsaver.restore(session, \"./bdlstm-timit-clean.ckpt\")\nprint('Model Restored')\n\nkl_all = []\npred_all = []\n\ndef softmax(x):\n    e_x = np.exp(x - np.max(x, axis=1, keepdims=True))\n    return e_x / np.sum(e_x, axis=1, keepdims=True)\n\nbatchErrors = np.zeros(len(batchedData))\nbatchRandIxs = np.random.permutation(len(batchedData)) # randomize batch order\nfor batch, batchOrigI in enumerate(batchRandIxs):\n    batchInputs, batchTargetSparse, batchSeqLengths = batchedData[batchOrigI]\n    batchTargetIxs, batchTargetVals, batchTargetShape = batchTargetSparse\n    feedDict = {inputX: batchInputs, targetIxs: batchTargetIxs, targetVals: batchTargetVals.tolist(),\n                targetShape: batchTargetShape, seqLengths: batchSeqLengths}\n    er, preds = session.run([errorRate, logits3d], feed_dict=feedDict)\n\n    for i in range(preds.shape[1]):\n        preds_cut_by_time = preds[:int(batchSeqLengths[i]), i, :]\n        # drop the blank class column before the softmax\n        s_pred_blanks_removed = softmax(preds_cut_by_time[:,:39])\n\n        kl = np.mean(np.log(nFeatures-1) + np.sum(s_pred_blanks_removed * np.log(s_pred_blanks_removed + 1e-11), axis=1))\n\n        kl_all.append(kl)\n        pred_all.append(np.mean(np.max(s_pred_blanks_removed, axis=1)))\n\n    batchErrors[batch] = er*len(batchSeqLengths)\nepochErrorRate = batchErrors.sum() / len(batchedData)\n\nprint('Edit distance', epochErrorRate, 'Softmax Confidence (mean, std)', np.mean(pred_all), np.std(pred_all))\n\ndel data_list; del batchedData; del batch # save memory\n\ngc.collect()\n\nfor oos_name in ['airport', 'babble', 'car', 'exhibition', 'restaurant', 'street', 'subway', 'train']:\n    print('Loading OOD data')\n    data = pickle.load(open(\"TIMIT_data_prepared_for_CTC_\" + oos_name + \".pkl\", 'rb'), encoding='latin1')\n\n    # 6300 x 776 x 39\n\n    # we will use the last 1300 examples from the 6300\n    data_list = []\n    for i in range(1300//batchSize):\n        offset = 5000 + batchSize * i\n        target_list = []\n        for j in range(batchSize):\n            target_list.append(data['y_phones'][offset+j])\n        data_list.append(\n            (data['x'][offset:offset+batchSize,:,:],\n             target_list_to_sparse_tensor(target_list),\n             data['mask'][offset:offset+batchSize]))\n\n    del data\n\n    batchedData, maxTimeSteps, totalN = data_list, 776, 13\n\n    kl_ood = []\n    pred_ood = []\n\n    def softmax(x):\n        e_x = np.exp(x - np.max(x, axis=1, keepdims=True))\n        return e_x / np.sum(e_x, axis=1, keepdims=True)\n\n    batchErrors = np.zeros(len(batchedData))\n    batchRandIxs = np.random.permutation(len(batchedData)) # randomize batch order\n    for batch, batchOrigI in enumerate(batchRandIxs):\n        batchInputs, batchTargetSparse, batchSeqLengths = batchedData[batchOrigI]\n        batchTargetIxs, batchTargetVals, batchTargetShape = batchTargetSparse\n        feedDict = {inputX: batchInputs, targetIxs: batchTargetIxs, targetVals: batchTargetVals.tolist(),\n                    targetShape: batchTargetShape, seqLengths: batchSeqLengths}\n        er, preds = session.run([errorRate, logits3d], feed_dict=feedDict)\n\n        for i in range(preds.shape[1]):\n            preds_cut_by_time = preds[:int(batchSeqLengths[i]), i, :]\n            # drop the blank class column before the softmax\n            s_pred_blanks_removed = softmax(preds_cut_by_time[:,:39])\n\n            kl = 
np.mean(np.log(nFeatures-1) + np.sum(s_pred_blanks_removed * np.log(s_pred_blanks_removed + 1e-11), axis=1))\n\n kl_ood.append(kl)\n pred_ood.append(np.mean(np.max(s_pred_blanks_removed, axis=1)))\n\n batchErrors[batch] = er*len(batchSeqLengths)\n epochErrorRate = batchErrors.sum() / len(batchedData)\n\n print(oos_name, 'edit distance', epochErrorRate, 'Softmax Confidence (mean, std)', np.mean(pred_ood), np.std(pred_ood))\n\n print('\\n' + oos_name, 'KL[p||u]: In/out distribution distinction')\n in_sample, oos = kl_all, kl_ood\n labels = np.zeros((len(in_sample) + len(oos)), dtype=np.int32)\n labels[:len(in_sample)] += 1\n examples = np.squeeze(np.vstack((np.array(in_sample).reshape((-1,1)), np.array(oos).reshape((-1,1)))))\n print('AUPR', sk.average_precision_score(labels, examples))\n print('AUROC', sk.roc_auc_score(labels, examples))\n\n print('\\n' + oos_name, 'Prediction Prob: In/out distribution distinction')\n in_sample, oos = pred_all, pred_ood\n labels = np.zeros((len(in_sample) + len(oos)), dtype=np.int32)\n labels[:len(in_sample)] += 1\n examples = np.squeeze(np.vstack((np.array(in_sample).reshape((-1,1)), np.array(oos).reshape((-1,1)))))\n print('AUPR', sk.average_precision_score(labels, examples))\n print('AUROC', sk.roc_auc_score(labels, examples))\n\n del data_list; del batchedData; del batch # save memory; it's possible that this doesn't work at all\n gc.collect()\n"
]
| [
[
"sklearn.metrics.roc_auc_score",
"numpy.sqrt",
"tensorflow.zeros",
"numpy.max",
"tensorflow.tanh",
"numpy.mean",
"tensorflow.python.ops.ctc_ops.ctc_loss",
"tensorflow.pack",
"tensorflow.Graph",
"tensorflow.Variable",
"tensorflow.gradients",
"numpy.std",
"tensorflow.initialize_all_variables",
"tensorflow.nn.rnn_cell.MultiRNNCell",
"tensorflow.python.ops.ctc_ops.ctc_beam_search_decoder",
"tensorflow.trainable_variables",
"tensorflow.train.Saver",
"tensorflow.matmul",
"numpy.log",
"tensorflow.InteractiveSession",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.placeholder",
"tensorflow.split",
"numpy.array",
"numpy.sum",
"tensorflow.size",
"tensorflow.transpose",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.reshape",
"tensorflow.mul",
"tensorflow.SparseTensor",
"tensorflow.edit_distance",
"sklearn.metrics.average_precision_score",
"tensorflow.nn.bidirectional_rnn"
]
]
|
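A note on the row above: the in-/out-of-distribution scoring at the end of the script reduces to labeling in-distribution confidences 1 and OOD confidences 0, then computing threshold-free detection metrics. A self-contained sketch with synthetic confidence values (the distributions are made up for illustration):

    import numpy as np
    import sklearn.metrics as sk

    in_sample = np.random.uniform(0.7, 1.0, 200)  # synthetic in-distribution confidences
    oos = np.random.uniform(0.3, 0.9, 200)        # synthetic OOD confidences
    labels = np.concatenate([np.ones(200), np.zeros(200)]).astype(np.int32)
    scores = np.concatenate([in_sample, oos])
    print('AUPR ', sk.average_precision_score(labels, scores))
    print('AUROC', sk.roc_auc_score(labels, scores))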
unicoooon/Image-Segmentation-for-Keras | [
"6906706b4c9e51152e4482ede1a29b63822660d6"
]
| [
"model/deeplabv3p.py"
]
| [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom keras.models import Model\nfrom keras import layers\nfrom keras.layers import Input\nfrom keras.layers import Activation\nfrom keras.layers import Concatenate\nfrom keras.layers import Add\nfrom keras.layers import Dropout\nfrom keras.layers import BatchNormalization\nfrom keras.layers import Conv2D\nfrom keras.layers import DepthwiseConv2D\nfrom keras.layers import ZeroPadding2D\nfrom keras.layers import AveragePooling2D\nfrom keras.engine import Layer\nfrom keras.engine import InputSpec\nfrom keras.engine.topology import get_source_inputs\nfrom keras import backend as K\nfrom keras.applications import imagenet_utils\nfrom keras.utils import conv_utils\nfrom keras.utils.data_utils import get_file\nfrom keras.optimizers import Adam\nfrom keras.backend import binary_crossentropy\nGHTS_PATH_X = \"/home/langyan/keras-image-segmentation/deeplabv3h5/deeplabv3_xception_tf_dim_ordering_tf_kernels.h5\"\n\nWEIGHTS_PATH_MOBILE = \"/home/langyan/keras-image-segmentation/deeplabv3h5/deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5\"\nsmooth = 1e-12\ndef dice_coef(y_true,y_pred):\n return (2.*K.sum(y_true*y_pred)+1.)/(K.sum(y_true)+K.sum(y_pred)+1.)\ndef jaccard_coef(y_true, y_pred):\n intersection = K.sum(y_true * y_pred, axis=[0, -1, -2])\n sum_ = K.sum(y_true + y_pred, axis=[0, -1, -2])\n\n jac = (intersection + smooth) / (sum_ - intersection + smooth)\n\n return K.mean(jac)\n\n\ndef jaccard_coef_int(y_true, y_pred):\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n\n intersection = K.sum(y_true * y_pred_pos, axis=[0, -1, -2])\n sum_ = K.sum(y_true + y_pred_pos, axis=[0, -1, -2])\n\n jac = (intersection + smooth) / (sum_ - intersection + smooth)\n\n return K.mean(jac)\n\n\ndef jaccard_coef_loss(y_true, y_pred):\n return -K.log(jaccard_coef(y_true, y_pred)) + binary_crossentropy(y_pred, y_true)\n\n\nclass BilinearUpsampling(Layer):\n \"\"\"Just a simple bilinear upsampling layer. Works only with TF.\n Args:\n upsampling: tuple of 2 numbers > 0. 
The upsampling ratio for h and w\n output_size: used instead of upsampling arg if passed!\n \"\"\"\n\n def __init__(self, upsampling=(2, 2), output_size=None, data_format=None, **kwargs):\n\n super(BilinearUpsampling, self).__init__(**kwargs)\n\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.input_spec = InputSpec(ndim=4)\n if output_size:\n self.output_size = conv_utils.normalize_tuple(\n output_size, 2, 'output_size')\n self.upsampling = None\n else:\n self.output_size = None\n self.upsampling = conv_utils.normalize_tuple(\n upsampling, 2, 'upsampling')\n\n def compute_output_shape(self, input_shape):\n if self.upsampling:\n height = self.upsampling[0] * \\\n input_shape[1] if input_shape[1] is not None else None\n width = self.upsampling[1] * \\\n input_shape[2] if input_shape[2] is not None else None\n else:\n height = self.output_size[0]\n width = self.output_size[1]\n return (input_shape[0],\n height,\n width,\n input_shape[3])\n\n def call(self, inputs):\n if self.upsampling:\n return K.tf.image.resize_bilinear(inputs, (inputs.shape[1] * self.upsampling[0],\n inputs.shape[2] * self.upsampling[1]),\n align_corners=True)\n else:\n return K.tf.image.resize_bilinear(inputs, (self.output_size[0],\n self.output_size[1]),\n align_corners=True)\n\n def get_config(self):\n config = {'upsampling': self.upsampling,\n 'output_size': self.output_size,\n 'data_format': self.data_format}\n base_config = super(BilinearUpsampling, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\ndef SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):\n \"\"\" SepConv with BN between depthwise & pointwise. Optionally add activation after BN\n Implements right \"same\" padding for even kernel sizes\n Args:\n x: input tensor\n filters: num of filters in pointwise convolution\n prefix: prefix before name\n stride: stride at depthwise conv\n kernel_size: kernel size for depthwise convolution\n rate: atrous rate for depthwise convolution\n depth_activation: flag to use activation between depthwise & poinwise convs\n epsilon: epsilon to use in BN layer\n \"\"\"\n\n if stride == 1:\n depth_padding = 'same'\n else:\n kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n x = ZeroPadding2D((pad_beg, pad_end))(x)\n depth_padding = 'valid'\n\n if not depth_activation:\n x = Activation('relu')(x)\n x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),\n padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)\n x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)\n if depth_activation:\n x = Activation('relu')(x)\n x = Conv2D(filters, (1, 1), padding='same',\n use_bias=False, name=prefix + '_pointwise')(x)\n x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)\n if depth_activation:\n x = Activation('relu')(x)\n\n return x\n\n\ndef _conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1):\n \"\"\"Implements right 'same' padding for even kernel sizes\n Without this there is a 1 pixel drift when stride = 2\n Args:\n x: input tensor\n filters: num of filters in pointwise convolution\n prefix: prefix before name\n stride: stride at depthwise conv\n kernel_size: kernel size for depthwise convolution\n rate: atrous rate for depthwise convolution\n \"\"\"\n if stride == 1:\n return Conv2D(filters,\n 
(kernel_size, kernel_size),\n strides=(stride, stride),\n padding='same', use_bias=False,\n dilation_rate=(rate, rate),\n name=prefix)(x)\n else:\n kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n x = ZeroPadding2D((pad_beg, pad_end))(x)\n return Conv2D(filters,\n (kernel_size, kernel_size),\n strides=(stride, stride),\n padding='valid', use_bias=False,\n dilation_rate=(rate, rate),\n name=prefix)(x)\n\n\ndef _xception_block(inputs, depth_list, prefix, skip_connection_type, stride,\n rate=1, depth_activation=False, return_skip=False):\n \"\"\" Basic building block of modified Xception network\n Args:\n inputs: input tensor\n depth_list: number of filters in each SepConv layer. len(depth_list) == 3\n prefix: prefix before name\n skip_connection_type: one of {'conv','sum','none'}\n stride: stride at last depthwise conv\n rate: atrous rate for depthwise convolution\n depth_activation: flag to use activation between depthwise & pointwise convs\n return_skip: flag to return additional tensor after 2 SepConvs for decoder\n \"\"\"\n residual = inputs\n for i in range(3):\n residual = SepConv_BN(residual,\n depth_list[i],\n prefix + '_separable_conv{}'.format(i + 1),\n stride=stride if i == 2 else 1,\n rate=rate,\n depth_activation=depth_activation)\n if i == 1:\n skip = residual\n if skip_connection_type == 'conv':\n shortcut = _conv2d_same(inputs, depth_list[-1], prefix + '_shortcut',\n kernel_size=1,\n stride=stride)\n shortcut = BatchNormalization(name=prefix + '_shortcut_BN')(shortcut)\n outputs = layers.add([residual, shortcut])\n elif skip_connection_type == 'sum':\n outputs = layers.add([residual, inputs])\n elif skip_connection_type == 'none':\n outputs = residual\n if return_skip:\n return outputs, skip\n else:\n return outputs\n\n\ndef relu6(x):\n return K.relu(x, max_value=6)\n\n\ndef _make_divisible(v, divisor, min_value=None):\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\n\ndef _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1):\n in_channels = inputs._keras_shape[-1]\n pointwise_conv_filters = int(filters * alpha)\n pointwise_filters = _make_divisible(pointwise_conv_filters, 8)\n x = inputs\n prefix = 'expanded_conv_{}_'.format(block_id)\n if block_id:\n # Expand\n\n x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',\n use_bias=False, activation=None,\n name=prefix + 'expand')(x)\n x = BatchNormalization(epsilon=1e-3, momentum=0.999,\n name=prefix + 'expand_BN')(x)\n x = Activation(relu6, name=prefix + 'expand_relu')(x)\n else:\n prefix = 'expanded_conv_'\n # Depthwise\n x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,\n use_bias=False, padding='same', dilation_rate=(rate, rate),\n name=prefix + 'depthwise')(x)\n x = BatchNormalization(epsilon=1e-3, momentum=0.999,\n name=prefix + 'depthwise_BN')(x)\n\n x = Activation(relu6, name=prefix + 'depthwise_relu')(x)\n\n # Project\n x = Conv2D(pointwise_filters,\n kernel_size=1, padding='same', use_bias=False, activation=None,\n name=prefix + 'project')(x)\n x = BatchNormalization(epsilon=1e-3, momentum=0.999,\n name=prefix + 'project_BN')(x)\n\n if skip_connection:\n return Add(name=prefix + 'add')([inputs, x])\n\n # if in_channels == pointwise_filters 
and stride == 1:\n # return Add(name='res_connect_' + str(block_id))([inputs, x])\n\n return x\n\n\ndef Deeplabv3(weights=None, input_tensor=None, input_shape=(512, 512, 3), classes=21, backbone='mobilenetv2', OS=16, alpha=1.):\n \"\"\" Instantiates the Deeplabv3+ architecture\n Optionally loads weights pre-trained\n on PASCAL VOC. This model is available for TensorFlow only,\n and can only be used with inputs following the TensorFlow\n data format `(width, height, channels)`.\n # Arguments\n weights: one of 'pascal_voc' (pre-trained on pascal voc)\n or None (random initialization)\n input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)\n to use as image input for the model.\n input_shape: shape of input image. format HxWxC\n PASCAL VOC model was trained on (512,512,3) images\n classes: number of desired classes. If classes != 21,\n last layer is initialized randomly\n backbone: backbone to use. one of {'xception','mobilenetv2'}\n OS: determines input_shape/feature_extractor_output ratio. One of {8,16}.\n Used only for xception backbone.\n alpha: controls the width of the MobileNetV2 network. This is known as the\n width multiplier in the MobileNetV2 paper.\n - If `alpha` < 1.0, proportionally decreases the number\n of filters in each layer.\n - If `alpha` > 1.0, proportionally increases the number\n of filters in each layer.\n - If `alpha` = 1, default number of filters from the paper\n are used at each layer.\n Used only for mobilenetv2 backbone\n # Returns\n A Keras model instance.\n # Raises\n RuntimeError: If attempting to run this model with a\n backend that does not support separable convolutions.\n ValueError: in case of invalid argument for `weights` or `backbone`\n \"\"\"\n\n if not (weights in {'pascal_voc', None}):\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `pascal_voc` '\n '(pre-trained on PASCAL VOC)')\n\n if K.backend() != 'tensorflow':\n raise RuntimeError('The Deeplabv3+ model is only available with '\n 'the TensorFlow backend.')\n\n if not (backbone in {'xception', 'mobilenetv2'}):\n raise ValueError('The `backbone` argument should be either '\n '`xception` or `mobilenetv2` ')\n\n if input_tensor is None:\n img_input = Input(shape=input_shape)\n else:\n if not K.is_keras_tensor(input_tensor):\n img_input = Input(tensor=input_tensor, shape=input_shape)\n else:\n img_input = input_tensor\n\n if backbone == 'xception':\n if OS == 8:\n entry_block3_stride = 1\n middle_block_rate = 2 # ! 
Not mentioned in paper, but required\n exit_block_rates = (2, 4)\n atrous_rates = (12, 24, 36)\n else:\n entry_block3_stride = 2\n middle_block_rate = 1\n exit_block_rates = (1, 2)\n atrous_rates = (6, 12, 18)\n\n x = Conv2D(32, (3, 3), strides=(2, 2),\n name='entry_flow_conv1_1', use_bias=False, padding='same')(img_input)\n x = BatchNormalization(name='entry_flow_conv1_1_BN')(x)\n x = Activation('relu')(x)\n\n x = _conv2d_same(x, 64, 'entry_flow_conv1_2', kernel_size=3, stride=1)\n x = BatchNormalization(name='entry_flow_conv1_2_BN')(x)\n x = Activation('relu')(x)\n\n x = _xception_block(x, [128, 128, 128], 'entry_flow_block1',\n skip_connection_type='conv', stride=2,\n depth_activation=False)\n x, skip1 = _xception_block(x, [256, 256, 256], 'entry_flow_block2',\n skip_connection_type='conv', stride=2,\n depth_activation=False, return_skip=True)\n\n x = _xception_block(x, [728, 728, 728], 'entry_flow_block3',\n skip_connection_type='conv', stride=entry_block3_stride,\n depth_activation=False)\n for i in range(16):\n x = _xception_block(x, [728, 728, 728], 'middle_flow_unit_{}'.format(i + 1),\n skip_connection_type='sum', stride=1, rate=middle_block_rate,\n depth_activation=False)\n\n x = _xception_block(x, [728, 1024, 1024], 'exit_flow_block1',\n skip_connection_type='conv', stride=1, rate=exit_block_rates[0],\n depth_activation=False)\n x = _xception_block(x, [1536, 1536, 2048], 'exit_flow_block2',\n skip_connection_type='none', stride=1, rate=exit_block_rates[1],\n depth_activation=True)\n\n else:\n OS = 8\n first_block_filters = _make_divisible(32 * alpha, 8)\n x = Conv2D(first_block_filters,\n kernel_size=3,\n strides=(2, 2), padding='same',\n use_bias=False, name='Conv')(img_input)\n x = BatchNormalization(\n epsilon=1e-3, momentum=0.999, name='Conv_BN')(x)\n x = Activation(relu6, name='Conv_Relu6')(x)\n\n x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,\n expansion=1, block_id=0, skip_connection=False)\n\n x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,\n expansion=6, block_id=1, skip_connection=False)\n x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,\n expansion=6, block_id=2, skip_connection=True)\n\n x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,\n expansion=6, block_id=3, skip_connection=False)\n x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,\n expansion=6, block_id=4, skip_connection=True)\n x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,\n expansion=6, block_id=5, skip_connection=True)\n\n # stride in block 6 changed from 2 -> 1, so we need to use rate = 2\n x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, # 1!\n expansion=6, block_id=6, skip_connection=False)\n x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=7, skip_connection=True)\n x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=8, skip_connection=True)\n x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=9, skip_connection=True)\n\n x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=10, skip_connection=False)\n x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=11, skip_connection=True)\n x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=12, skip_connection=True)\n\n x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=2, # 1!\n expansion=6, 
block_id=13, skip_connection=False)\n x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,\n expansion=6, block_id=14, skip_connection=True)\n x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,\n expansion=6, block_id=15, skip_connection=True)\n\n x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, rate=4,\n expansion=6, block_id=16, skip_connection=False)\n\n # end of feature extractor\n\n # branching for Atrous Spatial Pyramid Pooling\n\n # Image Feature branch\n #out_shape = int(np.ceil(input_shape[0] / OS))\n b4 = AveragePooling2D(pool_size=(int(np.ceil(input_shape[0] / OS)), int(np.ceil(input_shape[1] / OS))))(x)\n b4 = Conv2D(256, (1, 1), padding='same',\n use_bias=False, name='image_pooling')(b4)\n b4 = BatchNormalization(name='image_pooling_BN', epsilon=1e-5)(b4)\n b4 = Activation('relu')(b4)\n b4 = BilinearUpsampling((int(np.ceil(input_shape[0] / OS)), int(np.ceil(input_shape[1] / OS))))(b4)\n\n # simple 1x1\n b0 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='aspp0')(x)\n b0 = BatchNormalization(name='aspp0_BN', epsilon=1e-5)(b0)\n b0 = Activation('relu', name='aspp0_activation')(b0)\n\n # there are only 2 branches in mobilenetV2. not sure why\n if backbone == 'xception':\n # rate = 6 (12)\n b1 = SepConv_BN(x, 256, 'aspp1',\n rate=atrous_rates[0], depth_activation=True, epsilon=1e-5)\n # rate = 12 (24)\n b2 = SepConv_BN(x, 256, 'aspp2',\n rate=atrous_rates[1], depth_activation=True, epsilon=1e-5)\n # rate = 18 (36)\n b3 = SepConv_BN(x, 256, 'aspp3',\n rate=atrous_rates[2], depth_activation=True, epsilon=1e-5)\n\n # concatenate ASPP branches & project\n x = Concatenate()([b4, b0, b1, b2, b3])\n else:\n x = Concatenate()([b4, b0])\n\n x = Conv2D(256, (1, 1), padding='same',\n use_bias=False, name='concat_projection')(x)\n x = BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(x)\n x = Activation('relu')(x)\n x = Dropout(0.1)(x)\n\n # DeepLab v.3+ decoder\n\n if backbone == 'xception':\n # Feature projection\n # x4 (x2) block\n x = BilinearUpsampling(output_size=(int(np.ceil(input_shape[0] / 4)),\n int(np.ceil(input_shape[1] / 4))))(x)\n dec_skip1 = Conv2D(48, (1, 1), padding='same',\n use_bias=False, name='feature_projection0')(skip1)\n dec_skip1 = BatchNormalization(\n name='feature_projection0_BN', epsilon=1e-5)(dec_skip1)\n dec_skip1 = Activation('relu')(dec_skip1)\n x = Concatenate()([x, dec_skip1])\n x = SepConv_BN(x, 256, 'decoder_conv0',\n depth_activation=True, epsilon=1e-5)\n x = SepConv_BN(x, 256, 'decoder_conv1',\n depth_activation=True, epsilon=1e-5)\n\n # you can use it with arbitary number of classes\n if classes == 21:\n last_layer_name = 'logits_semantic'\n else:\n last_layer_name = 'custom_logits_semantic'\n\n x = Conv2D(classes, (1, 1), padding='same', name=last_layer_name)(x)\n x = BilinearUpsampling(output_size=(input_shape[0], input_shape[1]))(x)\n #x=BilinearUpsampling((4,4))(x)\n x = Activation(\"softmax\")(x)\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n #if input_tensor is not None:\n # inputs = get_source_inputs(input_tensor)\n #else:\n inputs = img_input\n\n model = Model(inputs, x, name='deeplabv3+')\n\n # load weights\n\n if weights == 'pascal_voc':\n if backbone == 'xception':\n #weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels.h5',\n # WEIGHTS_PATH_X,\n # cache_subdir='models')\n model.load_weights(WEIGHTS_PATH_X,by_name=True)\n else:\n #weights_path = 
get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5',\n # WEIGHTS_PATH_MOBILE,\n # cache_subdir='models')\n model.load_weights(WEIGHTS_PATH_MOBILE,by_name=True)\n model.compile(optimizer=Adam(),\n loss=jaccard_coef_loss,\n metrics=[jaccard_coef_int])\n return model\n\n\ndef preprocess_input(x):\n \"\"\"Preprocesses a numpy array encoding a batch of images.\n # Arguments\n x: a 4D numpy array consists of RGB values within [0, 255].\n # Returns\n Input array scaled to [-1.,1.]\n \"\"\"\n return imagenet_utils.preprocess_input(x, mode='tf')\n"
]
| [
[
"numpy.ceil"
]
]
|
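A note on the row above: the 'same' padding arithmetic in _conv2d_same and SepConv_BN can be checked by hand. For a 3x3 kernel with atrous rate 2, the effective kernel spans 5 pixels, so 4 pixels of padding are split 2/2 (values chosen for illustration):

    kernel_size, rate = 3, 2
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)  # 3 + 2*1 = 5
    pad_total = kernel_size_effective - 1                                 # 4
    pad_beg = pad_total // 2                                              # 2
    pad_end = pad_total - pad_beg                                         # 2
    print(kernel_size_effective, pad_beg, pad_end)                        # 5 2 2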
archanatikayatray/Learn-Discrete-Mathematics-1 | [
"9b180a7fba574b39f859c7e5a16def989d13d111"
]
| [
"Chapter 5/TossingManyCoinsExample.py"
]
| [
"# Import packages with the functions we need\nimport scipy.special\nimport matplotlib.pyplot as plt\n\nprobabilities = []\n\nfor n in range(51):\n # Calculate probability of n heads\n probability = scipy.special.binom(50, n) / (2 ** 50)\n\n # Convert to a string with 6 decimal places\n probString = \"{:.6f}\".format(probability)\n\n # Print probability\n print('Probability of ' + str(n) + ' heads: ' + probString)\n\n # Add probability to list\n probabilities.append(probability)\n\n# Plot the probabilites\nplt.plot(range(51), probabilities, '-o')\nplt.axis([0, 50, 0, 0.15])\nplt.show()"
]
| [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis"
]
]
|
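A note on the row above: as a sanity check, the closed-form probability scipy.special.binom(50, n) / 2**50 should agree with the binomial pmf at p = 0.5. A hedged cross-check, not part of the original example:

    import scipy.special
    from scipy.stats import binom

    for n in (0, 10, 25):
        direct = scipy.special.binom(50, n) / (2 ** 50)  # formula used above
        via_pmf = binom.pmf(n, 50, 0.5)                  # library equivalent
        print(n, direct, via_pmf)                        # the two columns match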
saadlabyad/aslsd | [
"95a1cc660079972b45a77ec6dc587d9225489212"
]
| [
"events/time_ordering.py"
]
| [
"# License: BSD 3 clause\r\n\r\nimport bisect\r\nimport itertools\r\n\r\nimport numpy as np\r\n\r\n\r\ndef get_kappa_varpi(list_times, T_f):\r\n \"\"\"\r\n Compute the functions :math:`\\\\kappa` and :math:`\\\\varpi`, needed for\r\n chronological ordering of event times across dimensions, and return them as\r\n arrays.\r\n\r\n * :math:`\\\\kappa(j,i,m)` is the index of the latest event of type :math:`j`\r\n preceding event time :math:`t_m^i`.\r\n\r\n * :math:`\\\\varpi(i,j,h)` is the smallest index :math:`m` such that there\r\n exists at least h events of type :math:`j` preceding event time\r\n :math:`t_m^i`.\r\n\r\n Parameters\r\n ----------\r\n list_times : `list` of `numpy.ndarray`\r\n list_times[i] is the sorted array of events of type :math:`i`.\r\n\r\n T_f : `float`\r\n Terminal time.\r\n\r\n Returns\r\n -------\r\n kappa : `list` of `list` of `numpy.ndarray`\r\n kappa[j][i][m] is the index of the latest event of type :math:`j`\r\n preceding event time :math:`t_m^i`.\r\n\r\n varpi : `list` of `list` of `numpy.ndarray`\r\n varpi[i][j][h] is the smallest index :math:`m` such that there\r\n exists at least h events of type :math:`j` preceding event time\r\n :math:`t_m^i`.\r\n\r\n Raises\r\n ------\r\n IndexError\r\n If `T_f` is smaller than `list_times[i][-1]` for some `i`.\r\n\r\n \"\"\"\r\n\r\n d = len(list_times)\r\n sizes = [len(list_times[i]) for i in range(d)]\r\n\r\n kappa = [[[] for j in range(d)] for i in range(d)]\r\n for i, j in itertools.product(range(d), range(d)):\r\n if i == j:\r\n kappa[i][j] = np.arange(sizes[i]+1, dtype=np.int64)-1\r\n else:\r\n kappa[i][j] = [-1 for n in range(sizes[j]+1)]\r\n old_index = 0\r\n new_index = 0\r\n for n in range(sizes[j]):\r\n old_index = max(new_index, 0)\r\n new_index = bisect.bisect_left(list_times[i], list_times[j][n],\r\n lo=old_index, hi=sizes[i])-1\r\n kappa[i][j][n] = new_index\r\n kappa[i][j][-1] = bisect.bisect_left(list_times[i], T_f, lo=0,\r\n hi=sizes[i]-1)\r\n kappa[i][j] = np.array(kappa[i][j], dtype=np.int64)\r\n\r\n varpi = [[[] for j in range(d)] for i in range(d)]\r\n for i, j in itertools.product(range(d), range(d)):\r\n if i == j:\r\n varpi[i][j] = np.arange(sizes[i], dtype=np.int64)\r\n if i != j:\r\n varpi[i][j] = [0 for h in range(kappa[j][i][-2]+2)]\r\n old_index = 0\r\n new_index = -1\r\n lo_val = 0\r\n for h in range(1, kappa[j][i][-2]+2):\r\n lo_val = max(0, new_index)\r\n new_index = bisect.bisect_left(kappa[j][i], h-1, lo=lo_val,\r\n hi=len(kappa[j][i]))\r\n varpi[i][j][h] = new_index\r\n varpi[i][j] = np.array(varpi[i][j], dtype=np.int64)\r\n\r\n return kappa, varpi\r\n\r\n\r\ndef get_lag_sizes(list_times, kappa, varpi):\r\n \"\"\"\r\n Compute the number of event of type :math:`i` with at least :math:`h`\r\n predecessors of type :math:`j`, for all :math:`i,j \\\\in [d]`,\r\n :math:`h \\\\in [\\\\kappa(j,i,N^i_T)]`.\r\n\r\n Parameters\r\n ----------\r\n list_times : `list` of np.ndarray\r\n list_times[i] is the sorted array of events of type :math:`i`.\r\n\r\n kappa : `list` of `list` of `numpy.ndarray`\r\n DESCRIPTION.\r\n\r\n varpi : `list` of `list` of `numpy.ndarray`\r\n DESCRIPTION.\r\n\r\n Returns\r\n -------\r\n lag_sizes : `list` of `list` of `numpy.ndarray`\r\n lag_sizes[i][j][h] is the number of events of type :math:`i` with at\r\n least :math:`h` predecessors of type :math:`j`.\r\n By convention, matrix_lag_sizes[i][j][0]=0.\r\n\r\n \"\"\"\r\n\r\n d = len(list_times)\r\n lag_sizes = [[[None]*(kappa[j][i][len(list_times[i])-1]+2)\r\n for j in range(d)] for i in range(d)]\r\n for i, j in 
itertools.product(range(d), range(d)):\r\n lag_sizes[i][j][0] = 0\r\n lag_sizes[i][j][1: kappa[j][i][len(list_times[i])-1]+2] = len(list_times[i])-varpi[i][j][1: kappa[j][i][len(list_times[i])-1]+2]\r\n return lag_sizes\r\n"
]
| [
[
"numpy.arange",
"numpy.array"
]
]
|
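A note on the row above: a tiny worked example for get_kappa_varpi, assuming the module above is importable; the event times are made up. For stream 1's events at 2.0 and 3.0, the latest preceding stream-0 event is 1.0 (index 0); for 8.0 it is 7.0 (index 2):

    import numpy as np

    list_times = [np.array([1.0, 4.0, 7.0]), np.array([2.0, 3.0, 8.0])]
    kappa, varpi = get_kappa_varpi(list_times, T_f=10.0)
    print(kappa[0][1])  # [0 0 2 2]; kappa[0][1][m] indexes the latest type-0 event before t_m^1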
ady95/EfficientDet | [
"3f2fc8b9717ebd6e3cbf853dc4c28da3a4e1cdc6"
]
| [
"generators/common.py"
]
| [
"import numpy as np\nimport random\nimport warnings\nimport cv2\nfrom tensorflow import keras\n\nfrom utils.anchors import anchors_for_shape, anchor_targets_bbox, AnchorParameters\n\n\nclass Generator(keras.utils.Sequence):\n \"\"\"\n Abstract generator class.\n \"\"\"\n\n def __init__(\n self,\n phi=0,\n image_sizes=(512, 640, 768, 896, 1024, 1280, 1408),\n misc_effect=None,\n visual_effect=None,\n batch_size=1,\n group_method='random', # one of 'none', 'random', 'ratio'\n shuffle_groups=True,\n detect_text=False,\n detect_quadrangle=False,\n detect_carplate=False\n ):\n \"\"\"\n Initialize Generator object.\n\n Args:\n batch_size: The size of the batches to generate.\n group_method: Determines how images are grouped together (defaults to 'ratio', one of ('none', 'random', 'ratio')).\n shuffle_groups: If True, shuffles the groups each epoch.\n image_sizes:\n \"\"\"\n self.misc_effect = misc_effect\n self.visual_effect = visual_effect\n self.batch_size = int(batch_size)\n self.group_method = group_method\n self.shuffle_groups = shuffle_groups\n self.detect_text = detect_text\n self.detect_quadrangle = detect_quadrangle\n self.detect_carplate = detect_carplate\n\n self.image_size = image_sizes[phi]\n self.groups = None\n # self.anchor_parameters = AnchorParameters.default if not self.detect_text else AnchorParameters(\n # ratios=(0.25, 0.5, 1., 2.),\n # sizes=(16, 32, 64, 128, 256))\n self.anchor_parameters = AnchorParameters.default\n\n print(\"detect_carplate:\", self.detect_carplate)\n\n if self.detect_carplate == True:\n self.anchor_parameters = AnchorParameters(\n ratios=(0.25, 0.5, 1.),\n sizes=(16, 32, 64, 128, 256))\n elif self.detect_text == True:\n self.anchor_parameters = AnchorParameters(\n ratios=(0.25, 0.5, 1., 2.),\n sizes=(16, 32, 64, 128, 256))\n\n self.anchors = anchors_for_shape((self.image_size, self.image_size), anchor_params=self.anchor_parameters)\n self.num_anchors = self.anchor_parameters.num_anchors()\n\n # Define groups\n self.group_images()\n\n # Shuffle when initializing\n if self.shuffle_groups:\n random.shuffle(self.groups)\n\n def on_epoch_end(self):\n if self.shuffle_groups:\n random.shuffle(self.groups)\n\n def size(self):\n \"\"\"\n Size of the dataset.\n \"\"\"\n raise NotImplementedError('size method not implemented')\n\n def get_anchors(self):\n \"\"\"\n loads the anchors from a txt file\n \"\"\"\n with open(self.anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n # (N, 2), wh\n return np.array(anchors).reshape(-1, 2)\n\n def num_classes(self):\n \"\"\"\n Number of classes in the dataset.\n \"\"\"\n raise NotImplementedError('num_classes method not implemented')\n\n def has_label(self, label):\n \"\"\"\n Returns True if label is a known label.\n \"\"\"\n raise NotImplementedError('has_label method not implemented')\n\n def has_name(self, name):\n \"\"\"\n Returns True if name is a known class.\n \"\"\"\n raise NotImplementedError('has_name method not implemented')\n\n def name_to_label(self, name):\n \"\"\"\n Map name to label.\n \"\"\"\n raise NotImplementedError('name_to_label method not implemented')\n\n def label_to_name(self, label):\n \"\"\"\n Map label to name.\n \"\"\"\n raise NotImplementedError('label_to_name method not implemented')\n\n def image_aspect_ratio(self, image_index):\n \"\"\"\n Compute the aspect ratio for an image with image_index.\n \"\"\"\n raise NotImplementedError('image_aspect_ratio method not implemented')\n\n def load_image(self, image_index):\n \"\"\"\n Load an image at the 
image_index.\n \"\"\"\n raise NotImplementedError('load_image method not implemented')\n\n def load_annotations(self, image_index):\n \"\"\"\n Load annotations for an image_index.\n \"\"\"\n raise NotImplementedError('load_annotations method not implemented')\n\n def load_annotations_group(self, group):\n \"\"\"\n Load annotations for all images in group.\n \"\"\"\n annotations_group = [self.load_annotations(image_index) for image_index in group]\n for annotations in annotations_group:\n assert (isinstance(annotations,\n dict)), '\\'load_annotations\\' should return a list of dictionaries, received: {}'.format(\n type(annotations))\n assert (\n 'labels' in annotations), '\\'load_annotations\\' should return a list of dictionaries that contain \\'labels\\' and \\'bboxes\\'.'\n assert (\n 'bboxes' in annotations), '\\'load_annotations\\' should return a list of dictionaries that contain \\'labels\\' and \\'bboxes\\'.'\n\n return annotations_group\n\n def filter_annotations(self, image_group, annotations_group, group):\n \"\"\"\n Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.\n \"\"\"\n # test all annotations\n for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):\n # test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]\n invalid_indices = np.where(\n (annotations['bboxes'][:, 2] <= annotations['bboxes'][:, 0]) |\n (annotations['bboxes'][:, 3] <= annotations['bboxes'][:, 1]) |\n (annotations['bboxes'][:, 0] < 0) |\n (annotations['bboxes'][:, 1] < 0) |\n (annotations['bboxes'][:, 2] <= 0) |\n (annotations['bboxes'][:, 3] <= 0) |\n (annotations['bboxes'][:, 2] > image.shape[1]) |\n (annotations['bboxes'][:, 3] > image.shape[0])\n )[0]\n\n # delete invalid indices\n if len(invalid_indices):\n warnings.warn('Image with id {} (shape {}) contains the following invalid boxes: {}.'.format(\n group[index],\n image.shape,\n annotations['bboxes'][invalid_indices, :]\n ))\n for k in annotations_group[index].keys():\n annotations_group[index][k] = np.delete(annotations[k], invalid_indices, axis=0)\n # if annotations['bboxes'].shape[0] == 0:\n # warnings.warn('Image with id {} (shape {}) contains no valid boxes before transform'.format(\n # group[index],\n # image.shape,\n # ))\n return image_group, annotations_group\n\n def clip_transformed_annotations(self, image_group, annotations_group, group):\n \"\"\"\n Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.\n \"\"\"\n # test all annotations\n filtered_image_group = []\n filtered_annotations_group = []\n for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):\n image_height = image.shape[0]\n image_width = image.shape[1]\n # x1\n annotations['bboxes'][:, 0] = np.clip(annotations['bboxes'][:, 0], 0, image_width - 2)\n # y1\n annotations['bboxes'][:, 1] = np.clip(annotations['bboxes'][:, 1], 0, image_height - 2)\n # x2\n annotations['bboxes'][:, 2] = np.clip(annotations['bboxes'][:, 2], 1, image_width - 1)\n # y2\n annotations['bboxes'][:, 3] = np.clip(annotations['bboxes'][:, 3], 1, image_height - 1)\n # test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]\n small_indices = np.where(\n (annotations['bboxes'][:, 2] - annotations['bboxes'][:, 0] < 3) |\n (annotations['bboxes'][:, 3] - annotations['bboxes'][:, 1] < 3)\n )[0]\n\n # delete invalid indices\n if len(small_indices):\n for k in 
annotations_group[index].keys():\n annotations_group[index][k] = np.delete(annotations[k], small_indices, axis=0)\n # import cv2\n # for invalid_index in small_indices:\n # x1, y1, x2, y2 = annotations['bboxes'][invalid_index]\n # label = annotations['labels'][invalid_index]\n # class_name = self.labels[label]\n # print('width: {}'.format(x2 - x1))\n # print('height: {}'.format(y2 - y1))\n # cv2.rectangle(image, (int(round(x1)), int(round(y1))), (int(round(x2)), int(round(y2))), (0, 255, 0), 2)\n # cv2.putText(image, class_name, (int(round(x1)), int(round(y1))), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 1)\n # cv2.namedWindow('image', cv2.WINDOW_NORMAL)\n # cv2.imshow('image', image)\n # cv2.waitKey(0)\n filtered_image_group.append(image)\n filtered_annotations_group.append(annotations_group[index])\n\n return filtered_image_group, filtered_annotations_group\n\n def load_image_group(self, group):\n \"\"\"\n Load images for all images in a group.\n \"\"\"\n return [self.load_image(image_index) for image_index in group]\n\n def random_visual_effect_group_entry(self, image, annotations):\n \"\"\"\n Randomly transforms image and annotation.\n \"\"\"\n # apply visual effect\n image = self.visual_effect(image)\n return image, annotations\n\n def random_visual_effect_group(self, image_group, annotations_group):\n \"\"\"\n Randomly apply visual effect on each image.\n \"\"\"\n assert (len(image_group) == len(annotations_group))\n\n if self.visual_effect is None:\n # do nothing\n return image_group, annotations_group\n\n for index in range(len(image_group)):\n # apply effect on a single group entry\n image_group[index], annotations_group[index] = self.random_visual_effect_group_entry(\n image_group[index], annotations_group[index]\n )\n\n return image_group, annotations_group\n\n def random_misc_group_entry(self, image, annotations):\n \"\"\"\n Randomly transforms image and annotation.\n \"\"\"\n # randomly transform both image and annotations\n image, annotations = self.misc_effect(image, annotations)\n return image, annotations\n\n def random_misc_group(self, image_group, annotations_group):\n \"\"\"\n Randomly transforms each image and its annotations.\n \"\"\"\n\n assert (len(image_group) == len(annotations_group))\n\n if self.misc_effect is None:\n return image_group, annotations_group\n\n for index in range(len(image_group)):\n # transform a single group entry\n image_group[index], annotations_group[index] = self.random_misc_group_entry(image_group[index],\n annotations_group[index])\n\n return image_group, annotations_group\n\n def preprocess_group_entry(self, image, annotations):\n \"\"\"\n Preprocess image and its annotations.\n \"\"\"\n\n # preprocess the image\n image, scale = self.preprocess_image(image)\n\n # apply resizing to annotations too\n annotations['bboxes'] *= scale\n if self.detect_quadrangle:\n annotations['quadrangles'] *= scale\n return image, annotations\n\n def preprocess_group(self, image_group, annotations_group):\n \"\"\"\n Preprocess each image and its annotations in its group.\n \"\"\"\n assert (len(image_group) == len(annotations_group))\n\n for index in range(len(image_group)):\n # preprocess a single group entry\n image_group[index], annotations_group[index] = self.preprocess_group_entry(image_group[index],\n annotations_group[index])\n\n return image_group, annotations_group\n\n def group_images(self):\n \"\"\"\n Order the images according to self.order and makes groups of self.batch_size.\n \"\"\"\n # determine the order of the images\n\n order = 
list(range(self.size()))\n        if self.group_method == 'random':\n            random.shuffle(order)\n        elif self.group_method == 'ratio':\n            order.sort(key=lambda x: self.image_aspect_ratio(x))\n\n        # divide into groups, one group = one batch\n        self.groups = [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in\n                       range(0, len(order), self.batch_size)]\n\n    def compute_inputs(self, image_group, annotations_group):\n        \"\"\"\n        Compute inputs for the network using an image_group.\n        \"\"\"\n        batch_images = np.array(image_group).astype(np.float32)\n        return [batch_images]\n\n    def compute_alphas_and_ratios(self, annotations_group):\n        for i, annotations in enumerate(annotations_group):\n            quadrangles = annotations['quadrangles']\n            alphas = np.zeros((quadrangles.shape[0], 4), dtype=np.float32)\n            xmin = np.min(quadrangles, axis=1)[:, 0]\n            ymin = np.min(quadrangles, axis=1)[:, 1]\n            xmax = np.max(quadrangles, axis=1)[:, 0]\n            ymax = np.max(quadrangles, axis=1)[:, 1]\n            # alpha1, alpha2, alpha3, alpha4\n            alphas[:, 0] = (quadrangles[:, 0, 0] - xmin) / (xmax - xmin)\n            alphas[:, 1] = (quadrangles[:, 1, 1] - ymin) / (ymax - ymin)\n            alphas[:, 2] = (xmax - quadrangles[:, 2, 0]) / (xmax - xmin)\n            alphas[:, 3] = (ymax - quadrangles[:, 3, 1]) / (ymax - ymin)\n            annotations['alphas'] = alphas\n            # ratio\n            area1 = 0.5 * alphas[:, 0] * (1 - alphas[:, 3])\n            area2 = 0.5 * alphas[:, 1] * (1 - alphas[:, 0])\n            area3 = 0.5 * alphas[:, 2] * (1 - alphas[:, 1])\n            area4 = 0.5 * alphas[:, 3] * (1 - alphas[:, 2])\n            annotations['ratios'] = 1 - area1 - area2 - area3 - area4\n\n    def compute_targets(self, image_group, annotations_group):\n        \"\"\"\n        Compute target outputs for the network using images and their annotations.\n        \"\"\"\n\n        batches_targets = anchor_targets_bbox(\n            self.anchors,\n            image_group,\n            annotations_group,\n            num_classes=self.num_classes(),\n            detect_quadrangle=self.detect_quadrangle\n        )\n        return list(batches_targets)\n\n    def compute_inputs_targets(self, group, debug=False):\n        \"\"\"\n        Compute inputs and target outputs for the network.\n        \"\"\"\n\n        # load images and annotations\n        # list\n        image_group = self.load_image_group(group)\n        annotations_group = self.load_annotations_group(group)\n\n        # check validity of annotations\n        image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)\n\n        # randomly apply visual effect\n        image_group, annotations_group = self.random_visual_effect_group(image_group, annotations_group)\n\n        # randomly transform data\n        # image_group, annotations_group = self.random_transform_group(image_group, annotations_group)\n\n        # randomly apply misc effect\n        image_group, annotations_group = self.random_misc_group(image_group, annotations_group)\n\n        # perform preprocessing steps\n        image_group, annotations_group = self.preprocess_group(image_group, annotations_group)\n\n        # check validity of annotations\n        image_group, annotations_group = self.clip_transformed_annotations(image_group, annotations_group, group)\n\n        assert len(image_group) != 0\n        assert len(image_group) == len(annotations_group)\n\n        if self.detect_quadrangle:\n            # compute alphas and ratio for targets\n            self.compute_alphas_and_ratios(annotations_group)\n\n        # compute network inputs\n        inputs = self.compute_inputs(image_group, annotations_group)\n\n        # compute network targets\n        targets = self.compute_targets(image_group, annotations_group)\n\n        if debug:\n            return inputs, targets, annotations_group\n\n        return inputs, 
targets\n\n def __len__(self):\n \"\"\"\n Number of batches for generator.\n \"\"\"\n\n return len(self.groups)\n\n def __getitem__(self, index):\n \"\"\"\n Keras sequence method for generating batches.\n \"\"\"\n group = self.groups[index]\n inputs, targets = self.compute_inputs_targets(group)\n return inputs, targets\n\n def preprocess_image(self, image):\n # image, RGB\n image_height, image_width = image.shape[:2]\n if image_height > image_width:\n scale = self.image_size / image_height\n resized_height = self.image_size\n resized_width = int(image_width * scale)\n else:\n scale = self.image_size / image_width\n resized_height = int(image_height * scale)\n resized_width = self.image_size\n\n image = cv2.resize(image, (resized_width, resized_height))\n image = image.astype(np.float32)\n image /= 255.\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n image -= mean\n image /= std\n pad_h = self.image_size - resized_height\n pad_w = self.image_size - resized_width\n image = np.pad(image, [(0, pad_h), (0, pad_w), (0, 0)], mode='constant')\n return image, scale\n\n def get_augmented_data(self, group):\n \"\"\"\n Compute inputs and target outputs for the network.\n \"\"\"\n\n # load images and annotations\n # list\n image_group = self.load_image_group(group)\n annotations_group = self.load_annotations_group(group)\n\n # check validity of annotations\n image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)\n\n # randomly apply visual effect\n # image_group, annotations_group = self.random_visual_effect_group(image_group, annotations_group)\n\n # randomly transform data\n # image_group, annotations_group = self.random_transform_group(image_group, annotations_group)\n\n # randomly apply misc effect\n # image_group, annotations_group = self.random_misc_group(image_group, annotations_group)\n\n # perform preprocessing steps\n image_group, annotations_group = self.preprocess_group(image_group, annotations_group)\n\n # check validity of annotations\n image_group, annotations_group = self.clip_transformed_annotations(image_group, annotations_group, group)\n\n assert len(image_group) != 0\n assert len(image_group) == len(annotations_group)\n\n # compute alphas for targets\n self.compute_alphas_and_ratios(annotations_group)\n\n return image_group, annotations_group\n"
]
| [
[
"numpy.pad",
"numpy.clip",
"numpy.min",
"numpy.max",
"numpy.delete",
"numpy.array",
"numpy.where",
"numpy.zeros"
]
]
|
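The `compute_alphas_and_ratios` method in the record above encodes each quadrangle relative to its axis-aligned bounding box. Below is a minimal standalone numpy sketch of that geometry, assuming the vertex ordering the indexing implies (vertices 0 and 2 contribute x offsets, vertices 1 and 3 contribute y offsets); the toy quadrangle is made up:

    import numpy as np

    # One toy quadrangle in the order the generator assumes:
    # shape (1, 4, 2) -> (batch of quadrangles, vertices, xy).
    quad = np.array([[[3., 0.], [10., 4.], [6., 8.], [0., 5.]]])

    xmin, ymin = quad.min(axis=1)[:, 0], quad.min(axis=1)[:, 1]
    xmax, ymax = quad.max(axis=1)[:, 0], quad.max(axis=1)[:, 1]

    # Relative offsets of the four vertices along the box edges.
    alphas = np.stack([
        (quad[:, 0, 0] - xmin) / (xmax - xmin),
        (quad[:, 1, 1] - ymin) / (ymax - ymin),
        (xmax - quad[:, 2, 0]) / (xmax - xmin),
        (ymax - quad[:, 3, 1]) / (ymax - ymin),
    ], axis=1)

    # Quadrangle area as a fraction of its bounding box: one minus the
    # four corner triangles cut off by the quadrangle's edges.
    a1 = 0.5 * alphas[:, 0] * (1 - alphas[:, 3])
    a2 = 0.5 * alphas[:, 1] * (1 - alphas[:, 0])
    a3 = 0.5 * alphas[:, 2] * (1 - alphas[:, 1])
    a4 = 0.5 * alphas[:, 3] * (1 - alphas[:, 2])
    print(alphas, 1 - a1 - a2 - a3 - a4)  # ratio ~= 0.519 for this quadrangle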
trinh-hoang-hiep/iching | [
"e1feae5741c3cbde535d7a275b01d4f0cf9e21ed"
]
| [
"fas/bktr/bktr_engine.py"
]
| [
"#\r\nimport datetime as dt\r\nimport pandas as pd\r\nimport sys\r\nfrom fas.bktr.position import Position\r\nfrom fas.bktr.mean_reverting_strategy import MeanRevertingStrategy\r\nfrom fas.bktr.order import Order\r\nfrom fas.bktr.market_data import MarketData\r\nfrom fas.bktr.market_data_source import MarketDataSource\r\n\r\n\r\nclass BktrEngine(object):\r\n def __init__(self, symbol, start_date, end_date):\r\n self.name = 'fas.bktr.BktrEngine'\r\n self.symbol = symbol\r\n self.start_date = start_date\r\n self.end_date = end_date\r\n self.market_data = MarketData()\r\n self.market_data_sources = [] # 所有的市场数据源\r\n self.strategy = None \r\n self.unfilled_orders = []\r\n self.positions = {}\r\n self.current_prices = None\r\n self.rpnl, self.upnl = pd.DataFrame(), pd.DataFrame()\r\n self.issued_orders = []\r\n self.filled_orders = []\r\n\r\n def start_engine(self):\r\n print('易经量化回测引擎 v0.0.2')\r\n self.strategy = MeanRevertingStrategy(self.symbol)\r\n self.strategy.event_send_order = self.evthandler_order\r\n mds = MarketDataSource(self.symbol)\r\n current_date = dt.datetime.strptime(self.start_date, '%Y-%m-%d')\r\n end_date_dt = dt.datetime.strptime(self.end_date, '%Y-%m-%d')\r\n while True:\r\n market_ts = current_date.strftime('%Y-%m-%d')\r\n delta_date = dt.timedelta(days=1)\r\n current_date += delta_date\r\n if current_date > end_date_dt:\r\n break\r\n tick_data = mds.get_tick_date(self.symbol, market_ts)\r\n if tick_data is not None:\r\n self.market_data.set_tick_data(self.symbol, tick_data)\r\n self.evthandler_tick(self.market_data)\r\n\r\n def get_timestamp(self):\r\n tick_data = self.current_prices.get_tick_data(self.symbol)\r\n return tick_data.timestamp\r\n\r\n def get_trade_date(self):\r\n return str(self.get_timestamp())[:10]\r\n\r\n def get_position(self, symbol):\r\n if symbol not in self.positions:\r\n position = Position()\r\n position.symbol = symbol\r\n self.positions[symbol] = position\r\n return self.positions[symbol]\r\n\r\n def update_filled_position(self, symbol, quant, is_buy, price, timestamp, market_data):\r\n position = self.get_position(self.symbol)\r\n position.event_fill(timestamp, is_buy, quant, price)\r\n self.strategy.event_position(self.positions)\r\n self.rpnl.loc[timestamp, \"rpnl\"] = position.realized_pnl\r\n order_msg = ' 执行订单:{0}; 股票:{1}; 操作:{2}; 数量:{3}; 价格:{4};'.format(\r\n self.get_trade_date(), symbol, 'BUY' if is_buy else 'SELL',\r\n quant, price\r\n )\r\n self.filled_orders.append(order_msg)\r\n\r\n def evthandler_order(self, market_data, order):\r\n tick_data = market_data.get_tick_data(self.symbol)\r\n self.unfilled_orders.append(order)\r\n order_msg = ' 发布订单: {0}; 股票代码: {1}; 操作: {2}; 数量:{3}; 收盘价:{4};'.format(\r\n self.get_trade_date(), order.symbol, \r\n 'BUY' if order.is_buy else 'SELL', order.quant,\r\n tick_data.close\r\n )\r\n self.issued_orders.append(order_msg)\r\n\r\n def match_order_book(self, prices):\r\n if len(self.unfilled_orders) > 0:\r\n self.unfilled_orders = \\\r\n [order for order in self.unfilled_orders\r\n if self.is_order_unmatched(order, prices)]\r\n\r\n def is_order_unmatched(self, order, prices):\r\n symbol = order.symbol\r\n #timestamp = prices.get_timestamp(symbol)\r\n tick_data = prices.get_tick_data(symbol)\r\n timestamp = tick_data.timestamp\r\n if order.order_type==Order.OT_MARKET_ORDER and timestamp > order.timestamp:\r\n # Order is matched and filled.\r\n order.is_filled = True\r\n tick_data = prices.get_tick_data(symbol)\r\n open_price = tick_data.open # 是否改为以收盘价成交?\r\n order.filled_timestamp = timestamp\r\n 
order.filled_price = open_price\r\n self.update_filled_position(symbol,\r\n order.quant,\r\n order.is_buy,\r\n open_price,\r\n timestamp,\r\n prices\r\n )\r\n self.strategy.event_order(order)\r\n return False\r\n return True\r\n\r\n def update_position_status(self, symbol, market_data):\r\n if symbol in self.positions:\r\n position = self.positions[symbol]\r\n tick_data = market_data.get_tick_data(symbol)\r\n close_price = tick_data.close\r\n position.update_unrealized_pnl(close_price)\r\n self.upnl.loc[self.get_timestamp(), \"upnl\"] = \\\r\n position.unrealized_pnl\r\n\r\n def evthandler_tick(self, prices):\r\n # prices 实际上是market_data\r\n self.current_prices = prices\r\n self.strategy.event_tick(self, prices)\r\n self.match_order_book(prices)\r\n self.update_position_status(self.symbol, prices)\r\n self.display_current_status(prices)\r\n\r\n def display_current_status(self, market_data):\r\n if self.symbol in self.positions:\r\n position = self.positions[self.symbol]\r\n tick_data = market_data.get_tick_data(self.symbol)\r\n print('日期:{0}; 价格:{1:0.2f}, {2:0.2f}, {3:0.2f}, {4:0.2f};持有:{5}; '\\\r\n '资金:{6:0.2f}; 未实现损益:{7:0.2f}; 已实现损益:{8:0.2f};'.format(\r\n self.get_trade_date(), \r\n tick_data.open, tick_data.high,\r\n tick_data.low, tick_data.close,\r\n position.net_quants, position.position_value,\r\n position.unrealized_pnl, position.realized_pnl\r\n ))\r\n for order_msg in self.issued_orders:\r\n print(order_msg)\r\n self.issued_orders = []\r\n for order_msg in self.filled_orders:\r\n print(order_msg)\r\n self.filled_orders = []\r\n"
]
| [
[
"pandas.DataFrame"
]
]
|
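A minimal usage sketch for the engine above; the symbol and date range are hypothetical, and MarketDataSource is assumed to be able to serve tick data for them:

    from fas.bktr.bktr_engine import BktrEngine

    # Hypothetical instrument and backtest window.
    engine = BktrEngine('600036', '2019-01-01', '2019-06-30')
    engine.start_engine()  # steps day by day, printing order and position status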
AlastairMelville/machine_learning | [
"a2bcb78de9b235faf49cabb1bd38bbed08b04f42"
]
| [
"Experiments/FindSceneDepth.py"
]
| [
"import numpy as np\r\nimport cv2\r\nfrom matplotlib import pyplot as plt\r\nfrom win32 import win32gui\r\nimport win32ui, win32con, win32api\r\n\r\n\r\n\r\ndef grab_screen(region=None):\r\n hwin = win32gui.GetDesktopWindow()\r\n if region:\r\n left,top,x2,y2 = region\r\n width = x2 - left + 1\r\n height = y2 - top + 1\r\n else:\r\n width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)\r\n height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)\r\n left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)\r\n top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)\r\n\r\n hwindc = win32gui.GetWindowDC(hwin)\r\n srcdc = win32ui.CreateDCFromHandle(hwindc)\r\n memdc = srcdc.CreateCompatibleDC()\r\n bmp = win32ui.CreateBitmap()\r\n bmp.CreateCompatibleBitmap(srcdc, width, height)\r\n memdc.SelectObject(bmp)\r\n memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)\r\n\r\n signedIntsArray = bmp.GetBitmapBits(True)\r\n img = np.fromstring(signedIntsArray, dtype='uint8')\r\n img.shape = (height,width,4)\r\n\r\n srcdc.DeleteDC()\r\n memdc.DeleteDC()\r\n win32gui.ReleaseDC(hwin, hwindc)\r\n win32gui.DeleteObject(bmp.GetHandle())\r\n\r\n return img#cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)\r\n\r\n\r\ndef draw_lines(img, lines):\r\n try:\r\n for line in lines:\r\n coords = line[0];\r\n LeftPoint = (coords[0],coords[1]);\r\n RightPoint = (coords[2],coords[3]);\r\n ChangeInX = RightPoint[0] - LeftPoint[0];\r\n ChangeInY = RightPoint[1] - LeftPoint[1];\r\n #if (abs(ChangeInX) > 0.1 and abs(ChangeInY) > 0.1):\r\n # continue;\r\n \r\n cv2.line(img, LeftPoint, RightPoint, [255,255,255], 1);\r\n #cv2.line(np.zeros(img.shape), LeftPoint, RightPoint, [255,255,255], 1); # draw on a new/blank image\r\n except:\r\n #print(\"error\");\r\n pass\r\n\r\n\r\n\r\n\r\n\r\ndef FindDepthUsingSaturation(Screen):\r\n Hsv = cv2.cvtColor(Screen, cv2.COLOR_BGR2HSV);\r\n Saturation = Hsv[:, :, 1];\r\n \r\n BlurWithEdges = cv2.bilateralFilter(Saturation,9,75,75)\r\n \r\n Edges = cv2.Canny(BlurWithEdges, threshold1=150, threshold2=250)\r\n \r\n Lines = cv2.HoughLinesP(Edges, 1, np.pi/180, 40, np.array([]), minLineLength=150, maxLineGap=10);\r\n draw_lines(BlurWithEdges, Lines);\r\n \r\n # Round pixel values to make things easier\r\n #Multiple = 150;\r\n #BlurWithEdges = np.floor((BlurWithEdges * Multiple) + 0.5) / Multiple;\r\n Mask1 = (BlurWithEdges <= 51);\r\n Mask2 = ((BlurWithEdges > 51) & (BlurWithEdges <= 102));\r\n Mask3 = ((BlurWithEdges > 102) & (BlurWithEdges <= 153));\r\n Mask4 = ((BlurWithEdges > 153) & (BlurWithEdges <= 204));\r\n Mask5 = ((BlurWithEdges > 204) & (BlurWithEdges <= 255));\r\n BlurWithEdges[Mask2] = 0;\r\n BlurWithEdges[Mask2] = 51;\r\n BlurWithEdges[Mask3] = 102;\r\n BlurWithEdges[Mask4] = 153;\r\n BlurWithEdges[Mask5] = 204;\r\n \r\n #Grey = cv2.cvtColor(Screen, cv2.COLOR_BGR2GRAY);\r\n #Blur = cv2.adaptiveThreshold(Saturation,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\\\r\n # cv2.THRESH_BINARY,11,2) \r\n \r\n cv2.imshow('Hsv', BlurWithEdges)\r\n\r\n\r\nPreviousSceen = None;\r\n\r\nwhile(True):\r\n Screen = grab_screen(region = (0, 40, 1024, 768))\r\n \r\n #FindDepthUsingSaturation(Screen);\r\n \r\n #if cv2.waitKey(25) & 0xFF == ord('q'):\r\n # cv2.destroyAllWindows()\r\n # break\r\n \r\n #continue;\r\n \r\n \r\n ScreenGrey = cv2.cvtColor(Screen,cv2.COLOR_BGR2GRAY)\r\n #imgL = cv2.imread('tsukuba_l.png',0)\r\n #imgR = cv2.imread('tsukuba_r.png',0)\r\n \r\n if (PreviousSceen is None):\r\n PreviousSceen = ScreenGrey;\r\n continue;\r\n\r\n stereo = 
cv2.StereoBM_create(numDisparities=80, blockSize=45)\r\n disparity = stereo.compute(PreviousSceen, ScreenGrey)\r\n \r\n # Overwirte previous screen\r\n PreviousSceen = ScreenGrey;\r\n \r\n plt.imshow(disparity,'gray')\r\n plt.ion()\r\n plt.show()\r\n plt.draw()\r\n plt.pause(0.001)\r\n \r\n if cv2.waitKey(25) & 0xFF == ord('q'):\r\n break\r\n "
]
| [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.draw",
"numpy.fromstring",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.ion"
]
]
|
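Note that the loop in the record above feeds two consecutive grabs of the same screen into block matching, while cv2.StereoBM is designed for a rectified left/right stereo pair, so the disparity is only meaningful when the viewpoint shifts horizontally between frames. A sketch of the canonical usage, with placeholder file names (numDisparities must be a multiple of 16 and blockSize an odd value):

    import cv2

    # Rectified stereo pair (placeholder paths).
    imgL = cv2.imread('left.png', cv2.IMREAD_GRAYSCALE)
    imgR = cv2.imread('right.png', cv2.IMREAD_GRAYSCALE)

    stereo = cv2.StereoBM_create(numDisparities=80, blockSize=45)
    disparity = stereo.compute(imgL, imgR)  # int16 disparities, scaled by 16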
IntheGrass/citeomatic_learning | [
"b0fee3c2c9b6462e1878eb5aa3060bee8c86f923"
]
| [
"tests/test_corpus.py"
]
| [
"#!/usr/bin/env python\nimport json\nimport logging\nimport os\nimport random\nimport time\n\nimport numpy as np\n\nfrom citeomatic import features\nfrom citeomatic.common import FieldNames\nfrom citeomatic.corpus import Corpus\n\n\ndef _time(op):\n st = time.time()\n r = op()\n ed = time.time()\n print(op, ed - st)\n return r\n\n\nWORDS = '''\naccretion\nagreeably\nanguishing\narmor\navenues\nbassoon\nbier\nbobs\nbrightest\nbystander\ncarpetbags\ncharbroiling\ncivilian\ncollaboration\ncondition\nconvincingly\ncrankcases\ncurtsying\ndeeper\ndesignate\ndisbursements\ndivorce\nduckbill\nelliptical\nenviously\nexiling\nfateful\nfixture\nforces\nfulcra\ngeologic\ngraffiti\ngyration\nhearten\nhomeyness\nhyphenated\ninbreed\ninjections\ninundate\njubilantly\nlamebrain\nliberalism\nloss\nmanna\nmemorials\nmiscasting\nmortifies\nnaturalistic\nnoses\nopened\noverpopulation\nparqueted\nperform\npillow\npolitest\npreferable\npronoun\npyjamas\nrattles\nreferees\nrepresentation\nrhino\nrumples\nscarcity\nseldom\nshipments\nsizes\nsneeringly\nspeakers\nstake\nstratums\nsummoning\nsynthetic\ntenderness\ntingle\ntransiting\nturncoat\nuneasily\nurchin\nviolets\nwayfaring\nwintertime\nzaniest\n'''.split('\\n')\n\nWORDS = WORDS * 100\nprint(len(WORDS))\n\n\ndef build_test_corpus(source_file, target_file):\n try:\n os.unlink(target_file)\n except:\n pass\n\n with open(source_file, 'w') as tf:\n for i in range(100):\n json.dump({\n FieldNames.TITLE: ' '.join(random.sample(WORDS, 10)),\n FieldNames.ABSTRACT: ' '.join(random.sample(WORDS, 1000)),\n FieldNames.AUTHORS: [],\n FieldNames.OUT_CITATIONS: [\n str(x) for x in random.sample(range(100), 2)\n ],\n FieldNames.IN_CITATION_COUNT: len([\n str(x) for x in random.sample(range(100), 2)\n ]),\n FieldNames.KEY_PHRASES: random.sample(WORDS, 3),\n FieldNames.YEAR: 2011,\n FieldNames.PAPER_ID: str(i),\n FieldNames.VENUE: 'v-{}'.format(random.randint(1, 5))\n }, tf\n )\n tf.write('\\n')\n\n Corpus.build(target_file, source_file)\n\n\ndef test_corpus_conversion():\n build_test_corpus('./tmp/foo.json', './tmp/foo.sqlite')\n\n\ndef test_corpus_iterator():\n corpus = Corpus.load('./tmp/foo.sqlite')\n iter_ids = []\n for doc in corpus:\n iter_ids.append(doc.id)\n overlap_n = len(set(iter_ids).intersection(set(corpus.all_ids)))\n assert overlap_n == corpus.n_docs\n\n\ndef test_featurizer_and_data_gen():\n build_test_corpus('./tmp/foo.json', './tmp/foo.sqlite')\n corpus = Corpus.load('./tmp/foo.sqlite')\n featurizer = features.Featurizer()\n featurizer.fit(corpus, max_df_frac=1.0)\n\n dg = features.DataGenerator(corpus, featurizer)\n gen = dg.triplet_generator(\n paper_ids=corpus.all_ids,\n candidate_ids=corpus.all_ids,\n batch_size=128,\n neg_to_pos_ratio=5\n )\n\n # make sure we can get features\n for i in range(10):\n print(i)\n X, y = next(gen)\n\n # correct batch size\n assert len(y) >= 128\n # positives, hard negatives, easy negatives\n assert len(np.unique(y)) == 3\n # correct padding\n assert X['query-abstract-txt'].shape[1] == featurizer.max_abstract_len\n assert X['query-title-txt'].shape[1] == featurizer.max_title_len\n # no new words\n assert set(featurizer.word_indexer.word_to_index.keys()).difference(WORDS) == set()\n\n q, ex, labels = next(dg._listwise_examples(\n corpus.all_ids,\n corpus.all_ids\n ))\n\n # query id should not be in candidates\n assert q.id not in [i.id for i in ex]\n\n # pos ids should be out_citations\n pos_docs = [i.id for i, j in zip(ex, labels) if j == np.max(labels)]\n assert set(pos_docs) == set(q.out_citations)\n\n # neg ids should be 
NOT out_citations\n neg_docs = [i.id for i, j in zip(ex, labels) if j < np.max(labels)]\n assert np.all([i not in neg_docs for i in q.out_citations])\n\n # test variable margin off\n dg = features.DataGenerator(corpus, featurizer, use_variable_margin=False)\n gen = dg.triplet_generator(\n paper_ids=corpus.all_ids,\n candidate_ids=corpus.all_ids,\n batch_size=128,\n neg_to_pos_ratio=5\n )\n\n X, y = next(gen)\n print(dg.margins_offset_dict)\n assert len(np.unique(y)) == 2\n\n\n\ndef test_data_isolation():\n build_test_corpus('./tmp/foo.json', './tmp/foo.sqlite')\n corpus = Corpus.load('./tmp/foo.sqlite')\n\n assert len(set(corpus.train_ids).intersection(set(corpus.valid_ids))) == 0\n assert len(set(corpus.train_ids).intersection(set(corpus.test_ids))) == 0\n assert len(set(corpus.valid_ids).intersection(set(corpus.test_ids))) == 0\n\n featurizer = features.Featurizer()\n featurizer.fit(corpus, max_df_frac=1.0)\n dg = features.DataGenerator(corpus, featurizer)\n\n query, examples, labels = next(dg._listwise_examples(corpus.train_ids))\n examples_ids = [doc.id for doc in examples]\n\n assert len(set(examples_ids).intersection(set(corpus.valid_ids))) == 0\n assert len(set(examples_ids).intersection(set(corpus.test_ids))) == 0\n\n dg = features.DataGenerator(corpus, featurizer)\n query, examples, labels = next(dg._listwise_examples(paper_ids=corpus.valid_ids,\n candidate_ids=corpus.valid_ids + corpus.train_ids))\n examples_ids = [doc.id for doc in examples]\n\n assert len(set(examples_ids).intersection(set(corpus.train_ids))) > 0\n assert len(set(examples_ids).intersection(set(corpus.test_ids))) == 0\n\n dg = features.DataGenerator(corpus, featurizer)\n query, examples, labels = next(dg._listwise_examples(paper_ids=corpus.test_ids,\n candidate_ids=corpus.valid_ids + corpus.train_ids))\n examples_ids = [doc.id for doc in examples]\n assert len(set(examples_ids).intersection(set(corpus.test_ids))) == 0\n\n dg = features.DataGenerator(corpus, featurizer)\n query, examples, labels = next(\n dg._listwise_examples(paper_ids=corpus.test_ids,\n candidate_ids=corpus.valid_ids + corpus.train_ids + corpus.test_ids))\n examples_ids = [doc.id for doc in examples]\n #assert len(set(examples_ids).intersection(set(corpus.test_ids))) != 0\n\n\nif __name__ == '__main__':\n import pytest\n\n pytest.main([__file__, '-s'])\n"
]
| [
[
"numpy.all",
"numpy.max",
"numpy.unique"
]
]
|
denred0/pytorchvideo | [
"366138bc36e636816e28a4204d9c2c38285128c1"
]
| [
"pytorchvideo/transforms/functional.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\nimport math\nfrom typing import Tuple\n\nimport numpy as np\nimport torch\n\n\ntry:\n import cv2\nexcept ImportError:\n _HAS_CV2 = False\nelse:\n _HAS_CV2 = True\n\n\ndef uniform_temporal_subsample(\n x: torch.Tensor, num_samples: int, temporal_dim: int = -3\n) -> torch.Tensor:\n \"\"\"\n Uniformly subsamples num_samples indices from the temporal dimension of the video.\n When num_samples is larger than the size of temporal dimension of the video, it\n will sample frames based on nearest neighbor interpolation.\n\n Args:\n x (torch.Tensor): A video tensor with dimension larger than one with torch\n tensor type includes int, long, float, complex, etc.\n num_samples (int): The number of equispaced samples to be selected\n temporal_dim (int): dimension of temporal to perform temporal subsample.\n\n Returns:\n An x-like Tensor with subsampled temporal dimension.\n \"\"\"\n t = x.shape[temporal_dim]\n assert num_samples > 0 and t > 0\n # Sample by nearest neighbor interpolation if num_samples > t.\n indices = torch.linspace(0, t - 1, num_samples)\n indices = torch.clamp(indices, 0, t - 1).long()\n return torch.index_select(x, temporal_dim, indices)\n\n\[email protected]\ndef _interpolate_opencv(\n x: torch.Tensor, size: Tuple[int, int], interpolation: str\n) -> torch.Tensor:\n \"\"\"\n Down/up samples the input torch tensor x to the given size with given interpolation\n mode.\n Args:\n input (Tensor): the input tensor to be down/up sampled.\n size (Tuple[int, int]): expected output spatial size.\n interpolation: model to perform interpolation, options include `nearest`,\n `linear`, `bilinear`, `bicubic`.\n \"\"\"\n if not _HAS_CV2:\n raise ImportError(\n \"opencv is required to use opencv transforms. Please \"\n \"install with 'pip install opencv-python'.\"\n )\n\n _opencv_pytorch_interpolation_map = {\n \"nearest\": cv2.INTER_NEAREST,\n \"linear\": cv2.INTER_LINEAR,\n \"bilinear\": cv2.INTER_AREA,\n \"bicubic\": cv2.INTER_CUBIC,\n }\n assert interpolation in _opencv_pytorch_interpolation_map\n new_h, new_w = size\n img_array_list = [\n img_tensor.squeeze(0).numpy()\n for img_tensor in x.permute(1, 2, 3, 0).split(1, dim=0)\n ]\n resized_img_array_list = [\n cv2.resize(\n img_array,\n (new_w, new_h), # The input order for OpenCV is w, h.\n interpolation=_opencv_pytorch_interpolation_map[interpolation],\n )\n for img_array in img_array_list\n ]\n img_array = np.concatenate(\n [np.expand_dims(img_array, axis=0) for img_array in resized_img_array_list],\n axis=0,\n )\n img_tensor = torch.from_numpy(np.ascontiguousarray(img_array))\n img_tensor = img_tensor.permute(3, 0, 1, 2)\n return img_tensor\n\n\ndef short_side_scale(\n x: torch.Tensor,\n size: int,\n interpolation: str = \"bilinear\",\n backend: str = \"pytorch\",\n) -> torch.Tensor:\n \"\"\"\n Determines the shorter spatial dim of the video (i.e. width or height) and scales\n it to the given size. To maintain aspect ratio, the longer side is then scaled\n accordingly.\n\n Args:\n x (torch.Tensor): A video tensor of shape (C, T, H, W) and type torch.float32.\n size (int): The size the shorter side is scaled to.\n interpolation (str): Algorithm used for upsampling,\n options: nearest' | 'linear' | 'bilinear' | 'bicubic' | 'trilinear' | 'area'\n backend (str): backend used to perform interpolation. Options includes\n `pytorch` as default, and `opencv`. 
Note that opencv and pytorch behave\n differently on linear interpolation on some versions.\n https://discuss.pytorch.org/t/pytorch-linear-interpolation-is-different-from-pil-opencv/71181\n\n Returns:\n An x-like Tensor with scaled spatial dims.\n \"\"\" # noqa\n assert len(x.shape) == 4\n assert x.dtype == torch.float32\n assert backend in (\"pytorch\", \"opencv\")\n c, t, h, w = x.shape\n if w < h:\n new_h = int(math.floor((float(h) / w) * size))\n new_w = size\n else:\n new_h = size\n new_w = int(math.floor((float(w) / h) * size))\n if backend == \"pytorch\":\n return torch.nn.functional.interpolate(\n x, size=(new_h, new_w), mode=interpolation, align_corners=False\n )\n elif backend == \"opencv\":\n return _interpolate_opencv(x, size=(new_h, new_w), interpolation=interpolation)\n else:\n raise NotImplementedError(f\"{backend} backend not supported.\")\n\n\ndef uniform_temporal_subsample_repeated(\n frames: torch.Tensor, frame_ratios: Tuple[int], temporal_dim: int = -3\n) -> Tuple[torch.Tensor]:\n \"\"\"\n Prepare output as a list of tensors subsampled from the input frames. Each tensor\n maintain a unique copy of subsampled frames, which corresponds to a unique\n pathway.\n\n Args:\n frames (tensor): frames of images sampled from the video. Expected to have\n torch tensor (including int, long, float, complex, etc) with dimension\n larger than one.\n frame_ratios (tuple): ratio to perform temporal down-sampling for each pathways.\n temporal_dim (int): dimension of temporal.\n\n Returns:\n frame_list (tuple): list of tensors as output.\n \"\"\"\n temporal_length = frames.shape[temporal_dim]\n frame_list = []\n for ratio in frame_ratios:\n pathway = uniform_temporal_subsample(\n frames, temporal_length // ratio, temporal_dim\n )\n frame_list.append(pathway)\n\n return frame_list\n\n\ndef convert_to_one_hot(\n targets: torch.Tensor,\n num_class: int,\n label_smooth: float = 0.0,\n) -> torch.Tensor:\n \"\"\"\n This function converts target class indices to one-hot vectors, given the number of classes.\n\n Args:\n targets (torch.Tensor): Index labels to be converted.\n num_class (int): Total number of classes.\n label_smooth (float): Label smooth value for non-target classes. Label smooth\n is disabled by default (0).\n \"\"\"\n assert (\n torch.max(targets).item() < num_class\n ), \"Class Index must be less than number of classes\"\n assert 0 <= label_smooth < 1.0, \"Label smooth value needs to be between 0 and 1.\"\n\n non_target_value = label_smooth / num_class\n target_value = 1.0 - label_smooth + non_target_value\n one_hot_targets = torch.full(\n (targets.shape[0], num_class),\n non_target_value,\n dtype=torch.long if label_smooth == 0.0 else None,\n device=targets.device,\n )\n one_hot_targets.scatter_(1, targets.long().view(-1, 1), target_value)\n return one_hot_targets\n\n\ndef uniform_crop(frames: torch.Tensor, size: int, spatial_idx: int = 1) -> torch.Tensor:\n \"\"\"\n Perform uniform spatial sampling on the frames based on three-crop setting.\n If width is larger than height, take left, center and right crop.\n If height is larger than width, take top, center, and bottom crop.\n\n Args:\n frames (tensor): A video tensor of shape (C, T, H, W) to perform uniform crop.\n size (int): Desired height and weight size to crop the frames.\n spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width\n is larger than height. 
Or 0, 1, or 2 for top, center, and bottom\n crop if height is larger than width.\n\n Returns:\n cropped (tensor): A cropped video tensor of shape (C, T, size, size).\n \"\"\"\n\n assert spatial_idx in [0, 1, 2]\n height = frames.shape[2]\n width = frames.shape[3]\n\n y_offset = int(math.ceil((height - size) / 2))\n x_offset = int(math.ceil((width - size) / 2))\n\n if height > width:\n if spatial_idx == 0:\n y_offset = 0\n elif spatial_idx == 2:\n y_offset = height - size\n else:\n if spatial_idx == 0:\n x_offset = 0\n elif spatial_idx == 2:\n x_offset = width - size\n cropped = frames[:, :, y_offset : y_offset + size, x_offset : x_offset + size]\n\n return cropped\n"
]
| [
[
"torch.linspace",
"numpy.expand_dims",
"torch.max",
"torch.full",
"numpy.ascontiguousarray",
"torch.nn.functional.interpolate",
"torch.clamp",
"torch.index_select"
]
]
|
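A short sketch chaining three of the public helpers from the record above on a random clip; the shape comments follow the scaling and cropping arithmetic in the code:

    import torch
    from pytorchvideo.transforms.functional import (
        short_side_scale, uniform_crop, uniform_temporal_subsample)

    video = torch.rand(3, 16, 240, 320)            # (C, T, H, W), float32
    clip = uniform_temporal_subsample(video, 8)    # -> (3, 8, 240, 320)
    clip = short_side_scale(clip, 112)             # -> (3, 8, 112, 149)
    clip = uniform_crop(clip, 112, spatial_idx=1)  # center crop -> (3, 8, 112, 112)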
james-s-willis/kotekan | [
"155e874bb039702cec72c1785362a017548aa00a"
]
| [
"tests/test_replacevis.py"
]
| [
"# === Start Python 2/3 compatibility\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom future.builtins import * # noqa pylint: disable=W0401, W0614\nfrom future.builtins.disabled import * # noqa pylint: disable=W0401, W0614\n\n# === End Python 2/3 compatibility\n\nimport pytest\nimport numpy as np\n\nfrom kotekan import runner\n\n\nreplace_params = {\n \"num_elements\": 16,\n \"num_ev\": 0,\n \"total_frames\": 128,\n \"cadence\": 5.0,\n \"fill_ij\": True,\n \"freq\": list(range(16)),\n \"buffer_depth\": 5,\n \"dataset_manager\": {\"use_dataset_broker\": False},\n}\n\n\[email protected](scope=\"module\")\ndef replace_data(tmpdir_factory):\n\n tmpdir = tmpdir_factory.mktemp(\"replace\")\n\n fakevis_buffer = runner.FakeVisBuffer(\n freq=replace_params[\"freq\"], num_frames=replace_params[\"total_frames\"]\n )\n\n dump_buffer = runner.DumpVisBuffer(str(tmpdir))\n\n test = runner.KotekanStageTester(\n \"ReplaceVis\", {}, fakevis_buffer, dump_buffer, replace_params\n )\n\n test.run()\n\n yield dump_buffer.load()\n\n\ndef test_replace(replace_data):\n\n # for frame in replace_data:\n # print frame.metadata.freq_id, frame.metadata.fpga_seq\n # print frame.vis\n\n for frame in replace_data:\n assert (frame.vis.real[0::2] == frame.metadata.freq_id).all()\n assert (\n frame.vis.real[1::2] == np.array(frame.metadata.fpga_seq).astype(np.float32)\n ).all()\n assert (frame.vis.imag == np.arange(frame.metadata.num_prod)).all()\n"
]
| [
[
"numpy.arange",
"numpy.array"
]
]
|
green-cabbage/dilepton_transformer | [
"d8a74630386040b97dccc57c0cbe17f43bbd4fea"
]
| [
"DileptonMLInput.py"
]
| [
"#!/usr/bin/env python\n\"\"\"\ntaken from \nhttps://github.com/jthiema/Dilepton_PyROOT\nauthor: Jason Thema\n\"\"\"\nimport ROOT\nimport math\nimport numpy as np\n\ndef deltaPhi(phi1, phi2):\n dphi = phi1 - phi2\n if dphi > math.pi:\n dphi = dphi - 2*math.pi \n if dphi < -math.pi:\n dphi = dphi + 2*math.pi \n return dphi\n\ndef deltaR(phi1, eta1, phi2, eta2):\n dphi = deltaPhi(phi1,phi2)\n deta = eta1 - eta2\n return math.sqrt(deta*deta + dphi*dphi) \n \nfilename = \"/mnt/hadoop/store/group/local/cmstop/jthiema/ntuples2018/production_2018_TAG_V001/production_2018_TAG_V001/ttbarsignalplustau_fromDilepton_0.root\"\n\nfile = ROOT.TFile.Open(filename, \"read\")\n\n# Beginning RECO branches\n\n# The NTuple TTree is located in the writeNTuple TDirectory for the TFile\ntree = file.Get(\"writeNTuple/NTuple\")\n\n# The total number of events (int) in the TTree\nnEvents = tree.GetEntries()\n\n# The Lepton PDG ID identifies whether the particle is a electron, positron, muon, or anit-muon\nv_lepPdgId = ROOT.std.vector('int')()\ntree.SetBranchAddress(\"lepPdgId\",v_lepPdgId)\n\n# Vector of four-vectors for the leptons (electrons and muons)\nv_leptons = ROOT.std.vector('ROOT::Math::LorentzVector<ROOT::Math::PtEtaPhiM4D<float>>')()\ntree.SetBranchAddress(\"leptons\",v_leptons)\n\n# Vector of Isolation scores for leptons\nv_lepPfIso = ROOT.std.vector('float')()\ntree.SetBranchAddress(\"lepPfIso\",v_lepPfIso)\n\n# Super cluster eta of calorimeter activity\nv_lepSCEta = ROOT.std.vector('float')()\ntree.SetBranchAddress(\"lepSCEta\",v_lepSCEta)\n\n# Integer for the Muon selection\nv_lepID_MuonTight = ROOT.std.vector('int')()\ntree.SetBranchAddress(\"lepID_MuonTight\",v_lepID_MuonTight)\n\n# Integer for the Electron selection\nv_lepID_ElecCutBased = ROOT.std.vector('int')()\ntree.SetBranchAddress(\"lepID_ElecCutBased\",v_lepID_ElecCutBased)\n\n# Vector of four-vectors for jets\nv_jets = ROOT.std.vector('ROOT::Math::LorentzVector<ROOT::Math::PtEtaPhiM4D<float>>')()\ntree.SetBranchAddress(\"jets\",v_jets)\n\n# Integer for jet selection\nv_jetPFID = ROOT.std.vector('int')()\ntree.SetBranchAddress(\"jetPFID\",v_jetPFID)\n\n# BTag Score for the jet\nv_jetBTagDeepCSV = ROOT.std.vector('float')()\ntree.SetBranchAddress(\"jetBTagDeepCSV\",v_jetBTagDeepCSV)\n\n# Vector of four-vector for MET\nmet = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<float>')()\ntree.SetBranchAddress(\"met\",met)\n\n# Beginning GEN branches\n\n# Vector of four-vector for generated top\nGenTop = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<float>')()\ntree.SetBranchAddress(\"GenTop\",GenTop)\n\n# Vector of four-vector for generated anti-top\nGenAntiTop = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<float>')()\ntree.SetBranchAddress(\"GenAntiTop\",GenAntiTop)\n\n# Vector of four-vector for generated b-quark\nGenB = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<float>')()\ntree.SetBranchAddress(\"GenB\",GenB)\n\n# Vector of four-vector for generated anti b-quark\nGenAntiB = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<float>')()\ntree.SetBranchAddress(\"GenAntiB\",GenAntiB)\n\n# Vector of four-vector for generated W Plus\nGenWPlus = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<float>')()\ntree.SetBranchAddress(\"GenWPlus\",GenWPlus)\n\n# Vector of four-vector for generated W Minus\nGenWMinus = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<float>')()\ntree.SetBranchAddress(\"GenWMinus\",GenWMinus)\n\n# Vector of four-vector for generated lepton (GenLepton)\n\n# Vector of four-vector for generated anti-lepton 
(GenAntiLepton)\n\n\nDimuonMLInputList = []\nDimuonMLTruthOutputList = []\n\n# Decalaration and Booking of TH1 Histograms\nhGenBdeltaR = ROOT.TH1F(\"GenBdeltaR\", \"Delta R between GenB and reco jets\", 500, 0, 5)\nhGenAntiBdeltaR = ROOT.TH1F(\"GenAntiBdeltaR\", \"Delta R between GenAntiB and reco jets\", 500, 0, 5)\n\nhGenBdeltaR_gencut = ROOT.TH1F(\"GenBdeltaR_gencut\", \"Delta R between GenB and reco jets\", 500, 0, 5)\nhGenAntiBdeltaR_gencut = ROOT.TH1F(\"GenAntiBdeltaR_gencut\", \"Delta R between GenAntiB and reco jets\", 500, 0, 5)\n\n# Decalaration and Booking of TH2 Histograms\n\ninput_data = np.empty((0,6,6)) #np input array stack initialization\noutput_data = np.empty((0,6,3)) #np output array stack initialization\nfor i in range(nEvents):\n#for i in range(10000):\n\n# print \"Event \"+str(i)\n\n# DimuonMLInput[]\n# DimuonMLTruthOutput[]\n\n v_lepPdgId.clear()\n v_leptons.clear()\n v_lepPfIso.clear()\n v_lepSCEta.clear()\n v_lepID_MuonTight.clear()\n v_lepID_ElecCutBased.clear()\n v_lepID_MuonTight.clear()\n v_jets.clear()\n v_jetPFID.clear()\n v_jetBTagDeepCSV.clear()\n\n # Very important; this is where the data gets filled from the tree\n tree.GetEntry(i)\n \n # Begin Object Selection\n\n # Only consider events with at least 2 leptons\n if len(v_leptons) < 2: continue\n # Only consider events with oppositely charged leptons\n if v_lepPdgId[0]*v_lepPdgId[1] > 0 : continue\n # Only consider events with invariant mass of lepton pair > 20\n if (v_leptons[0] + v_leptons[1]).M() < 20.0 : continue \n # Only consider events with MET > 20\n if (met.Pt() < 20.0) : continue\n\n\n # Leading electron/positron cuts\n if abs(v_lepPdgId[0]) == 11 :\n if v_leptons[0].Pt() < 25 or (abs(v_lepSCEta[0]) > 1.4442 and abs(v_lepSCEta[0]) < 1.566) or v_leptons[0].Eta() > 2.4 : continue\n if v_lepID_ElecCutBased[0] != 4 : continue \n\n # Subleading electron/positron cuts\n if abs(v_lepPdgId[1]) == 11 :\n if v_leptons[1].Pt() < 20 or (abs(v_lepSCEta[1]) > 1.4442 and abs(v_lepSCEta[1]) < 1.566) or v_leptons[1].Eta() > 2.4 : continue\n if v_lepID_ElecCutBased[1] != 4 : continue \n\n # Subleading muon cuts\n if abs(v_lepPdgId[0]) == 13 :\n if v_leptons[0].Pt() < 25 or abs(v_leptons[0].Eta()) > 2.4 : continue\n if v_lepID_MuonTight[0] != 1 or v_lepPfIso[0] > 0.15 : continue \n\n # Subleading muon cuts\n if abs(v_lepPdgId[1]) == 13 :\n if v_leptons[1].Pt() < 15 or abs(v_leptons[1].Eta()) > 2.4 : continue\n if v_lepID_MuonTight[1] != 1 or v_lepPfIso[1] > 0.15 : continue \n\n\n selectedjets = []\n selectedjetsbtagscore = []\n\n for j in range(len(v_jets)):\n # Jet cuts\n if v_jets[j].Pt() < 30 or abs(v_jets[j].Eta()) > 2.4 : continue \n if v_jetPFID[j] != 3 : continue \n # Only consider jets that are isolated from leptons\n if deltaR(v_jets[j].Phi(),v_jets[j].Eta(),v_leptons[0].Phi(),v_leptons[0].Eta()) < 0.4 : continue \n if deltaR(v_jets[j].Phi(),v_jets[j].Eta(),v_leptons[1].Phi(),v_leptons[1].Eta()) < 0.4 : continue \n selectedjets.append(v_jets[j])\n selectedjetsbtagscore.append(v_jetBTagDeepCSV[j])\n\n\n # Only consider events with at least two selected jets\n if len(selectedjets) < 2 : continue \n energy_jet_list = [jet.E() for jet in selectedjets]\n best_jets=[] #select the four most energetic jets\n # print(\"lengths: \", len(selectedjets),\", \",len(selectedjetsbtagscore))\n for _ in range(4):\n max_idx = energy_jet_list.index(max(energy_jet_list))\n #print(\"max_idx: \", max_idx)\n best_jets.append([selectedjets[max_idx], selectedjetsbtagscore[max_idx]])\n energy_jet_list.pop(max_idx)\n 
selectedjets.pop(max_idx)\n selectedjetsbtagscore.pop(max_idx) #pop the max elements to get the next max element\n if len(energy_jet_list) == 0: break #if there are less than 4 selected jets, stop\n input = np.zeros((1,6,6)) #initialization of input for filling\n met_pt = met.Pt()\n met_phi = met.Phi()\n for idx in range(2):\n input[0,:,idx] = np.array([v_leptons[idx].Pt(), v_leptons[idx].Phi(), v_leptons[idx].Eta(), 0, met_pt, met_phi])\n for idx in range(len(best_jets)):\n input[0,:,idx+2] = np.array([best_jets[idx][0].Pt(), best_jets[idx][0].Phi(), best_jets[idx][0].Eta(), best_jets[idx][0].E(), best_jets[idx][0].M(), best_jets[idx][1] ])\n input_data = np.vstack((input_data, input))\n output = np.zeros((1,6,3)) # initialization of output for filling\n output[0,0,:] = np.array([GenB.Pt(), GenB.Phi(), GenB.Eta()])\n output[0,1,:] = np.array([GenAntiB.Pt(), GenAntiB.Phi(), GenAntiB.Eta()])\n output[0,2,:] = np.array([GenWPlus.Pt(), GenWPlus.Phi(), GenWPlus.Eta()])\n output[0,3,:] = np.array([GenWMinus.Pt(), GenWMinus.Phi(), GenWMinus.Eta()])\n output[0,4,:] = np.array([GenTop.Pt(), GenTop.Phi(), GenTop.Eta()])\n output[0,5,:] = np.array([GenAntiTop.Pt(), GenAntiTop.Phi(), GenAntiTop.Eta()])\n output_data = np.vstack((output_data, output))\nnp.save(\"X_raw.npy\", input_data)\nnp.save(\"Y_raw.npy\", output_data)\n\n\n# selectedbtaggedjets = []\n\n# for j in range(len(selectedjets)):\n# # Only consider jet with BTag score > 0.5\n# if selectedjetsbtagscore[j] < 0.5 : continue\n# selectedbtaggedjets.append(selectedjets[j])\n \n# # Only consider events with two BTagged jets\n# if len(selectedbtaggedjets) != 2 : continue \n\n\n# # Filling of the Histograms\n# for j in range(len(selectedbtaggedjets)):\n# if deltaR(selectedbtaggedjets[j].Phi(),selectedbtaggedjets[j].Eta(),GenB.Phi(),GenB.Eta()) < deltaR(selectedbtaggedjets[j].Phi(),selectedbtaggedjets[j].Eta(),GenAntiB.Phi(),GenAntiB.Eta()) :\n# hGenBdeltaR.Fill(deltaR(selectedbtaggedjets[j].Phi(),selectedbtaggedjets[j].Eta(),GenB.Phi(),GenB.Eta()))\n# else:\n# hGenAntiBdeltaR.Fill(deltaR(selectedbtaggedjets[j].Phi(),selectedbtaggedjets[j].Eta(),GenAntiB.Phi(),GenAntiB.Eta()))\n\n# if abs(GenB.Eta()) < 2.7 and abs(GenAntiB.Eta()) < 2.7 : \n# if deltaR(selectedbtaggedjets[j].Phi(),selectedbtaggedjets[j].Eta(),GenB.Phi(),GenB.Eta()) < deltaR(selectedbtaggedjets[j].Phi(),selectedbtaggedjets[j].Eta(),GenAntiB.Phi(),GenAntiB.Eta()) :\n# hGenBdeltaR_gencut.Fill(deltaR(selectedbtaggedjets[j].Phi(),selectedbtaggedjets[j].Eta(),GenB.Phi(),GenB.Eta()))\n# else:\n# hGenAntiBdeltaR_gencut.Fill(deltaR(selectedbtaggedjets[j].Phi(),selectedbtaggedjets[j].Eta(),GenAntiB.Phi(),GenAntiB.Eta()))\n\n\n\n# outHistFile = ROOT.TFile.Open(\"output.root\" ,\"RECREATE\")\n# outHistFile.cd()\n\n#c = ROOT.TCanvas(\"c\",\"c\",600,400)\n\n# Wrtiting of the histograms\n\n#hGenBdeltaR.Draw()\n#c.SaveAs(\"hGenBdeltaR.png\")\n# hGenBdeltaR.Write()\n# hGenBdeltaR_gencut.Write()\n \n#hGenAntiBdeltaR.Draw()\n#c.SaveAs(\"hGenAntiBdeltaR.png\") \n# hGenAntiBdeltaR.Write()\n# hGenAntiBdeltaR_gencut.Write()\n\n # Fill DimuonMLInput\n\n # Fill DimuonMLTruthOutput with GenTop, GenAntiTop, GenWPlus, GenWMinus \n\n# DimuonMLInputList.append(DimuonMLInput[])\n# DimuonMLTruthOutputList.append(DimuonMLTruthOutput[])\n"
]
| [
[
"numpy.vstack",
"numpy.save",
"numpy.zeros",
"numpy.empty"
]
]
|
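The deltaPhi/deltaR helpers at the top of the record above implement the usual detector-geometry distance; a worked example of the azimuthal wrap-around they handle:

    import math

    phi1, eta1 = 3.0, 1.2
    phi2, eta2 = -3.0, 0.7
    dphi = phi1 - phi2          # 6.0, outside (-pi, pi]
    if dphi > math.pi:
        dphi -= 2 * math.pi     # wraps to about -0.283
    deta = eta1 - eta2          # 0.5
    print(math.sqrt(deta * deta + dphi * dphi))  # ~0.575, not ~6.02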
Beckschen/TransMix | [
"cda9c7657d73dc8d2abfbd92054a8fae448aac19"
]
| [
"transmix.py"
]
| [
"import torch\nimport torch.nn as nn\nimport math\nfrom timm.data.mixup import Mixup, cutmix_bbox_and_lam, one_hot\n\ndef mixup_target(target, num_classes, lam=1., smoothing=0.0, device='cuda', return_y1y2=False):\n off_value = smoothing / num_classes\n on_value = 1. - smoothing + off_value\n y1 = one_hot(target, num_classes, on_value=on_value, off_value=off_value, device=device)\n y2 = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value, device=device)\n if return_y1y2:\n return y1 * lam + y2 * (1. - lam), y1.clone(), y2.clone()\n else:\n return y1 * lam + y2 * (1. - lam)\n\n\nclass Mixup_transmix(Mixup):\n \"\"\" act like Mixup(), but return useful information with method transmix_label()\n Mixup/Cutmix that applies different params to each element or whole batch, where per-batch is set as default\n\n Args:\n mixup_alpha (float): mixup alpha value, mixup is active if > 0.\n cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.\n cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None.\n prob (float): probability of applying mixup or cutmix per batch or element\n switch_prob (float): probability of switching to cutmix instead of mixup when both are active\n mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element)\n correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders\n label_smoothing (float): apply label smoothing to the mixed target tensor\n num_classes (int): number of classes for target\n transmix (bool): enable TransMix or not\n \"\"\"\n def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5,\n mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000):\n self.mixup_alpha = mixup_alpha\n self.cutmix_alpha = cutmix_alpha\n self.cutmix_minmax = cutmix_minmax\n if self.cutmix_minmax is not None:\n assert len(self.cutmix_minmax) == 2\n # force cutmix alpha == 1.0 when minmax active to keep logic simple & safe\n self.cutmix_alpha = 1.0\n self.mix_prob = prob\n self.switch_prob = switch_prob\n self.label_smoothing = label_smoothing\n self.num_classes = num_classes\n self.mode = mode\n self.correct_lam = correct_lam # correct lambda based on clipped area for cutmix\n self.mixup_enabled = True # set to false to disable mixing (intended tp be set by train loop)\n\n def _mix_batch(self, x):\n lam, use_cutmix = self._params_per_batch()\n \n if lam == 1.:\n return 1.\n if use_cutmix:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh] # cutmix for input!\n return lam, (yl, yh, xl, xh) # return box!\n else:\n x_flipped = x.flip(0).mul_(1. 
- lam)\n            x.mul_(lam).add_(x_flipped)\n\n            return lam\n\n\n    def transmix_label(self, target, attn, input_shape, ratio=0.5):\n        \"\"\"Re-weight the mixed label by how much attention falls inside the pasted cutmix box.\n        args:\n            attn (torch.tensor): attention map from the last Transformer with shape (N, hw)\n            target (tuple): (target, y1, y2, box)\n                target (torch.tensor): mixed target by area-ratio\n                y1 (torch.tensor): one-hot label for image A (background image) (N, k)\n                y2 (torch.tensor): one-hot label for image B (cropped patch) (N, k)\n                box (tuple): (yl, yh, xl, xh)\n        returns:\n            target (torch.tensor): with shape (N, K)\n        \"\"\"\n        # the placeholder _ is the area-based target\n        (_, y1, y2, box) = target\n        lam0 = (box[1] - box[0]) * (box[3] - box[2]) / (input_shape[2] * input_shape[3])\n        mask = torch.zeros((input_shape[2], input_shape[3])).cuda()\n        mask[box[0]:box[1], box[2]:box[3]] = 1\n        mask = nn.Upsample(size=int(math.sqrt(attn.shape[1])))(mask.unsqueeze(0).unsqueeze(0)).int()\n        mask = mask.view(1, -1).repeat(len(attn), 1)  # (b, hw)\n        w1, w2 = torch.sum((1 - mask) * attn, dim=1), torch.sum(mask * attn, dim=1)\n        lam1 = w2 / (w1 + w2)  # (b, )\n        lam = (lam0 + lam1) / 2  # scalar + (b,), i.e. ratio=0.5\n        target = y1 * (1. - lam).unsqueeze(1) + y2 * lam.unsqueeze(1)\n        return target\n\n    def __call__(self, x, target):\n        assert len(x) % 2 == 0, 'Batch size should be even when using this'\n        assert self.mode == 'batch', 'Mixup mode is batch by default'\n        lam = self._mix_batch(x)  # tuple or value\n        if isinstance(lam, tuple):\n            lam, box = lam\n            use_cutmix = True\n        else:  # lam is a value\n            use_cutmix = False\n\n        mixed_target, y1, y2 = mixup_target(target, self.num_classes, lam, self.label_smoothing, x.device, return_y1y2=True)\n        if use_cutmix:\n            return x, (mixed_target, y1, y2, box)\n        else:\n            return x, mixed_target"
]
| [
[
"torch.sum",
"torch.zeros"
]
]
|
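A usage sketch for Mixup_transmix above, assuming a CUDA device (transmix_label builds its box mask with .cuda()) and a stand-in attention map of shape (N, hw) in place of the model's real one:

    import torch
    from transmix import Mixup_transmix

    mixup_fn = Mixup_transmix(mixup_alpha=0.8, cutmix_alpha=1.0,
                              label_smoothing=0.1, num_classes=1000)
    x = torch.randn(8, 3, 224, 224).cuda()
    y = torch.randint(0, 1000, (8,)).cuda()

    x, target = mixup_fn(x, y)
    if isinstance(target, tuple):         # cutmix branch: refine with attention
        attn = torch.rand(8, 196).cuda()  # stand-in for a 14x14 attention map
        target = mixup_fn.transmix_label(target, attn, x.shape)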
JasonGUTU/EasySR | [
"6662d03b7499382b45feb481a1139de10d4e9049"
]
| [
"srdata/data_utils.py"
]
| [
"import os\nfrom PIL import Image\nimport torchvision\nimport cv2\nimport numpy as np\nimport torch\nimport pickle\n\nIMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']\n\n\ndef mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef pil_loader(path, mode='RGB'):\n \"\"\"\n open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n :param path: image path\n :return: PIL.Image\n \"\"\"\n assert _is_image_file(path), \"%s is not an image\" % path\n with open(path, 'rb') as f:\n with Image.open(f) as img:\n return img.convert(mode)\n\n\ndef get_paths_from_lmdb(dataroot):\n \"\"\"get image path list from lmdb meta info\"\"\"\n meta_info = pickle.load(open(os.path.join(dataroot, 'meta_info.pkl'), 'rb'))\n paths = meta_info['keys']\n sizes = meta_info['resolution']\n return paths, sizes\n\n\ndef read_img_lmdb(env, key, size, mode='RGB'):\n \"\"\"read image from lmdb with key (w/ and w/o fixed size)\n size: (C, H, W) tuple\"\"\"\n with env.begin(write=False) as txn:\n buf = txn.get(key.encode('ascii'))\n img_flat = np.frombuffer(buf, dtype=np.uint8)\n C, H, W = size\n img = img_flat.reshape(H, W, C)\n img = img.astype(np.float32)\n\n if mode != 'Y':\n img_tensor = torch.from_numpy(np.ascontiguousarray(np.transpose(img, (2, 0, 1)))).float()\n img_tensor = img_tensor[[2, 1, 0], :, :] / 255.\n else:\n img = (np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0) / 255.\n img_tensor = torch.from_numpy(np.ascontiguousarray(np.expand_dims(img, axis=0))).float()\n\n return img_tensor\n\n\ndef cv2_load_as_tensor(path, mode='RGB'):\n \"\"\"read image by cv2 or from lmdb\n return: Numpy float32, HWC, BGR, [0,1]\"\"\"\n img = cv2.imread(path, cv2.IMREAD_UNCHANGED)\n img = img.astype(np.float32)\n if img.ndim == 2:\n img = np.expand_dims(img, axis=2)\n # some images have 4 channels\n if img.shape[2] > 3:\n img = img[:, :, :3]\n\n if mode != 'Y':\n img_tensor = torch.from_numpy(np.ascontiguousarray(np.transpose(img, (2, 0, 1)))).float()\n img_tensor = img_tensor[[2, 1, 0], :, :] / 255.\n else:\n img = (np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0) / 255.\n img_tensor = torch.from_numpy(np.ascontiguousarray(np.expand_dims(img, axis=0))).float()\n\n return img_tensor\n\n\ndef pil_load_as_tensor(path, mode='RGB'):\n \"\"\"\n Load image to tensor\n :param path: image path\n :param mode: 'Y' returns 1 channel tensor, 'RGB' returns 3 channels, 'RGBA' returns 4 channels, 'YCbCr' returns 3 channels\n :return: 3D tensor\n \"\"\"\n if mode != 'Y':\n return PIL2Tensor(pil_loader(path, mode=mode))\n else:\n return PIL2Tensor(pil_loader(path, mode='YCbCr'))[:1]\n\n\ndef PIL2Tensor(pil_image):\n return torchvision.transforms.functional.to_tensor(pil_image)\n\n\ndef Tensor2PIL(tensor_image, mode='RGB'):\n if len(tensor_image.size()) == 4 and tensor_image.size()[0] == 1:\n tensor_image = tensor_image.view(tensor_image.size()[1:])\n return torchvision.transforms.functional.to_pil_image(tensor_image.detach(), mode=mode)\n\n\ndef _is_image_file(filename):\n \"\"\"\n judge if the file is an image file\n :param filename: path\n :return: bool of judgement\n \"\"\"\n filename_lower = filename.lower()\n return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)\n\n\ndef image_files(path):\n \"\"\"\n return list of images in the path\n\n # self.hr_img_path_list = sorted(glob.glob(f'{os.path.join(os.path.abspath(hr_path), \"*\")}'))\n # self.lr_img_path_list = sorted(glob.glob(f'{os.path.join(os.path.abspath(lr_path), \"*\")}'))\n :param path: 
path to Data Folder, absolute path\n :return: 1D list of image files absolute path\n \"\"\"\n abs_path = os.path.abspath(path)\n image_files = os.listdir(abs_path)\n for i in range(len(image_files)):\n if (not os.path.isdir(image_files[i])) and (_is_image_file(image_files[i])):\n image_files[i] = os.path.join(abs_path, image_files[i])\n return sorted(image_files)\n\n\ndef split_to_batches(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n"
]
| [
[
"numpy.frombuffer",
"numpy.dot",
"numpy.expand_dims",
"numpy.transpose"
]
]
|
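A small sketch of the loaders in the record above; the image path is a placeholder:

    from srdata.data_utils import Tensor2PIL, pil_load_as_tensor

    rgb = pil_load_as_tensor('example.png', mode='RGB')  # (3, H, W), values in [0, 1]
    luma = pil_load_as_tensor('example.png', mode='Y')   # (1, H, W) luma channel
    Tensor2PIL(rgb).save('roundtrip.png')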
kavehmahdavi/KAVICA | [
"226860d2a6c5b949e3b69e7121d92bc01b6ed3c8"
]
| [
"kavica/feature_selector/feature_analysis.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nFeature selection by leverage of Feature Analysis that include PFA and IFA.\n\n------------------------------------------------------------------------------------------------------------------------\n References:\n - Y. Lu, I. Cohen, XS. Zhou, and Q. Tian, \"Feature selection using principal feature analysis,\" in Proceedings of\n the 15th international conference on Multimedia. ACM, 2007, pp. 301-304.\n------------------------------------------------------------------------------------------------------------------------\n\"\"\"\n# Author: Kaveh Mahdavi <[email protected]>\n# License: BSD 3 clause\n\n# TODO: PFA\n# TODO: IFA\n\nimport argparse\nimport sys\nimport warnings\nimport time\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import scale\nfrom terminaltables import DoubleTable\nfrom kavica.imputation.base import data_structure_Compatibilization\nfrom kavica.distance_measure import euclideanDistance\nfrom sklearn.cluster import KMeans\nimport scipy.cluster.hierarchy as sch\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn import decomposition\nfrom kavica.factor_analysis.factor_rotation import ObliqueRotation\nimport json\n\n__all__ = ['has_fitted',\n 'sort_parja',\n '_centroid',\n '__configoration',\n '_BaseFeatureAnalysis',\n 'PrincipalFeatureAnalysis',\n 'IndependentFeatureAnalysis']\n\n\ndef has_fitted(estimator, attributes, msg=None, all_or_any=any):\n pass\n\n\ndef sort_parja(x, y, order=-1):\n # TODO: parameter check (numpy array)\n index = np.array(x).argsort(kind='quicksort')\n return (np.array(x)[index][::order], np.array(y)[index][::order])\n\n\n# TODO: it is needed to rewrite it with method parameter\ndef _centroid(x, label):\n datafreamX = pd.DataFrame(x)\n datafreamX['label'] = label\n return datafreamX.groupby('label').mean()\n\n\n# read the configuration file for preparing the features\ndef __configoration(config, data):\n # read the configuration file\n with open(config, 'r') as config:\n config_dict = json.load(config)\n\n # Read the data file\n df = pd.read_csv(data)\n\n # config the data set based on configuration information\n df = df[list(config_dict['hardware_counters'].values())] # sub set of features\n df.replace([np.inf, -np.inf], np.nan, inplace=True)\n lastShape = df.shape\n\n # Remove the all zero rows\n df = df[(df.T != 0).any()]\n print(\"The {} row are full null that are eliminated.\".format(lastShape[0] - df.shape[0]))\n lastShape = df.shape\n\n # Remove all NaN columns.\n df = df.ix[:, (pd.notnull(df)).any()]\n print(\"The {} columns are full null that are eliminated.\".format(lastShape[1] - df.shape[1]))\n\n if config_dict['missing_values'] == 'mean':\n df.fillna(df.mean(), inplace=True)\n if config_dict['scale']:\n df = pd.DataFrame(scale(df), index=df.index, columns=df.columns)\n\n print(df.mean(axis=0), df.std(axis=0))\n\n return df\n\n\ndef arguments_parser():\n # set/receive the arguments\n if len(sys.argv) == 1:\n # It is used for testing and developing time.\n arguments = ['config/config_FS_gromacs_64p_INS_CYC.json',\n '../parser/source.csv',\n '-k',\n '2',\n '-m',\n 'IFA'\n ]\n sys.argv.extend(arguments)\n else:\n pass\n\n # parse the arguments\n parser = argparse.ArgumentParser(description='The files that are needed for selecting features most important.')\n parser.add_argument('config', help='A .json configuration file that included the'\n 'thread numbers,hardware counters and etc.')\n parser.add_argument('csvfile', help='A .csv dataset 
file')\n parser.add_argument('-k',\n dest='k',\n default=2,\n action='store',\n type=int,\n help=\"It significances the number of the most important features.\")\n parser.add_argument('-m',\n dest='m',\n default='IFA',\n choices=['IFA', 'PFA'],\n action='store',\n type=str.upper,\n help=\"The feature selection method that is either IFA or PFA.\")\n\n args = parser.parse_args()\n\n if args.k < 2:\n raise ValueError(\"Selected features have to be (=> 2). It is set {}\".format(args.k))\n\n return ({\"configPath\": args.config,\n \"csvPath\": args.csvfile,\n \"k_features\": args.k,\n \"featureSelectionMethod\": args.m})\n\n\n######################################################################\n# Base class\n######################################################################\nclass _BaseFeatureAnalysis(object):\n \"\"\"Initialize the feature analysis.\n\n Parameters\n \"\"\"\n\n def __init__(self, X=None, method=None, k_features=None):\n self.hasFitted = False\n self.originData = X\n self.k_features = k_features\n self.featureScore = {'method': method,\n 'scores': pd.DataFrame(columns=['features', 'subset', 'internal_score'])}\n\n def fit(self, X):\n \"\"\" Check the input data and fit to the model.\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n The training input samples.\n\n Returns\n -------\n self : object\n \"\"\"\n # fixme: it is a duplicated action\n self.originData = data_structure_Compatibilization(data=X,\n header=True,\n index=True)\n # fixme: it is obligatory to make the data standardize, it should move t o data pre-processing\n self.originData = pd.DataFrame(scale(self.originData,\n with_mean=True,\n with_std=True,\n copy=False),\n index=self.originData.index,\n columns=self.originData.columns)\n\n # Initiate the feature rank list that will updated during analysis\n self.featureScore['scores']['features'] = np.array(self.originData.columns.tolist())\n self._check_params(X)\n self.hasFitted = True\n return self\n\n def _sorted_features(self):\n return self.featureScore['scores'].sort_values(['subset', 'internal_score'],\n ascending=[True, True])\n\n # TODO: rewrite\n def _feature_score_table(self):\n sortedFeatureScore = np.array(self._sorted_features())\n table_data = [\n ['Feature', 'Subset', 'Internal_rank']\n ]\n for featurItem in sortedFeatureScore:\n table_data.append(featurItem.tolist())\n\n table = DoubleTable(table_data,\n title='{}'.format(str.upper(self.featureScore['method'])))\n table.justify_columns[2] = 'center'\n return table\n\n def _check_params(self, X):\n pass\n\n\n######################################################################\n# Feature analysis methods\n######################################################################\nclass PrincipalFeatureAnalysis(_BaseFeatureAnalysis):\n \"\"\" Split the features to a k subset and applies the feature ranking inside any subset.\n Objective function:\n Min\n Parameters:\n ----------\n Attributes:\n ----------\n Examples:\n --------\n See also:\n https://papers.nips.cc/paper/laplacian-score-for-feature-selection.pdf\n \"\"\"\n\n def __init__(self, X=None, k=None):\n super(PrincipalFeatureAnalysis, self).__init__(X, 'PFA', k)\n\n def __centroid_predefining(self, x, dendrogram=False):\n \"predefining the centroid for stabilizing the kmean.\"\n if dendrogram:\n # create dendrogram\n sch.dendrogram(sch.linkage(x, method='ward'))\n\n hc = AgglomerativeClustering(n_clusters=self.k_features, affinity='euclidean', linkage='ward')\n labels = hc.fit_predict(x)\n return _centroid(x, 
labels)\n\n # TODO: Wighted ranking the feature should be implemented\n def _rank_features(self, X=None, dendrogram=False):\n if X is not None:\n self.fit(X)\n elif self.hasFitted:\n pass\n else:\n raise ValueError('The model has not fitted and the X is None')\n\n eigenValues, eigenVectors = np.linalg.eigh(self.originData.cov())\n predefinedCentroids = self.__centroid_predefining(eigenVectors)\n\n # Do the clustering on rows that are the features.\n featureClustering = KMeans(n_clusters=self.k_features,\n max_iter=300,\n algorithm='auto',\n precompute_distances='auto',\n init=predefinedCentroids).fit(eigenVectors)\n featureSubstes = featureClustering.predict(eigenVectors)\n featureSubstesCentroid = featureClustering.cluster_centers_\n self.featureScore['scores']['subset'] = featureSubstes\n for index, label in enumerate(featureSubstes):\n self.featureScore['scores']['internal_score'][index] = euclideanDistance(eigenVectors[index, :],\n featureSubstesCentroid[label, :])\n\n def _check_params(self, X):\n pass\n\n\nclass IndependentFeatureAnalysis(_BaseFeatureAnalysis):\n \"\"\" Split the features to a k subset and applies the feature ranking inside any subset.\n Objective function:\n Min\n Parameters:\n ----------\n Attributes:\n ----------\n Examples:\n --------\n See also:\n https://papers.nips.cc/paper/laplacian-score-for-feature-selection.pdf\n \"\"\"\n\n def __init__(self, X=None, k=None):\n super(IndependentFeatureAnalysis, self).__init__(X, 'IFA', k)\n\n def __centroid_predefining(self, x, dendrogram=False):\n \"predefining the centroid for stabilizing the kmean.\"\n if dendrogram:\n # create dendrogram\n sch.dendrogram(sch.linkage(x, method='ward'))\n\n hc = AgglomerativeClustering(n_clusters=self.k_features, affinity='euclidean', linkage='ward')\n labels = hc.fit_predict(x)\n return _centroid(x, labels)\n\n # TODO: Insert the deprogram conditional\n # TODO: import the promax in to the method\n def _rank_features(self, X=None, dendrogram=False, rotation='promax'):\n if X is not None:\n self.fit(X)\n elif self.hasFitted:\n pass\n else:\n raise ValueError('The model has not fitted and the X is None')\n try:\n # TODO: the columns with all zero value have to be eliminated.\n # TODO: it is the problem of whiten=True.\n icaModel = decomposition.FastICA(whiten=True, random_state=1).fit(self.originData)\n except:\n warnings.warn(\"ICA is forced to run without whitening.\", UserWarning)\n icaModel = decomposition.FastICA(whiten=False, random_state=1).fit(self.originData)\n icaModel = decomposition.FastICA(whiten=False, random_state=1).fit(self.originData)\n finally:\n # The transpose of ICA components are used because the output of ICA is(n_component,n_features)\n independentComponents = icaModel.components_\n\n # the rotation that amplified the load of important component in any feature\n # The row is the components and the columns are the features\n if rotation == 'promax':\n promaxRotation = ObliqueRotation('promax')\n promaxRotation.fit(independentComponents)\n rotatedIndependentComponents = promaxRotation.oblique_rotate()\n independentComponents = rotatedIndependentComponents\n\n # The rotated ICA components (n_component,n_features) transpose to the (n_features, n_component)\n independentComponents = independentComponents.T\n predefinedCentroids = self.__centroid_predefining(independentComponents)\n\n # Do the clustering on rows that are the features.\n featureClustering = KMeans(n_clusters=self.k_features,\n max_iter=300,\n algorithm='auto',\n precompute_distances='auto',\n 
init=predefinedCentroids).fit(independentComponents)\n featureSubstes = featureClustering.predict(independentComponents)\n featureSubstesCentroid = featureClustering.cluster_centers_\n self.featureScore['scores']['subset'] = featureSubstes\n\n for index, label in enumerate(featureSubstes):\n self.featureScore['scores']['internal_score'][index] = euclideanDistance(\n independentComponents[index, :],\n featureSubstesCentroid[label, :])\n\n def _check_params(self, X):\n pass\n\n\ndef __test_me():\n # sample dataset:\n '''\n data0 = np.array([(1, 1, 1, 1, 1, 1, 1),\n (2, 2, 2, 2, 2, 2, 2),\n (3, 4, 45, 23, 24, 19, 16),\n (4, 2, 44, 23, 22, 13, 11),\n (5, 2, 4, 3, 2, 1, 1),\n (6, 1, 1, 1, 1, 1, 1),\n (7, 2, 2, 2, 2, 2, 2),\n (8, 2, 45, 23, 24, 13, 16),\n (9, 12, 0, 9, 5, 20, 89),\n (10, 6, 7, 8, 3, 8, 2),\n (11, 8, 7, 43, 12, 56, 1),\n (12, 13, 4, 5, 6, 33, 4),\n (13, 94, 5, 16, 8, 52, 45)])\n data = np.array([(1, 1, 1, 1, 1, 1, 1),\n (2, 2, 2, 2, 1, 2, 2),\n (2, 2, 45, 23, 24, 13, 16),\n (3, 12, 0, 9, 5, 20, 89)])\n data1 = np.array([(\"ind\", \"F1\", \"F2\", \"F3\", \"F4\", \"F5\", \"F6\"),\n (1, 1, 1, 1, 1, 1, 1),\n (2, 2, 2, 4, 2, 7, 2),\n (3, 4, 45, 23, 24, 19, 16),\n (4, 2, 44, 23, 22, 13, 11),\n (5, 2, 4, 3, 2, 1, 1),\n (6, 1, 1, 1, 1, 78, 1),\n (7, 2, 2, 8, 2, 2, 2),\n (8, 2, 45, 23, 24, 13, 16),\n (9, 12, 0, 9, 5, 20, 89),\n (10, 6, 7, 8, 3, 8, 2),\n (11, 8, 7, 43, 12, 56, 1),\n (12, 13, 4, 5, 6, 33, 4),\n (13, 94, 5, 16, 8, 52, 45),\n (14, 2, 3, 4, 3, 5, 300)])\n\n data2 = np.array([(\"ind\", \"F1\", \"F2\", \"F3\", \"F4\", \"F5\", \"F6\"),\n (1, 1, 1, 1, 1, 1, 1),\n (2, 2, 2, 2, 2, 2, 2),\n (3, 2, 4, 3, 2, 1, 1),\n (4, 1, 1, 1, 1, 1, 1),\n (5, 2, 2, 2, 2, 2, 2)])\n\n headers = ['A', 'B', 'C', 'D', 'E', 'F', 'G']\n index = [1, 2, 3, 4]\n df = pd.DataFrame(data0, columns=headers, index=index, dtype=np.float)\n '''\n\n df = __configoration('config/config_lulesh_27p.json', '../parser/source.csv')\n testICA = IndependentFeatureAnalysis(k=2)\n testICA._rank_features(df, dendrogram=True)\n print(testICA._feature_score_table().table)\n\n testPCA = PrincipalFeatureAnalysis(k=2)\n testPCA._rank_features(df, dendrogram=True)\n print(testPCA._feature_score_table().table)\n\n\n# Todo: add dendogram ****\ndef __select_feature():\n start = time.time()\n try:\n args = arguments_parser()\n df = __configoration(args['configPath'], args['csvPath'])\n if args['featureSelectionMethod'] == 'IFA':\n featureSelectionModel = IndependentFeatureAnalysis(k=args['k_features'])\n elif args['featureSelectionMethod'] == 'PFA':\n featureSelectionModel = PrincipalFeatureAnalysis(k=args['k_features'])\n else:\n pass\n\n featureSelectionModel._rank_features(df, dendrogram=True)\n print(featureSelectionModel._feature_score_table().table)\n print(\"\\033[32mThe feature selection process is successfully completed by {} method.\".format(\n featureSelectionModel.featureScore.get(\"method\")))\n except AssertionError as error:\n print(error)\n print(\"\\033[31mThe feature selection proses is failed.\")\n finally:\n duration = time.time() - start\n print('\\033[0mTotal duration is: %.3f' % duration)\n\n\nif __name__ == '__main__':\n # __test_me()\n __select_feature()\n"
]
| [
[
"pandas.read_csv",
"pandas.notnull",
"sklearn.cluster.KMeans",
"sklearn.decomposition.FastICA",
"pandas.DataFrame",
"sklearn.preprocessing.scale",
"scipy.cluster.hierarchy.linkage",
"sklearn.cluster.AgglomerativeClustering",
"numpy.array"
]
]
|
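The kavica feature-analysis file above implements Lu et al.'s Principal Feature Analysis: eigendecompose the covariance matrix, k-means-cluster the rows of the eigenvector matrix, and rank each feature by its distance to its cluster centroid. A minimal sketch of that idea, assuming standardized input; `pfa_rank` and its arguments are illustrative names, not part of the file:

import numpy as np
from sklearn.cluster import KMeans

def pfa_rank(X, k=2):
    # rows of `vecs` index features; columns are principal directions
    _, vecs = np.linalg.eigh(np.cov(X, rowvar=False))
    km = KMeans(n_clusters=k, n_init=10, random_state=0).fit(vecs)
    labels = km.predict(vecs)
    # smaller distance to the subset centroid = more representative feature
    dists = np.linalg.norm(vecs - km.cluster_centers_[labels], axis=1)
    return labels, dists

subset, score = pfa_rank(np.random.rand(100, 6), k=2)

The file stabilizes k-means by seeding it with Ward-linkage centroids; the fixed `random_state` above is a simpler stand-in for that step.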
scottcohn97/labor_empirical_pset | [
"81785b834e6c2001d525dc1b9d7da8675dd281bd"
]
| [
"ipums_clean.py"
]
| [
"import pandas as pd\nimport numpy as np\n\n# load data\nipums = pd.read_csv(\"data/ipums_FL_raw.csv\")\n\n# Map the lowering function to all column names\nipums.columns = map(str.lower, ipums.columns)\n\n# recode sex\nipums['sex'] = ipums['sex'].map(lambda x: \"male\" if x == 1 else \"female\")\nipums['female'] = ipums['sex'].map(lambda x: 0 if x == \"male\" else 1)\n\n# add age-squared\nipums = ipums.assign(age_sq = ipums['age']**2)\n\n# rename hispand (detailed)\nipums = ipums.rename(columns={\"hispand\": \"hispan_detailed\"})\n\n# dummy for hispan\nipums['hispan_d'] = ipums['hispan'].map(lambda x: 1 if x != 0 else 0)\n\n# dummy for Black\nipums['black_d'] = ipums['race'].map(lambda x: 1 if x == 2 else 0)\n\n# dummy for Asian\n #---\n #4: Chinese\n #5: Japanese\n #6: Other Asian or Pacific Islander\n #---\nipums['asian_d'] = ipums['race'].map(lambda x: 1 if x in [4, 5, 6] else 0)\n\n# dummy for each educational attainment categories\n #---\n #(i) Completed less than 12 years of schooling\n #(ii) Completed exactly 12 years of schooling\n #(iii) Completed 1-2 years of college\n #(iv) Completed exactly 4 years of college\n #(v) Completed 5+ years of college\n #---\n\nipums['educ1_d'] = ipums['educ'].map(lambda x: 1 if x in range(0, 7) else 0)\nipums['educ2_d'] = ipums['educ'].map(lambda x: 1 if x == 6 else 0)\nipums['educ3_d'] = ipums['educ'].map(lambda x: 1 if x in range(7, 9) else 0)\nipums['educ4_d'] = ipums['educ'].map(lambda x: 1 if x == 10 else 0)\nipums['educ5_d'] = ipums['educ'].map(lambda x: 1 if x == 11 else 0)\n\n# dummy for each of 12 major occupation categories\n # https://usa.ipums.org/usa/volii/occ2018.shtml\n\nipums['occ_manag'] = ipums['occ'].map(lambda x: 1 if x in range(10, 1000) else 0)\nipums['occ_comp_eng'] = ipums['occ'].map(lambda x: 1 if x in range(1000, 2000) else 0)\nipums['occ_edu_leg_art'] = ipums['occ'].map(lambda x: 1 if x in range(2000, 3000) else 0)\nipums['occ_health_tech'] = ipums['occ'].map(lambda x: 1 if x in range(3000, 3600) else 0)\n\nipums['occ_serv'] = ipums['occ'].map(lambda x: 1 if x in range(3600, 4700) else 0)\nipums['occ_sales'] = ipums['occ'].map(lambda x: 1 if x in range(4700, 5000) else 0)\nipums['occ_office'] = ipums['occ'].map(lambda x: 1 if x in range(5000, 6000) else 0)\nipums['occ_farm_fish'] = ipums['occ'].map(lambda x: 1 if x in range(6000, 6200) else 0)\n\nipums['occ_constr'] = ipums['occ'].map(lambda x: 1 if x in range(6200, 7000) else 0)\nipums['occ_maintn'] = ipums['occ'].map(lambda x: 1 if x in range(7000, 7700) else 0)\nipums['occ_prod'] = ipums['occ'].map(lambda x: 1 if x in range(7700, 9000) else 0)\nipums['occ_transport'] = ipums['occ'].map(lambda x: 1 if x in range(9000, 10000) else 0)\n\n# dummy for 'public sector worker'\nipums['psw_d'] = ipums['classwkr'].map(lambda x: 1 if x == 1 else 0)\n\n# dummy for 'wage worker last year'\n #---\n #Equals 1 if individual worked 1 or more weeks last year \n #AND was not self-employed\n #AND was not an unpaid worker\n #\n #Equals 0 otherwise\n #---\n \nipums['wagework_lastyear'] = np.where( (ipums['wkswork2'] > 0) & (ipums['classwkr'] == 2), 1, 0)\n\n# Annual hours worked\n #---\n #Computed as usual hours worked per week multiplied by the midpoint \n #value of the interval for the individual’s weeks worked last year\n #---\n\ndef wkswork_mid(x):\n if x == 1:\n return(0.5*(1 + 13))\n elif x == 2:\n return(0.5*(14 + 26))\n elif x == 3:\n return(0.5*(27 + 39))\n elif x == 4:\n return(0.5*(40 + 47))\n elif x == 5:\n return(0.5*(48 + 49))\n elif x == 6:\n return(0.5*(50 + 52))\n else:\n 
return(None) \n\nipums['annl_hrs_wrkd'] = ipums['uhrswork'] * ipums['wkswork2'].map(lambda x: wkswork_mid(x)) \n\n# Nonlabor income\n #---\n #Total family income minus own wage income\n #---\nipums['nonlabor_inc'] = ipums['ftotinc'] - ipums['incwage']\n\n # Hourly wage\n #---\n #Calculated as total family income last year divided by annual hours worked\n #---\nipums['hourly_wage'] = ipums['ftotinc'] / ipums['annl_hrs_wrkd'] \n\n# Natural logs of annual hours worked, nonlabor income, and hourly wage\nwith np.errstate(invalid='ignore'): \n ipums['log_annl_hrs_wrkd'] = np.log(ipums['annl_hrs_wrkd'].replace(0, np.nan))\n ipums['log_nonlabor_inc'] = np.log(ipums['nonlabor_inc'].replace(0, np.nan))\n ipums['log_hourly_wage'] = np.log(ipums['hourly_wage'].replace(0, np.nan))\n \nipums.to_csv('data/ipums_FL.csv') \n\n\n"
]
| [
[
"numpy.errstate",
"pandas.read_csv",
"numpy.where"
]
]
|
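The IPUMS cleaning script above leans on a few pandas idioms: dummy coding with `.map`, interval-to-midpoint recoding of `wkswork2`, and zero-safe logs via `replace(0, np.nan)`. A toy sketch of the hours-and-log step, assuming the same midpoint table as `wkswork_mid`; the two-column frame is made up for illustration:

import numpy as np
import pandas as pd

df = pd.DataFrame({"uhrswork": [40, 20, 0], "wkswork2": [6, 3, 0]})
# midpoints of the wkswork2 intervals, as computed by wkswork_mid above
midpoints = {1: 7.0, 2: 20.0, 3: 33.0, 4: 43.5, 5: 48.5, 6: 51.0}
df["annl_hrs"] = df["uhrswork"] * df["wkswork2"].map(midpoints)  # NaN for unmapped codes
with np.errstate(invalid="ignore"):
    # zeros become NaN first, so np.log never sees log(0)
    df["log_hrs"] = np.log(df["annl_hrs"].replace(0, np.nan))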
badulion/code-2021-soilnet | [
"2a20f30707a7d9d121c32a18eb72e04895d7b458"
]
| [
"utils/geometric_median.py"
]
| [
"\nimport numpy as np\nfrom scipy.optimize import minimize\nfrom scipy.spatial.distance import cdist\n\n\ndef geometric_median(points, method='auto', options={}):\n \"\"\"\n Calculates the geometric median of an array of points.\n\n method specifies which algorithm to use:\n * 'auto' -- uses a heuristic to pick an algorithm\n * 'minimize' -- scipy.optimize the sum of distances\n * 'weiszfeld' -- Weiszfeld's algorithm\n \"\"\"\n\n points = np.asarray(points)\n\n if len(points.shape) == 1:\n # geometric_median((0, 0)) has too much potential for error.\n # Did the user intend a single 2D point or two scalars?\n # Use np.median if you meant the latter.\n raise ValueError(\"Expected 2D array\")\n\n if method == 'auto':\n if points.shape[1] > 2:\n # weiszfeld tends to converge faster in higher dimensions\n method = 'weiszfeld'\n else:\n method = 'minimize'\n\n return _methods[method](points, options)\n\n\ndef minimize_method(points, options={}):\n \"\"\"\n Geometric median as a convex optimization problem.\n \"\"\"\n\n # objective function\n def aggregate_distance(x):\n return cdist([x], points).sum()\n\n # initial guess: centroid\n centroid = points.mean(axis=0)\n\n optimize_result = minimize(aggregate_distance, centroid, method='COBYLA')\n\n return optimize_result.x\n\n\ndef weiszfeld_method(points, options={}):\n \"\"\"\n Weiszfeld's algorithm as described on Wikipedia.\n \"\"\"\n\n default_options = {'maxiter': 1000, 'tol': 1e-7}\n default_options.update(options)\n options = default_options\n\n def distance_func(x):\n return cdist([x], points)\n\n # initial guess: centroid\n guess = points.mean(axis=0)\n\n iters = 0\n\n while iters < options['maxiter']:\n distances = distance_func(guess).T\n\n # catch divide by zero\n # TODO: Wikipedia cites how to deal with distance 0\n distances = np.where(distances == 0, 1, distances)\n\n guess_next = (points/distances).sum(axis=0) / (1./distances).sum(axis=0)\n\n guess_movement = np.sqrt(((guess - guess_next)**2).sum())\n\n guess = guess_next\n\n if guess_movement <= options['tol']:\n break\n\n iters += 1\n\n return guess\n\n\n_methods = {\n 'minimize': minimize_method,\n 'weiszfeld': weiszfeld_method,\n}\n"
]
| [
[
"numpy.asarray",
"numpy.where",
"scipy.optimize.minimize",
"scipy.spatial.distance.cdist"
]
]
|
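The `weiszfeld_method` above iterates the classic fixed-point update x_{t+1} = (sum_i p_i/d_i) / (sum_i 1/d_i) with d_i = ||x_t - p_i||. A compact restatement under the same divide-by-zero guard; `weiszfeld` here is a simplified stand-in, not the module's API:

import numpy as np

def weiszfeld(points, tol=1e-7, maxiter=1000):
    points = np.asarray(points, dtype=float)
    x = points.mean(axis=0)                      # start from the centroid
    for _ in range(maxiter):
        d = np.linalg.norm(points - x, axis=1)
        d = np.where(d == 0, 1, d)               # same zero-distance guard as above
        x_next = (points / d[:, None]).sum(axis=0) / (1.0 / d).sum()
        if np.linalg.norm(x - x_next) <= tol:
            return x_next
        x = x_next
    return x

print(weiszfeld([[0, 0], [0, 1], [1, 0], [5, 5]]))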
karthiksekaran/crypto-price-prediction | [
"04756eaa7f4d6f71ddffee277c9fcf0c3005a872"
]
| [
"cryptocurrency-prediction.py"
]
| [
"from keras.layers.core import Dense, Activation, Dropout\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers import Bidirectional\nfrom keras.models import Sequential\nfrom tensorflow.contrib.keras.preprocessing.text import Tokenizer\n\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport tensorflow as tf\n\ndef load_data(filename, seq_len):\n\n df = pd.read_csv(filename, dtype=float, header=None).values\n df[df == 0] = 1\n data = df.tolist()\n \n result = []\n for index in range(len(data) - seq_len):\n result.append(data[index: index + seq_len])\n \n #normalize data\n d0 = np.array(result)\n dr = np.zeros_like(d0)\n dr[:,1:,:] = d0[:,1:,:] / d0[:,0:1,:] - 1\n result = dr[:, :, :6]\n\n #to unnormalize it later\n start = int(dr.shape[0]*0.9)\n end = int(dr.shape[0]+1)\n unnormalize_bases = d0[start:end,0:1,0]\n\n #split into train and testing data\n row = round(0.9 * result.shape[0])\n train = result[:int(row), :]\n np.random.shuffle(train)\n x_train = train[:, :-1]\n y_train = train[:, -1]\n y_train = y_train[:,3]\n \n x_test = result[int(row):, :-1]\n y_test = result[int(row):, 49, :]\n y_test = y_test[:,3]\n \n #the day before y_test aka today's price\n y_dayb4 = result[int(row):, 48, :]\n y_dayb4 = y_dayb4[:,3]\n \n return [x_train, y_train, x_test, y_test, unnormalize_bases, y_dayb4]\n\nsequence_length = 50\nlookback = sequence_length - 1\nX_train, y_train, X_test, y_test, unnormalize_bases, y_dayb4 = load_data('crypto_stock_new.csv', sequence_length)\nprint(\"X_train:\", X_train.shape)\nprint(\"y_train:\", y_train.shape)\nprint(\"X_test:\", X_test.shape)\nprint(\"y_test:\", y_test.shape)\n\n\nmodel = Sequential()\n\nmodel.add(Bidirectional(LSTM(lookback,return_sequences=True), input_shape=(lookback,X_train.shape[-1]),))\nmodel.add(Dropout(0.2))\n\nmodel.add(Bidirectional(LSTM((lookback*2), return_sequences=True)))\nmodel.add(Dropout(0.2))\n\nmodel.add(Bidirectional(LSTM(lookback, return_sequences=False)))\n\nmodel.add(Dense(output_dim=1))\nmodel.add(Activation('tanh'))\n\nmodel.compile(loss='mse', optimizer='adam')\nprint(model.summary())\n\n\nstart = time.time()\nmodel.fit(X_train, y_train, batch_size=1024, nb_epoch=10, validation_split=0.05)\nprint('training time : ', time.time() - start)\n\ny_predict = model.predict(X_test)\nreal_y_test = np.zeros_like(y_test)\nreal_y_predict = np.zeros_like(y_predict)\n\nfor i in range(y_test.shape[0]):\n y = y_test[i]\n predict = y_predict[i]\n real_y_test[i] = (y+1)*unnormalize_bases[i]\n real_y_predict[i] = (predict+1)*unnormalize_bases[i]\n \nplt.plot(real_y_predict, color='green')\nplt.plot(real_y_test, color='blue')\nplt.title(\"Bitcoin Price in 95-Day Period, 07/09/2017-10/12/2017\")\nplt.ylabel(\"Price (USD)\")\nplt.xlabel(\"Time (Days)\")\nplt.savefig('price.png', dpi = 600)\nplt.show()\n\n\nprint(\"y_predict:\", y_predict.shape)\ny_dayb4 = np.reshape(y_dayb4, (-1, 1))\nprint(\"y_dayb4:\", y_dayb4.shape)\ny_test = np.reshape(y_test, (-1, 1))\nprint(\"y_test:\", y_test.shape)\ndelta_predict = y_predict - y_dayb4\ndelta_real = y_test - y_dayb4\nplt.plot(delta_predict, color='green')\nplt.plot(delta_real, color='blue')\nplt.title(\"Bitcoin Price % Change in 95-Day Period, 07/09/2017-10/12/2017\")\nplt.ylabel(\"Percent Increase\")\nplt.xlabel(\"Time (Days)\")\nplt.savefig('best_fluctuation.png', dpi = 600)\nplt.show()\n\n\ndelta_predict_1_0 = np.empty(delta_predict.shape)\ndelta_real_1_0 = np.empty(delta_real.shape)\n\nfor i in range(delta_predict.shape[0]):\n if delta_predict[i][0] > 0:\n 
delta_predict_1_0[i][0] = 1\n else:\n delta_predict_1_0[i][0] = 0\n\nfor i in range(delta_real.shape[0]):\n if delta_real[i][0] > 0:\n delta_real_1_0[i][0] = 1\n else:\n delta_real_1_0[i][0] = 0 \n\ntrue_pos = 0\nfalse_pos = 0\ntrue_neg = 0\nfalse_neg = 0\n\nfor i in range(delta_real_1_0.shape[0]):\n real = delta_real_1_0[i][0]\n predicted = delta_predict_1_0[i][0]\n if real == 1:\n if predicted == 1:\n true_pos += 1\n else:\n false_pos += 1\n elif real == 0:\n if predicted == 0:\n true_neg += 1\n else:\n false_neg += 1\n\nprint(\"true_pos:\", true_pos)\nprint(\"true_neg:\", true_neg)\nprint(\"false_pos:\", false_pos)\nprint(\"false_neg:\", false_neg)\n\n#precision = float(true_pos) / (true_pos + false_pos)\n#recall = float(true_pos) / (true_pos + false_neg)\n#F1 = 2.0 / (1/precision + 1/recall)\n\n#print()\n#print(\"precision:\", precision)\n#print(\"recall:\", recall)\n#print(\"F1:\", F1)\n\n#MSE\nfrom sklearn.metrics import mean_squared_error\nprint()\nprint(\"Testing MSE:\", mean_squared_error(y_predict.flatten(), y_test.flatten()))\n\n"
]
| [
[
"pandas.read_csv",
"matplotlib.pyplot.title",
"numpy.reshape",
"matplotlib.pyplot.savefig",
"numpy.random.shuffle",
"matplotlib.pyplot.plot",
"numpy.zeros_like",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.empty",
"matplotlib.pyplot.ylabel"
]
]
|
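The `load_data` function above normalizes each sliding window relative to its first row (`d0[:,1:,:] / d0[:,0:1,:] - 1`) and later inverts predictions with `(y + 1) * base`. A toy round-trip of that scheme, with made-up shapes standing in for the price series:

import numpy as np

prices = np.arange(1.0, 61.0).reshape(-1, 1)          # (60, 1) toy series
seq_len = 10
windows = np.array([prices[i:i + seq_len] for i in range(len(prices) - seq_len)])
norm = windows / windows[:, 0:1, :] - 1               # first row of each window -> 0
bases = windows[:, 0, 0]                              # kept to unnormalize later
pred = norm[:, -1, 0]                                 # stand-in for the model output
recovered = (pred + 1) * bases                        # back on the price scale
assert np.allclose(recovered, windows[:, -1, 0])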
VITA-Group/Ultra-Data-Efficient-GAN-Training | [
"11267b560a3a285582eae40d0bdcba87168f679f"
]
| [
"SNGAN/train_with_masks_less.py"
]
| [
"import cfg\nimport models\nimport datasets\nimport random\nfrom functions import train, validate, LinearLrDecay, load_params, copy_params\nfrom utils.utils import set_log_dir, save_checkpoint, create_logger\nfrom utils.inception_score import _init_inception\nfrom utils.fid_score import create_inception_graph, check_or_download_inception\n\nimport torch\nimport os\n# os.environ['CUDA_VISIBLE_DEVICES'] = '7'\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.utils.prune as prune\nfrom tensorboardX import SummaryWriter\nfrom copy import deepcopy\n\n\ndef pruning_generate(model, state_dict):\n\n parameters_to_prune =[]\n for (name, m) in model.named_modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n m = prune.custom_from_mask(m, name = 'weight', mask = state_dict[name + \".weight_mask\"])\n \ndef main():\n args = cfg.parse_args()\n random.seed(args.random_seed)\n torch.manual_seed(args.random_seed)\n torch.cuda.manual_seed(args.random_seed)\n np.random.seed(args.random_seed)\n torch.backends.cudnn.deterministic = False\n torch.backends.cudnn.benchmark = True\n os.environ['PYTHONHASHSEED'] = str(args.random_seed)\n \n # set tf env\n _init_inception()\n inception_path = check_or_download_inception(None)\n create_inception_graph(inception_path)\n\n # import network\n gen_net = eval('models.'+args.model+'.Generator')(args=args)\n dis_net = eval('models.'+args.model+'.Discriminator')(args=args)\n avg_gen_net = eval('models.'+args.model+'.Generator')(args=args)\n initial_gen_net_weight = torch.load(os.path.join(args.init_path, 'initial_gen_net.pth'), map_location=\"cpu\")\n initial_dis_net_weight = torch.load(os.path.join(args.init_path, 'initial_dis_net.pth'), map_location=\"cpu\")\n \n \n \n gen_net.load_state_dict(initial_gen_net_weight)\n avg_gen_net.load_state_dict(initial_gen_net_weight)\n dis_net.load_state_dict(initial_dis_net_weight)\n \n # set optimizer\n gen_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, gen_net.parameters()),\n args.g_lr, (args.beta1, args.beta2))\n dis_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, dis_net.parameters()),\n args.d_lr, (args.beta1, args.beta2))\n gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0.0, 0, args.max_iter * args.n_critic)\n dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0.0, 0, args.max_iter * args.n_critic)\n\n # set up data_loader\n dataset = datasets.ImageDatasetLess(args)\n train_loader = dataset.train\n\n \n \n # fid stat\n if args.dataset.lower() == 'cifar10':\n fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'\n elif args.dataset.lower() == 'stl10':\n fid_stat = 'fid_stat/fid_stats_stl10_train.npz'\n else:\n raise NotImplementedError('no fid stat for %s' % args.dataset.lower())\n assert os.path.exists(fid_stat)\n\n # epoch number for dis_net\n args.max_epoch = args.max_epoch * args.n_critic\n if args.max_iter:\n args.max_epoch = np.ceil(args.max_iter * args.n_critic / len(train_loader))\n\n # initial\n fixed_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (25, args.latent_dim)))\n start_epoch = 0\n best_fid = 1e4\n\n # set writer\n args.path_helper = set_log_dir('logs', args.exp_name + \"_{}\".format(args.percent))\n logger = create_logger(args.path_helper['log_path'])\n logger.info(args)\n writer_dict = {\n 'writer': SummaryWriter(args.path_helper['log_path']),\n 'train_global_steps': start_epoch * len(train_loader),\n 'valid_global_steps': start_epoch // args.val_freq,\n }\n \n\n \n pruning_generate(gen_net, 
torch.load(os.path.join(args.rewind_path), map_location=\"cpu\")['avg_gen_state_dict'])\n pruning_generate(avg_gen_net, torch.load(os.path.join(args.rewind_path), map_location=\"cpu\")['avg_gen_state_dict'])\n \n gen_net = gen_net.cuda()\n dis_net = dis_net.cuda()\n avg_gen_net = avg_gen_net.cuda()\n gen_avg_param = copy_params(gen_net)\n logger.info(args)\n writer_dict = {\n 'writer': SummaryWriter(args.path_helper['log_path']),\n 'train_global_steps': start_epoch * len(train_loader),\n 'valid_global_steps': start_epoch // args.val_freq,\n }\n\n # train loop\n switch = False\n for epoch in range(int(start_epoch), int(args.max_epoch)):\n \n lr_schedulers = (gen_scheduler, dis_scheduler) if args.lr_decay else None\n train(args, gen_net, dis_net, gen_optimizer, dis_optimizer, gen_avg_param, train_loader, epoch, writer_dict,\n lr_schedulers)\n\n if epoch and epoch % args.val_freq == 0 or epoch == int(args.max_epoch)-1:\n backup_param = copy_params(gen_net)\n load_params(gen_net, gen_avg_param)\n inception_score, fid_score = validate(args, fixed_z, fid_stat, gen_net, writer_dict, epoch)\n logger.info('Inception score: %.4f, FID score: %.4f || @ epoch %d.' % (inception_score, fid_score, epoch))\n load_params(gen_net, backup_param)\n if fid_score < best_fid:\n best_fid = fid_score\n is_best = True\n else:\n is_best = False\n else:\n is_best = False\n\n load_params(avg_gen_net, gen_avg_param)\n save_checkpoint({\n 'epoch': epoch + 1,\n 'model': args.model,\n 'gen_state_dict': gen_net.state_dict(),\n 'dis_state_dict': dis_net.state_dict(),\n 'avg_gen_state_dict': avg_gen_net.state_dict(),\n 'gen_optimizer': gen_optimizer.state_dict(),\n 'dis_optimizer': dis_optimizer.state_dict(),\n 'best_fid': best_fid,\n 'path_helper': args.path_helper,\n 'seed': args.random_seed\n }, is_best, args.path_helper['ckpt_path'])\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.manual_seed",
"numpy.random.normal",
"torch.nn.utils.prune.custom_from_mask"
]
]
|
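`pruning_generate` above re-attaches lottery-ticket masks to every convolutional layer with `prune.custom_from_mask`. A self-contained sketch of that step; the random mask and tiny network are placeholders for the repo's rewind checkpoint:

import torch
import torch.nn as nn
import torch.nn.utils.prune as prune

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 8, 3))
for name, m in net.named_modules():
    if isinstance(m, nn.Conv2d):
        mask = (torch.rand_like(m.weight) > 0.5).float()   # placeholder binary mask
        prune.custom_from_mask(m, name="weight", mask=mask)
# forward passes now compute weight = weight_orig * weight_mask
out = net(torch.randn(1, 3, 32, 32))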
luispedro/SemiBin_benchmark | [
"4b16a7076c803a4c0996d22ef6f8ab9fdbe79a64"
]
| [
"benchmark/gen_kmer.py"
]
| [
"#!/usr/bin/env python\nfrom __future__ import print_function\nimport os\nimport numpy as np\nimport pandas as pd\nfrom itertools import product\nfrom Bio import SeqIO\n# optimized sliding window function from\n# http://stackoverflow.com/a/7636587\nfrom itertools import tee\nfrom collections import Counter, OrderedDict\nimport pandas as p\n\n\ndef window(seq, n):\n els = tee(seq, n)\n for i, el in enumerate(els):\n for _ in range(i):\n next(el, None)\n return zip(*els)\n\n\ndef generate_feature_mapping(kmer_len):\n BASE_COMPLEMENT = {\"A\": \"T\", \"T\": \"A\", \"G\": \"C\", \"C\": \"G\"}\n kmer_hash = {}\n counter = 0\n for kmer in product(\"ATGC\", repeat=kmer_len):\n if kmer not in kmer_hash:\n kmer_hash[kmer] = counter\n rev_compl = tuple([BASE_COMPLEMENT[x] for x in reversed(kmer)])\n kmer_hash[rev_compl] = counter\n counter += 1\n return kmer_hash, counter\n\n\ndef generate_features_from_fasta(fasta_file, length_threshold, kmer_len, outfile):\n kmer_dict, nr_features = generate_feature_mapping(kmer_len)\n\n # Store composition vectors in a dictionary before creating dataframe\n composition_d = OrderedDict()\n contig_lengths = OrderedDict()\n for seq in SeqIO.parse(fasta_file, \"fasta\"):\n seq_len = len(seq)\n if seq_len <= length_threshold:\n continue\n contig_lengths[seq.id] = seq_len\n # Create a list containing all kmers, translated to integers\n kmers = [\n kmer_dict[kmer_tuple]\n for kmer_tuple\n in window(str(seq.seq).upper(), kmer_len)\n if kmer_tuple in kmer_dict\n ]\n kmers.append(nr_features - 1)\n composition_v = np.bincount(np.array(kmers, dtype=np.int64))\n composition_v[-1] -= 1\n composition_d[seq.id] = composition_v\n df = p.DataFrame.from_dict(composition_d, orient='index', dtype=float)\n df.to_csv(outfile)\n\n\nif __name__ == \"__main__\":\n import sys\n\n fasta_file = sys.argv[1]\n length_threshold = int(sys.argv[2])\n kmer_len = int(sys.argv[3])\n output = sys.argv[4]\n outfile = os.path.join(output, 'kmer.csv')\n generate_features_from_fasta(fasta_file, length_threshold, kmer_len, outfile)"
]
| [
[
"numpy.array",
"pandas.DataFrame.from_dict"
]
]
|
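`generate_feature_mapping` above gives a k-mer and its reverse complement one shared index, so `np.bincount` over the index stream yields a strand-independent composition vector. A sketch for k = 2 that uses `minlength` instead of the file's append-then-decrement trick:

import numpy as np
from itertools import product

COMPL = {"A": "T", "T": "A", "G": "C", "C": "G"}
k = 2
kmer_idx, n = {}, 0
for kmer in product("ATGC", repeat=k):
    if kmer not in kmer_idx:
        kmer_idx[kmer] = n
        # reverse complement maps to the same slot
        kmer_idx[tuple(COMPL[b] for b in reversed(kmer))] = n
        n += 1

seq = "ATGCGC"
ids = [kmer_idx[tuple(seq[i:i + k])] for i in range(len(seq) - k + 1)]
composition = np.bincount(ids, minlength=n)   # one slot per canonical 2-mer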
ajgallego/Mask_RCNN-Cleansea | [
"b0f79c0bac2206ada1e4416a4e41e192f60f5852"
]
| [
"test.py"
]
| [
"from audioop import add\nimport os\nimport sys\nfrom numpy import random\nimport math\nimport re\nimport time\nimport numpy as np\nimport cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport json\nimport pandas as pd\nfrom PIL import Image, ImageDraw\nfrom tensorflow.python.framework.versions import VERSION as __version__\nimport tensorflow as tf\nfrom imgaug import augmenters as iaa\nimport warnings\nwarnings.filterwarnings(action='ignore')\nfrom sklearn.metrics import ConfusionMatrixDisplay\nfrom sklearn import svm, datasets\nimport itertools\nimport matplotlib.pyplot as plt\nfrom sklearn import svm, datasets\nfrom sklearn.model_selection import train_test_split\n\n#Cambiamos el Directorio al propio de MASK_RCNN\nROOT_DIR = 'D:/Cleansea/Mask_RCNN-cleansea'\n#ROOT_DIR = '/home/saflex/Projecto_CleanSea/Mask_RCNN/Mask_RCNN-master'\nassert os.path.exists(ROOT_DIR), 'ROOT_DIR does not exist'\n\n# Import mrcnn libraries\nsys.path.append(ROOT_DIR)\nfrom mrcnn.config import Config\nfrom mrcnn import utils\nimport mrcnn.model as modellib\nfrom mrcnn import visualize\nfrom mrcnn.model import log\n\nphysical_devices = tf.config.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\n\n############################################################\n# Configuracion\n############################################################\n\nclass CleanSeaConfig(Config):\n \"\"\"\n Configuracion para el entrenamiento con CleanSea Dataset.\n \"\"\"\n\n # Nombre de la configuracion\n NAME = \"debris\"\n\n # We use a GPU with 12GB memory, which can fit two images.\n # Adjust down if you use a smaller GPU.\n IMAGES_PER_GPU = 1\n\n # Use small images for faster training. Set the limits of the small side\n # the large side, and that determines the image shape.\n IMAGE_MIN_DIM = 512\n IMAGE_MAX_DIM = 512\n\n # Numero de clases + el background\n NUM_CLASSES = 1 + 19 # Cleansea tiene 19 clases\n\n # Salta las detecciones con <50% de seguridad\n DETECTION_MIN_CONFIDENCE = 0.5\n \n\nconfig= CleanSeaConfig()\nconfig.display()\n\ndef get_ax(rows=1, cols=1, size=8):\n \"\"\"Return a Matplotlib Axes array to be used in\n all visualizations in the notebook. 
Provide a\n central point to control graph sizes.\n\n Change the default size attribute to control the size\n of rendered images\n \"\"\"\n _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return ax\n\n# Directorio perteneciente a MASK-RCNN\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n############################################################\n# Dataset\n############################################################\nclass CleanSeaDataset(utils.Dataset):\n def load_data(self, dataset_dir, subset):\n # Train or validation dataset?\n assert subset in [\"train_coco\", \"test_coco\"]\n dataset_dir = os.path.join(dataset_dir, subset)\n print(dataset_dir)\n\n # Cargamos el archivo json\n annotation_json = os.path.join(dataset_dir,\"annotations.json\")\n json_file = open(annotation_json)\n coco_json = json.load(json_file)\n json_file.close()\n print(\"\\nAnotaciones Cargadas\\n\")\n\n # Añadimos los nombres de las clases usando el metodo de utils.Dataset\n source_name = \"coco_like\"\n for category in coco_json['categories']:\n class_id = category['id']\n class_name = category['name']\n if class_id < 1:\n print('Error: Class id for \"{}\" reserved for the background'.format(class_name))\n else:\n self.add_class(source_name, class_id, class_name)\n print(\"Nombres Añadidos \\n\")\n\n # Almacenamos las anotaciones\n annotations = {}\n for annotation in coco_json['annotations']:\n image_id = annotation['image_id']\n if image_id not in annotations:\n annotations[image_id] = []\n annotations[image_id].append(annotation)\n print(\"Anotaciones Almacenadas\\n\")\n\n # Almacenamos las imagenes y las añadimos al dataset\n seen_images = {}\n for image in coco_json['images']:\n image_id = image['id']\n if image_id in seen_images:\n print(\"Warning: Skipping duplicate image id: {}\".format(image))\n else:\n seen_images[image_id] = image\n try:\n image_file_name = image['file_name']\n image_width = image['width']\n image_height = image['height']\n except KeyError as key:\n print(\"Warning: Skipping image (id: {}) with missing key: {}\".format(image_id, key))\n \n image_path = os.path.join(dataset_dir, image_file_name)\n image_annotations = annotations[image_id]\n \n # Añadimos la imagen usando el metodo de utils.Dataset\n self.add_image(\n source=source_name,\n image_id=image_id,\n path=image_path,\n width=image_width,\n height=image_height,\n annotations=image_annotations\n )\n print(\"Imagenes añadidas al Dataset\\n\")\n\n def load_mask(self, image_id):\n \"\"\" Carga la mascara de instancia para la imagen dada\n MaskRCNN espera mascaras en forma de mapa de bits (altura, anchura e instancias)\n Argumentos:\n image_id: El ID de la imagen a la que vamos a cargar la mascara\n Salida:\n masks: Una cadena booleana con estructura (altura, anchya y la cuenta de instancias) con una mascara por instancia\n class_ids: Una cadena de 1 dimension de clase ID de la instancia de la mascara \"\"\"\n image_info = self.image_info[image_id]\n annotations = image_info['annotations']\n instance_masks = []\n class_ids = []\n \n for annotation in annotations:\n class_id = annotation['category_id']\n mask = Image.new('1', (image_info['width'], image_info['height']))\n mask_draw = ImageDraw.ImageDraw(mask, '1')\n for segmentation in annotation['segmentation']:\n mask_draw.polygon(segmentation, fill=1)\n bool_array = np.array(mask) > 0\n instance_masks.append(bool_array)\n class_ids.append(class_id)\n\n mask = np.dstack(instance_masks)\n class_ids = np.array(class_ids, dtype=np.int32)\n return mask, 
class_ids\n\n def image_reference(self, image_id):\n \"\"\"Return the path of the image.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"object\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)\n\n############################################################\n# Matriz de Confusión\n############################################################\ndef confusion_matrix(y_test,y_pred):\n # import some data to play with\n class_names = ['background', 'Can', 'Squared_Can', 'Wood', 'Bottle', 'Plastic_Bag', 'Glove', 'Fishing_Net', 'Tire', 'Packaging_Bag', 'WashingMachine', 'Metal_Chain', 'Rope', 'Towel', 'Plastic_Debris', 'Metal_Debris', 'Pipe', 'Shoe', 'Car_Bumper', 'Basket']\n # Plot non-normalized confusion matrix\n title = \"Normalized confusion matrix\"\n\n disp = ConfusionMatrixDisplay.from_predictions(\n y_true= y_test,\n y_pred= y_pred,\n normalize='true',\n include_values=True,\n cmap=plt.cm.Blues,\n xticks_rotation='vertical',\n values_format='.2f'\n )\n disp.ax_.set_title(title)\n\n print(title)\n print(disp.confusion_matrix)\n\n plt.show()\n\n############################################################\n# Evaluacion\n############################################################\nclass InferenceConfig(CleanSeaConfig):\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n USE_MINI_MASK = False\n\ninference_config = InferenceConfig()\n\n# Recreate the model in inference mode\nmodel = modellib.MaskRCNN(mode=\"inference\", \n config=inference_config,\n model_dir=MODEL_DIR)\n\n# Get path to saved weights\n# Either set a specific path or find last trained weights\nmodel_path = os.path.join(MODEL_DIR, \"mask_rcnn_debris_weights1000DA5Heads.h5\")\n#model_path = model.find_last()\n\n# Load trained weights\nprint(\"Loading weights from \", model_path)\nmodel.load_weights(model_path, by_name=True)\n\n# Training dataset.\ndataset_train = CleanSeaDataset()\nprint(\"Configuracion para dataset_train cargada\\n\")\ndataset_train.load_data(\"D:/Cleansea/cleansea_dataset/CocoFormatDataset\",\"train_coco\")\nprint(\"Dataset Inicializado Correctamente\\n\")\ndataset_train.prepare()\nprint(\"Preparacion del Dataset Completada\\n\")\n\n# Validation dataset\ndataset_test = CleanSeaDataset()\nprint(\"Configuracion para dataset_test cargada\\n\")\ndataset_test.load_data(\"D:/Cleansea/cleansea_dataset/CocoFormatDataset\", \"test_coco\")\nprint(\"Dataset Inicializado Correctamente\\n\")\ndataset_test.prepare()\nprint(\"Preparacion del Dataset Completada\\n\")\n\n#Configuramos el path para los archivos .json\nJSON_PATH = \"D:/Cleansea/cleansea_dataset/Dataset/test/json\"\n\n#Realizamos una lectura de todos los json y extraemos el toppic 'labels' para almacenarla en una variable con todos los labels de todas las imagenes.\nnlabels=[]\nimg_names= []\n\n#Recorremos el folder donde se almacenan los .json\nfor file_name in [file for file in os.listdir(JSON_PATH)]:\n with open(JSON_PATH + \"/\" + file_name) as json_file:\n content= json.load(json_file)\n #Almacenamos con que imagen va relacionado\n jpegname= content['imagePath']\n #Almacenamos el numero de poligonos que se encuentran dentro de dicho .json\n nshapes= len(content['shapes'])\n #Recogemos los labels de cada uno de los poligonos anteriores\n for topic in range(nshapes):\n label=content['shapes'][topic]['label']\n #Añadimos cada label a la lista de labels (excepto las clases con los labels Metal_Chain y WashingMachine ya que no tienen las muestras minimas para poder separarlas) y el path de todas las imagenes\n if 
label != 'Metal_Chain' and label != 'WashingMachine':\n nlabels.append(label)\n\n#Mostramos todos los labels e imagenes que hemos analizado\n#print('Stored Labels:', nlabels)\n\nclass_names=np.array(nlabels)\nimg_names=np.array(img_names)\n\n############################################################\n# Deteccion Deseada vs Obtenida\n############################################################\n# Test on a random training image\nimage_id = 138\nprint(f\"Image {image_id} to process...\")\noriginal_image, image_meta, gt_class_id, gt_bbox, gt_mask =\\\n modellib.load_image_gt(dataset_test, inference_config, \n image_id)\n\nlog(\"original_image\", original_image)\nlog(\"image_meta\", image_meta)\nlog(\"gt_class_id\", gt_class_id)\nlog(\"gt_bbox\", gt_bbox)\nlog(\"gt_mask\", gt_mask)\nvisualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id, \n dataset_train.class_names, figsize=(8, 8))\nplt.show()\n# Resultados de la deteccion procesada por el modelo\nprint(\"Detection done by trained model...\")\nresults = model.detect([original_image], verbose=1)\nr = results[0]\nvisualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'], \n dataset_test.class_names, r['scores'], ax=get_ax(),figsize=(8,8))\nplt.show()\n############################################################\n# Curva de Precision-Recall\n############################################################\n# Draw precision-recall curve\nprint(\"Precision-recall curve\")\nAP, precisions, recalls, overlaps = utils.compute_ap(gt_bbox, gt_class_id, gt_mask,\n r['rois'], r['class_ids'], r['scores'], r['masks'])\nvisualize.plot_precision_recall(AP, precisions, recalls)\nplt.show()\n############################################################\n# Precision del Modelo\n############################################################\nprint(\"Calculating mAP...\")\n\n#ground-truth and predictions lists\ngt_tot = np.array([])\npred_tot = np.array([])\n#mAP list\nmAP_ = []\ncompare_images = []\n\n\"\"\"\n# Comparacion con estudios anteriores (Se escogen determinadas clases)\nfor image_id in dataset_test.image_ids:\n image, image_meta, gt_class_id, gt_bbox, gt_mask =\\\n modellib.load_image_gt(dataset_test, inference_config, image_id)\n classes = gt_class_id\n added = False\n for object in classes:\n if object == 1 and added==False:\n compare_images.append(image_id)\n added = True\n #elif object == 3 and added==False:\n # compare_images.append(image_id)\n # added = True\n elif object == 4 and added==False:\n compare_images.append(image_id)\n added = True\n elif object == 5 and added==False:\n compare_images.append(image_id)\n added = True\n elif object == 7 and added==False:\n compare_images.append(image_id)\n added = True\n elif object == 9 and added==False:\n compare_images.append(image_id)\n added = True\n #elif object == 11 and added==False:\n # compare_images.append(image_id)\n # added = True\n elif object == 12 and added==False:\n compare_images.append(image_id)\n added = True\n elif object == 14 and added==False:\n compare_images.append(image_id)\n added = True\n #elif object == 15 and added==False:\n # compare_images.append(image_id)\n # added = True\n elif object == 16 and added==False:\n compare_images.append(image_id)\n added = True\n elif object == 17 and added==False:\n compare_images.append(image_id)\n added = True\nprint(compare_images)\n\"\"\"\n\n#compute gt_tot, pred_tot and mAP for each image in the test dataset\nfor image_id in dataset_test.image_ids:\n image, image_meta, gt_class_id, gt_bbox, gt_mask 
=\\\n modellib.load_image_gt(dataset_test, inference_config, image_id)\n info = dataset_test.image_info[image_id]\n\n # Run the model\n results = model.detect([image], verbose=1)\n r = results[0]\n \n #compute gt_tot and pred_tot\n gt, pred = utils.gt_pred_lists(gt_class_id, gt_bbox, r['class_ids'], r['rois'])\n gt_tot = np.append(gt_tot, gt)\n pred_tot = np.append(pred_tot, pred)\n \n #precision_, recall_, AP_ \n AP_, precision_, recall_, overlap_ = utils.compute_ap(gt_bbox, gt_class_id, gt_mask,\n r['rois'], r['class_ids'], r['scores'], r['masks'])\n \n mAP_.append(AP_)\n\ngt_tot=gt_tot.astype(int)\npred_tot=pred_tot.astype(int)\nprint(f\"Test Dataset: {dataset_test.class_names}\")\n#print(\"ground truth list: \",gt_tot)\n#print(\"predicted list: \",pred_tot)\n\nnorm_detections = []\nnorm_gt = []\nfor i in gt_tot:\n norm_gt.append(dataset_test.class_names[i])\nfor i in pred_tot:\n norm_detections.append(dataset_test.class_names[i])\n\n#print(f\"Filtered GT: {norm_gt}\")\n#print(f\"Filtered Detections: {norm_detections}\")\n#print(f\"Accuracy list {mAP_}\")\n\nprint(\"mAP: \", np.mean(mAP_))\n\n#save the vectors of gt and pred\nsave_dir = \"output\"\ngt_pred_tot_json = {\"gt_tot\" : gt_tot, \"pred_tot\" : pred_tot}\ndf = pd.DataFrame(gt_pred_tot_json)\nif not os.path.exists(save_dir):\n os.makedirs(save_dir)\ndf.to_json(os.path.join(save_dir,\"gt_pred_test.json\"))\n\n############################################################\n# Matriz de Confusion\n############################################################\n# Grid of ground truth objects and their predictions\nprint(\"Confusion Matrix\")\nconfusion_matrix(norm_gt,norm_detections,)"
]
| [
[
"tensorflow.config.experimental.set_memory_growth",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.dstack",
"numpy.append",
"tensorflow.config.list_physical_devices",
"numpy.mean",
"sklearn.metrics.ConfusionMatrixDisplay.from_predictions",
"numpy.array",
"matplotlib.pyplot.show"
]
]
|
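The evaluation tail above aggregates per-image `compute_ap` results into a mean AP and feeds the flattened ground-truth/prediction label lists to `ConfusionMatrixDisplay.from_predictions`. A toy version of that reporting step, with invented labels and AP values in place of the detector outputs:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay

gt = ["Can", "Bottle", "Rope", "Can", "Tire"]
pred = ["Can", "Bottle", "Can", "Can", "Tire"]
mAP_ = [0.8, 0.6, 0.9]                        # per-image APs from compute_ap
print("mAP:", np.mean(mAP_))
ConfusionMatrixDisplay.from_predictions(gt, pred, normalize="true",
                                        xticks_rotation="vertical",
                                        values_format=".2f")
plt.show()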
douglasrizzo/ELF | [
"1f790173095cd910976d9f651b80beb872ec5d12"
]
| [
"rlpytorch/methods/policy_gradient.py"
]
| [
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport math\nfrom ..args_provider import ArgsProvider\nfrom .utils import *\n\nclass PolicyGradient:\n def __init__(self):\n '''Initialization for arguments.\n Accepted arguments:\n\n ``entropy_ratio``: The entropy ratio we put on PolicyGradient\n\n ``grad_clip_norm``: Gradient norm clipping\n\n ``min_prob``: Minimal probability used in training\n\n ``ratio_clamp``: importance sampling coefficient clamp\n\n ``policy_action_nodes``: ;separated string that specify policy_action nodes.\n '''\n self.args = ArgsProvider(\n call_from = self,\n define_args = [\n (\"entropy_ratio\", dict(type=float, help=\"The entropy ratio we put on PG\", default=0.01)),\n (\"grad_clip_norm\", dict(type=float, help=\"Gradient norm clipping\", default=None)),\n (\"min_prob\", dict(type=float, help=\"Minimal probability used in training\", default=1e-6)),\n (\"ratio_clamp\", 10),\n (\"policy_action_nodes\", dict(type=str, help=\";separated string that specify policy_action nodes.\", default=\"pi,a\"))\n ],\n on_get_args = self._init,\n )\n\n def _init(self, args):\n '''Initialize policy loss to be an ``nn.NLLLoss`` and parse ``policy_action_nodes``'''\n self.policy_loss = nn.NLLLoss().cuda()\n self.policy_action_nodes = []\n for node in args.policy_action_nodes.split(\";\"):\n policy, action = node.split(\",\")\n self.policy_action_nodes.append((policy, action))\n\n def _compute_one_policy_entropy_err(self, pi, a):\n '''Compute policy error and entropy error for one. Pass in ``args.min_prob`` to avoid ``Nan`` in logrithms.\n\n Returns:\n dict of\n ``logpi``: log policy\n ``policy_err``: polict error\n ``entropy_err``: entropy error\n '''\n batchsize = a.size(0)\n\n # Add normalization constant\n logpi = (pi + self.args.min_prob).log()\n # TODO Seems that logpi.clone() won't create a few hook list.\n # See https://github.com/pytorch/pytorch/issues/2601\n logpi2 = (pi + self.args.min_prob).log()\n\n # Get policy. N * #num_actions\n policy_err = self.policy_loss(logpi, a)\n entropy_err = (logpi2 * pi).sum() / batchsize\n return dict(logpi=logpi, policy_err=policy_err, entropy_err=entropy_err)\n\n def _compute_policy_entropy_err(self, pi, a):\n '''Compute policy error and entropy error for a batch. Pass in ``args.min_prob`` to avoid ``Nan`` in logrithms.\n\n Returns:\n dict of\n ``logpi``: log policy\n ``policy_err``: polict error\n ``entropy_err``: entropy error\n '''\n args = self.args\n\n errs = { }\n if isinstance(pi, list):\n # Action map, and we need compute the error one by one.\n for i, pix in enumerate(pi):\n for j, pixy in enumerate(pix):\n errs = accumulate(errs, self._compute_one_policy_entropy_err(pixy, a[:,i,j], args.min_prob))\n else:\n errs = self._compute_one_policy_entropy_err(pi, a)\n\n return errs\n\n def _reg_backward(self, v, pg_weights):\n ''' Register the backward hook. 
Clip the gradient if necessary.'''\n grad_clip_norm = getattr(self.args, \"grad_clip_norm\", None)\n def bw_hook(grad_in):\n # this works only on pytorch 0.2.0\n grad = grad_in.mul(pg_weights.view(-1, 1))\n # import pdb\n # pdb.set_trace()\n if grad_clip_norm is not None:\n average_norm_clip(grad, grad_clip_norm)\n return grad\n v.register_hook(bw_hook)\n\n def feed(self, Q, pi_s, actions, stats, old_pi_s=dict()):\n '''\n One iteration of policy gradient.\n\n pho nabla_w log p_w(a|s) Q + entropy_ratio * nabla H(pi(.|s))\n\n Args:\n Q(tensor): estimated return\n actions(tensor): action\n pi_s(variable): policy\n old_pi_s(tensor, optional): old policy, in order to get importance factor.\n\n If you specify multiple policies, then all the log prob of these policies are added, and their importance factors are multiplied.\n Feed to stats: policy error and nll error\n\n '''\n args = self.args\n batchsize = Q.size(0)\n\n # We need to set it beforehand.\n # Note that the samples we collect might be off-policy, so we need\n # to do importance sampling.\n pg_weights = Q.clone()\n\n policy_err = None\n entropy_err = None\n log_pi_s = []\n\n for pi_node, a_node in self.policy_action_nodes:\n pi = pi_s[pi_node]\n a = actions[a_node].squeeze()\n\n if pi_node in old_pi_s:\n old_pi = old_pi_s[pi_node].squeeze()\n\n # Cap it.\n coeff = torch.clamp(pi.data.div(old_pi), max=args.ratio_clamp).gather(1, a.view(-1, 1)).squeeze()\n pg_weights.mul_(coeff)\n # There is another term (to compensate clamping), but we omit it for now.\n\n # Compute policy gradient error:\n errs = self._compute_policy_entropy_err(pi, Variable(a))\n policy_err = add_err(policy_err, errs[\"policy_err\"])\n entropy_err = add_err(entropy_err, errs[\"entropy_err\"])\n log_pi_s.append(errs[\"logpi\"])\n\n stats[\"nll_\" + pi_node].feed(errs[\"policy_err\"].data[0])\n stats[\"entropy_\" + pi_node].feed(errs[\"entropy_err\"].data[0])\n\n for log_pi in log_pi_s:\n self._reg_backward(log_pi, Variable(pg_weights))\n\n if len(args.policy_action_nodes) > 1:\n stats[\"total_nll\"].feed(policy_err.data[0])\n stats[\"total_entropy\"].feed(entropy_err.data[0])\n\n return policy_err + entropy_err * args.entropy_ratio\n"
]
| [
[
"torch.nn.NLLLoss",
"torch.autograd.Variable"
]
]
|
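`PolicyGradient.feed` above combines an NLL policy term weighted by the return (applied through a backward hook on the log-probabilities) with an entropy penalty scaled by `entropy_ratio`. A minimal sketch that folds the weighting directly into the loss instead of using a hook; shapes and numbers are illustrative:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 3, requires_grad=True)   # batch of 4, 3 actions
actions = torch.tensor([0, 2, 1, 0])
Q = torch.tensor([1.0, -0.5, 0.3, 2.0])          # estimated returns

logpi = F.log_softmax(logits, dim=1)
pi = logpi.exp()
# return-weighted NLL of the taken actions
policy_err = -(Q * logpi.gather(1, actions.view(-1, 1)).squeeze(1)).mean()
# negative entropy, matching the (logpi * pi).sum() / batchsize convention above
entropy_err = (pi * logpi).sum(dim=1).mean()
loss = policy_err + 0.01 * entropy_err
loss.backward()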
philopon/datapane | [
"c440eaf07bd1c1f2de3ff952e0fd8c78d636aa8f"
]
| [
"tests/resources/samples/dp_complex_report.py"
]
| [
"\"\"\"datapane script\"\"\"\nimport altair as alt\nimport pandas as pd\nimport datapane as dp\nfrom bokeh.plotting import figure\nfrom pathlib import Path\nimport folium\nimport plotly.graph_objects as go\nfrom matplotlib.collections import EventCollection\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef gen_df(dim: int = 4) -> pd.DataFrame:\n axis = [i for i in range(0, dim)]\n data = {\"x\": axis, \"y\": axis}\n return pd.DataFrame.from_dict(data)\n\n\nlis = [1, 2, 3]\ndf = gen_df(10000)\n\n# Bokeh\np = figure(title=\"simple line example\", x_axis_label='x', y_axis_label='y')\np.line([1, 2, 3, 4, 5], [6, 7, 2, 4, 5], legend_label=\"Temp.\", line_width=2)\nbokeh_asset = dp.Plot(data=p)\n\n# Folium\nm = folium.Map(\n location=[45.372, -121.6972],\n zoom_start=12,\n tiles='Stamen Terrain'\n)\nfolium.Marker(\n location=[45.3288, -121.6625],\n popup='Mt. Hood Meadows',\n icon=folium.Icon(icon='cloud')\n).add_to(m)\nfolium.Marker(\n location=[45.3311, -121.7113],\n popup='Timberline Lodge',\n icon=folium.Icon(color='green')\n).add_to(m)\nfolium.Marker(\n location=[45.3300, -121.6823],\n popup='Some Other Location',\n icon=folium.Icon(color='red', icon='info-sign')\n).add_to(m)\nfolium_asset = dp.Plot(data=m)\n\n# Plotly\nfig = go.Figure()\nfig.add_trace(\n go.Scatter(\n x=[0, 1, 2, 3, 4, 5],\n y=[1.5, 1, 1.3, 0.7, 0.8, 0.9]\n ))\nfig.add_trace(\n go.Bar(\n x=[0, 1, 2, 3, 4, 5],\n y=[1, 0.5, 0.7, -1.2, 0.3, 0.4]\n ))\nplotly_asset = dp.Plot(data=fig)\n\n# Markdown\nmd_block = dp.Text(text=\"# Test markdown block \\n Test **content**\")\n\n# In-line JSON\nlist_asset = dp.File(data=lis, is_json=True)\n\n# Downloadable file\nfile_asset = dp.File(data=lis)\n\n# In-line image\nimg_asset = dp.File(file=Path(\"./datapane-logo.png\"))\n\n# Vega\nvega_asset = dp.Plot(data=alt.Chart(gen_df()).mark_line().encode(x=\"x\", y=\"y\"))\n\n# Table\ndf_asset = dp.DataTable(df, can_pivot=False)\n\n# Pivot table\npv_asset = dp.DataTable(gen_df(10), can_pivot=True)\n\n# Matplotlib\nnp.random.seed(19680801)\nxdata = np.random.random([2, 10])\nxdata1 = xdata[0, :]\nxdata2 = xdata[1, :]\nxdata1.sort()\nxdata2.sort()\nydata1 = xdata1 ** 2\nydata2 = 1 - xdata2 ** 3\nmpl_fig = plt.figure(figsize=(15, 15))\nax = mpl_fig.add_subplot(1, 1, 1)\nax.plot(xdata1, ydata1, color='tab:blue')\nax.plot(xdata2, ydata2, color='tab:orange')\nxevents1 = EventCollection(xdata1, color='tab:blue', linelength=0.05)\nxevents2 = EventCollection(xdata2, color='tab:orange', linelength=0.05)\nyevents1 = EventCollection(ydata1, color='tab:blue', linelength=0.05,\n orientation='vertical')\nyevents2 = EventCollection(ydata2, color='tab:orange', linelength=0.05,\n orientation='vertical')\nax.add_collection(xevents1)\nax.add_collection(xevents2)\nax.add_collection(yevents1)\nax.add_collection(yevents2)\nax.set_xlim([0, 1])\nax.set_ylim([0, 1])\nax.set_title('line plot with data points')\nmpl_asset = dp.Plot(mpl_fig)\n\n# Report\nreport = dp.Report(\n list_asset,\n df_asset,\n md_block,\n vega_asset,\n pv_asset,\n img_asset,\n file_asset,\n bokeh_asset,\n plotly_asset,\n folium_asset,\n mpl_asset\n)\n\nreport.save(path=\"local_xml_report.html\")\nreport.publish(name=\"xml_report\")\n"
]
| [
[
"numpy.random.random",
"matplotlib.collections.EventCollection",
"numpy.random.seed",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.figure"
]
]
|
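The datapane script above is mostly asset construction; the report itself is just blocks passed to `dp.Report` and saved. A stripped-down version using the same block API as the file (later datapane releases renamed some of these blocks):

import altair as alt
import pandas as pd
import datapane as dp

df = pd.DataFrame({"x": range(4), "y": range(4)})
plot = dp.Plot(alt.Chart(df).mark_line().encode(x="x", y="y"))
table = dp.DataTable(df)
text = dp.Text("# Minimal report")
dp.Report(text, plot, table).save(path="minimal_report.html")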
Ema93sh/pytorch-saliency | [
"419cc0c0665d5764e9a721776615a61835e1be18"
]
| [
"saliency/guided/saliency.py"
]
| [
"import torch\nimport torch.nn as nn\n\nfrom saliency.saliency import Saliency\n\nclass GuidedSaliency(Saliency):\n \"\"\"Class for computing guided saliency\"\"\"\n def __init__(self, model):\n super(GuidedSaliency, self).__init__(model)\n\n\n def guided_relu_hook(self, module, grad_in, grad_out):\n return (torch.clamp(grad_in[0], min=0.0), )\n\n\n def generate_saliency(self, input, target):\n input.requires_grad = True\n\n self.model.zero_grad()\n\n for module in self.model.modules():\n if type(module) == nn.ReLU:\n module.register_backward_hook(self.guided_relu_hook)\n\n output = self.model(input)\n\n grad_outputs = torch.zeros_like(output)\n\n grad_outputs[:, target] = 1\n\n output.backward(gradient = grad_outputs)\n\n input.requires_grad = False\n\n return input.grad.clone()[0]\n"
]
| [
[
"torch.clamp",
"torch.zeros_like"
]
]
|
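`GuidedSaliency` above implements guided backpropagation: a backward hook on every `nn.ReLU` clamps negative incoming gradients, and a one-hot `gradient=` argument to `backward` selects the target class before the input gradient is read off. A self-contained sketch with a toy linear model standing in for the wrapped network:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 3))
for m in model.modules():
    if isinstance(m, nn.ReLU):
        # clamp gradients flowing back through each ReLU, as in guided_relu_hook
        m.register_backward_hook(lambda mod, gin, gout: (torch.clamp(gin[0], min=0.0),))

x = torch.randn(1, 8, requires_grad=True)
out = model(x)
onehot = torch.zeros_like(out)
onehot[:, 1] = 1                      # select target class 1
out.backward(gradient=onehot)
saliency = x.grad.clone()[0]          # only non-negative contributions survive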
tataudat/jax | [
"62862267e416ec4e053cca91a1376f1ac2ad7b72"
]
| [
"jaxlib/cusolver.py"
]
| [
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport functools\nimport operator\n\nimport numpy as np\n\nfrom jaxlib import xla_client\n\ntry:\n from jaxlib import cublas_kernels\n for _name, _value in cublas_kernels.registrations().items():\n xla_client.register_custom_call_target(_name, _value, platform=\"CUDA\")\nexcept ImportError:\n pass\n\ntry:\n from jaxlib import cusolver_kernels\n for _name, _value in cusolver_kernels.registrations().items():\n xla_client.register_custom_call_target(_name, _value, platform=\"CUDA\")\nexcept ImportError:\n pass\n\n_ops = xla_client.ops\n_Shape = xla_client.Shape\n\n# TODO(phawkins): remove after we no longer need to support old jax releases.\ndef _unpack_builder(c):\n # If `c` is a ComputationBuilder object, extracts the underlying XlaBuilder.\n return getattr(c, \"_builder\", c)\n\ndef _real_type(dtype):\n \"\"\"Returns the real equivalent of 'dtype'.\"\"\"\n if dtype == np.float32:\n return np.float32\n elif dtype == np.float64:\n return np.float64\n elif dtype == np.complex64:\n return np.float32\n elif dtype == np.complex128:\n return np.float64\n else:\n raise NotImplementedError(\"Unsupported dtype {}\".format(dtype))\n\n_prod = lambda xs: functools.reduce(operator.mul, xs, 1)\n\ndef trsm(c, a, b, left_side=False, lower=False, trans_a=False, conj_a=False,\n diag=False):\n \"\"\"Batched triangular solve.\n\n XLA implements unbatched triangular solve directly, so we need only implement\n the batched case.\"\"\"\n c = _unpack_builder(c)\n b_shape = c.get_shape(b)\n dtype = b_shape.element_type()\n dims = b_shape.dimensions()\n assert len(dims) >= 2\n m, n = dims[-2:]\n batch_dims = tuple(dims[:-2])\n num_bd = len(batch_dims)\n batch = _prod(batch_dims)\n k = m if left_side else n\n\n a_shape = c.get_shape(a)\n if (batch_dims + (k, k) != a_shape.dimensions() or\n a_shape.element_type() != dtype):\n raise ValueError(\"Argument mismatch for trsm, got {} and {}\".format(\n a_shape, b_shape))\n\n if conj_a and not trans_a:\n raise NotImplementedError(\"Conjugation without transposition not supported\")\n\n lwork, opaque = cublas_kernels.build_trsm_batched_descriptor(\n np.dtype(dtype), batch, m, n, left_side, lower, trans_a, conj_a, diag)\n layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))\n out = _ops.CustomCallWithLayout(\n c, b\"cublas_trsm_batched\",\n operands=(a, b),\n shape_with_layout=_Shape.tuple_shape((\n _Shape.array_shape(dtype, b_shape.dimensions(), layout),\n _Shape.array_shape(np.dtype(np.int8), (lwork,), (0,)),\n _Shape.array_shape(np.dtype(np.int8), (lwork,), (0,)))),\n operand_shapes_with_layout=(\n _Shape.array_shape(dtype, a_shape.dimensions(), layout),\n _Shape.array_shape(dtype, b_shape.dimensions(), layout),\n ),\n opaque=opaque)\n return _ops.GetTupleElement(out, 0)\n\n\ndef potrf(c, a, lower):\n \"\"\"Cholesky decomposition.\"\"\"\n c = _unpack_builder(c)\n a_shape = c.get_shape(a)\n dtype = a_shape.element_type()\n dims = a_shape.dimensions()\n m, n = dims[-2:]\n assert m 
== n\n batch_dims = tuple(dims[:-2])\n num_bd = len(batch_dims)\n batch = _prod(batch_dims)\n\n lwork, opaque = cusolver_kernels.build_potrf_descriptor(\n np.dtype(dtype), lower, batch, n)\n kernel = b\"cusolver_potrf\"\n\n out = _ops.CustomCallWithLayout(\n c, kernel,\n operands=(a,),\n shape_with_layout=_Shape.tuple_shape((\n _Shape.array_shape(\n dtype, batch_dims + (n, n),\n (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),\n _Shape.array_shape(\n np.dtype(np.int32), batch_dims, tuple(range(num_bd - 1, -1, -1))),\n _Shape.array_shape(np.dtype(np.int8), (lwork,), (0,)),\n )),\n operand_shapes_with_layout=(_Shape.array_shape(\n dtype, batch_dims + (n, n),\n (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),),\n opaque=opaque)\n return _ops.GetTupleElement(out, 0), _ops.GetTupleElement(out, 1)\n\n\ndef getrf(c, a):\n \"\"\"LU decomposition.\"\"\"\n c = _unpack_builder(c)\n a_shape = c.get_shape(a)\n dtype = a_shape.element_type()\n dims = a_shape.dimensions()\n assert len(dims) >= 2\n m, n = dims[-2:]\n batch_dims = tuple(dims[:-2])\n num_bd = len(batch_dims)\n batch = _prod(batch_dims)\n\n if batch > 1 and m == n and m // batch <= 128:\n lwork, opaque = cublas_kernels.build_getrf_batched_descriptor(\n np.dtype(dtype), batch, m)\n workspace = _Shape.array_shape(np.dtype(np.int8), (lwork,), (0,))\n kernel = b\"cublas_getrf_batched\"\n else:\n lwork, opaque = cusolver_kernels.build_getrf_descriptor(\n np.dtype(dtype), batch, m, n)\n workspace = _Shape.array_shape(dtype, (lwork,), (0,))\n kernel = b\"cusolver_getrf\"\n\n out = _ops.CustomCallWithLayout(\n c, kernel,\n operands=(a,),\n shape_with_layout=_Shape.tuple_shape((\n _Shape.array_shape(\n dtype, batch_dims + (m, n),\n (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),\n _Shape.array_shape(\n np.dtype(np.int32), batch_dims + (min(m, n),),\n tuple(range(num_bd, -1, -1))),\n _Shape.array_shape(\n np.dtype(np.int32), batch_dims, tuple(range(num_bd - 1, -1, -1))),\n workspace,\n )),\n operand_shapes_with_layout=(_Shape.array_shape(\n dtype, batch_dims + (m, n),\n (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),),\n opaque=opaque)\n return (_ops.GetTupleElement(out, 0), _ops.GetTupleElement(out, 1),\n _ops.GetTupleElement(out, 2))\n\ndef geqrf(c, a):\n \"\"\"QR decomposition.\"\"\"\n c = _unpack_builder(c)\n a_shape = c.get_shape(a)\n dtype = a_shape.element_type()\n dims = a_shape.dimensions()\n assert len(dims) >= 2\n m, n = dims[-2:]\n batch_dims = tuple(dims[:-2])\n num_bd = len(batch_dims)\n batch = _prod(batch_dims)\n\n lwork, opaque = cusolver_kernels.build_geqrf_descriptor(\n np.dtype(dtype), batch, m, n)\n workspace = _Shape.array_shape(dtype, (lwork,), (0,))\n kernel = b\"cusolver_geqrf\"\n\n out = _ops.CustomCallWithLayout(\n c, kernel,\n operands=(a,),\n shape_with_layout=_Shape.tuple_shape((\n _Shape.array_shape(\n dtype, batch_dims + (m, n),\n (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),\n _Shape.array_shape(\n dtype, batch_dims + (min(m, n),),\n tuple(range(num_bd, -1, -1))),\n _Shape.array_shape(\n np.dtype(np.int32), batch_dims, tuple(range(num_bd - 1, -1, -1))),\n workspace,\n )),\n operand_shapes_with_layout=(_Shape.array_shape(\n dtype, batch_dims + (m, n),\n (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),),\n opaque=opaque)\n return (_ops.GetTupleElement(out, 0), _ops.GetTupleElement(out, 1),\n _ops.GetTupleElement(out, 2))\n\ndef orgqr(c, a, tau):\n \"\"\"Product of elementary Householder reflections.\"\"\"\n c = _unpack_builder(c)\n a_shape = c.get_shape(a)\n 
dtype = a_shape.element_type()\n dims = a_shape.dimensions()\n assert len(dims) >= 2\n m, n = dims[-2:]\n batch_dims = tuple(dims[:-2])\n num_bd = len(batch_dims)\n batch = _prod(batch_dims)\n\n tau_dims = c.get_shape(tau).dimensions()\n assert tau_dims[:-1] == dims[:-2]\n k = tau_dims[-1]\n\n lwork, opaque = cusolver_kernels.build_orgqr_descriptor(\n np.dtype(dtype), batch, m, n, k)\n workspace = _Shape.array_shape(dtype, (lwork,), (0,))\n kernel = b\"cusolver_orgqr\"\n\n out = _ops.CustomCallWithLayout(\n c, kernel,\n operands=(a, tau),\n shape_with_layout=_Shape.tuple_shape((\n _Shape.array_shape(\n dtype, batch_dims + (m, n),\n (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),\n _Shape.array_shape(\n np.dtype(np.int32), batch_dims, tuple(range(num_bd - 1, -1, -1))),\n workspace,\n )),\n operand_shapes_with_layout=(\n _Shape.array_shape(\n dtype, batch_dims + (m, n),\n (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),\n _Shape.array_shape(\n dtype, batch_dims + (k,),\n tuple(range(num_bd, -1, -1))),\n ),\n opaque=opaque)\n return (_ops.GetTupleElement(out, 0), _ops.GetTupleElement(out, 1))\n\n\ndef syevd(c, a, lower=False):\n \"\"\"Symmetric (Hermitian) eigendecomposition.\"\"\"\n c = _unpack_builder(c)\n\n a_shape = c.get_shape(a)\n dtype = a_shape.element_type()\n dims = a_shape.dimensions()\n assert len(dims) >= 2\n m, n = dims[-2:]\n assert m == n\n batch_dims = tuple(dims[:-2])\n num_bd = len(batch_dims)\n batch = _prod(batch_dims)\n layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))\n\n if n <= 32:\n kernel = b\"cusolver_syevj\"\n lwork, opaque = cusolver_kernels.build_syevj_descriptor(\n np.dtype(dtype), lower, batch, n)\n else:\n kernel = b\"cusolver_syevd\"\n lwork, opaque = cusolver_kernels.build_syevd_descriptor(\n np.dtype(dtype), lower, batch, n)\n eigvals_type = _real_type(dtype)\n\n out = _ops.CustomCallWithLayout(\n c, kernel,\n operands=(a,),\n shape_with_layout=_Shape.tuple_shape((\n _Shape.array_shape(dtype, dims, layout),\n _Shape.array_shape(\n np.dtype(eigvals_type), batch_dims + (n,),\n tuple(range(num_bd, -1, -1))),\n _Shape.array_shape(\n np.dtype(np.int32), batch_dims,\n tuple(range(num_bd - 1, -1, -1))),\n _Shape.array_shape(dtype, (lwork,), (0,))\n )),\n operand_shapes_with_layout=(\n _Shape.array_shape(dtype, dims, layout),\n ),\n opaque=opaque)\n return (_ops.GetTupleElement(out, 0), _ops.GetTupleElement(out, 1),\n _ops.GetTupleElement(out, 2))\n\n\ndef gesvd(c, a, full_matrices=True, compute_uv=True):\n \"\"\"Singular value decomposition.\"\"\"\n c = _unpack_builder(c)\n\n a_shape = c.get_shape(a)\n dims = a_shape.dimensions()\n dtype = a_shape.element_type()\n assert len(dims) >= 2\n m, n = dims[-2:]\n batch_dims = tuple(dims[:-2])\n num_bd = len(batch_dims)\n b = _prod(batch_dims)\n singular_vals_dtype = np.dtype(_real_type(dtype))\n\n if m < 32 and n < 32:\n lwork, opaque = cusolver_kernels.build_gesvdj_descriptor(\n np.dtype(dtype), b, m, n, compute_uv)\n scalar_layout = tuple(range(num_bd - 1, -1, -1))\n vector_layout = (num_bd,) + scalar_layout\n matrix_layout = (num_bd, num_bd + 1) + scalar_layout\n out = _ops.CustomCallWithLayout(\n c, b\"cusolver_gesvdj\",\n operands=(a,),\n shape_with_layout=_Shape.tuple_shape((\n _Shape.array_shape(dtype, batch_dims + (m, n), matrix_layout),\n _Shape.array_shape(singular_vals_dtype, batch_dims + (min(m, n),),\n vector_layout),\n _Shape.array_shape(dtype, batch_dims + (m, m), matrix_layout),\n _Shape.array_shape(dtype, batch_dims + (n, n), matrix_layout),\n 
_Shape.array_shape(np.dtype(np.int32), batch_dims, scalar_layout),\n _Shape.array_shape(dtype, (lwork,), (0,)),\n )),\n operand_shapes_with_layout=(\n _Shape.array_shape(dtype, batch_dims + (m, n), matrix_layout),\n ),\n opaque=opaque)\n s = _ops.GetTupleElement(out, 1)\n u = _ops.GetTupleElement(out, 2)\n v = _ops.GetTupleElement(out, 3)\n info = _ops.GetTupleElement(out, 4)\n vt = _ops.Transpose(v, tuple(range(num_bd)) + (num_bd + 1, num_bd))\n if np.issubdtype(dtype, np.complexfloating):\n vt = _ops.Conj(vt)\n elif m < n:\n lwork, opaque = cusolver_kernels.build_gesvd_descriptor(\n np.dtype(dtype), b, n, m, compute_uv, full_matrices)\n scalar_layout = tuple(range(num_bd - 1, -1, -1))\n vector_layout = (num_bd,) + scalar_layout\n matrix_layout = (num_bd + 1, num_bd) + scalar_layout\n out = _ops.CustomCallWithLayout(\n c, b\"cusolver_gesvd\",\n operands=(a,),\n shape_with_layout=_Shape.tuple_shape((\n _Shape.array_shape(dtype, batch_dims + (m, n), matrix_layout),\n _Shape.array_shape(singular_vals_dtype, batch_dims + (min(m, n),),\n vector_layout),\n _Shape.array_shape(dtype, batch_dims + (n, n), matrix_layout),\n _Shape.array_shape(dtype, batch_dims + (m, m), matrix_layout),\n _Shape.array_shape(np.dtype(np.int32), batch_dims, scalar_layout),\n _Shape.array_shape(dtype, (lwork,), (0,)),\n )),\n operand_shapes_with_layout=(\n _Shape.array_shape(dtype, batch_dims + (m, n), matrix_layout),\n ),\n opaque=opaque)\n s = _ops.GetTupleElement(out, 1)\n vt = _ops.GetTupleElement(out, 2)\n u = _ops.GetTupleElement(out, 3)\n info = _ops.GetTupleElement(out, 4)\n else:\n lwork, opaque = cusolver_kernels.build_gesvd_descriptor(\n np.dtype(dtype), b, m, n, compute_uv, full_matrices)\n\n scalar_layout = tuple(range(num_bd - 1, -1, -1))\n vector_layout = (num_bd,) + scalar_layout\n matrix_layout = (num_bd, num_bd + 1) + scalar_layout\n out = _ops.CustomCallWithLayout(\n c, b\"cusolver_gesvd\",\n operands=(a,),\n shape_with_layout=_Shape.tuple_shape((\n _Shape.array_shape(dtype, batch_dims + (m, n), matrix_layout),\n _Shape.array_shape(singular_vals_dtype, batch_dims + (min(m, n),),\n vector_layout),\n _Shape.array_shape(dtype, batch_dims + (m, m), matrix_layout),\n _Shape.array_shape(dtype, batch_dims + (n, n), matrix_layout),\n _Shape.array_shape(np.dtype(np.int32), batch_dims, scalar_layout),\n _Shape.array_shape(dtype, (lwork,), (0,)),\n )),\n operand_shapes_with_layout=(\n _Shape.array_shape(dtype, batch_dims + (m, n), matrix_layout),\n ),\n opaque=opaque)\n s = _ops.GetTupleElement(out, 1)\n u = _ops.GetTupleElement(out, 2)\n vt = _ops.GetTupleElement(out, 3)\n info = _ops.GetTupleElement(out, 4)\n if not full_matrices:\n u = _ops.Slice(u, (0,) * len(dims), batch_dims + (m, min(m, n)),\n (1,) * len(dims))\n vt = _ops.Slice(vt, (0,) * len(dims), batch_dims + (min(m, n), n),\n (1,) * len(dims))\n return s, u, vt, info\n"
]
| [
[
"numpy.issubdtype",
"numpy.dtype"
]
]
|
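A note for readers of the row above: every wrapper builds the same minor-to-major layout for batched matrices, with the two matrix dimensions first and the batch dimensions in reverse order. A minimal sketch of that convention, using only what the row's own code defines:

```python
# Sketch of the batched-matrix layout convention used by the wrappers above.
import functools
import operator

_prod = lambda xs: functools.reduce(operator.mul, xs, 1)

def matrix_layout(batch_dims):
    # Matrix dims (num_bd, num_bd + 1) come first, then batch dims reversed.
    num_bd = len(batch_dims)
    return (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))

batch_dims = (2, 3)
print(_prod(batch_dims))          # 6 -> number of matrices in the batch
print(matrix_layout(batch_dims))  # (2, 3, 1, 0)
```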
paulaWesselmann/testing_pydial | [
"bf4fd0c99242e49d67895d92c6cfc3dc31084182"
]
| [
"curiosity_module.py"
]
| [
"###############################################################################\n# idea adapted from:\n# Deepak Pathak, Pulkit Agrawal, Alexei A. Efros, Trevor Darrell\n# University of California, Berkeley\n# Curiosity-driven Exploration by Self-supervised Prediction\n\n# added by Paula\n###############################################################################\n\nimport tensorflow as tf\nimport model_prediction_curiosity as mpc\nimport os\nimport numpy as np\nfrom utils import Settings\n\n\nclass Curious(object):\n def __init__(self):\n tf.reset_default_graph()\n self.learning_rate = 0.001\n self.forward_loss_wt = 0.2\n self.feat_size = 200\n self.num_actions = 16\n self.num_belief_states = 268\n self.layer2 = 200\n\n if Settings.config.has_option(\"eval\", \"feat_size\"):\n self.feat_size = Settings.config.getint(\"eval\", \"feat_size\")\n\n with tf.variable_scope('curiosity', reuse=tf.AUTO_REUSE):\n self.predictor = mpc.StateActionPredictor(self.num_belief_states, self.num_actions,\n feature_size=self.feat_size, layer2=self.layer2)\n\n self.predloss = self.predictor.invloss * (1 - self.forward_loss_wt) + \\\n self.predictor.forwardloss * self.forward_loss_wt\n\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n self.optimize = self.optimizer.minimize(self.predloss)\n # self.optimize = self.optimizer.minimize(self.predictor.forwardloss) # when no feature encoding is used!\n self.cnt = 1\n\n self.sess2 = tf.Session()\n self.sess2.run(tf.global_variables_initializer())\n all_variables = tf.get_collection_ref(tf.GraphKeys.GLOBAL_VARIABLES)\n self.saver = tf.train.Saver(var_list=[v for v in all_variables if \"Variab\" not in v.name and \"beta\" not in v.name])\n\n def training(self, state_vec, prev_state_vec, action_1hot):\n _, predictionloss = self.sess2.run([self.optimize, self.predloss],\n feed_dict={self.predictor.s1: prev_state_vec,\n self.predictor.s2: state_vec,\n self.predictor.asample: action_1hot})\n return predictionloss\n\n def reward(self, s1, s2, asample):\n error = self.sess2.run(self.predictor.forwardloss,\n {self.predictor.s1: [s1], self.predictor.s2: [s2], self.predictor.asample: [asample]})\n return error\n\n def inv_loss(self, s1, s2, asample):\n predloss, invloss = self.sess2.run([self.predloss, self.predictor.invloss],\n {self.predictor.s1: [s1], self.predictor.s2: [s2], self.predictor.asample: [asample]})\n return predloss, invloss\n\n def predictedstate(self, s1, s2, asample):\n pred, orig = self.sess2.run([self.predictor.predstate, self.predictor.origstate],\n {self.predictor.s1: [s1], self.predictor.s2: [s2],\n self.predictor.asample: [asample]})\n return pred, orig\n\n def load_curiosity(self, load_filename):\n self.saver.restore(self.sess2, load_filename)\n print('Curiosity model has successfully loaded.')\n\n def save_ICM(self, save_filename):\n self.saver.save(self.sess2, save_filename)\n print('Curiosity model saved.')"
]
| [
[
"tensorflow.get_collection_ref",
"tensorflow.global_variables_initializer",
"tensorflow.reset_default_graph",
"tensorflow.variable_scope",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.Saver"
]
]
|
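A toy restatement of the loss weighting in the `Curious` class above: the inverse-model and forward-model losses are blended with `forward_loss_wt`, while the forward error alone serves as the intrinsic reward. Plain Python, no TensorFlow required:

```python
# Blend of inverse- and forward-model losses, as in Curious.__init__ above.
forward_loss_wt = 0.2

def combined_pred_loss(invloss, forwardloss, wt=forward_loss_wt):
    return invloss * (1.0 - wt) + forwardloss * wt

print(combined_pred_loss(invloss=0.5, forwardloss=1.0))  # 0.6
# The intrinsic reward is the forward loss by itself (see Curious.reward).
```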
DanMitroshin/tensorflow | [
"74aa353842f1788bdb7506ecceaf6ba99140e165"
]
| [
"tensorflow/python/data/experimental/kernel_tests/serialization/rebatch_dataset_serialization_test.py"
]
| [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for checkpointing the _RebatchDataset.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.data.experimental.ops import distribute\nfrom tensorflow.python.data.kernel_tests import checkpoint_test_base\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.platform import test\n\n\nclass LegacyRebatchDatasetCheckpointTest(\n checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):\n\n @combinations.generate(test_base.default_test_combinations())\n def testCore(self):\n\n def build_dataset(num_elements, batch_size):\n return distribute._LegacyRebatchDataset(\n dataset_ops.Dataset.range(num_elements).batch(\n 4 * batch_size, drop_remainder=True),\n num_replicas=4)\n\n self.run_core_tests(lambda: build_dataset(64, 8), 8)\n\n\nclass RebatchDatasetCheckpointTest(checkpoint_test_base.CheckpointTestBase,\n parameterized.TestCase):\n\n @combinations.generate(test_base.default_test_combinations())\n def testCore(self):\n\n def build_dataset(num_elements, batch_size):\n return distribute._RebatchDataset(\n dataset_ops.Dataset.range(num_elements).batch(\n 2 * batch_size, drop_remainder=True),\n batch_sizes=[batch_size, batch_size])\n\n self.run_core_tests(lambda: build_dataset(64, 8), 8)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
]
| [
[
"tensorflow.python.data.kernel_tests.test_base.default_test_combinations",
"tensorflow.python.platform.test.main",
"tensorflow.python.data.ops.dataset_ops.Dataset.range"
]
]
|
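A rough sketch of what the legacy checkpoint test above exercises: 64 elements batched into global batches of 32 and rebatched into four per-replica batches of 8. It uses the same private `_LegacyRebatchDataset` API as the test, so it should only be expected to run against a matching TensorFlow version:

```python
import tensorflow as tf
from tensorflow.python.data.experimental.ops import distribute

# Global batches of 32, split across 4 replicas into batches of 8 each.
dataset = tf.data.Dataset.range(64).batch(32, drop_remainder=True)
rebatched = distribute._LegacyRebatchDataset(dataset, num_replicas=4)
for batch in rebatched.take(2):
    print(batch.numpy())  # two consecutive batches of 8 elements
```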
smrtnrd/001-BB-DL-ILI | [
"ee62f0351697ce93de809f9796d58890af7f9cda"
]
| [
"src/models/model.3.py"
]
| [
"import os, sys, errno\nimport warnings\n\nfrom math import sqrt\nimport numpy\n\nimport pydot\nimport graphviz\n\n# Take a look at the raw data :\nimport pandas as pd\nfrom pandas import DataFrame\nfrom pandas import read_csv\n\nfrom sklearn import preprocessing\nfrom sklearn.metrics import mean_squared_error\n\nimport matplotlib\n# be able to save images on server\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\n\nimport keras\nfrom keras.layers import Input\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.utils import plot_model\n# be able to save images on server\n# matplotlib.use('Agg')\nimport time\nimport datetime\n\nimport multiprocessing\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' #Hide messy TensorFlow warnings\nwarnings.filterwarnings(\"ignore\") #Hide messy Numpy warnings\n\n\nclass RData:\n def __init__(self, path, n_weeks=1):\n self.path = path\n self.data = {}\n # add raw database\n self.data['raw'] = self.load_data()\n # scale data\n self.scaler = preprocessing.MinMaxScaler()\n self.scale()\n # reframe data\n self.reframe()\n # self.state_list_name = self.data.state.unique()\n self.n_weeks = n_weeks\n self.n_features = int(len(self.data['raw'][0].columns))\n print(\"number of features: {}\".format(self.n_features))\n \n self.split_data()\n #print(self.n_features)\n\n # Return specific data\n def __getitem__(self, index):\n return self.data[index]\n\n # convert series to supervised learning\n @staticmethod\n def series_to_supervised(data, n_in=26, n_out=26, dropnan=True):\n n_vars = 1 if type(data) is list else data.shape[1]\n df = pd.DataFrame(data)\n cols, names = list(), list()\n # input sequence (t-n, ... t-1)\n for i in range(n_in, 0, -1):\n cols.append(df.shift(i))\n names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]\n # forecast sequence (t, t+1, ... 
t+n)\n for i in range(0, n_out):\n cols.append(df.shift(-i))\n if i == 0:\n names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]\n else:\n names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]\n # put it all together\n agg = pd.concat(cols, axis=1)\n agg.columns = names\n \n # drop rows with NaN values\n if dropnan:\n agg.dropna(inplace=True)\n return agg\n \n def load_data(self):\n raw = read_csv(self.path)\n raw = raw.fillna(0)\n # print(raw['0'].head())\n #raw = raw.drop([\"0\"], axis = 1)\n #print(raw.head())\n\n # transform column names\n raw.columns = map(str.lower, raw.columns)\n # raw.rename(columns={'weekend': 'date'}, inplace=True)\n latitudeList = raw.latitude.unique()\n longitudeList = raw.longitude.unique()\n data_list = list()\n cell_label = list()\n for la in latitudeList:\n for lo in longitudeList:\n data = raw[(raw.latitude == la) & (raw.longitude == lo)]\n if(len(data) == 260):\n select = [\n #'date',\n #'year',\n #'month',\n #'week',\n #'week_temp',\n #'week_prcp',\n 'latitude',\n 'longitude',\n 'mean_ili',\n #'ili_activity_label',\n #'ili_activity_group'\n ]\n # One Hot Encoding\n data = pd.get_dummies(data[select])\n # print(data.head(1))\n data_list.append(data)\n cell_label.append('lat {} - long {}'.format(la, lo))\n print(\"The data for latitude {} and longitude {} contains {} rows\".format(\n la, lo, len(data)))\n self.data['cell_labels'] = cell_label \n print(\"There are {} cells in the data\".format(len(data_list)))\n return data_list\n\n # normalize\n def scale(self):\n scaled = list()\n for df in self.data['raw']:\n scaled_df = self.scaler.fit_transform(df)\n scaled_df = pd.DataFrame(scaled_df, columns=df.columns.values)\n scaled.append(scaled_df)\n self.data['scaled'] = scaled\n\n def reframe(self, n_weeks=26):\n # specify the number of lag_weeks\n reframed = list()\n for df in self.data['scaled']:\n # frame as supervised learning\n reframed.append(self.series_to_supervised(df, n_weeks))\n self.data['reframed'] = reframed\n\n # Split the data into train and test sets\n def split_data(self):\n # split into train and test sets\n train_X, train_y = list(), list()\n test_X, test_y = list(), list()\n\n for reframed in self.data['reframed']:\n values = reframed.values\n n_train_weeks = 52 * 4\n train = values[:n_train_weeks, :]\n test = values[n_train_weeks:, :]\n # split into input and outputs\n n_obs = self.n_weeks * self.n_features\n tr_X, tr_y = train[:, :n_obs], train[:, -self.n_features]\n te_X, te_y = test[:, :n_obs], test[:, -self.n_features]\n #print(tr_X.shape, len(tr_X), tr_y.shape)\n # reshape input to be 3D [samples, timesteps, features]\n tr_X = tr_X.reshape((tr_X.shape[0], self.n_weeks, self.n_features))\n te_X = te_X.reshape((te_X.shape[0], self.n_weeks, self.n_features))\n #print(tr_X.shape, tr_y.shape, te_X.shape, te_y.shape)\n train_X.append(tr_X)\n train_y.append(tr_y)\n test_X.append(te_X)\n test_y.append(te_y)\n self.data['train_X'] = train_X\n self.data['train_y'] = train_y\n self.data['test_X'] = test_X\n self.data['test_y'] = test_y\n\n\n# we have a set of data\n# class RSet(RData):\n\n\nclass RModel:\n\n # Class variable\n # data = None\n # Constructor method\n\n def __init__(self, data, features, timesteps, batch_size, n_neurons, n_inputs):\n self.timesteps = timesteps\n self.features = features\n self.batch_size = batch_size\n self.n_neurons = n_neurons\n self.n_inputs = n_inputs\n self.lstmInputs = []\n self.lstmLayers = []\n self.data = data\n self.model = self.create_model() \n\n def fit_lstm(self, nb_epoch=1, region_nb=0, 
model_name=\"model-01-weights.best.hdf5\"):\n train_X = self.data['train_X']\n train_y = self.data['train_y']\n test_X = self.data['test_X']\n test_y = self.data['test_y']\n\n # checkpoint\n models_dir = os.path.join(os.getcwd(), 'models')\n filepath= models_dir + \"/\" + model_name\n checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=0, save_best_only=True, mode='max')\n callbacks_list = [checkpoint]\n\n \n # fit model\n train_rmse, test_rmse = list(), list()\n for i in range(nb_epoch):\n for a in range(len(self.data['raw'])): #train for the different zone\n self.model.fit(\n train_X,\n train_y[a], # label for the targeted state\n validation_data=(\n test_X,\n test_y[a]),\n epochs=1,\n verbose=0,\n shuffle=False,\n batch_size=self.batch_size,\n callbacks=callbacks_list\n )\n self.model.reset_states()\n train_rmse.append(self.evaluate(self.model, train_X, train_y[region_nb]))\n self.model.reset_states()\n test_rmse.append(self.evaluate(self.model, test_X, test_y[region_nb]))\n #train_rmse.append(history.history['loss'][0])\n #test_rmse.append(history.history['val_loss'][0])\n self.model.reset_states()\n history = DataFrame()\n history['train'], history['test'] = train_rmse, test_rmse\n return history\n \n def evaluate(self, model, test_X, test_y):\n scaler = self.data.scaler\n # make a prediction\n yhat = model.predict(test_X)\n #rmse = list()\n for X in test_X : \n X = X.reshape((X.shape[0], X.shape[2])) #4*51 * self.features\n # invert scaling for forecast\n inv_yhat = numpy.concatenate((yhat, X[:, 1:]), axis=1)\n inv_yhat = scaler.inverse_transform(inv_yhat)\n inv_yhat = inv_yhat[:,0]\n # invert scaling for actual\n test_y = test_y.reshape((len(test_y), 1))\n inv_y = numpy.concatenate((test_y, X[:, 1:]), axis=1)\n inv_y = scaler.inverse_transform(inv_y)\n inv_y = inv_y[:,0]\n # calculate RMSE\n # rmse.append(sqrt(mean_squared_error(inv_y, inv_yhat)))\n rmse = sqrt(mean_squared_error(inv_y, inv_yhat))\n #print('RMSE: %.3f' % rmse)\n return rmse\n\n def create_model(self):\n start = time.time()\n for i in range(self.n_inputs):\n inputName = \"{}_input\".format(i)\n\n lstm_input = keras.layers.Input(\n shape=(self.timesteps, self.features),\n name=inputName)\n self.lstmInputs.append(lstm_input)\n\n lstm_layer = LSTM(self.n_neurons,\n return_sequences=False)(self.lstmInputs[i])\n self.lstmLayers.append(lstm_layer)\n\n # combined the output\n output = keras.layers.concatenate(self.lstmLayers)\n output = Dense(1, activation='relu',\n name='wheighthedAverage_output')(output)\n stateInput = self.lstmInputs\n model = keras.models.Model(inputs=stateInput, outputs=[output])\n start = time.time()\n model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])\n print(\"> Compilation Time : \", time.time() - start)\n\n #save model\n reports_dir = os.path.join(os.getcwd(), 'reports','figures')\n d = datetime.datetime.today().strftime(\"%y-%m-%d\")\n directory = os.path.join(reports_dir, 'BB_Model-{}-{}'.format(i,d))\n if not os.path.exists(directory):\n os.makedirs(directory)\n \n filepath_model = directory + '/BB-lstm_model_{}cells-{}.png'.format(self.n_inputs, d)\n plot_model(model, to_file=filepath_model)\n end = time.time()\n return model\n\ndef plot_RMSE(history, title):\n plt.plot(history['train'], color='orange')\n plt.plot(history['test'], color='green')\n plt.title(title)\n plt.ylabel('RMSE')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n \n\n# run a repeated experiment\ndef experiment(repeats, epochs, param, m_name):\n\n # config\n \n 
reports_dir = os.path.join(os.getcwd(), 'reports','figures')\n data = param['data']\n labels = data['cell_labels']\n # run tests\n \n model = RModel(**param)\n fitting_time = list()\n training = DataFrame()\n start = time.time()\n\n histories = dict()\n for i in range(repeats):\n #create a directory for my data\n d = datetime.datetime.today().strftime(\"%y-%m-%d\")\n directory = os.path.join(reports_dir, 'BB_Exp-{}-{}'.format(i,d))\n if not os.path.exists(directory):\n os.makedirs(directory)\n \n print(\"REPEATS : {}\".format(i))\n #for m in range(param['n_inputs']):\n \n rhistory = list()\n for m in range(2):\n start = time.time()\n history = model.fit_lstm(epochs, m, m_name)\n print('{}) Cell:{}, TrainRMSE={}, TestRMSE={}'.format(i, m, history['train'].iloc[-1], history['test'].iloc[-1]))\n fitting_time.append(time.time() - start)\n print('> Training Time: {1}, Coordinate: {0}\\n'.format(labels[m], time.time() - start))\n rhistory.append(history)\n\n #store all the experiments\n histories[i] = rhistory\n\n title = 'Model loss for {}'.format(epochs)\n #plot history data\n for index,history in enumerate(rhistory): \n plot_RMSE(history, title)\n\n filepath = directory + '/{}_exp_{}_epochs_rmse_2010-2014.png'.format(i, epochs)\n print('data stored: {}'.format(filepath))\n plt.savefig(filepath)\n plt.close()\n\n\n end = time.time()\n #print('The experiment ran for {} minutes'.format((end - start)/60))\n\ndef main(): \n # get the data\n data_dir = os.path.join(os.getcwd(), 'reports','figures')\n path = \"/Users/bbuildman/Documents/Developer/GitHub/001-BB-DL-ILI/data/raw/2010-2015_ili_climate.csv\"\n #name of the model\n m_name = \"Exp_1-model-LSTM-BB.hdf5\"\n #longitude + latitude + mean_ili\n data = RData(path)\n #config\n param = {\n 'features': data.n_features,\n 'timesteps': 1,\n 'batch_size': 52,\n 'n_neurons': 1,\n 'n_inputs': len(data['raw']), #I am supposed to have 36\n 'data': data\n }\n #experiment test\n repeats = 2\n epochs = 10\n\n \n experiment(repeats, epochs, param, m_name)\n\nif __name__ == '__main__':\n main()"
]
| [
[
"matplotlib.pyplot.legend",
"pandas.concat",
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.use",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"sklearn.metrics.mean_squared_error",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"pandas.get_dummies",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.ylabel"
]
]
|
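The data-wrangling core of the row above is `series_to_supervised`. Below is a condensed, self-contained restatement with tiny inputs (the defaults are shrunk from the original `n_in=26, n_out=26` so the output stays readable):

```python
import pandas as pd

def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    # Condensed restatement of RData.series_to_supervised above.
    df = pd.DataFrame(data)
    n_vars = df.shape[1]
    cols, names = [], []
    for i in range(n_in, 0, -1):                  # input sequence (t-n .. t-1)
        cols.append(df.shift(i))
        names += ['var%d(t-%d)' % (j + 1, i) for j in range(n_vars)]
    for i in range(n_out):                        # forecast sequence (t .. t+n)
        cols.append(df.shift(-i))
        names += ['var%d(t)' % (j + 1) if i == 0 else 'var%d(t+%d)' % (j + 1, i)
                  for j in range(n_vars)]
    agg = pd.concat(cols, axis=1)
    agg.columns = names
    return agg.dropna() if dropnan else agg

data = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [10, 20, 30, 40]})
framed = series_to_supervised(data)
print(framed.columns.tolist())
# ['var1(t-1)', 'var2(t-1)', 'var1(t)', 'var2(t)']
```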
JiaweiZhao-git/DASNet | [
"6097ba4a75cf928556434a1b7a81fc00e7c7c100"
]
| [
"src/train.py"
]
| [
"#!/usr/bin/python3\n# coding=utf-8\n\nimport sys\nimport datetime\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\nsys.path.insert(0, '../')\nsys.dont_write_bytecode = True\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nimport dataset\nfrom net_MM import DASNet\nfrom apex import amp\n\n\ndef bce_loss(pred, mask):\n bce = F.binary_cross_entropy_with_logits(pred, mask, reduction='none')\n return bce.mean()\n\n\ndef iou_loss(pred, mask):\n pred = torch.sigmoid(pred)\n inter = (pred * mask).sum(dim=(2, 3))\n union = (pred + mask).sum(dim=(2, 3))\n wiou = 1 - (inter + 1) / (union - inter + 1)\n return wiou.mean()\n\n\ndef logMSE_loss(dpred, depth):\n mse = nn.MSELoss()\n dpred = torch.sigmoid(dpred)\n dpred = 1.0 + dpred * 255.0\n depth = 1.0 + depth * 255.0\n dpred = 257.0 - dpred\n depth = 257.0 - depth\n return mse(torch.log(dpred), torch.log(depth))\n\n\ndef dec_loss(pred, mask, dpred, depth):\n dpred = torch.sigmoid(dpred)\n # deeper 255 -> deeper 1\n dpred = 256.0 - dpred * 255.0\n depth = 256.0 - depth * 255.0\n # Control the error window size by kernel_size\n # logDiff = torch.abs(torch.log(dpred) - torch.log(depth))\n logDiff = torch.abs(F.avg_pool2d(torch.log(dpred) - torch.log(depth), kernel_size=7, stride=1, padding=3))\n weit = logDiff / torch.max(logDiff)\n wbce = F.binary_cross_entropy_with_logits(pred, mask, reduction='none')\n wbce = (weit * wbce).sum(dim=(2, 3)) / weit.sum(dim=(2, 3))\n return wbce.mean()\n\n\ndef train(Dataset, Network):\n # dataset\n cfg = Dataset.Config(datapath='../../data/RGBD-TR', savepath='./', mode='train', batch=32, lr=0.05, momen=0.9,\n decay=5e-4, epoch=32)\n data = Dataset.Data(cfg)\n loader = DataLoader(data, collate_fn=data.collate, batch_size=cfg.batch, shuffle=True, pin_memory=True, num_workers=8)\n\n # network\n net = Network(cfg)\n net.train(True)\n net.cuda()\n\n ## parameter\n base, head = [], []\n for name, param in net.named_parameters():\n if 'bkbone.conv1' in name or 'bkbone.bn1' in name:\n print(name)\n elif 'bkbone' in name:\n base.append(param)\n else:\n head.append(param)\n optimizer = torch.optim.SGD([{'params': base}, {'params': head}], lr=cfg.lr, momentum=cfg.momen,\n weight_decay=cfg.decay, nesterov=True)\n net, optimizer = amp.initialize(net, optimizer, opt_level='O1')\n\n for epoch in range(cfg.epoch):\n optimizer.param_groups[0]['lr'] = (1 - abs((epoch + 1) / (cfg.epoch + 1) * 2 - 1)) * cfg.lr * 0.1\n optimizer.param_groups[1]['lr'] = (1 - abs((epoch + 1) / (cfg.epoch + 1) * 2 - 1)) * cfg.lr\n\n for step, (image, mask, depth) in enumerate(loader):\n image, mask, depth = image.float().cuda(), mask.float().cuda(), depth.float().cuda()\n pred, out2h, out3h, out4h, out5h, dpred = net(image)\n\n # sod loss\n loss1b = bce_loss(pred, mask)\n loss1u = iou_loss(pred, mask)\n loss2s_b = bce_loss(out2h, mask)\n loss2s_u = iou_loss(out2h, mask)\n loss3s_b = bce_loss(out3h, mask)\n loss3s_u = iou_loss(out3h, mask)\n loss4s_b = bce_loss(out4h, mask)\n loss4s_u = iou_loss(out4h, mask)\n loss5s_b = bce_loss(out5h, mask)\n loss5s_u = iou_loss(out5h, mask)\n\n # depth correction loss\n loss1h = dec_loss(pred, mask, dpred, depth)\n loss2h = dec_loss(out2h, mask, dpred, depth)\n loss3h = dec_loss(out3h, mask, dpred, depth)\n loss4h = dec_loss(out4h, mask, dpred, depth)\n loss5h = dec_loss(out5h, mask, dpred, depth)\n\n # depth loss\n loss2d = logMSE_loss(dpred, depth)\n\n loss = loss2d + loss1b + loss1u + loss1h \\\n + 0.8 * (loss2s_b + loss2s_u + loss2h) \\\n + 0.6 * 
(loss3s_b + loss3s_u + loss3h) \\\n + 0.4 * (loss4s_b + loss4s_u + loss4h) \\\n + 0.2 * (loss5s_b + loss5s_u + loss5h)\n\n optimizer.zero_grad()\n with amp.scale_loss(loss, optimizer) as scale_loss:\n scale_loss.backward()\n # loss.backward()\n optimizer.step()\n\n if step % 30 == 0:\n print('%s | step:%d/%d | lr=%.6f | loss=%.3f | s=%.3f | u=%.3f | d=%.3f | h=%.3f ' % (\n datetime.datetime.now(), epoch+1, cfg.epoch, optimizer.param_groups[1]['lr'],\n loss.item(), loss1b.item(), loss1u.item(), loss2d.item(), loss1h.item()))\n\n if epoch >= 30:\n torch.save(net.state_dict(), cfg.savepath + '/DASNet-'+str(epoch+1))\n\n\nif __name__ == '__main__':\n train(dataset, DASNet)\n"
]
| [
[
"torch.sigmoid",
"torch.max",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.utils.data.DataLoader",
"torch.log",
"torch.optim.SGD",
"torch.nn.MSELoss"
]
]
|
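A quick stand-alone check of the soft IoU term from `train.py` above on a toy prediction/mask pair (NCHW shapes, as the losses expect). For a near-perfect prediction the loss should be close to zero:

```python
import torch

pred = torch.full((1, 1, 4, 4), 5.0)  # logits strongly predicting "1"
mask = torch.ones(1, 1, 4, 4)

p = torch.sigmoid(pred)
inter = (p * mask).sum(dim=(2, 3))
union = (p + mask).sum(dim=(2, 3))
wiou = 1 - (inter + 1) / (union - inter + 1)
print(wiou.mean().item())  # ~0.006, close to 0 for a near-perfect prediction
```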
goktuggokmen/cnn-registration-fixed | [
"eec6efa355bc5089f3d4909e4af75c27a907d481"
]
| [
"src/demo.py"
]
| [
"from __future__ import print_function\nimport Registration\nimport matplotlib.pyplot as plt\nfrom utils.utils import *\nimport cv2\n\n# designate image path here\nIX_path = '../img/1a.jpg'\nIY_path = '../img/1b.jpg'\n\nIX = cv2.imread(IX_path)\nIY = cv2.imread(IY_path)\n\n#initialize\nreg = Registration.CNN()\n#register\nX, Y, Z = reg.register(IX, IY)\n#generate regsitered image using TPS\nregistered = tps_warp(Y, Z, IY, IX.shape)\ncb = checkboard(IX, registered, 11)\n\nplt.subplot(131)\nplt.title('reference')\nplt.imshow(cv2.cvtColor(IX, cv2.COLOR_BGR2RGB))\nplt.subplot(132)\nplt.title('registered')\nplt.imshow(cv2.cvtColor(registered, cv2.COLOR_BGR2RGB))\nplt.subplot(133)\nplt.title('checkboard')\nplt.imshow(cv2.cvtColor(cb, cv2.COLOR_BGR2RGB))\n\nplt.show()\n\n\n\n\n\n\n \n"
]
| [
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.title"
]
]
|
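The demo above inspects registration quality with a checkerboard composite. The repo's `checkboard` helper is not shown in this row, so the following is an illustrative reimplementation under that assumption (alternating n-by-n tiles of two same-sized images), not the original:

```python
import numpy as np

def checkerboard(img_a, img_b, n=11):
    # Alternate n-by-n tiles of two images of identical shape.
    h, w = img_a.shape[:2]
    ys, xs = np.mgrid[0:h, 0:w]
    tile = (ys // max(h // n, 1) + xs // max(w // n, 1)) % 2
    out = img_a.copy()
    out[tile == 1] = img_b[tile == 1]
    return out

a = np.zeros((44, 44, 3), dtype=np.uint8)
b = np.full((44, 44, 3), 255, dtype=np.uint8)
print(checkerboard(a, b).mean())  # roughly half black, half white
```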
MahmudulAlam/Object-Detection-Using-GPM | [
"52662eb9f8bf98fc8b2282bb8edd409a49e99bf9"
]
| [
"draw.py"
]
| [
"import cv2\nimport pickle\nimport numpy as np\nfrom flag import Flag\nimport tensorflow as tf\nfrom utils_draw import paint\nfrom utils.gaussian import pdf\nimport matplotlib.pyplot as plt\n\nflag = Flag()\n\nwith open('label.txt', 'r') as f:\n classes = f.readlines()\n\nwith open('assets/colors.h5', 'rb') as f:\n colors = pickle.loads(f.read())\n\nwith open('dataset/annotations.h5', 'rb') as f:\n annotations = pickle.loads(f.read())\n\ndirectory = '../COCO/train2017/'\nimage_name = '000000265725.jpg'\n# image_name = '000000000839.jpg'\n# image_name = '000000012166.jpg'\n\nfor annotation in annotations:\n if image_name == annotation[0]:\n image = cv2.imread(directory + image_name)\n height = annotation[1]\n width = annotation[2]\n\n label = np.zeros((flag.y_size, flag.y_size, flag.classes), dtype=np.float32)\n x, y, id_ = None, None, None\n objects = []\n\n for i in range(3, len(annotation)):\n obj = annotation[i]\n id_ = obj['id']\n bbox = obj['bbox']\n\n if id_ not in objects:\n objects.append(id_)\n\n image = paint(img=image, bbox=bbox, cls=id_)\n x = tf.range(0, flag.y_size, dtype=tf.float32)\n y = tf.range(0, flag.y_size, dtype=tf.float32)\n x, y = tf.meshgrid(x, y)\n\n x1 = bbox[0] / flag.x_size * flag.y_size\n y1 = bbox[1] / flag.x_size * flag.y_size\n x2 = bbox[2] / flag.x_size * flag.y_size\n y2 = bbox[3] / flag.x_size * flag.y_size\n\n xc = (x2 + x1) / 2\n yc = (y2 + y1) / 2\n w = x2 - x1\n h = y2 - y1\n z = pdf(x, xc, w / flag.factor) * pdf(y, yc, h / flag.factor)\n label[:, :, id_] = label[:, :, id_] + z\n\n label_max = tf.reduce_max(tf.reduce_max(label, axis=0, keepdims=True), axis=1, keepdims=True)\n label = label / label_max\n\n for id_ in objects:\n # plot label\n category = classes[id_ - 1]\n fig = plt.figure(category)\n ax = plt.axes(projection='3d')\n ax.plot_surface(x, y, label[:, :, id_], cmap='viridis', edgecolor='none')\n ax.set_title(classes[id_ - 1], fontsize=16)\n ax.view_init(elev=45, azim=60)\n ax.invert_xaxis()\n plt.savefig('figure/' + classes[id_ - 1][:-1] + '.jpg')\n\n cv2.imwrite('figure/' + image_name, image)\n cv2.imshow(image_name, image)\n plt.show()\n break\n"
]
| [
[
"tensorflow.reduce_max",
"tensorflow.range",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.axes",
"tensorflow.meshgrid",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
]
|
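`draw.py` above builds each ground-truth map as a separable product of two 1-D Gaussians on a meshgrid. A NumPy sketch of that construction, assuming `utils.gaussian.pdf` behaves like an (unnormalized) 1-D Gaussian density — that helper is not shown in this row, so its exact form is an assumption:

```python
import numpy as np

def pdf(x, mean, std):
    # Assumed form of utils.gaussian.pdf: unnormalized 1-D Gaussian.
    std = np.maximum(std, 1e-6)  # guard against zero-width boxes
    return np.exp(-0.5 * ((x - mean) / std) ** 2)

y_size, factor = 8, 4.0
x = np.arange(y_size, dtype=np.float32)
xx, yy = np.meshgrid(x, x)                  # same convention as tf.meshgrid
xc, yc, w, h = 3.0, 4.0, 4.0, 2.0           # box centre and size in grid units
z = pdf(xx, xc, w / factor) * pdf(yy, yc, h / factor)
print(z.shape, float(z.max()))              # (8, 8) with the peak at (xc, yc)
```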
georgios-ts/toqito | [
"d9379fb267a8e77784b97820c3131522d384f54d"
]
| [
"toqito/matrix_props/is_diagonal.py"
]
| [
"\"\"\"Is matrix a diagonal matrix.\"\"\"\nimport numpy as np\nfrom toqito.matrix_props import is_square\n\n\ndef is_diagonal(mat: np.ndarray) -> bool:\n r\"\"\"\n Determine if a matrix is diagonal [WikDiag]_.\n\n A matrix is diagonal if the matrix is square and if the diagonal of the matrix is non-zero,\n while the off-diagonal elements are all zero.\n\n The following is an example of a 3-by-3 diagonal matrix:\n\n .. math::\n \\begin{equation}\n \\begin{pmatrix}\n 1 & 0 & 0 \\\\\n 0 & 2 & 0 \\\\\n 0 & 0 & 3\n \\end{pmatrix}\n \\end{equation}\n\n This quick implementation is given by Daniel F. from StackOverflow in [SODIA]_.\n\n Examples\n ==========\n\n Consider the following diagonal matrix:\n\n .. math::\n A = \\begin{pmatrix}\n 1 & 0 \\\\\n 0 & 1\n \\end{pmatrix}.\n\n Our function indicates that this is indeed a diagonal matrix:\n\n >>> from toqito.matrix_props import is_diagonal\n >>> import numpy as np\n >>> A = np.array([[1, 0], [0, 1]])\n >>> is_diagonal(A)\n True\n\n Alternatively, the following example matrix\n\n .. math::\n B = \\begin{pmatrix}\n 1 & 2 \\\\\n 3 & 4\n \\end{pmatrix}\n\n is not diagonal, as shown using :code:`toqito`.\n\n >>> from toqito.matrix_props import is_diagonal\n >>> import numpy as np\n >>> B = np.array([[1, 2], [3, 4]])\n >>> is_diagonal(B)\n False\n\n References\n ==========\n .. [WikDiag] Wikipedia: Diagonal matrix\n https://en.wikipedia.org/wiki/Diagonal_matrix\n\n .. [SODIA] StackOverflow post\n https://stackoverflow.com/questions/43884189/\n\n :param mat: The matrix to check.\n :return: Returns True if the matrix is diagonal and False otherwise.\n \"\"\"\n if not is_square(mat):\n return False\n i, j = mat.shape\n test = mat.reshape(-1)[:-1].reshape(i - 1, j + 1)\n return ~np.any(test[:, 1:])\n"
]
| [
[
"numpy.any"
]
]
|
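The reshape trick in `is_diagonal` deserves a worked example: dropping the last element of the flattened n-by-n matrix and reshaping to (n-1, n+1) pushes every remaining diagonal entry into column 0, so columns 1 onward hold exactly the off-diagonal elements:

```python
import numpy as np

mat = np.array([[1, 0, 0],
                [0, 2, 0],
                [0, 0, 3]])
i, j = mat.shape
# Flattened: [1 0 0 0 2 0 0 0 3]; drop the last entry, reshape to (2, 4).
test = mat.reshape(-1)[:-1].reshape(i - 1, j + 1)
print(test)
# [[1 0 0 0]
#  [2 0 0 0]]
print(~np.any(test[:, 1:]))  # True -> every off-diagonal element is zero
```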
RLGraph/RLGraph | [
"428fc136a9a075f29a397495b4226a491a287be2",
"428fc136a9a075f29a397495b4226a491a287be2",
"428fc136a9a075f29a397495b4226a491a287be2",
"428fc136a9a075f29a397495b4226a491a287be2"
]
| [
"rlgraph/tests/agent_learning/long_tasks/test_impala_agent_long_task_learning.py",
"rlgraph/tests/components/test_string_layers.py",
"rlgraph/tests/components/test_ppo_loss_functions.py",
"rlgraph/tests/component_test.py"
]
| [
"# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport numpy as np\nimport time\nimport unittest\n\nfrom rlgraph.environments import OpenAIGymEnv\nfrom rlgraph.agents import IMPALAAgent\nfrom rlgraph.spaces import FloatBox\nfrom rlgraph.utils import root_logger\nfrom rlgraph.tests.test_util import config_from_path\n\n\nclass TestIMPALAAgentLongTaskLearning(unittest.TestCase):\n \"\"\"\n Tests whether the DQNAgent can learn in tough environments.\n \"\"\"\n root_logger.setLevel(level=logging.INFO)\n\n #atari_preprocessed_state_space = FloatBox(shape=(80, 80, 4), add_batch_rank=True)\n #atari_preprocessing_spec = [\n # dict(type=\"image_crop\", x=0, y=25, width=160, height=160),\n # dict(type=\"image_resize\", width=80, height=80),\n # dict(type=\"grayscale\", keep_rank=True),\n # dict(type=\"divide\", divisor=255,),\n # dict(type=\"sequence\", sequence_length=4, batch_size=1, add_rank=False)\n #]\n\n def test_impala_on_outbreak(self):\n \"\"\"\n Creates a DQNAgent and runs it via a Runner on an openAI Pong Env.\n \"\"\"\n env = OpenAIGymEnv(\"Breakout-v0\", frameskip=4, max_num_noops=30, episodic_life=True, visualize=False)\n config_ = config_from_path(\"configs/impala_agent_for_breakout.json\")\n agent = IMPALAAgent.from_spec(\n config_,\n state_space=env.state_space,\n action_space=env.action_space,\n )\n\n learn_updates = 4000000\n mean_returns = []\n for i in range(learn_updates):\n ret = agent.update()\n mean_return = self._calc_mean_return(ret)\n mean_returns.append(mean_return)\n print(\"i={} Loss={:.4} Avg-reward={:.2}\".format(i, float(ret[1]), mean_return))\n\n time.sleep(3)\n agent.terminate()\n time.sleep(3)\n\n @staticmethod\n def _calc_mean_return(records):\n size = records[3][\"rewards\"].size\n rewards = records[3][\"rewards\"].reshape((size,))\n terminals = records[3][\"terminals\"].reshape((size,))\n returns = list()\n return_ = 0.0\n for r, t in zip(rewards, terminals):\n return_ += r\n if t:\n returns.append(return_)\n return_ = 0.0\n\n return np.mean(returns)\n",
"# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport unittest\n\nimport numpy as np\n\nfrom rlgraph.components.layers.strings import *\nfrom rlgraph.spaces import IntBox, TextBox\nfrom rlgraph.tests import ComponentTest\n\n\nclass TestStringLayers(unittest.TestCase):\n \"\"\"\n Tests for the different StringLayer Components. Each layer is tested separately.\n \"\"\"\n def test_embedding_lookup_layer(self):\n # Input space for lookup indices (double indices for picking 2 rows per batch item).\n input_space = IntBox(shape=(2,), add_batch_rank=True)\n\n embedding = EmbeddingLookup(embed_dim=5, vocab_size=4, initializer_spec=np.array([\n [1.0, 2.0, 3.0, 4.0, 5.0],\n [6.0, 7.0, 8.0, 9.0, 10.0],\n [11.0, 12.0, 13.0, 14.0, 15.0],\n [16.0, 17.0, 18.0, 19.0, 20.0]\n ]))\n test = ComponentTest(component=embedding, input_spaces=dict(ids=input_space))\n\n # Pull a batch of 3 (2 vocabs each) from the embedding matrix.\n inputs = np.array(\n [[0, 1], [3, 2], [2, 1]]\n )\n\n expected = np.array([\n [\n [1.0, 2.0, 3.0, 4.0, 5.0],\n [6.0, 7.0, 8.0, 9.0, 10.0]\n ], [\n [16.0, 17.0, 18.0, 19.0, 20.0],\n [11.0, 12.0, 13.0, 14.0, 15.0]\n ], [\n [11.0, 12.0, 13.0, 14.0, 15.0],\n [6.0, 7.0, 8.0, 9.0, 10.0],\n ]\n ])\n test.test((\"call\", inputs), expected_outputs=expected, decimals=5)\n\n def test_string_to_hash_bucket_layer(self):\n # Input space: Batch of strings.\n input_space = TextBox(add_batch_rank=True)\n\n # Use a fast-hash function with 10 possible buckets to put a word into.\n string_to_hash_bucket = StringToHashBucket(num_hash_buckets=10, hash_function=\"fast\")\n test = ComponentTest(component=string_to_hash_bucket, input_spaces=dict(text_inputs=input_space))\n\n # Send a batch of 3 strings through the hash-bucket generator.\n inputs = np.array([\n \"text A\",\n \"test B\",\n \"text C D and E\"\n ])\n\n # NOTE that some different words occupy the same hash bucket (e.g. 'C' and 'and' (7) OR 'text' and [empty] (3)).\n # This can be avoided by 1) picking a larger `num_hash_buckets` or 2) using the \"strong\" hash function.\n expected_hash_bucket = np.array([\n [3, 4, 3, 3, 3], # text A . . .\n [6, 8, 3, 3, 3], # test B . . 
.\n            [3, 7, 5, 7, 2],   # text C D and E\n        ])\n        expected_lengths = np.array([2, 2, 5])\n        test.test((\"call\", inputs), expected_outputs=(expected_hash_bucket, expected_lengths))\n\n    def test_string_to_hash_bucket_layer_with_different_ctor_params(self):\n        # Input space: Batch of strings.\n        input_space = TextBox(add_batch_rank=True)\n\n        # Construct a strong hash bucket with different delimiter, larger number of buckets, string algo and\n        # int16 dtype.\n        string_to_hash_bucket = StringToHashBucket(delimiter=\"-\", num_hash_buckets=20, hash_function=\"strong\",\n                                                   dtype=\"int16\")\n        test = ComponentTest(component=string_to_hash_bucket, input_spaces=dict(text_inputs=input_space))\n\n        # Send a batch of 4 strings through the hash-bucket generator.\n        inputs = np.array([\n            \"text-A\",\n            \"test-B\",\n            \"text-C--D-and-E\",\n            \"bla bla-D\"\n        ])\n\n        # NOTE that some different words occupy the same hash bucket (e.g. 'C' and 'and' OR 'text' and [empty]).\n        # This can be avoided by 1) picking a larger `num_hash_buckets` or 2) using the \"strong\" hash function.\n        expected_hash_bucket = np.array([\n            [2, 6, 18, 18, 18],    # text A . . .\n            [12, 7, 18, 18, 18],   # test B . . .\n            [2, 6, 13, 19, 15],    # text C D and E\n            [13, 13, 18, 18, 18],  # bla bla D . . . <- Note that \"bla bla\" and \"D\" still have the same bucket (13)\n        ])\n        expected_lengths = np.array([2, 2, 5, 2])\n        test.test((\"call\", inputs), expected_outputs=(expected_hash_bucket, expected_lengths))\n",
"# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport unittest\nfrom math import log\n\nimport numpy as np\nfrom rlgraph.components.loss_functions import PPOLossFunction\nfrom rlgraph.spaces import *\nfrom rlgraph.tests import ComponentTest\n\n\nclass TestPPOLossFunctions(unittest.TestCase):\n\n input_spaces = dict(\n loss_per_item=FloatBox(add_batch_rank=True),\n log_probs=FloatBox(shape=(1,), add_batch_rank=True),\n prev_log_probs=FloatBox(shape=(1,), add_batch_rank=True),\n state_values=FloatBox(shape=(1,), add_batch_rank=True),\n prev_state_values=FloatBox(shape=(1,), add_batch_rank=True),\n advantages=FloatBox(add_batch_rank=True),\n entropy=FloatBox(add_batch_rank=True),\n time_percentage=float\n )\n\n def test_ppo_loss_function_on_int_action_space(self):\n action_space = IntBox(2, add_batch_rank=True)\n clip_ratio = 0.2\n\n ppo_loss_function = PPOLossFunction(clip_ratio=clip_ratio, value_function_clipping=False)\n\n test = ComponentTest(component=ppo_loss_function, input_spaces=self.input_spaces, action_space=action_space)\n\n # Batch of size=n.\n log_probs = np.array([[log(0.4)], [log(0.9)], [log(0.1)]])\n prev_log_probs = np.array([[log(0.3)], [log(0.95)], [log(0.2)]])\n state_values = np.array([[-2.0], [-1.0], [1.0]])\n prev_state_values = np.array([[-3.4], [-1.3], [0.3]])\n advantages = np.array([1.0, 3.0, 2.0])\n entropy = np.array([0.7, 0.3, 3.2])\n\n \"\"\"\n Calculation of PG loss term:\n # IS ratios\n rhos = probs / prev_probs = exp(log(probs/prev_probs)) = exp(log_probs - prev_log_probs)\n # clipping around 1.0\n clipped = clip(rhos, 1.0-clip_ratio, 1.0+clip_ratio)\n # entropy loss term\n Le = - weight * entropy\n \n L = min(clipped * A, rhos * A) + Le \n \"\"\"\n rhos = np.exp(log_probs - prev_log_probs)\n clipped_rhos = np.clip(rhos, 1.0 - clip_ratio, 1.0 + clip_ratio)\n expanded_advantages = np.expand_dims(advantages, axis=-1)\n clipped_advantages = -np.minimum(rhos * expanded_advantages, clipped_rhos * expanded_advantages)\n entropy_term = -0.00025 * np.expand_dims(entropy, axis=-1) # 0.00025 == default entropy weight\n\n expected_pg_loss_per_item = np.squeeze(clipped_advantages + entropy_term)\n\n test.test(\n (\"pg_loss_per_item\", [log_probs, prev_log_probs, advantages, entropy]),\n expected_outputs=expected_pg_loss_per_item, decimals=3\n )\n\n v_targets = advantages + np.squeeze(prev_state_values) # Q-value targets\n expected_value_loss_per_item = np.square(np.squeeze(state_values) - v_targets)\n\n test.test(\n (\"value_function_loss_per_item\", [state_values, prev_state_values, advantages]),\n expected_outputs=expected_value_loss_per_item, decimals=3\n )\n\n # All together.\n test.test(\n (\"loss_per_item\", [log_probs, prev_log_probs, state_values, prev_state_values, advantages, entropy]),\n 
expected_outputs=[expected_pg_loss_per_item, expected_value_loss_per_item], decimals=3\n )\n\n # Expect the mean over the batch.\n test.test((\"loss_average\", expected_pg_loss_per_item), expected_outputs=expected_pg_loss_per_item.mean())\n\n # Both.\n test.test(\n (\"loss\", [log_probs, prev_log_probs, state_values, prev_state_values, advantages, entropy]),\n expected_outputs=[\n expected_pg_loss_per_item.mean(), expected_pg_loss_per_item,\n expected_value_loss_per_item.mean(), expected_value_loss_per_item\n ], decimals=3\n )\n\n",
"# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\n\nimport numpy as np\nfrom rlgraph import get_backend\nfrom rlgraph.graphs import GraphBuilder\nfrom rlgraph.graphs.graph_executor import GraphExecutor\nfrom rlgraph.tests.test_util import recursive_assert_almost_equal\nfrom rlgraph.utils import root_logger, PyTorchVariable\nfrom rlgraph.utils.input_parsing import parse_execution_spec\n\n\nclass ComponentTest(object):\n \"\"\"\n A simple (and limited) Graph-wrapper to test a single component in an easy, straightforward way.\n \"\"\"\n def __init__(\n self,\n component,\n input_spaces=None,\n action_space=None,\n seed=10,\n logging_level=None,\n execution_spec=None,\n # TODO: Move all the below into execution_spec just like for Agent class.\n enable_profiler=False,\n disable_monitoring=False,\n device_strategy=\"default\",\n device_map=None,\n backend=None,\n auto_build=True,\n build_kwargs=None\n ):\n \"\"\"\n Args:\n component (Component): The Component to be tested (may contain sub-components).\n input_spaces (Optional[dict]): Dict with component's API input-parameter' names as keys and Space objects\n or Space specs as values. Describes the input Spaces for the component.\n None, if the Component to be tested has no API methods with input parameters.\n action_space (Optional[Space]): The action space to pass into the GraphBuilder.\n seed (Optional[int]): The seed to use for random-seeding the Model object.\n If None, do not seed the Graph (things may behave non-deterministically).\n logging_level (Optional[int]): When provided, sets RLGraph's root_logger's logging level to this value.\n execution_spec (Optional[dict]): Specification dict for execution settings.\n enable_profiler (bool): When enabled, activates backend profiling. Default: False.\n disable_monitoring (bool): When True, will not use a monitored session. 
Default: False.\n device_strategy (str): Optional device-strategy to be passed into GraphExecutor.\n device_map (Optional[Dict[str,str]]): Optional device-map to be passed into GraphExecutor.\n backend (Optional[str]): Override global backend settings for a test by passing in a specific\n backend, convenience method.\n auto_build (Optional[bool]): If false, build has to be triggered manually to eval build stats.\n build_kwargs (Optional[dict]): Dict to be passed as **kwargs to the call to `self.graph_executor.build`.\n \"\"\"\n self.seed = seed\n np.random.seed(seed)\n random.seed(seed)\n\n if logging_level is not None:\n root_logger.setLevel(logging_level)\n\n # Create a GraphBuilder.\n self.graph_builder = GraphBuilder(action_space=action_space)\n self.component = component\n self.component.nesting_level = 0\n self.input_spaces = input_spaces\n self.build_kwargs = build_kwargs or dict()\n\n # Build the model.\n execution_spec = parse_execution_spec(execution_spec or dict(\n seed=self.seed,\n enable_profiler=enable_profiler,\n profiler_frequency=1,\n device_strategy=device_strategy,\n disable_monitoring=disable_monitoring,\n device_map=device_map\n ))\n use_backend = backend if backend is not None else get_backend()\n self.graph_executor = GraphExecutor.from_spec(\n use_backend,\n graph_builder=self.graph_builder,\n execution_spec=execution_spec\n )\n if auto_build:\n self.build()\n else:\n print(\"Auto-build false, did not build. Waiting for manual build.\")\n\n def build(self):\n return self.graph_executor.build([self.component], self.input_spaces, **self.build_kwargs)\n\n def test(self, *api_method_calls, **kwargs):\n \"\"\"\n Does one test pass through the component to test.\n\n Args:\n api_method_calls (Union[str,list,tuple]): See rlgraph.graphs.graph_executor for details.\n A specifier for an API-method call.\n - str: Call the API-method that has the given name w/o any input args.\n - tuple len=2: 0=the API-method name to call; 1=the input args to use for the call.\n - tuple len=3: same as len=2, AND 2=list of returned op slots to pull (e.g. 
[0]: only pull\n the first op).\n\n Keyword Args:\n expected_outputs (Optional[any]): The expected return value(s) generated by the API-method.\n If None, no checks will be done on the output.\n decimals (Optional[int]): The number of digits after the floating point up to which to compare actual\n outputs and expected values.\n fn_test (Optional[callable]): Test function to call with (self, outs) as parameters.\n print (bool): Whether to print out the actual results (before doing any checks on the results).\n\n Returns:\n any: The actual returned values when calling the API-method with the given parameters.\n \"\"\"\n expected_outputs = kwargs.pop(\"expected_outputs\", None)\n decimals = kwargs.pop(\"decimals\", 7)\n fn_test = kwargs.pop(\"fn_test\", None)\n print_ = kwargs.pop(\"print\", False)\n assert not kwargs\n\n # Get the outs ..\n outs = self.graph_executor.execute(*api_method_calls)\n\n if print_ is True:\n print(\"Results:\\n{}\".format(repr(outs)))\n\n # Optionally do test asserts here.\n if expected_outputs is not None:\n self.assert_equal(outs, expected_outputs, decimals=decimals)\n\n if callable(fn_test):\n fn_test(self, outs)\n\n return outs\n\n def variable_test(self, variables, expected_values):\n \"\"\"\n Asserts that all given `variables` have the `expected_values`.\n Variables can be given in an arbitrary structure including nested ones.\n\n Args:\n variables (any): Any structure that contains variables.\n expected_values (any): Matching structure with the expected values for the given variables.\n \"\"\"\n values = self.read_variable_values(variables)\n self.assert_equal(values, expected_values)\n\n def read_variable_values(self, *variables):\n \"\"\"\n Executes a session to retrieve the values of the provided variables.\n\n Args:\n variables (Union[variable,List[variable]]): Variable objects whose values to retrieve from the graph.\n\n Returns:\n any: Values of the variables provided.\n \"\"\"\n # No variables given: Read all variables of our component.\n if len(variables) == 0:\n variables = self.component.variable_registry\n ret = self.graph_executor.read_variable_values(variables)\n if len(variables) == 1:\n return ret[0]\n return ret\n\n def get_variable_values(self, component, names):\n \"\"\"\n Reads value of component state for given component and names.\n\n Args:\n component (Component):\n *names (list): List of strings.\n\n Returns:\n Dict: Variable values.\n \"\"\"\n variables = component.get_variables(names, global_scope=False)\n if get_backend() == \"tf\":\n return self.graph_executor.read_variable_values(variables)\n else:\n return variables\n\n @staticmethod\n def read_params(name, params, transpose_torch_params=True):\n \"\"\"\n Tries to read name from params. Name may be either an actual key or a prefix of a key.\n\n Args:\n name (str): Key or prefix to key.\n params (dict): Params to read.\n transpose_torch_params (bool): If the parameter lookup yields a torch parameter and this argument is True,\n transpose the result. 
Accounts for different internal data layouts.\n Returns:\n any: Param value for key.\n\n Raises:\n ValueError: If no key can be found.\n \"\"\"\n param = ComponentTest.read_prefixed_params(name, params)\n if isinstance(param, PyTorchVariable):\n # Weights require gradients -> detach.\n weight = param.get_value().detach().numpy()\n\n # PyTorch and TF\n if transpose_torch_params:\n return weight.transpose()\n else:\n return weight\n else:\n return param\n\n @staticmethod\n def read_prefixed_params(name, params):\n \"\"\"\n Tries to read name from params. Name may be either an actual key or a prefix of a key\n\n Args:\n name (str): Key or prefix to key.\n params (dict): Params to read.\n\n Returns:\n any: Param value for key.\n\n Raises:\n ValueError: If no key can be found.\n \"\"\"\n if name in params:\n return params[name]\n\n # Otherwise return first matching key.\n for key in params.keys():\n if key.startswith(name):\n return params[key]\n # Catch key ending with extra scope separator\n elif name[-1] == \"/\" and key.startswith(name[:-1]):\n return params[key]\n\n raise ValueError(\"No value found for key = {}. Keys are: {}\".format(name, params.keys()))\n\n @staticmethod\n def assert_equal(outs, expected_outputs, decimals=7):\n \"\"\"\n Convenience wrapper: See implementation of `recursive_assert_almost_equal` for details.\n \"\"\"\n recursive_assert_almost_equal(outs, expected_outputs, decimals=decimals)\n\n def terminate(self):\n \"\"\"\n Terminates this ComponentTest object (so it can no longer be used) allowing for cleanup\n operations to be executed.\n \"\"\"\n self.graph_executor.terminate()\n"
]
| [
[
"numpy.mean"
],
[
"numpy.array"
],
[
"numpy.expand_dims",
"numpy.minimum",
"numpy.clip",
"numpy.squeeze",
"numpy.array",
"numpy.exp"
],
[
"numpy.random.seed"
]
]
|
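The PPO test in the row above already spells out the clipped-surrogate arithmetic it checks; the same calculation as a stand-alone NumPy snippet (entropy weight 0.00025, the test's default):

```python
import numpy as np

clip_ratio = 0.2
log_probs = np.log([[0.4], [0.9], [0.1]])
prev_log_probs = np.log([[0.3], [0.95], [0.2]])
advantages = np.array([1.0, 3.0, 2.0])
entropy = np.array([0.7, 0.3, 3.2])

rhos = np.exp(log_probs - prev_log_probs)              # IS ratios
clipped = np.clip(rhos, 1.0 - clip_ratio, 1.0 + clip_ratio)
adv = np.expand_dims(advantages, axis=-1)
pg_loss = (-np.minimum(rhos * adv, clipped * adv)      # negative surrogate
           - 0.00025 * np.expand_dims(entropy, -1))    # entropy bonus term
print(np.squeeze(pg_loss))
```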
jshannon-usbr/usbr_temperature_ann | [
"b630b8bde17ae07329df1bba6df21690a372bc56"
]
| [
"__models/HEC5Q/HEC5Q/HEC5Q.py"
]
| [
"r\"\"\"\nSummary\n-------\nThis Python module executes the Simulation Protocols outlined by David\nJulian/CH2M HILL, dated 2015-05-26. The following Temperature Model Simulation\nProtocols, with a common directory of ./_Tools/HEC5Q_Toolkit, were delivered to\nReclamation on 2016-01-13:\n - American_River/HEC5Q_AR_Temp_Model_Protocol_052615.docx\n - Stanislaus_River/HEC5Q_StanR_Temp_Model_Protocol_052615.docx\n - Trinity_Sacramento_Rivers/HEC5Q_SR_Temp_Model_Protocol_052615.docx\n\n\"\"\"\n# %% Import libraries.\n# Import standard libraries.\nimport os\nimport sys\nimport shutil\nimport subprocess as sb\n# Import third party libraries.\nimport pandas as pd\nimport numpy as np\n# Import custom libraries.\nCustDir = os.path.abspath(r'..\\usbr_py3dss')\n# The following conditional statement is required when re-running a kernal.\nif CustDir not in sys.path:\n sys.path.insert(1, CustDir)\nimport dss3_functions_reference as dss\n\n# %% Establish functions.\ndef AR_targets(model_dir):\n r\"\"\"\n Summary\n -------\n Function to generate temperature targets for American River HEC5Q model.\n\n \"\"\"\n # Read target tables from AmerR_Temp_Sel_Tool_rev15_APP_FINAL_3-16-15.xlsm.\n table_path = (r'Pre_Processor'\n + r'\\AmerR_Temp_Sel_Tool_rev15_APP_FINAL_3-16-15.xlsm')\n table_path = os.path.join(model_dir, table_path)\n col_names = ['Storage Plus Inflow (TAF)'] + list(range(1, 13))\n sheetname = 'Input Schedules Selected'\n F_targ = pd.read_excel(table_path, sheet_name=sheetname, header=None,\n names=col_names, index_col=0, usecols='D:P',\n skiprows=list(range(9)), nrows=23)\n W_targ = pd.read_excel(table_path, sheet_name=sheetname, header=None,\n names=col_names, index_col=0, usecols='D:P',\n skiprows=list(range(37)), nrows=23)\n F_targ.columns.set_names('Calendar Month Number', inplace=True)\n W_targ.columns.set_names('Calendar Month Number', inplace=True)\n # Read I300, I8, and S8 from CalSimII SV & DV file.\n # ???: Spreadsheet also queries C301, but it looks like it is not used; why\n # is C301 needed?\n # <JAS 2019-04-16>\n cdate = '31Oct1921'\n ctime = '2400'\n nvalsi = 984\n DateTime = pd.date_range(start='1921-10-31', end='2003-09-30', freq='M')\n Folsom = pd.DataFrame(index=DateTime)\n SV = [r'/CALSIM/I300/FLOW-INFLOW//1MON/2020D09E/',\n r'/CALSIM/I8/FLOW-INFLOW//1MON/2020D09E/']\n DV = [r'/CALSIM/S8/STORAGE//1MON/2020D09E/']\n fpSV = os.path.join(model_dir, r'Pre_Processor\\2020D09ESV.dss')\n ifltab_SV = dss.open_dss(fpSV)[0]\n dss_rtn = dss.read_regtsd(ifltab_SV, SV[0], cdate, ctime, nvalsi)\n Folsom['I300'] = dss_rtn[1]\n dss_rtn = dss.read_regtsd(ifltab_SV, SV[1], cdate, ctime, nvalsi)\n Folsom['I8'] = dss_rtn[1]\n dss.close_dss(ifltab_SV)\n fpDV = os.path.join(model_dir, r'Pre_Processor\\2020D09EDV.dss')\n ifltab_DV = dss.open_dss(fpDV)[0]\n dss_rtn = dss.read_regtsd(ifltab_DV, DV[0], cdate, ctime, nvalsi)\n Folsom['S8'] = dss_rtn[1]\n dss.close_dss(ifltab_DV)\n # Sum S8 End of May Storage and June through September inflow (I8 & I300).\n cfs2taf_I300 = lambda x: x['I300'] * 86400 * x.name.day / 43560 / 1000\n Folsom['I300'] = Folsom.apply(cfs2taf_I300, axis=1)\n cfs2taf_I8 = lambda x: x['I8'] * 86400 * x.name.day / 43560 / 1000\n Folsom['I8'] = Folsom.apply(cfs2taf_I8, axis=1)\n Folsom = Folsom.iloc[::-1]\n Folsom['Storage'] = (Folsom['S8']\n + Folsom['I300'].shift(1).rolling(4).sum()\n + Folsom['I8'].shift(1).rolling(4).sum())\n Folsom = Folsom.iloc[::-1]\n Folsom = Folsom.loc[Folsom.index.month == 5, :]\n # Re-index monthly series with each month equal to its year's May value.\n 
Folsom = Folsom.shift(7, freq='M')\n Folsom = Folsom.reindex(pd.date_range(start='1921-10-31',\n end='2003-09-30',\n freq='M'),\n method='bfill')\n # Lookup temperature target based on summed volume.\n W_target = lambda x: W_targ.iloc[W_targ.index.get_loc(x['Storage'],\n method='ffill'),\n W_targ.columns.get_loc(x.name.month)]\n Folsom['Watt Target'] = Folsom.apply(W_target, axis=1)\n F_target = lambda x: F_targ.iloc[F_targ.index.get_loc(x['Storage'],\n method='ffill'),\n F_targ.columns.get_loc(x.name.month)]\n Folsom['Folsom Target'] = Folsom.apply(F_target, axis=1)\n # Re-index from monthly to daily series.\n Folsom = Folsom.reindex(pd.date_range(start='1921-10-01',\n end='2003-09-30',\n freq='D'),\n method='bfill')\n # Store daily series to CALSIMII_HEC5Q.dss.\n fpHEC5Q = os.path.join(model_dir, r'Pre_Processor\\CALSIMII_HEC5Q.dss')\n ifltab_HEC5Q = dss.open_dss(fpHEC5Q)[0]\n cpath = [r'/CALSIM_STOR/WATTAVE_PT/TARGET-F//1DAY/2020D09E-1/',\n r'/CALSIM_STOR/FOLSOM_PT/TARGET-F//1DAY/2020D09E-1/']\n cdate = '01Oct1921'\n cunits = 'DEGF'\n ctype = 'Per-aver'\n dss_storWatt = dss.write_regtsd(ifltab_HEC5Q, cpath[0], cdate, ctime,\n Folsom['Watt Target'].values, cunits,\n ctype)\n dss_storFlsm = dss.write_regtsd(ifltab_HEC5Q, cpath[1], cdate, ctime,\n Folsom['Folsom Target'].values, cunits,\n ctype)\n dss.close_dss(ifltab_HEC5Q)\n # Return success indicators.\n return (0, dss_storWatt, dss_storFlsm)\n\n\ndef SR_targets(model_dir):\n r\"\"\"\n Summary\n -------\n Function to generate temperature targets for Trinity/Sacramento River HEC5Q\n model.\n\n Notes\n -----\n 1. Need to determine how SacR_Temp_Sel_Tool_rev05_FULL_FINAL_3-3-15.xlsm\n fits into this process.\n\n \"\"\"\n # Get target table in SacR_Temp_Sel_Tool_rev05_APP_FINAL_3-3-15-16-15.xlsm.\n table_path = (r'Pre_Processor'\n + r'\\SacR_Temp_Sel_Tool_rev05_APP_FINAL_3-3-15.xlsm')\n table_path = os.path.join(model_dir, table_path)\n col_names = ['S4 EO Apr Storage (TAF)'] + list(range(1, 13))\n targ = pd.read_excel(table_path, sheet_name='Input Schedules Selected',\n header=None, names=col_names, index_col=0,\n usecols='E:Q', skiprows=list(range(7)), nrows=6)\n targ.columns.set_names('Calendar Month Number', inplace=True)\n # Read S4 from CalSimII DV file.\n # ???: Why are C5 and C109 queried for the excel spreadsheet? 
They do not\n # seem to be used.\n # <JAS 2019-04-17>\n cdate = '31Oct1921'\n ctime = '2400'\n nvalsi = 984\n DateTime = pd.date_range(start='1921-10-31', end='2003-09-30', freq='M')\n Shasta = pd.DataFrame(index=DateTime)\n DV = [r'/CALSIM/S_SHSTA/STORAGE//1MON/2020D09E/']\n fpDV = os.path.join(model_dir, r'Pre_Processor\\2020D09EDV.dss')\n ifltab_DV = dss.open_dss(fpDV)[0]\n dss_rtn = dss.read_regtsd(ifltab_DV, DV[0], cdate, ctime, nvalsi)\n Shasta['S4'] = dss_rtn[1]\n dss.close_dss(ifltab_DV)\n # Select only April Months.\n Shasta = Shasta.loc[Shasta.index.month == 4, :]\n # Re-index monthly series with each month equal to its year's April value.\n Shasta = Shasta.shift(8, freq='M')\n Shasta = Shasta.reindex(pd.date_range(start='1921-10-31', end='2003-09-30',\n freq='M'),\n method='bfill')\n # Lookup temperature target based on S4 End of April storage level.\n target = lambda x: targ.iloc[targ.index.get_loc(x['S4'], method='ffill'),\n targ.columns.get_loc(x.name.month)]\n Shasta['Target'] = Shasta.apply(target, axis=1)\n # Re-index from monthly to daily series.\n Shasta = Shasta.reindex(pd.date_range(start='1922-01-01', end='2003-09-30',\n freq='D'),\n method='bfill')\n # Store daily series to CALSIMII_HEC5Q.dss.\n fpHEC5Q = os.path.join(model_dir, r'Pre_Processor\\CALSIMII_HEC5Q.dss')\n ifltab_HEC5Q = dss.open_dss(fpHEC5Q)[0]\n cpath = [r'/CALSIM_STOR/Shasta_PT/TARGET-F//1DAY/2020D09E-1/']\n cdate = '01Jan1922'\n cunits = 'DEGF'\n ctype = 'Per-aver'\n dss_stor = dss.write_regtsd(ifltab_HEC5Q, cpath[0], cdate, ctime,\n Shasta['Target'].values, cunits, ctype)\n dss.close_dss(ifltab_HEC5Q)\n # Return success indicators.\n return (0, dss_stor)\n\n\ndef HEC5Q_protocol(SV, DV, study='NAA', watershed='SR', climate='Q5',\n force_delete=False, safe_mode=True):\n r\"\"\"\n No documentation as of 2019-04-12.\n\n Notes\n -----\n 1. Select `climate` scenario (i.e. q1, q2, q3, q4, or q5); string is not\n case sensitive.\n 2. 
Select one of the following watersheds (string is not case sensitive):\n - 'AR' = American River\n - 'StanR' = Stanislaus River\n - 'SR' = Trinity & Sacramento Rivers\n\n \"\"\"\n # Check inputs.\n if watershed.upper() not in ['AR', 'STANR', 'SR']:\n err_msg = watershed + ' is not a valid value for `watershed`.'\n raise ValueError(err_msg)\n # Stylize `watershed`.\n if watershed.upper() == 'STANR':\n watershed = 'StanR'\n # Check to ensure that `climate` is 'q5', per HEC5Q_Toolkit_102315_v21.\n if climate.lower() != 'q5':\n err_msg = 'Cannot run StanR model with climate other than Q5.'\n raise ValueError(err_msg)\n else:\n watershed = watershed.upper()\n # Change to HEC5Q directory.\n cwd = os.getcwd()\n this_dir = os.path.dirname(os.path.abspath(__file__))\n os.chdir(this_dir)\n # Select HEC5Q_Toolkit directory.\n tool_map = {'AR': 'American_River',\n 'StanR': 'Stanislaus_River',\n 'SR': 'Trinity_Sacramento_Rivers'}\n tool_dir = tool_map[watershed]\n # Set model directory.\n model_name = '{}_HEC5Q_{}'.format(watershed, climate.upper())\n study_dir = os.path.abspath(os.path.join(r'./', study))\n model_dir = os.path.join(study_dir, model_name)\n # Check existence of model directory.\n dir_exists = os.path.isdir(model_dir)\n # If model directory exists, ...\n if dir_exists:\n # ...force delete and replace model directory without user input, or...\n if force_delete and not safe_mode:\n shutil.rmtree(model_dir)\n del_msg = 'Deleted content of existing model directory {}.'\n print(del_msg.format(model_dir))\n # NOTE: PermissionError raised if user had `model_dir` open in\n # Windows File Explorer when this code was executed.\n # <JAS 2019-04-15>\n os.mkdir(model_dir)\n # ...prompt user to allow deletion and replacement of model directory.\n else:\n if force_delete and safe_mode:\n print('`safe_mode` is True.')\n prompt = 'Delete contents of existing model directory {}? Y/[N]: '\n ans = input(prompt.format(model_dir))\n if ans == 'Y':\n shutil.rmtree(model_dir)\n del_msg = 'Deleted content of existing model directory {}.'\n print(del_msg.format(model_dir))\n # NOTE: PermissionError raised if user had `model_dir` open in\n # Windows File Explorer when this code was executed.\n # <JAS 2019-04-15>\n os.mkdir(model_dir)\n else:\n err_msg = 'Permission Denied: Cannot delete directory.'\n raise ValueError(err_msg)\n # If model directory does not exist, create directory.\n else:\n print('Directory does not exist. 
Created new directory.')\n if not os.path.isdir(study_dir):\n os.mkdir(study_dir)\n os.mkdir(model_dir)\n # Copy setup_{watershed}_temp_run.bat from .\\_Tools\\HEC5Q_Toolkit.\n setup_bat = 'setup_{}_temp_run.bat'.format(watershed)\n setup_bat = os.path.join(model_dir, setup_bat)\n setup_src = (r'.\\_Tools\\HEC5Q_Toolkit\\{}'.format(tool_dir)\n + r'\\Common\\setup_{}_temp_run.bat'.format(watershed))\n setup_src = os.path.abspath(setup_src)\n shutil.copyfile(setup_src, setup_bat)\n # Run setup_{watershed}_temp_run.bat with input `climate`.\n run_setup = sb.run(setup_bat, cwd=model_dir, input=climate.lower(),\n encoding='utf-8', stdout=sb.PIPE)\n if run_setup.returncode == 0:\n print(r'Copied model files from .\\_Tools\\HEC5Q_Toolkit.')\n else:\n err_msg = 'Setup process interrupted.'\n print(err_msg)\n return run_setup.returncode\n # Copy CalSimII SV and DV files to {model_dir}\\Pre_Processor.\n input_data = os.path.join(model_dir, 'Pre_Processor')\n shutil.copyfile(SV, os.path.join(input_data, '2020D09ESV.dss'))\n shutil.copyfile(DV, os.path.join(input_data, '2020D09EDV.dss'))\n print('Added CalSim files to Pre_Processor folder.')\n # Run process_calsim_temp_inputs.bat.\n inputs_bat = os.path.join(model_dir, 'process_calsim_temp_inputs.bat')\n print('Processing CalSim temperature inputs...')\n process_input = sb.run(inputs_bat, cwd=model_dir, encoding='utf-8',\n creationflags=sb.CREATE_NEW_CONSOLE)\n if process_input.returncode == 0:\n print('Processed CalSim temperature inputs.')\n else:\n err_msg = 'Input processing interrupted.'\n print(err_msg)\n return process_input.returncode\n # Store temperature target schedules in input file.\n if watershed == 'AR':\n AR_targets(model_dir)\n elif watershed == 'SR':\n SR_targets(model_dir)\n # Run run_{watershed}_temp_model.bat.\n run_bat = 'run_{}_temp_model.bat'.format(watershed)\n run_bat = os.path.join(model_dir, run_bat)\n print('Running temperature model...')\n run_model = sb.run(run_bat, cwd=model_dir, encoding='utf-8',\n creationflags=sb.CREATE_NEW_CONSOLE)\n if run_model.returncode == 0:\n print('Temperature model run complete!')\n else:\n err_msg = 'Temperature model process interrupted.'\n print(err_msg)\n return run_model.returncode\n # Switch back to original directory.\n os.chdir(cwd)\n # Print message to console and return success indicator.\n print('HEC5Q Subprocess Complete!')\n return 0\n\n\n# %% Execute script.\nif __name__ == '__main__':\n SV = os.path.abspath('./data/CalSim3-Base/2020D09ESV_3.dss')\n DV = os.path.abspath('./data/CalSim3-Base/2020D09EDV_3.dss')\n HEC5Q_protocol(SV, DV, study='CalSim3-Base', watershed='sr', climate='Q0')\n msg = 'This script is complete!'\n print(msg)\n"
]
| [
[
"pandas.DataFrame",
"pandas.date_range"
]
]
|
johnzero7/blender-plugin | [
"e8448b0ef7d4d64d75db5b262511f2e2bf9b7ef4"
]
| [
"addons/io_sketchfab_plugin/blender/imp/gltf2_blender_mesh.py"
]
| [
"# Copyright 2018-2021 The glTF-Blender-IO authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport bpy\nfrom mathutils import Matrix\nimport numpy as np\n\nfrom ...io.imp.gltf2_io_binary import BinaryData\nfrom ..com.gltf2_blender_extras import set_extras\nfrom .gltf2_blender_material import BlenderMaterial\nfrom ...io.com.gltf2_io_debug import print_console\nfrom .gltf2_io_draco_compression_extension import decode_primitive\n\n\nclass BlenderMesh():\n \"\"\"Blender Mesh.\"\"\"\n def __new__(cls, *args, **kwargs):\n raise RuntimeError(\"%s should not be instantiated\" % cls)\n\n @staticmethod\n def create(gltf, mesh_idx, skin_idx):\n \"\"\"Mesh creation.\"\"\"\n return create_mesh(gltf, mesh_idx, skin_idx)\n\n\n# Maximum number of TEXCOORD_n/COLOR_n sets to import\nUV_MAX = 8\nCOLOR_MAX = 8\n\n\ndef create_mesh(gltf, mesh_idx, skin_idx):\n pymesh = gltf.data.meshes[mesh_idx]\n name = pymesh.name or 'Mesh_%d' % mesh_idx\n mesh = bpy.data.meshes.new(name)\n\n # Temporarily parent the mesh to an object.\n # This is used to set skin weights and shapekeys.\n tmp_ob = None\n try:\n tmp_ob = bpy.data.objects.new('##gltf-import:tmp-object##', mesh)\n do_primitives(gltf, mesh_idx, skin_idx, mesh, tmp_ob)\n set_extras(mesh, gltf.data.meshes[mesh_idx].extras, exclude=['targetNames'])\n\n finally:\n if tmp_ob:\n bpy.data.objects.remove(tmp_ob)\n\n return mesh\n\n\ndef do_primitives(gltf, mesh_idx, skin_idx, mesh, ob):\n \"\"\"Put all primitive data into the mesh.\"\"\"\n pymesh = gltf.data.meshes[mesh_idx]\n\n # Scan the primitives to find out what we need to create\n\n has_normals = False\n num_uvs = 0\n num_cols = 0\n num_joint_sets = 0\n for prim in pymesh.primitives:\n if 'POSITION' not in prim.attributes:\n continue\n\n if 'NORMAL' in prim.attributes:\n has_normals = True\n\n if skin_idx is not None:\n i = 0\n while ('JOINTS_%d' % i) in prim.attributes and \\\n ('WEIGHTS_%d' % i) in prim.attributes:\n i += 1\n num_joint_sets = max(i, num_joint_sets)\n\n i = 0\n while i < UV_MAX and ('TEXCOORD_%d' % i) in prim.attributes: i += 1\n num_uvs = max(i, num_uvs)\n\n i = 0\n while i < COLOR_MAX and ('COLOR_%d' % i) in prim.attributes: i += 1\n num_cols = max(i, num_cols)\n\n num_shapekeys = 0\n if len(pymesh.primitives) > 0: # Empty primitive tab is not allowed, but some invalid files...\n for morph_i, _ in enumerate(pymesh.primitives[0].targets or []):\n if pymesh.shapekey_names[morph_i] is not None:\n num_shapekeys += 1\n\n # -------------\n # We'll process all the primitives gathering arrays to feed into the\n # various foreach_set function that create the mesh data.\n\n num_faces = 0 # total number of faces\n vert_locs = np.empty(dtype=np.float32, shape=(0,3)) # coordinate for each vert\n vert_normals = np.empty(dtype=np.float32, shape=(0,3)) # normal for each vert\n edge_vidxs = np.array([], dtype=np.uint32) # vertex_index for each loose edge\n loop_vidxs = np.array([], dtype=np.uint32) # vertex_index for each loop\n loop_uvs = [\n np.empty(dtype=np.float32, shape=(0,2)) # UV for each 
loop for each layer\n for _ in range(num_uvs)\n ]\n loop_cols = [\n np.empty(dtype=np.float32, shape=(0,4)) # color for each loop for each layer\n for _ in range(num_cols)\n ]\n vert_joints = [\n np.empty(dtype=np.uint32, shape=(0,4)) # 4 joints for each vert for each set\n for _ in range(num_joint_sets)\n ]\n vert_weights = [\n np.empty(dtype=np.float32, shape=(0,4)) # 4 weights for each vert for each set\n for _ in range(num_joint_sets)\n ]\n sk_vert_locs = [\n np.empty(dtype=np.float32, shape=(0,3)) # coordinate for each vert for each shapekey\n for _ in range(num_shapekeys)\n ]\n\n for prim in pymesh.primitives:\n prim.num_faces = 0\n\n if 'POSITION' not in prim.attributes:\n continue\n\n vert_index_base = len(vert_locs)\n\n if prim.extensions is not None and 'KHR_draco_mesh_compression' in prim.extensions:\n print_console('INFO', 'Draco Decoder: Decode primitive {}'.format(pymesh.name or '[unnamed]'))\n decode_primitive(gltf, prim)\n\n if prim.indices is not None:\n indices = BinaryData.decode_accessor(gltf, prim.indices)\n indices = indices.reshape(len(indices))\n else:\n num_verts = gltf.data.accessors[prim.attributes['POSITION']].count\n indices = np.arange(0, num_verts, dtype=np.uint32)\n\n mode = 4 if prim.mode is None else prim.mode\n points, edges, tris = points_edges_tris(mode, indices)\n if points is not None:\n indices = points\n elif edges is not None:\n indices = edges\n else:\n indices = tris\n\n # We'll add one vert to the arrays for each index used in indices\n unique_indices, inv_indices = np.unique(indices, return_inverse=True)\n\n vs = BinaryData.decode_accessor(gltf, prim.attributes['POSITION'], cache=True)\n vert_locs = np.concatenate((vert_locs, vs[unique_indices]))\n\n if has_normals:\n if 'NORMAL' in prim.attributes:\n ns = BinaryData.decode_accessor(gltf, prim.attributes['NORMAL'], cache=True)\n ns = ns[unique_indices]\n else:\n ns = np.zeros((len(unique_indices), 3), dtype=np.float32)\n vert_normals = np.concatenate((vert_normals, ns))\n\n for i in range(num_joint_sets):\n if ('JOINTS_%d' % i) in prim.attributes and ('WEIGHTS_%d' % i) in prim.attributes:\n js = BinaryData.decode_accessor(gltf, prim.attributes['JOINTS_%d' % i], cache=True)\n ws = BinaryData.decode_accessor(gltf, prim.attributes['WEIGHTS_%d' % i], cache=True)\n js = js[unique_indices]\n ws = ws[unique_indices]\n else:\n js = np.zeros((len(unique_indices), 4), dtype=np.uint32)\n ws = np.zeros((len(unique_indices), 4), dtype=np.float32)\n vert_joints[i] = np.concatenate((vert_joints[i], js))\n vert_weights[i] = np.concatenate((vert_weights[i], ws))\n\n for morph_i, target in enumerate(prim.targets or []):\n if pymesh.shapekey_names[morph_i] is None:\n continue\n morph_vs = BinaryData.decode_accessor(gltf, target['POSITION'], cache=True)\n morph_vs = morph_vs[unique_indices]\n sk_vert_locs[morph_i] = np.concatenate((sk_vert_locs[morph_i], morph_vs))\n\n # inv_indices are the indices into the verts just for this prim;\n # calculate indices into the overall verts array\n prim_vidxs = inv_indices.astype(np.uint32, copy=False)\n prim_vidxs += vert_index_base # offset for verts from previous prims\n\n if edges is not None:\n edge_vidxs = np.concatenate((edge_vidxs, prim_vidxs))\n\n if tris is not None:\n prim.num_faces = len(indices) // 3\n num_faces += prim.num_faces\n\n loop_vidxs = np.concatenate((loop_vidxs, prim_vidxs))\n\n for uv_i in range(num_uvs):\n if ('TEXCOORD_%d' % uv_i) in prim.attributes:\n uvs = BinaryData.decode_accessor(gltf, prim.attributes['TEXCOORD_%d' % uv_i], cache=True)\n uvs = 
uvs[indices]\n else:\n uvs = np.zeros((len(indices), 2), dtype=np.float32)\n loop_uvs[uv_i] = np.concatenate((loop_uvs[uv_i], uvs))\n\n for col_i in range(num_cols):\n if ('COLOR_%d' % col_i) in prim.attributes:\n cols = BinaryData.decode_accessor(gltf, prim.attributes['COLOR_%d' % col_i], cache=True)\n cols = cols[indices]\n if cols.shape[1] == 3:\n cols = colors_rgb_to_rgba(cols)\n else:\n cols = np.ones((len(indices), 4), dtype=np.float32)\n loop_cols[col_i] = np.concatenate((loop_cols[col_i], cols))\n\n # Accessors are cached in case they are shared between primitives; clear\n # the cache now that all prims are done.\n gltf.decode_accessor_cache = {}\n vert_locs, vert_normals, vert_joints, vert_weights, \\\n sk_vert_locs, loop_vidxs, edge_vidxs = \\\n merge_duplicate_verts(\n vert_locs, vert_normals, vert_joints, vert_weights, \\\n sk_vert_locs, loop_vidxs, edge_vidxs\\\n )\n\n # ---------------\n # Convert all the arrays glTF -> Blender\n\n # Change from relative to absolute positions for morph locs\n for sk_locs in sk_vert_locs:\n sk_locs += vert_locs\n\n gltf.locs_batch_gltf_to_blender(vert_locs)\n gltf.normals_batch_gltf_to_blender(vert_normals)\n for sk_locs in sk_vert_locs:\n gltf.locs_batch_gltf_to_blender(sk_locs)\n\n if num_joint_sets:\n skin_into_bind_pose(\n gltf, skin_idx, vert_joints, vert_weights,\n locs=[vert_locs] + sk_vert_locs,\n vert_normals=vert_normals,\n )\n\n for uvs in loop_uvs:\n uvs_gltf_to_blender(uvs)\n\n for cols in loop_cols:\n colors_linear_to_srgb(cols[:, :-1])\n\n # ---------------\n # Start creating things\n\n mesh.vertices.add(len(vert_locs))\n mesh.vertices.foreach_set('co', squish(vert_locs))\n\n mesh.loops.add(len(loop_vidxs))\n mesh.loops.foreach_set('vertex_index', loop_vidxs)\n\n mesh.edges.add(len(edge_vidxs) // 2)\n mesh.edges.foreach_set('vertices', edge_vidxs)\n\n mesh.polygons.add(num_faces)\n\n # All polys are tris\n loop_starts = np.arange(0, 3 * num_faces, step=3)\n loop_totals = np.full(num_faces, 3)\n mesh.polygons.foreach_set('loop_start', loop_starts)\n mesh.polygons.foreach_set('loop_total', loop_totals)\n\n for uv_i in range(num_uvs):\n name = 'UVMap' if uv_i == 0 else 'UVMap.%03d' % uv_i\n layer = mesh.uv_layers.new(name=name)\n\n if layer is None:\n print(\"WARNING: UV map is ignored because the maximum number of UV layers has been reached.\")\n break\n\n layer.data.foreach_set('uv', squish(loop_uvs[uv_i]))\n\n for col_i in range(num_cols):\n name = 'Col' if col_i == 0 else 'Col.%03d' % col_i\n layer = mesh.vertex_colors.new(name=name)\n\n if layer is None:\n print(\"WARNING: Vertex colors are ignored because the maximum number of vertex color layers has been \"\n \"reached.\")\n break\n\n layer.data.foreach_set('color', squish(loop_cols[col_i]))\n\n # Skinning\n # TODO: this is slow :/\n if num_joint_sets:\n pyskin = gltf.data.skins[skin_idx]\n for i, _ in enumerate(pyskin.joints):\n # ob is a temp object, so don't worry about the name.\n ob.vertex_groups.new(name='X%d' % i)\n\n vgs = list(ob.vertex_groups)\n\n for i in range(num_joint_sets):\n js = vert_joints[i].tolist() # tolist() is faster\n ws = vert_weights[i].tolist()\n for vi in range(len(vert_locs)):\n w0, w1, w2, w3 = ws[vi]\n j0, j1, j2, j3 = js[vi]\n if w0 != 0: vgs[j0].add((vi,), w0, 'REPLACE')\n if w1 != 0: vgs[j1].add((vi,), w1, 'REPLACE')\n if w2 != 0: vgs[j2].add((vi,), w2, 'REPLACE')\n if w3 != 0: vgs[j3].add((vi,), w3, 'REPLACE')\n\n # Shapekeys\n if num_shapekeys:\n ob.shape_key_add(name='Basis')\n mesh.shape_keys.name = mesh.name\n\n sk_i = 0\n for sk_name 
in pymesh.shapekey_names:\n if sk_name is None:\n continue\n\n ob.shape_key_add(name=sk_name)\n key_block = mesh.shape_keys.key_blocks[sk_name]\n key_block.data.foreach_set('co', squish(sk_vert_locs[sk_i]))\n\n sk_i += 1\n\n # ----\n # Assign materials to faces\n has_materials = any(prim.material is not None for prim in pymesh.primitives)\n if has_materials:\n material_indices = np.empty(num_faces, dtype=np.uint32)\n empty_material_slot_index = None\n f = 0\n\n for prim in pymesh.primitives:\n if prim.material is not None:\n # Get the material\n pymaterial = gltf.data.materials[prim.material]\n vertex_color = 'COLOR_0' if 'COLOR_0' in prim.attributes else None\n if vertex_color not in pymaterial.blender_material:\n BlenderMaterial.create(gltf, prim.material, vertex_color)\n material_name = pymaterial.blender_material[vertex_color]\n\n # Put material in slot (if not there)\n if material_name not in mesh.materials:\n mesh.materials.append(bpy.data.materials[material_name])\n material_index = mesh.materials.find(material_name)\n else:\n if empty_material_slot_index is None:\n mesh.materials.append(None)\n empty_material_slot_index = len(mesh.materials) - 1\n material_index = empty_material_slot_index\n\n material_indices[f:f + prim.num_faces].fill(material_index)\n\n f += prim.num_faces\n\n mesh.polygons.foreach_set('material_index', material_indices)\n\n # ----\n # Normals\n\n # Set polys smooth/flat\n set_poly_smoothing(gltf, pymesh, mesh, vert_normals, loop_vidxs)\n\n mesh.validate()\n has_loose_edges = len(edge_vidxs) != 0 # need to calc_loose_edges for them to show up\n mesh.update(calc_edges_loose=has_loose_edges)\n\n if has_normals:\n mesh.create_normals_split()\n mesh.normals_split_custom_set_from_vertices(vert_normals)\n mesh.use_auto_smooth = True\n\n\ndef points_edges_tris(mode, indices):\n points = None\n edges = None\n tris = None\n\n if mode == 0:\n # POINTS\n points = indices\n\n elif mode == 1:\n # LINES\n # 1 3\n # / /\n # 0 2\n edges = indices\n\n elif mode == 2:\n # LINE LOOP\n # 1---2\n # / \\\n # 0-------3\n # in: 0123\n # out: 01122330\n edges = np.empty(2 * len(indices), dtype=np.uint32)\n edges[[0, -1]] = indices[[0, 0]] # 0______0\n edges[1:-1] = np.repeat(indices[1:], 2) # 01122330\n\n elif mode == 3:\n # LINE STRIP\n # 1---2\n # / \\\n # 0 3\n # in: 0123\n # out: 011223\n edges = np.empty(2 * len(indices) - 2, dtype=np.uint32)\n edges[[0, -1]] = indices[[0, -1]] # 0____3\n edges[1:-1] = np.repeat(indices[1:-1], 2) # 011223\n\n elif mode == 4:\n # TRIANGLES\n # 2 3\n # / \\ / \\\n # 0---1 4---5\n tris = indices\n\n elif mode == 5:\n # TRIANGLE STRIP\n # 0---2---4\n # \\ / \\ /\n # 1---3\n # TODO: numpyify\n def alternate(i, xs):\n even = i % 2 == 0\n return xs if even else (xs[0], xs[2], xs[1])\n tris = np.array([\n alternate(i, (indices[i], indices[i + 1], indices[i + 2]))\n for i in range(0, len(indices) - 2)\n ])\n tris = squish(tris)\n\n elif mode == 6:\n # TRIANGLE FAN\n # 3---2\n # / \\ / \\\n # 4---0---1\n # TODO: numpyify\n tris = np.array([\n (indices[0], indices[i], indices[i + 1])\n for i in range(1, len(indices) - 1)\n ])\n tris = squish(tris)\n\n else:\n raise Exception('primitive mode unimplemented: %d' % mode)\n\n return points, edges, tris\n\n\ndef squish(array):\n \"\"\"Squish nD array into 1D array (required by foreach_set).\"\"\"\n return array.reshape(array.size)\n\n\ndef colors_rgb_to_rgba(rgb):\n rgba = np.ones((len(rgb), 4), dtype=np.float32)\n rgba[:, :3] = rgb\n return rgba\n\n\ndef colors_linear_to_srgb(color):\n assert color.shape[1] == 3 # 
only change RGB, not A\n\n not_small = color >= 0.0031308\n small_result = np.where(color < 0.0, 0.0, color * 12.92)\n large_result = 1.055 * np.power(color, 1.0 / 2.4, where=not_small) - 0.055\n color[:] = np.where(not_small, large_result, small_result)\n\n\ndef uvs_gltf_to_blender(uvs):\n # u,v -> u,1-v\n uvs[:, 1] *= -1\n uvs[:, 1] += 1\n\n\ndef skin_into_bind_pose(gltf, skin_idx, vert_joints, vert_weights, locs, vert_normals):\n # Skin each position/normal using the bind pose.\n # Skinning equation: vert' = sum_(j,w) w * joint_mat[j] * vert\n # where the sum is over all (joint,weight) pairs.\n\n # Calculate joint matrices\n joint_mats = []\n pyskin = gltf.data.skins[skin_idx]\n if pyskin.inverse_bind_matrices is not None:\n inv_binds = BinaryData.get_data_from_accessor(gltf, pyskin.inverse_bind_matrices)\n inv_binds = [gltf.matrix_gltf_to_blender(m) for m in inv_binds]\n else:\n inv_binds = [Matrix.Identity(4) for i in range(len(pyskin.joints))]\n bind_mats = [gltf.vnodes[joint].bind_arma_mat for joint in pyskin.joints]\n joint_mats = [bind_mat @ inv_bind for bind_mat, inv_bind in zip(bind_mats, inv_binds)]\n\n # TODO: check if joint_mats are all (approximately) 1, and skip skinning\n\n joint_mats = np.array(joint_mats, dtype=np.float32)\n\n # Compute the skinning matrices for every vert\n num_verts = len(locs[0])\n skinning_mats = np.zeros((num_verts, 4, 4), dtype=np.float32)\n weight_sums = np.zeros(num_verts, dtype=np.float32)\n for js, ws in zip(vert_joints, vert_weights):\n for i in range(4):\n skinning_mats += ws[:, i].reshape(len(ws), 1, 1) * joint_mats[js[:, i]]\n weight_sums += ws[:, i]\n # Normalize weights to one; necessary for old files / quantized weights\n skinning_mats /= weight_sums.reshape(num_verts, 1, 1)\n\n skinning_mats_3x3 = skinning_mats[:, :3, :3]\n skinning_trans = skinning_mats[:, :3, 3]\n\n for vs in locs:\n vs[:] = mul_mats_vecs(skinning_mats_3x3, vs)\n vs[:] += skinning_trans\n\n if len(vert_normals) != 0:\n vert_normals[:] = mul_mats_vecs(skinning_mats_3x3, vert_normals)\n # Don't translate normals!\n normalize_vecs(vert_normals)\n\n\ndef mul_mats_vecs(mats, vecs):\n \"\"\"Given [m1,m2,...] and [v1,v2,...], returns [m1@v1,m2@v2,...]. 
3D only.\"\"\"\n return np.matmul(mats, vecs.reshape(len(vecs), 3, 1)).reshape(len(vecs), 3)\n\n\ndef normalize_vecs(vectors):\n norms = np.linalg.norm(vectors, axis=1, keepdims=True)\n np.divide(vectors, norms, out=vectors, where=norms != 0)\n\n\ndef set_poly_smoothing(gltf, pymesh, mesh, vert_normals, loop_vidxs):\n num_polys = len(mesh.polygons)\n\n # Try to guess which polys should be flat based on the fact that all the\n # loop normals for a flat poly are = the poly's normal.\n\n poly_smooths = np.empty(num_polys, dtype=np.bool)\n\n poly_normals = np.empty(num_polys * 3, dtype=np.float32)\n mesh.polygons.foreach_get('normal', poly_normals)\n poly_normals = poly_normals.reshape(num_polys, 3)\n\n f = 0\n for prim in pymesh.primitives:\n if 'NORMAL' not in prim.attributes:\n # Primitives with no NORMALs should use flat shading\n poly_smooths[f:f + prim.num_faces].fill(False)\n f += prim.num_faces\n continue\n\n # Check the normals at the three corners against the poly normal.\n # Two normals are equal iff their dot product is 1.\n\n poly_ns = poly_normals[f:f + prim.num_faces]\n\n # Dot product against the first vertex normal in the tri\n vert_ns = vert_normals[loop_vidxs[3*f:3*(f + prim.num_faces):3]]\n dot_prods = np.sum(vert_ns * poly_ns, axis=1) # dot product\n smooth = (dot_prods <= 0.9999999)\n\n # Same for the second vertex, etc.\n vert_ns = vert_normals[loop_vidxs[3*f+1:3*(f + prim.num_faces):3]]\n dot_prods = np.sum(vert_ns * poly_ns, axis=1)\n np.logical_or(smooth, dot_prods <= 0.9999999, out=smooth)\n\n vert_ns = vert_normals[loop_vidxs[3*f+2:3*(f + prim.num_faces):3]]\n dot_prods = np.sum(vert_ns * poly_ns, axis=1)\n np.logical_or(smooth, dot_prods <= 0.9999999, out=smooth)\n\n poly_smooths[f:f + prim.num_faces] = smooth\n\n f += prim.num_faces\n\n mesh.polygons.foreach_set('use_smooth', poly_smooths)\n\n\ndef merge_duplicate_verts(vert_locs, vert_normals, vert_joints, vert_weights, sk_vert_locs, loop_vidxs, edge_vidxs):\n # This function attempts to invert the splitting done when exporting to\n # glTF. Welds together verts with the same per-vert data (but possibly\n # different per-loop data).\n #\n # Ideally normals would be treated as per-loop data, but that has problems,\n # so we currently treat the normal as per-vert.\n #\n # Strategy is simple: put all the per-vert data into an array of structs\n # (\"dots\"), dedupe with np.unique, then take all the data back out.\n\n # Very often two verts that \"morally\" should be merged will have normals\n # with very small differences. 
Round off the normals to smooth this over.\n if len(vert_normals) != 0:\n vert_normals *= 50000\n vert_normals[:] = np.trunc(vert_normals)\n vert_normals *= (1/50000)\n\n dot_fields = [('x', np.float32), ('y', np.float32), ('z', np.float32)]\n if len(vert_normals) != 0:\n dot_fields += [('nx', np.float32), ('ny', np.float32), ('nz', np.float32)]\n for i, _ in enumerate(vert_joints):\n dot_fields += [\n ('joint%dx' % i, np.uint32), ('joint%dy' % i, np.uint32),\n ('joint%dz' % i, np.uint32), ('joint%dw' % i, np.uint32),\n ('weight%dx' % i, np.float32), ('weight%dy' % i, np.float32),\n ('weight%dz' % i, np.float32), ('weight%dw' % i, np.float32),\n ]\n for i, _ in enumerate(sk_vert_locs):\n dot_fields += [\n ('sk%dx' % i, np.float32), ('sk%dy' % i, np.float32), ('sk%dz' % i, np.float32),\n ]\n dots = np.empty(len(vert_locs), dtype=np.dtype(dot_fields))\n\n dots['x'] = vert_locs[:, 0]\n dots['y'] = vert_locs[:, 1]\n dots['z'] = vert_locs[:, 2]\n if len(vert_normals) != 0:\n dots['nx'] = vert_normals[:, 0]\n dots['ny'] = vert_normals[:, 1]\n dots['nz'] = vert_normals[:, 2]\n for i, (joints, weights) in enumerate(zip(vert_joints, vert_weights)):\n dots['joint%dx' % i] = joints[:, 0]\n dots['joint%dy' % i] = joints[:, 1]\n dots['joint%dz' % i] = joints[:, 2]\n dots['joint%dw' % i] = joints[:, 3]\n dots['weight%dx' % i] = weights[:, 0]\n dots['weight%dy' % i] = weights[:, 1]\n dots['weight%dz' % i] = weights[:, 2]\n dots['weight%dw' % i] = weights[:, 3]\n for i, locs in enumerate(sk_vert_locs):\n dots['sk%dx' % i] = locs[:, 0]\n dots['sk%dy' % i] = locs[:, 1]\n dots['sk%dz' % i] = locs[:, 2]\n\n unique_dots, inv_indices = np.unique(dots, return_inverse=True)\n\n loop_vidxs = inv_indices[loop_vidxs]\n edge_vidxs = inv_indices[edge_vidxs]\n\n vert_locs = np.empty((len(unique_dots), 3), dtype=np.float32)\n vert_locs[:, 0] = unique_dots['x']\n vert_locs[:, 1] = unique_dots['y']\n vert_locs[:, 2] = unique_dots['z']\n if len(vert_normals) != 0:\n vert_normals = np.empty((len(unique_dots), 3), dtype=np.float32)\n vert_normals[:, 0] = unique_dots['nx']\n vert_normals[:, 1] = unique_dots['ny']\n vert_normals[:, 2] = unique_dots['nz']\n for i in range(len(vert_joints)):\n vert_joints[i] = np.empty((len(unique_dots), 4), dtype=np.uint32)\n vert_joints[i][:, 0] = unique_dots['joint%dx' % i]\n vert_joints[i][:, 1] = unique_dots['joint%dy' % i]\n vert_joints[i][:, 2] = unique_dots['joint%dz' % i]\n vert_joints[i][:, 3] = unique_dots['joint%dw' % i]\n vert_weights[i] = np.empty((len(unique_dots), 4), dtype=np.float32)\n vert_weights[i][:, 0] = unique_dots['weight%dx' % i]\n vert_weights[i][:, 1] = unique_dots['weight%dy' % i]\n vert_weights[i][:, 2] = unique_dots['weight%dz' % i]\n vert_weights[i][:, 3] = unique_dots['weight%dw' % i]\n for i in range(len(sk_vert_locs)):\n sk_vert_locs[i] = np.empty((len(unique_dots), 3), dtype=np.float32)\n sk_vert_locs[i][:, 0] = unique_dots['sk%dx' % i]\n sk_vert_locs[i][:, 1] = unique_dots['sk%dy' % i]\n sk_vert_locs[i][:, 2] = unique_dots['sk%dz' % i]\n\n return vert_locs, vert_normals, vert_joints, vert_weights, sk_vert_locs, loop_vidxs, edge_vidxs\n"
]
| [
[
"numpy.trunc",
"numpy.unique",
"numpy.power",
"numpy.arange",
"numpy.divide",
"numpy.linalg.norm",
"numpy.dtype",
"numpy.full",
"numpy.concatenate",
"numpy.logical_or",
"numpy.zeros",
"numpy.repeat",
"numpy.array",
"numpy.where",
"numpy.sum",
"numpy.empty"
]
]
|
LucasFDutra/engenharia-eletrica | [
"33e21910aea4b2a839dd374ab493fb89a80edfb8"
]
| [
"Algoritmo Multi-Agente Aplicado a Entrega e Coleta de Objetos/traveling_Q.py"
]
| [
"import numpy as np\n\ndef traveling_Q(Q_, sequences, dim_env, n_repositories, n_agents):\n Q = Q_.copy()\n states_x = np.arange(n_agents, dtype = int)\n states_y = np.zeros(n_agents, dtype = int)\n\n steps = [states_x]\n\n for choice_order in sequences:\n for i in choice_order:\n i = int(i)\n y_index = np.where(Q[states_x[i]] >= np.max(Q[states_x[i]]))[0]\n if y_index[0] == 0:\n y_index = 0\n elif y_index.shape[0] > 1:\n y_index = int(np.random.choice(y_index, 1))\n else:\n y_index = int(y_index)\n\n states_y[i] = y_index\n\n if states_y[i] >= (dim_env-n_repositories):\n Q[states_x[i], states_y[i]] = 0\n else:\n Q[:,states_y[i]] = 0\n\n states_x = states_y.copy()\n steps.append(states_x)\n states_y = np.zeros(n_agents, dtype=int)\n return steps\n"
]
| [
[
"numpy.arange",
"numpy.max",
"numpy.zeros",
"numpy.random.choice"
]
]
|
Irvinfaith/numpy_neural_network | [
"46c86884611d0174e6ab96eb70d1f4ebec8caafb"
]
| [
"core/optimizer.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2020/12/22 13:04\n\n@author: Irvinfaith\n\n@email: [email protected]\n\"\"\"\nimport numpy as np\n\n\nclass Optimizer:\n def __init__(self):\n pass\n\n def update_target(self, *args, **kwargs):\n pass\n\n\nclass SGD(Optimizer):\n def __init__(self, alpha=0.01, beta=None):\n \"\"\"\n Args:\n alpha: 学习率\n beta: 动量加速率\n \"\"\"\n self.alpha = alpha\n self.beta = beta\n super().__init__()\n\n @property\n def name(self):\n return \"SGD\"\n\n def calc_momentum(self, gradient, last_momentum):\n \"\"\"计算动量,根据上一轮的梯度更新,若梯度方向没改变,梯度持续增加,加快下降;\n 若梯度方向改变,则会加上一个相反符号的梯度,即会减少梯度下降的速度,减轻震荡。\n\n Args:\n gradient: 上一层传递的梯度\n last_momentum: 上一轮的动量\n\n Returns:\n\n \"\"\"\n # http://www.cs.toronto.edu/~fritz/absps/momentum.pdf (1)\n # return self.beta * last_momentum + self.alpha * gradient\n return self.beta * last_momentum + (1 - self.beta) * gradient\n\n def update_target(self, target, gradient, this_momentum=None):\n \"\"\"更新目标权重\n\n Args:\n target: 待更新的目标权重矩阵\n gradient: 上一层传递的梯度\n this_momentum: 本轮的动量\n\n Returns:\n np.array: 更新后的目标权重矩阵\n \"\"\"\n if self.beta:\n return target - self.alpha * this_momentum\n return target - self.alpha * gradient\n\n\nclass BGD(SGD):\n def __init__(self, alpha=0.01):\n super().__init__(alpha)\n\n @property\n def name(self):\n return \"BGD\"\n\n\nclass MBGD(SGD):\n def __init__(self, alpha=0.01):\n super().__init__(alpha)\n\n @property\n def name(self):\n return \"MBGD\"\n\n\nclass AdaGrad(Optimizer):\n def __init__(self, alpha=0.01, epsilon=1e-8):\n \"\"\"\n\n Args:\n alpha: 学习率\n epsilon: 模糊因子,防止除数为零\n \"\"\"\n self.alpha = alpha\n self.epsilon = epsilon\n super().__init__()\n\n @property\n def name(self):\n return \"AdaGrad\"\n\n def update_target(self, target, gradient, total_squared_gradient_sum):\n \"\"\"更新目标权重\n\n Args:\n target: 待更新的目标权重矩阵\n gradient: 上一层传递的梯度\n total_squared_gradient_sum: 累积的历史梯度平方和\n\n Returns:\n np.array: 更新后的目标权重矩阵\n \"\"\"\n # Optimizer: 计算累积梯度平方和 -> AdaGrad\n ada_grad = gradient / (np.sqrt(total_squared_gradient_sum) + self.epsilon)\n return target - self.alpha * ada_grad\n\n\nclass AdaDelta(Optimizer):\n def __init__(self, alpha=1, beta=0.95, epsilon=1e-8):\n \"\"\"\n\n Args:\n alpha: 学习率,论文中是不需要学习率的,这里还是保留,默认为1\n beta: 累积梯度平方衰减率\n epsilon: 模糊因子,防止除数为零\n \"\"\"\n self.alpha = alpha\n self.beta = beta\n self.epsilon = epsilon\n super().__init__()\n\n @property\n def name(self):\n return \"AdaDelta\"\n\n def calc_ewa_squared_value(self, this_value, last_ewa_squared_value):\n # https://arxiv.org/pdf/1212.5701.pdf (8)\n return self.beta * last_ewa_squared_value + (1 - self.beta) * np.power(this_value, 2)\n\n def calc_rms_value(self, ewa_squared_value):\n # https://arxiv.org/pdf/1212.5701.pdf (9)\n return np.sqrt(ewa_squared_value + self.epsilon)\n\n def calc_delta_x(self, last_rms_delta_x, rms_gradient, gradient):\n # https://arxiv.org/pdf/1212.5701.pdf (10)\n return - last_rms_delta_x / rms_gradient * gradient\n\n def update_target(self, target, gradient, last_ewa_squared_gradient, last_ewa_squared_delta_x):\n \"\"\"更新目标权重\n\n Args:\n target: 待更新的目标权重矩阵\n gradient: 上一层传递的梯度\n last_ewa_squared_gradient: 根据上一轮的移动平均梯度平方和计算的自适应的梯度\n last_ewa_squared_delta_x: 根据上一轮的移动平均自适应率平方和计算的自适应率\n\n Returns:\n np.array: 更新后的目标权重矩阵\n \"\"\"\n # Optimizer: 计算移动累积梯度平方和\n ewa_squared_gradient = self.calc_ewa_squared_value(gradient, last_ewa_squared_gradient)\n rms_gradient = self.calc_rms_value(ewa_squared_gradient)\n last_rms_delta_x = self.calc_rms_value(last_ewa_squared_delta_x)\n delta_x = 
self.calc_delta_x(last_rms_delta_x, rms_gradient, gradient)\n return target + self.alpha * delta_x\n\n\nclass RMSProp(AdaDelta):\n # http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf\n def __init__(self, alpha=0.001, beta=0.9, epsilon=1e-8):\n \"\"\"\n\n Args:\n alpha: 学习率\n beta: 梯度平方移动平均的衰减率\n epsilon: 模糊因子,防止除数为零\n \"\"\"\n super().__init__()\n self.alpha = alpha\n self.beta = beta\n self.epsilon = epsilon\n\n @property\n def name(self):\n return \"RMSProp\"\n\n def update_target(self, target, gradient, last_ewa_squared_gradient):\n \"\"\"更新目标权重\n\n Args:\n target: 待更新的目标权重矩阵\n gradient: 上一层传递的梯度\n last_ewa_squared_gradient: 上一轮的指数平均梯度平方和\n\n Returns:\n np.array: 更新后的目标权重矩阵\n \"\"\"\n # gamma: 梯度平方移动平均的衰减率; epsilon: 模糊因子,防止除数为零,通常为很小的数\n ewa_squared_gradient = self.calc_ewa_squared_value(gradient, last_ewa_squared_gradient)\n delta = self.alpha * gradient / (np.sqrt(ewa_squared_gradient) + self.epsilon)\n return target - delta\n\n\nclass Adam(SGD, RMSProp):\n def __init__(self, alpha=0.001, beta_1=0.9, beta_2=0.99, epsilon=1e-8):\n self.alpha = alpha\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n SGD.__init__(self, alpha, beta_1)\n RMSProp.__init__(self, alpha, beta_2, epsilon)\n\n @property\n def name(self):\n return \"Adam\"\n\n def calc_correction_momentum(self, gradient, last_momentum, round_num):\n \"\"\"\n 计算修正的动量,减少训练初期因为初始值为0所以会偏向至0的影响\n\n Args:\n gradient: 上一层的梯度\n last_momentum: 上一轮的动量\n round_num: 迭代训练的轮次数\n\n Returns:\n\n \"\"\"\n momentum = self.calc_momentum(gradient, last_momentum)\n return momentum / (1 - np.power(self.beta_1, round_num))\n\n def calc_correction_rprop(self, gradient, last_rprop, round_num):\n \"\"\"\n\n Args:\n gradient: 本轮的梯度\n last_rprop: 上一轮的指数移动平均梯度平方和\n round_num: 迭代训练的轮次数\n\n Returns:\n\n \"\"\"\n rprop = self.calc_ewa_squared_value(gradient, last_rprop)\n return rprop / (1 - np.power(self.beta_2, round_num))\n\n def update_target(self, target, gradient, last_momentum, last_rprop, round_num):\n # https://arxiv.org/pdf/1412.6980v8.pdf\n correction_momentum = self.calc_correction_momentum(gradient, last_momentum, round_num)\n correction_rprop = self.calc_correction_rprop(gradient, last_rprop, round_num)\n return target - self.alpha * correction_momentum / (np.sqrt(correction_rprop) + self.epsilon)\n\n\nclass Adamax(Adam):\n def __init__(self, alpha=0.001, beta_1=0.9, beta_2=0.99, epsilon=1e-8):\n self.alpha = alpha\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n super().__init__()\n\n @property\n def name(self):\n return \"Adamax\"\n\n def update_target(self, target, gradient, last_momentum, last_ewa_squared_gradient, round_num):\n \"\"\"取轮次中最大的梯度平方移动平均数\n\n Args:\n target:\n gradient:\n last_momentum: 上一轮的动量\n last_ewa_squared_gradient: 上一轮的指数移动平均梯度平方和\n round_num: 迭代训练的轮次数\n\n Returns:\n\n \"\"\"\n # https://arxiv.org/pdf/1412.6980v8.pdf # 7.1\n correction_momentum = self.calc_correction_momentum(gradient, last_momentum, round_num)\n max_ewa_squared_gradient = max(np.linalg.norm(last_ewa_squared_gradient), np.linalg.norm(gradient))\n return target - self.alpha / (1 - np.power(self.beta_2, round_num)) * correction_momentum / (np.sqrt(max_ewa_squared_gradient) + self.epsilon)\n"
]
| [
[
"numpy.linalg.norm",
"numpy.sqrt",
"numpy.power"
]
]
|
reillysiemens/resolving-a-dns-issue | [
"0772b173f63836a8281887389dadf718a400dd54"
]
| [
"chart.py"
]
| [
"#!/usr/bin/env python3.6\n\nimport sys\nfrom typing import Iterator\n\nimport pandas as pd\nfrom pygal import Line\nfrom pygal.style import Style\n\n\nclass GruvboxStyle(Style):\n \"\"\" A gruvbox-inspired Pygal style. \"\"\"\n\n background = '#282828'\n plot_background = '#1d2021'\n foreground = '#fdf4c1'\n foreground_strong = '#fdf4c1'\n foreground_subtle = '#fdf4c1'\n colors = ('#8ec07c', '#fa5c4b')\n\n\ndef dilute_datetimes(datetimes: pd.Series, factor: int) -> Iterator[str]:\n \"\"\" Lots of datetimes overlap and become unreadable, make some space. \"\"\"\n dilute = lambda t: t[1] if t[0] % factor == 0 else ''\n yield from map(dilute, enumerate(datetimes))\n\n\ndef generate_chart(data: pd.DataFrame) -> Line:\n line_chart = Line(\n js=(), # The tooltips are really nice, but I don't want any JS.\n style=GruvboxStyle,\n x_label_rotation=30\n )\n\n # Water those datetimes down so they don't overlap and we can read them!\n datetimes = data['Datetime']\n dilution_factor = datetimes.shape[0] // 10\n datetimes = dilute_datetimes(datetimes, factor=dilution_factor)\n\n line_chart.title = 'HTTP GET by IP vs. HTTP GET by Hostname'\n line_chart.y_title = 'Seconds'\n line_chart.x_labels = datetimes\n line_chart.add(\n title='By IP',\n values=data['Seconds for HTTP GET by IP']\n )\n line_chart.add(\n title='By Hostname',\n values=data['Seconds for HTTP GET by Hostname']\n )\n return line_chart\n\n\ndef main(argv: list) -> None:\n data = pd.read_csv(sys.argv[1])\n output = sys.argv[2]\n chart = generate_chart(data)\n chart.render_to_file(output)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n"
]
| [
[
"pandas.read_csv"
]
]
|
JesterOrNot/python_calc | [
"3e939046930eca2204f8354d9f221f6127d2db91"
]
| [
"algeb/graph/graphQuadMod.py"
]
| [
"import numpy as np\nimport numpy\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport matplotlib.axis\nfrom sympy import *\n\n\ndef standard():\n a = input(\"What is a?: \")\n b = input(\"What is b?: \")\n c = input(\"What is c?: \")\n formula = '{}*x**2 + {}*x + {}'.format(a, b, c)\n\n def graph(formula):\n x = np.linspace(-10, 10)\n y = eval(formula)\n plt.plot(x, y)\n plt.axhline(y=0, color='k')\n plt.axvline(x=0, color='k')\n plt.xlim((-10, 10))\n plt.ylim((-10, 10))\n plt.grid(axis='both', which='both')\n plt.show()\n graph(formula)\n\n\ndef vertex():\n a = input(\"What is a?: \")\n h = input(\"What is h?: \")\n k = input(\"What is k?: \")\n formula = '{}*(x - {})**2 + {}'.format(a, h, k)\n def graph(formula):\n x = np.linspace(-10, 10)\n y = eval(formula)\n plt.plot(x, y)\n plt.axhline(y=0, color='k')\n plt.axvline(x=0, color='k')\n plt.xlim((-5, 5))\n plt.ylim((-5, 5))\n plt.grid(axis='both', which='both')\n plt.show()\n graph(formula)\n\n\ndef menu():\n print(\"What is the equasion type vertex form or standard form\")\n userInt = input(\"Which one do you want?: \").lower()\n if userInt in 'vertex':\n \tvertex()\n elif userInt in 'standard':\n \tstandard()\n else:\n print(\"That is not an option.\")\nmenu()"
]
| [
[
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.axvline",
"numpy.linspace",
"matplotlib.use",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.show"
]
]
|
jessiesrr/VTdyn | [
"6f71ef94525d95221f5bd5e5290f4df10648cd18"
]
| [
"run_files/ddalpharun/multirun_mutant.py"
]
| [
"from multiprocessing import Process,Pool,Lock #parallel processing\nimport multiprocessing as mp\nfrom itertools import repeat\nfrom functools import partial\nimport sys\n\nimport os\nimport numpy as np\nimport libs.run_lib as lib\nimport libs.data as data\nimport libs.plot as vplt\n\nrand = np.random.RandomState()\n\nruns_per_batch = 4\nbatches = 2\nl = 4\ntimend = 10.\ntimestep = 1.0\n\nwidth, height = float(l)*1.5, float(l)*1.5*np.sqrt(3)/2\n\nfolder = 'mutants'\n\ninfo = \"\"\"\nsize-dependent selection with neutral mutation\nmaxtime = %.0f\ntimestep = %.0f\nN0 = %d\nbox height = %.2f\nbox width = %.2f\n\n\"\"\" %(timend,timestep,l*l,width,height)\n\nif not os.path.exists(folder): # if the folder doesn't exist create it\n os.makedirs(folder)\n\nwith open(folder+'/info.txt',\"w\",0) as infofile:\n infofile.write(info)\n\ndef run_parallel(update_file,i):\n alpha = 0.25\n rand=np.random.RandomState()\n history = lib.run_simulation_size_dependent_with_mutants(alpha,l,timestep,timend,rand)\n if 0 not in history[-1].properties['mutant']:\n fix = 1 \n # data.save_N_cell(history,folder+'/fixed',i)\n # data.save_N_mutant(history,folder+'/fixed',i)\n import libs.plot\n save_mpg_torus(history, name, index=None,key = \"mutants\", timestep=0.5)\n elif 1 not in history[-1].properties['mutant']:\n fix = 0\n else: \n fix = -1\n # data.save_N_cell(history,folder+'/incomplete',i)\n # data.save_N_mutant(history,folder+'/incomplete',i)\n update_file.write('%d\\n'%i)\n return fix\n\nfix_results = open(folder+'/fixation.txt','w',0)\ncpunum=mp.cpu_count()\npool = Pool(processes=cpunum,maxtasksperchild=1000) # creating a pool with processors equal to the number of processors\nfor i in range(batches):\n text = '\\rbatch %d of %d'%(i+1,batches)\n sys.stdout.write(text)\n sys.stdout.flush()\n update_file = open(folder+'/current_batch','w',0)\n fixation = np.array(pool.imap(partial(run_parallel,update_file),range(i*runs_per_batch,(i+1)*runs_per_batch))) # mapping of all the calls necessary into the calling function\n fixed = len(np.where(fixation==1)[0])\n nofixed = len(np.where(fixation==0)[0])\n incomplete = len(np.where(fixation==-1)[0])\n fix_results.write('%d %d %d\\n'%(fixed,nofixed,incomplete) )\n update_file.close()\npool.join()\npool.close()"
]
| [
[
"numpy.random.RandomState",
"numpy.where",
"numpy.sqrt"
]
]
|
aliostad/hexagon-rl | [
"cd4180bda26f92c0bde11a08aa13c825cd151a10"
]
| [
"dqn_centaur_ai_gym.py"
]
| [
"from keras.layers import Flatten, Conv2D, Dense, Activation\nfrom keras.optimizers import Adam\nfrom keras import Sequential\nfrom rl.agents import DQNAgent, CEMAgent\nfrom rl.memory import SequentialMemory, EpisodeParameterMemory\nfrom rl.policy import EpsGreedyQPolicy\n\n\nfrom hexagon_agent import *\nfrom random import shuffle\nfrom multi_agent import *\nimport sys\nimport hexagon_ui_api\nimport os\nimport numpy as np\nfrom ai_gym import *\n\n# ______________________________________________________________________________________________________________________________\nclass EnvDef:\n centaur_name = 'centaur'\n game_name = '1'\n HASH_POOL = 10000\n NODE_FEATURE_COUNT = 5\n DECISION_ACTION_SPACE = 2\n SHORT_MEMORY_SIZE = 1\n MAX_ROUND = 2000\n CELL_FEATURE = 1\n MAX_GRID_LENGTH = 5\n SPATIAL_INPUT = (MAX_GRID_LENGTH, MAX_GRID_LENGTH)\n SPATIAL_OUTPUT = (MAX_GRID_LENGTH * MAX_GRID_LENGTH, )\n EPISODE_REWARD = 1000\n MOVE_REWARD_MULTIPLIER = 10\n DONT_OWN_MOVE_REWARD = -5\n CANT_ATTACK_MOVE_REWARD = -3\n GAME_VERBOSE = False\n RADIUS = 3\n# __________________________________________________________________________________________________________________________\n\nclass NoneZeroEpsGreedyQPolicy(EpsGreedyQPolicy):\n \"\"\"Implement the epsilon greedy policy\n\n Eps Greedy policy either:\n\n - takes a random action with probability epsilon from Non-Zero Q-values\n - takes current best action with prob (1 - epsilon)\n \"\"\"\n\n def __init__(self, eps=.1):\n super(EpsGreedyQPolicy, self).__init__()\n self.eps = eps\n\n def select_action(self, q_values):\n \"\"\"Return the selected action\n\n # Arguments\n q_values (np.ndarray): List of the estimations of Q for each action\n\n # Returns\n Selection action\n \"\"\"\n assert q_values.ndim == 1\n nb_actions = q_values.shape[0]\n if np.random.uniform() < self.eps:\n copy_q_values = np.copy(q_values)\n idx = np.argmax(q_values)\n copy_q_values[idx] = 0\n for i in range(0, nb_actions):\n val = copy_q_values[i]\n copy_q_values[i] = -1e8 if val == 0 else val * np.random.uniform()\n action = np.argmax(copy_q_values)\n else:\n action = np.argmax(q_values)\n return action\n\n def get_config(self):\n \"\"\"Return configurations of EpsGreedyPolicy\n\n # Returns\n Dict of config\n \"\"\"\n config = super(EpsGreedyQPolicy, self).get_config()\n config['eps'] = self.eps\n return config\n\n\n# __________________________________________________________________________________________________________________________\n\nclass MaskableDQNAgent(DQNAgent):\n\n def __init__(self, model, policy=None, test_policy=None, enable_double_dqn=True, enable_dueling_network=False,\n dueling_type='avg', mask_processor=None, *args, **kwargs):\n DQNAgent.__init__(self, model, policy=policy, test_policy=test_policy,\n enable_double_dqn=enable_double_dqn, enable_dueling_network=enable_dueling_network,\n dueling_type=dueling_type, *args, **kwargs)\n self.mask_processor = mask_processor\n\n def forward(self, observation):\n # Select an action.\n state = self.memory.get_recent_state(observation)\n q_values = self.compute_q_values(state)\n if self.mask_processor is not None:\n q_values = self.mask_processor.mask(q_values)\n if self.training:\n action = self.policy.select_action(q_values=q_values)\n else:\n action = self.test_policy.select_action(q_values=q_values)\n\n # Book-keeping.\n self.recent_observation = observation\n self.recent_action = action\n\n return action\n\n# 
__________________________________________________________________________________________________________________________\n\n\n\nclass DecisionModel:\n def __init__(self, modelName=None):\n \"\"\"\n\n :type theMethod: str\n \"\"\"\n self.modelName = modelName if modelName is not None else 'Decision_model_params.h5f' + str(r.uniform(0, 10000))\n model = Sequential()\n model.add(Flatten(input_shape=(1,) + (EnvDef.HASH_POOL * EnvDef.NODE_FEATURE_COUNT * EnvDef.SHORT_MEMORY_SIZE,)))\n model.add(Dense(32, activation=\"relu\"))\n model.add(Dense(16, activation=\"relu\"))\n model.add(Dense(EnvDef.DECISION_ACTION_SPACE))\n model.add(Activation('softmax'))\n print(model.summary())\n model.compile(loss=\"categorical_crossentropy\",\n optimizer='adadelta', metrics=['accuracy'])\n self.model = model\n\n\n# ______________________________________________________________________________________________________________________________\n\nclass SimplestAttackModel:\n def __init__(self, modelName=None):\n \"\"\"\n\n :type theMethod: str\n \"\"\"\n self.modelName = modelName if modelName is not None else 'Attack_model_params.h5f' + str(r.uniform(0, 10000000))\n\n model = Sequential()\n model.add(Flatten(\n input_shape=EnvDef.SPATIAL_INPUT + (1, ), name='INPUT_ATTACK'))\n model.add(Dense(256, activation='relu'))\n model.add(Dense(128, activation='relu'))\n model.add(Dense(EnvDef.SPATIAL_OUTPUT[0], activation='softmax'))\n\n self.model = model\n\n\nclass SimpleAttackModel:\n def __init__(self, modelName=None):\n \"\"\"\n\n :type theMethod: str\n \"\"\"\n self.modelName = modelName if modelName is not None else 'Attack_model_params.h5f' + str(r.uniform(0, 10000000))\n\n model = Sequential()\n model.add(Conv2D(64, (3, 3), padding='same', activation='relu',\n input_shape=EnvDef.SPATIAL_INPUT + (1, ), name='INPUT_ATTACK'))\n model.add(Conv2D(16, (3, 3), padding='same', activation='relu'))\n model.add(Conv2D(1, (1, 1), padding='same', activation='tanh'))\n model.add(Flatten())\n model.add(Dense(EnvDef.SPATIAL_OUTPUT[0], activation='tanh'))\n\n self.model = model\n\n\nclass AttackModel:\n def __init__(self, modelName=None):\n \"\"\"\n\n :type theMethod: str\n \"\"\"\n self.modelName = modelName if modelName is not None else 'Attack_model_params.h5f' + str(r.uniform(0, 10000000))\n\n model = Sequential()\n model.add(Conv2D(128, (5, 5), padding='same', activation='relu',\n input_shape=EnvDef.SPATIAL_INPUT + (1, ), name='INPUT_ATTACK'))\n model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))\n model.add(Conv2D(16, (3, 3), padding='same', activation='relu'))\n model.add(Conv2D(4, (3, 3), padding='same', activation='relu'))\n model.add(Conv2D(1, (3, 3), padding='same', activation='tanh'))\n model.add(Flatten())\n model.add(Dense(EnvDef.SPATIAL_OUTPUT[0], activation='tanh'))\n\n self.model = model\n\n @staticmethod\n def prepare_x(X, batch=False):\n \"\"\"\n\n :type X: ndarray\n :type batch: bool\n :return:\n \"\"\"\n shape = EnvDef.SPATIAL_INPUT + (1, )\n if batch:\n shape = (X.shape[0], ) + shape\n return np.reshape(X, shape)\n\n @staticmethod\n def prepare_y(Y, batch=False):\n \"\"\"\n\n :type Y: ndarray\n :type batch: bool\n :return:\n \"\"\"\n shape = EnvDef.SPATIAL_OUTPUT + (1, )\n if batch:\n shape = (Y.shape[0], ) + shape\n return np.reshape(Y, shape)\n\n @staticmethod\n def process_y(Y):\n \"\"\"\n\n :type Y: ndarray\n :return:\n \"\"\"\n return np.reshape(Y, EnvDef.SPATIAL_OUTPUT)\n\n# 
______________________________________________________________________________________________________________________________\n\n\n\nif __name__ == '__main__':\n\n args = menu()\n env = HierarchicalCentaurEnv(opponent_randomness=args.randomness,\n centaur_boost_likelihood=args.centaur_boost_likelihood,\n boosting_off=args.boostingoff, attack_off=args.attackoff,\n game_verbose=EnvDef.GAME_VERBOSE, radius=EnvDef.RADIUS,\n move_shuffle=args.moveshuffle, move_handicap=args.handicap)\n np.random.seed(42)\n env.seed(42)\n\n prc = CentaurDecisionProcessor()\n dec_model = DecisionModel()\n attack_model = SimpleAttackModel('Attack_model_params.h5f')\n\n prc = MultiProcessor({AgentType.BoostDecision: prc, AgentType.Attack: CentaurAttackProcessor(EnvDef.SPATIAL_INPUT, random_action=args.randomaction)})\n memory = EpisodeParameterMemory(limit=1000, window_length=1)\n decision_agent = CEMAgent(model=dec_model.model, nb_actions=EnvDef.DECISION_ACTION_SPACE, memory=memory,\n batch_size=50, nb_steps_warmup=200, train_interval=50, elite_frac=0.05)\n\n decision_agent.compile()\n memory2 = SequentialMemory(limit=100000, window_length=1)\n policy = NoneZeroEpsGreedyQPolicy()\n attack_agent = MaskableDQNAgent(attack_model.model,\n policy=policy, batch_size=16,\n processor=prc.inner_processors[AgentType.Attack],\n nb_actions=EnvDef.SPATIAL_OUTPUT[0],\n memory=memory2, nb_steps_warmup=500,\n enable_dueling_network=True,\n mask_processor=prc.inner_processors[AgentType.Attack] if args.usemasking else None)\n\n\n agent = MultiAgent({AgentType.BoostDecision: decision_agent, AgentType.Attack: attack_agent}, processor=prc, save_frequency=0.05)\n agent.inner_agents[AgentType.Attack].compile(Adam(lr=0.001), metrics=['mean_squared_logarithmic_error'])\n\n if args.model_name is not None:\n agent.inner_agents[AgentType.Attack].load_weights(args.model_name)\n\n hexagon_ui_api.run_in_background()\n if len(sys.argv) == 1:\n print('Usage: python centaur_ai_gym.py (train|test)')\n elif sys.argv[1] == 'train':\n agent.fit(env, nb_steps=300 * 1000, visualize=False, verbose=2, interim_filenames={AgentType.Attack: attack_model.modelName})\n agent.save_weights({AgentType.BoostDecision: dec_model.modelName, AgentType.Attack: attack_model.modelName}, overwrite=True)\n elif sys.argv[1] == 'test':\n agent.test(env, nb_episodes=100)\n else:\n print('argument not recognised: ' + sys.argv[1])\n"
]
| [
[
"numpy.random.seed",
"numpy.reshape",
"numpy.copy",
"numpy.argmax",
"numpy.random.uniform"
]
]
|
bingqingchen/PROF | [
"be7f77f606d8c7d6505d4b2bad2d09760e9bafe9"
]
| [
"utils/network.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torch.utils.data as data\nimport numpy as np\n \n# Implement a vanilla MLP here\nclass MLP(nn.Module):\n def __init__(self, input_size, hiddens, output_size):\n super(MLP, self).__init__()\n self.n_layers = len(hiddens)\n self.layers = []\n tmp = [input_size] + hiddens\n \n for i in range(self.n_layers):\n self.layers.append(nn.Linear(tmp[i], tmp[i+1]))\n self.layers.append(nn.ReLU())\n # self.layers.append(nn.BatchNorm1d(tmp[i+1]))\n self.layers.append(nn.Linear(tmp[-1], output_size))\n self.layers = nn.ModuleList(self.layers)\n \n def forward(self,x):\n out = x\n for i, l in enumerate(self.layers):\n out = l(out)\n return out\n\n\nclass LSTM(nn.Module):\n def __init__(self, n_state, n_action, n_dist, lstm_hidden = 8, hiddens = [4], lstm_layer = 2, bi = False):\n super(LSTM, self).__init__()\n \n self.rnn = nn.LSTM(n_dist, lstm_hidden, lstm_layer, dropout = 0, bidirectional = bi)\n if bi:\n self.n_direction = 2\n else:\n self.n_direction = 1\n \n self.lstm_hidden = lstm_hidden\n self.lstm_layer = lstm_layer\n \n self.encoder1 = nn.Sequential(\n nn.Linear(n_state, 4),\n nn.ReLU(),\n #nn.BatchNorm1d(32),\n nn.Linear(4, lstm_hidden*self.n_direction*self.lstm_layer),\n nn.ReLU())\n \n self.encoder2 = nn.Sequential(\n nn.Linear(n_state, 4),\n nn.ReLU(),\n #nn.BatchNorm1d(32),\n nn.Linear(4, lstm_hidden * self.n_direction*self.lstm_layer),\n nn.ReLU())\n \n n_layers = len(hiddens) + 1\n tmp = [self.n_direction * lstm_hidden] + hiddens #+ [n_action]\n \n self.decoder = []\n for i in range(n_layers-1):\n self.decoder.append(nn.Linear(tmp[i], tmp[i+1]))\n self.decoder.append(nn.ReLU())\n self.decoder = nn.ModuleList(self.decoder)\n \n # mu and sigma2 are learned separately\n self.final_layer = nn.Linear(tmp[-1], n_action)\n self.final_layer_ = nn.Linear(tmp[-1], n_action)\n \n def forward(self, state, disturbance):\n # state: n x dim\n # disturbance: T x n x dist\n n = state.shape[0]\n T = disturbance.shape[0]\n \n h0 = self.encoder1(state).reshape(n, self.n_direction*self.lstm_layer, self.lstm_hidden).transpose(0, 1) # (layer x direction) x n x Dim.\n c0 = self.encoder2(state).reshape(n, self.n_direction*self.lstm_layer, self.lstm_hidden).transpose(0, 1)\n\n out, (hn, cn) = self.rnn(disturbance, (h0, c0)) # out: T x n x (lstm_hidden x n_direction)\n #print(\"line 176\")\n out = out.reshape(T * n, self.lstm_hidden * self.n_direction)\n for layer in self.decoder:\n out = layer(out)\n mu = self.final_layer(out).reshape(T, n, -1)\n sigma_sq = self.final_layer_(out).reshape(T, n, -1)\n # out: (T x n) x n_action\n return mu, sigma_sq\n\n'''\nclass Replay_Memory():\n def __init__(self, memory_size=288, burn_in=32):\n self.memory_size = memory_size\n self.burn_in = burn_in\n # the memory is as a list of transitions (S,A,R,S,D).\n self.storage = []\n\n def sample_batch(self, batch_size=32):\n # This function returns a batch of randomly sampled transitions - i.e. state, action, reward, next state, terminal flag tuples.\n # You will feed this to your model to train.\n rand_idx = np.random.choice(len(self.storage), batch_size)\n return [self.storage[i] for i in rand_idx]\n\n def append(self, transition):\n # appends transition to the memory.\n self.storage.append(transition)\n # only keeps the latest memory_size transitions\n if len(self.storage) > self.memory_size:\n self.storage = self.storage[-self.memory_size:]\n'''\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.ModuleList",
"torch.nn.ReLU",
"torch.nn.LSTM"
]
]
|
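A minimal usage sketch for the MLP class defined above (layer sizes are illustrative; the import path follows this record's file_path):

    import torch
    from utils.network import MLP

    model = MLP(input_size=10, hiddens=[32, 16], output_size=2)
    x = torch.randn(4, 10)      # batch of 4 feature vectors
    out = model(x)              # Linear/ReLU stack, then the final Linear
    assert out.shape == (4, 2)

Per its own shape comments, the LSTM class expects state of shape (n, n_state) and disturbance of shape (T, n, n_dist), and returns per-step mu and sigma_sq of shape (T, n, n_action).
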
AIIP-DEV/pycaret | [
"0e09cd065f5927f120d7c8a9356f95974bfaea01"
]
| [
"pycaret/internal/pipeline.py"
]
| [
"# Module: internal.pipeline\n# Author: Antoni Baum (Yard1) <[email protected]>\n# License: MIT\n\n# Provides a Pipeline supporting partial_fit (needed for tune warm start)\n# and copying over fit attributes from the final estimator, so that it can be plotted directly\n# and is considered fitted.\n\n# This pipeline is only to be used internally.\n\nfrom pycaret.internal.utils import get_all_object_vars_and_properties, is_fit_var\nimport imblearn.pipeline\nfrom sklearn.utils import _print_elapsed_time\nfrom sklearn.base import BaseEstimator, TransformerMixin, clone\nfrom sklearn.utils.metaestimators import if_delegate_has_method\nimport sklearn.pipeline\nfrom pycaret.internal.validation import is_fitted\n\n\nclass Pipeline(imblearn.pipeline.Pipeline):\n def __init__(self, steps, *, memory=None, verbose=False):\n super().__init__(steps, memory=memory, verbose=verbose)\n self._fit_vars = set()\n self._carry_over_final_estimator_fit_vars()\n\n @property\n def inverse_transform(self):\n \"\"\"Apply inverse transformations in reverse order.\n\n Parameters\n ----------\n Xt : array-like of shape (n_samples, n_transformed_features)\n Data samples, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features. Must fulfill\n input requirements of last step of pipeline's\n ``inverse_transform`` method.\n\n Returns\n -------\n Xt : array-like of shape (n_samples, n_features)\n \"\"\"\n return self._inverse_transform\n\n def _inverse_transform(self, X):\n Xt = X\n reverse_iter = reversed(list(self._iter()))\n for _, _, transform in reverse_iter:\n try:\n Xt = transform.inverse_transform(Xt)\n except:\n pass\n return Xt\n\n def _carry_over_final_estimator_fit_vars(self):\n self._clear_final_estimator_fit_vars()\n if hasattr(self._final_estimator, \"fit\"):\n for k, v in get_all_object_vars_and_properties(\n self._final_estimator\n ).items():\n if is_fit_var(k):\n try:\n setattr(self, k, v)\n self._fit_vars.add(k)\n except:\n pass\n\n def _clear_final_estimator_fit_vars(self, all: bool = False):\n vars_to_remove = []\n try:\n for var in self._fit_vars:\n if (\n all\n or var\n not in get_all_object_vars_and_properties(\n self._final_estimator\n ).items()\n ):\n vars_to_remove.append(var)\n for var in vars_to_remove:\n try:\n delattr(self, var)\n self._fit_vars.remove(var)\n except:\n pass\n except:\n pass\n\n def get_sklearn_pipeline(self) -> sklearn.pipeline.Pipeline:\n return sklearn.pipeline.Pipeline(self.steps)\n\n def replace_final_estimator(self, new_final_estimator, name: str = None):\n self._clear_final_estimator_fit_vars(all=True)\n if hasattr(self._final_estimator, \"fit\"):\n self.steps[-1] = (\n self.steps[-1][0] if not name else name,\n new_final_estimator,\n )\n else:\n self.steps.append(\n (name if name else \"actual_estimator\", new_final_estimator)\n )\n self._carry_over_final_estimator_fit_vars()\n\n def set_params(self, **kwargs):\n try:\n result = super().set_params(**kwargs)\n except:\n result = self._final_estimator.set_params(**kwargs)\n\n self._carry_over_final_estimator_fit_vars()\n return result\n\n def predict(self, X, **predict_params):\n result = super().predict(X, **predict_params)\n return self.inverse_transform(result)\n\n def fit(self, X, y=None, **fit_kwargs):\n result = super().fit(X, y=y, **fit_kwargs)\n\n self._carry_over_final_estimator_fit_vars()\n return result\n\n def fit_predict(self, X, y=None, **fit_params):\n result = super().fit_predict(X, y=y, **fit_params)\n\n self._carry_over_final_estimator_fit_vars()\n return 
self.inverse_transform(result)\n\n def fit_resample(self, X, y=None, **fit_params):\n result = super().fit_resample(X, y=y, **fit_params)\n\n self._carry_over_final_estimator_fit_vars()\n return result\n\n @if_delegate_has_method(delegate=\"_final_estimator\")\n def fit_transform(self, X, y=None, **fit_params):\n result = super().fit_transform(X, y=y, **fit_params)\n\n self._carry_over_final_estimator_fit_vars()\n return result\n\n @if_delegate_has_method(delegate=\"_final_estimator\")\n def partial_fit(self, X, y=None, **fit_params):\n \"\"\"Fit the model.\n\n Fit all the transforms/samplers one after the other and\n transform/sample the data, then fit the transformed/sampled\n data using the final estimator.\n\n Parameters\n ----------\n X : iterable\n Training data. Must fulfill input requirements of first step of the\n pipeline.\n\n y : iterable, default=None\n Training targets. Must fulfill label requirements for all steps of\n the pipeline.\n\n **fit_params : dict of str -> object\n Parameters passed to the ``fit`` method of each step, where\n each parameter name is prefixed such that parameter ``p`` for step\n ``s`` has key ``s__p``.\n\n Returns\n -------\n self : Pipeline\n This estimator.\n \"\"\"\n try:\n self.Xt_\n except:\n self.Xt_ = None\n self.yt_ = None\n if self.Xt_ is None or self.yt_ is None:\n Xt, yt, _ = self._fit(X, y)\n self.Xt_ = Xt\n self.yt_ = yt\n else:\n Xt = self.Xt_\n yt = self.yt_\n with _print_elapsed_time(\"Pipeline\", self._log_message(len(self.steps) - 1)):\n if self._final_estimator != \"passthrough\":\n self._final_estimator.partial_fit(Xt, yt, **fit_params)\n self._carry_over_final_estimator_fit_vars()\n return self\n\n\nclass estimator_pipeline(object):\n \"\"\"\n Context which adds an estimator to pipeline.\n \"\"\"\n\n def __init__(self, pipeline: Pipeline, estimator):\n self.pipeline = clone(pipeline)\n self.estimator = estimator\n\n def __enter__(self):\n add_estimator_to_pipeline(self.pipeline, self.estimator)\n return self.pipeline\n\n def __exit__(self, type, value, traceback):\n return\n\n\ndef make_internal_pipeline(internal_pipeline_steps: list, memory=None) -> Pipeline:\n\n if not internal_pipeline_steps:\n memory = None\n internal_pipeline_steps = [(\"empty_step\", \"passthrough\")]\n\n return Pipeline(internal_pipeline_steps, memory=memory)\n\n\ndef add_estimator_to_pipeline(pipeline: Pipeline, estimator, name=\"actual_estimator\"):\n try:\n assert hasattr(pipeline._final_estimator, \"predict\")\n pipeline.replace_final_estimator(estimator, name=name)\n except:\n pipeline.steps.append((name, estimator))\n if hasattr(pipeline, \"_carry_over_final_estimator_fit_vars\"):\n pipeline._carry_over_final_estimator_fit_vars()\n\n\ndef merge_pipelines(pipeline_to_merge_to: Pipeline, pipeline_to_be_merged: Pipeline):\n pipeline_to_merge_to.steps.extend(pipeline_to_be_merged.steps)\n try:\n pipeline_to_merge_to._carry_over_final_estimator_fit_vars()\n except:\n pass\n\n\ndef get_pipeline_estimator_label(pipeline: Pipeline) -> str:\n try:\n model_step = pipeline.steps[-1]\n except:\n return \"\"\n\n return model_step[0]\n\n\ndef get_pipeline_fit_kwargs(pipeline: Pipeline, fit_kwargs: dict) -> dict:\n try:\n model_step = pipeline.steps[-1]\n except:\n return fit_kwargs\n\n if any(k.startswith(f\"{model_step[0]}__\") for k in fit_kwargs.keys()):\n return fit_kwargs\n\n return {f\"{model_step[0]}__{k}\": v for k, v in fit_kwargs.items()}\n"
]
| [
[
"sklearn.base.clone",
"sklearn.utils.metaestimators.if_delegate_has_method"
]
]
|
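A minimal sketch of assembling the internal Pipeline with the helpers above (transformer/estimator choices and data are illustrative):

    import numpy as np
    from sklearn.preprocessing import StandardScaler
    from sklearn.linear_model import LogisticRegression
    from pycaret.internal.pipeline import make_internal_pipeline, add_estimator_to_pipeline

    pipe = make_internal_pipeline([("scale", StandardScaler())])
    add_estimator_to_pipeline(pipe, LogisticRegression())  # appended as "actual_estimator"
    X = np.random.rand(20, 3)
    y = np.array([0, 1] * 10)
    pipe.fit(X, y)  # fit attributes of the final estimator are copied onto pipe
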
JelleAalbers/strax | [
"423225e9d07b62ac0c28a5e39d45dbf5e2f62b3d"
]
| [
"strax/processing/peak_building.py"
]
| [
"import numpy as np\nimport numba\n\nimport strax\nfrom strax import utils\nfrom strax.dtypes import peak_dtype, DIGITAL_SUM_WAVEFORM_CHANNEL\nexport, __all__ = strax.exporter()\n\n\n@export\[email protected]_result(dtype=peak_dtype(), chunk_size=int(1e4))\[email protected](nopython=True, nogil=True, cache=True)\ndef find_peaks(hits, adc_to_pe,\n gap_threshold=300,\n left_extension=20, right_extension=150,\n min_area=0,\n min_channels=2,\n max_duration=10_000_000,\n _result_buffer=None, result_dtype=None):\n \"\"\"Return peaks made from grouping hits together\n Assumes all hits have the same dt\n :param hits: Hit (or any interval) to group\n :param left_extension: Extend peaks by this many ns left\n :param right_extension: Extend peaks by this many ns right\n :param gap_threshold: No hits for this much ns means new peak\n :param min_area: Peaks with less than min_area are not returned\n :param min_channels: Peaks with less contributing channels are not returned\n :param max_duration: max duration time of merged peak in ns\n \"\"\"\n buffer = _result_buffer\n offset = 0\n if not len(hits):\n return\n assert hits[0]['dt'] > 0, \"Hit does not indicate sampling time\"\n assert min_channels >= 1, \"min_channels must be >= 1\"\n assert gap_threshold > left_extension + right_extension, \\\n \"gap_threshold must be larger than left + right extension\"\n assert max(hits['channel']) < len(adc_to_pe), \"more channels than to_pe\"\n # Magic number comes from\n # np.iinfo(p['dt'].dtype).max*np.shape(p['data'])[1] = 429496729400 ns\n # but numba does not like it\n assert left_extension+max_duration+right_extension < 429496729400, (\n \"Too large max duration causes integer overflow\")\n\n n_channels = len(buffer[0]['area_per_channel'])\n area_per_channel = np.zeros(n_channels, dtype=np.float32)\n\n in_peak = False\n peak_endtime = 0\n for hit_i, hit in enumerate(hits):\n p = buffer[offset]\n t0 = hit['time']\n dt = hit['dt']\n t1 = hit['time'] + dt * hit['length']\n\n if in_peak:\n # This hit continues an existing peak\n p['max_gap'] = max(p['max_gap'], t0 - peak_endtime)\n\n else:\n # This hit starts a new peak candidate\n area_per_channel *= 0\n peak_endtime = t1\n p['time'] = t0 - left_extension\n p['channel'] = DIGITAL_SUM_WAVEFORM_CHANNEL\n p['dt'] = dt\n # These are necessary as prev peak may have been rejected:\n p['n_hits'] = 0\n p['area'] = 0\n in_peak = True\n p['max_gap'] = 0\n\n # Add hit's properties to the current peak candidate\n p['n_hits'] += 1\n peak_endtime = max(peak_endtime, t1)\n hit_area_pe = hit['area'] * adc_to_pe[hit['channel']]\n area_per_channel[hit['channel']] += hit_area_pe\n p['area'] += hit_area_pe\n\n # Look at the next hit to see if THIS hit is the last in a peak.\n # If this is the final hit, it is last by definition.\n # Finally, make sure that if we include the next hit, we are not\n # exceeding the max_duration.\n is_last_hit = hit_i == len(hits) - 1\n peak_too_long = next_hit_is_far = False\n if not is_last_hit:\n # These can only be computed if there is a next hit\n next_hit = hits[hit_i + 1]\n next_hit_is_far = next_hit['time'] - peak_endtime >= gap_threshold\n # Peaks may not extend the max_duration\n peak_too_long = (next_hit['time'] - p['time']\n + next_hit['dt'] * next_hit['length']\n + left_extension\n + right_extension) > max_duration\n if is_last_hit or next_hit_is_far or peak_too_long:\n # Next hit (if it exists) will initialize the new peak candidate\n in_peak = False\n\n # Do not save if tests are not met. 
Next hit will erase temp info\n if p['area'] < min_area:\n continue\n n_channels = (area_per_channel != 0).sum()\n if n_channels < min_channels:\n continue\n\n # Compute final quantities\n p['length'] = (peak_endtime - p['time'] + right_extension) / dt\n if p['length'] <= 0:\n # This is most likely caused by a negative dt\n raise ValueError(\n \"Caught attempt to save nonpositive peak length?!\")\n p['area_per_channel'][:] = area_per_channel\n\n # Save the current peak, advance the buffer\n offset += 1\n if offset == len(buffer):\n yield offset\n offset = 0\n\n yield offset\n\n\n@export\[email protected](nopython=True, nogil=True, cache=True)\ndef store_downsampled_waveform(p, wv_buffer):\n \"\"\"Downsample the waveform in buffer and store it in p['data']\n\n :param p: Row of a strax peak array, or compatible type.\n Note that p['dt'] is adjusted to match the downsampling.\n :param wv_buffer: numpy array containing sum waveform during the peak\n at the input peak's sampling resolution p['dt'].\n\n The number of samples to take from wv_buffer, and thus the downsampling\n factor, is determined from p['dt'] and p['length'].\n\n When downsampling results in a fractional number of samples, the peak is\n shortened rather than extended. This causes data loss, but it is\n necessary to prevent overlaps between peaks.\n \"\"\"\n n_samples = len(p['data'])\n downsample_factor = int(np.ceil(p['length'] / n_samples))\n if downsample_factor > 1:\n # Compute peak length after downsampling.\n # Do not ceil: see docstring!\n p['length'] = int(np.floor(p['length'] / downsample_factor))\n p['data'][:p['length']] = \\\n wv_buffer[:p['length'] * downsample_factor] \\\n .reshape(-1, downsample_factor) \\\n .sum(axis=1)\n p['dt'] *= downsample_factor\n else:\n p['data'][:p['length']] = wv_buffer[:p['length']]\n\n\n@export\[email protected](nopython=True, nogil=True, cache=True)\ndef sum_waveform(peaks, records, adc_to_pe, select_peaks_indices=None):\n \"\"\"Compute sum waveforms for all peaks in peaks\n Will downsample sum waveforms if they do not fit in per-peak buffer\n\n :arg select_peaks_indices: Indices of the peaks for partial\n processing. In the form of np.array([np.int, np.int, ..]). 
If\n None (default), all the peaks are used for the summation.\n\n Assumes all peaks AND pulses have the same dt!\n \"\"\"\n if not len(records):\n return\n if not len(peaks):\n return\n if select_peaks_indices is None:\n select_peaks_indices = np.arange(len(peaks))\n if not len(select_peaks_indices):\n return\n dt = records[0]['dt']\n\n # Big buffer to hold even largest sum waveforms\n # Need a little more even for downsampling..\n swv_buffer = np.zeros(peaks['length'].max() * 2, dtype=np.float32)\n\n # Index of first record that could still contribute to subsequent peaks\n # Records before this do not need to be considered anymore\n left_r_i = 0\n\n n_channels = len(peaks[0]['area_per_channel'])\n area_per_channel = np.zeros(n_channels, dtype=np.float32)\n\n for peak_i in select_peaks_indices:\n p = peaks[peak_i]\n # Clear the relevant part of the swv buffer for use\n # (we clear a bit extra for use in downsampling)\n p_length = p['length']\n swv_buffer[:min(2 * p_length, len(swv_buffer))] = 0\n\n # Clear area and area per channel\n # (in case find_peaks already populated them)\n area_per_channel *= 0\n p['area'] = 0\n\n # Find first record that contributes to this peak\n for left_r_i in range(left_r_i, len(records)):\n r = records[left_r_i]\n # TODO: need test that fails if we replace < with <= here\n if p['time'] < r['time'] + r['length'] * dt:\n break\n else:\n # Records exhausted before peaks exhausted\n # TODO: this is a strange case, maybe raise warning/error?\n break\n\n # Scan over records that overlap\n for right_r_i in range(left_r_i, len(records)):\n r = records[right_r_i]\n ch = r['channel']\n multiplier = 2**r['amplitude_bit_shift']\n assert p['dt'] == r['dt'], \"Records and peaks must have same dt\"\n\n shift = (p['time'] - r['time']) // dt\n n_r = r['length']\n n_p = p_length\n\n if shift <= -n_p:\n # Record is completely to the right of the peak;\n # we've seen all overlapping records\n break\n\n if n_r <= shift:\n # The (real) data in this record does not actually overlap\n # with the peak\n # (although a previous, longer record did overlap)\n continue\n\n (r_start, r_end), (p_start, p_end) = strax.overlap_indices(\n r['time'] // dt, n_r,\n p['time'] // dt, n_p)\n\n max_in_record = r['data'][r_start:r_end].max() * multiplier\n p['saturated_channel'][ch] |= np.int8(max_in_record >= r['baseline'])\n\n bl_fpart = r['baseline'] % 1\n # TODO: check numba does casting correctly here!\n pe_waveform = adc_to_pe[ch] * (\n multiplier * r['data'][r_start:r_end]\n + bl_fpart)\n\n swv_buffer[p_start:p_end] += pe_waveform\n\n area_pe = pe_waveform.sum()\n area_per_channel[ch] += area_pe\n p['area'] += area_pe\n\n store_downsampled_waveform(p, swv_buffer)\n\n p['n_saturated_channels'] = p['saturated_channel'].sum()\n p['area_per_channel'][:] = area_per_channel\n\n\n@export\ndef find_peak_groups(peaks, gap_threshold,\n left_extension=0, right_extension=0,\n max_duration=int(1e9),\n ):\n \"\"\"Return boundaries of groups of peaks separated by gap_threshold,\n extended left and right.\n\n :param peaks: Peaks to group\n :param gap_threshold: Minimum gap between peaks\n :param left_extension: Extend groups by this many ns left\n :param right_extension: \" \" right\n :param max_duration: max duration time of merged peak in ns\n :return: time, endtime arrays of group boundaries\n \"\"\"\n # Mock up a \"hits\" array so we can just use the existing peakfinder\n # It doesn't work on raw peaks, since they might have different dts\n # TODO: is there no cleaner way?\n fake_hits = 
np.zeros(len(peaks), dtype=strax.hit_dtype)\n fake_hits['dt'] = 1\n fake_hits['area'] = 1\n fake_hits['time'] = peaks['time']\n fake_hits['length'] = strax.endtime(peaks) - peaks['time']\n # Probably int overflow\n assert np.all(fake_hits['length'] > 0), \"Attempt to create invalid hit\"\n fake_peaks = strax.find_peaks(\n fake_hits, adc_to_pe=np.ones(1),\n gap_threshold=gap_threshold,\n left_extension=left_extension, right_extension=right_extension,\n min_channels=1, min_area=0,\n max_duration=max_duration)\n return fake_peaks['time'], strax.endtime(fake_peaks)\n\n\n##\n# Lone hit integration\n##\n\[email protected](nogil=True, cache=True)\ndef _find_hit_integration_bounds(\n lone_hits, peaks, records, save_outside_hits, n_channels):\n \"\"\"\"Update lone hits to include integration bounds\n\n save_outside_hits: in ns!!\n \"\"\"\n result = np.zeros((len(lone_hits), 2), dtype=np.int64)\n if not len(lone_hits):\n return result\n\n # By default, use save_outside_hits to determine bounds\n result[:, 0] = lone_hits['time'] - save_outside_hits[0]\n result[:, 1] = strax.endtime(lone_hits) + save_outside_hits[1]\n\n NO_EARLIER_HIT = -1\n last_hit_index = np.ones(n_channels, dtype=np.int32) * NO_EARLIER_HIT\n\n n_peaks = len(peaks)\n FAR_AWAY = 9223372036_854775807 # np.iinfo(np.int64).max, April 2262\n peak_i = 0\n\n for hit_i, h in enumerate(lone_hits):\n ch = h['channel']\n\n # Find end of previous peak and start of next peak\n # (note peaks are disjoint from any lone hit, even though\n # lone hits may not be disjoint from each other)\n while peak_i < n_peaks and peaks[peak_i]['time'] < h['time']:\n peak_i += 1\n prev_p_end = strax.endtime(peaks[peak_i - 1]) if peak_i != 0 else 0\n next_p_start = peaks[peak_i]['time'] if peak_i != n_peaks else FAR_AWAY\n\n\n # Ensure we do not integrate parts of peaks\n # or (at least for now) beyond the record in which the hit was found\n r = records[h['record_i']]\n result[hit_i][0] = max(prev_p_end,\n r['time'],\n result[hit_i][0])\n result[hit_i][1] = min(next_p_start,\n strax.endtime(r),\n result[hit_i][1])\n\n if last_hit_index[ch] != NO_EARLIER_HIT:\n # Ensure previous hit does not integrate the over-threshold region\n # of this hit\n result[last_hit_index[ch]][1] = min(result[last_hit_index[ch]][1],\n h['time'])\n # Ensure this hit doesn't integrate anything the previous hit\n # already integrated\n result[hit_i][0] = max(result[last_hit_index[ch]][1],\n result[hit_i][0])\n\n last_hit_index[ch] = hit_i\n\n # Convert to index in record and store\n t0 = records[lone_hits['record_i']]['time']\n dt = records[lone_hits['record_i']]['dt']\n for hit_i, h in enumerate(lone_hits):\n h['left_integration'] = (result[hit_i, 0] - t0[hit_i]) // dt[hit_i]\n h['right_integration'] = (result[hit_i, 1] - t0[hit_i]) // dt[hit_i]\n\n\n@export\[email protected](nogil=True, cache=True)\ndef integrate_lone_hits(\n lone_hits, records, peaks, save_outside_hits, n_channels):\n \"\"\"Update the area of lone_hits to the integral in ADCcounts x samples\n\n :param lone_hits: Hits outside of peaks\n :param records: Records in which hits and peaks were found\n :param peaks: Peaks\n :param save_outside_hits: (left, right) *TIME* with wich we should extend\n the integration window of hits\n the integration region\n :param n_channels: number of channels\n\n TODO: this doesn't extend the integration range beyond record boundaries\n \"\"\"\n _find_hit_integration_bounds(\n lone_hits, peaks, records, save_outside_hits, n_channels)\n for hit_i, h in enumerate(lone_hits):\n r = 
records[h['record_i']]\n start, end = h['left_integration'], h['right_integration']\n # TODO: when we add amplitude multiplier, adjust this too!\n h['area'] = (\n r['data'][start:end].sum()\n + (r['baseline'] % 1) * (end - start))\n"
]
| [
[
"numpy.int8",
"numpy.ones",
"numpy.all",
"numpy.ceil",
"numpy.floor",
"numpy.zeros"
]
]
|
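A minimal sketch of driving find_peaks with synthetic hits (values are illustrative; strax.hit_dtype is the same hit dtype this module itself uses in find_peak_groups):

    import numpy as np
    import strax

    hits = np.zeros(2, dtype=strax.hit_dtype)
    hits['time'] = [1000, 1100]  # ns; second hit starts 90 ns after the first ends
    hits['dt'] = 1               # 1 ns sampling
    hits['length'] = 10
    hits['area'] = 5             # ADC x samples; scaled by adc_to_pe per channel
    hits['channel'] = 0

    peaks = strax.find_peaks(hits, adc_to_pe=np.ones(1),
                             gap_threshold=300, min_channels=1)
    # 90 ns < gap_threshold, so both hits merge into a single peak
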
piyushkrmaurya/pinn | [
"76fb07567d224d9381fc4e5957137feccf44e7eb"
]
| [
"pinn/models.py"
]
| [
"import numpy as np\r\n\r\nnp.random.seed(0)\r\n\r\n\r\nclass Model:\r\n def __init__(self):\r\n self.compiled = False\r\n\r\n def __call__(self):\r\n raise NotImplementedError\r\n\r\n def compile(self, optimizer, loss):\r\n self.optimizer = optimizer\r\n self.loss_object = loss\r\n self.compiled = True\r\n\r\n def back_propagate(self):\r\n raise NotImplementedError\r\n\r\n def fit(self, train_x, train_y, epochs, intial_epoch=0, validation_data=None):\r\n raise NotImplementedError\r\n\r\n\r\nclass Sequential(Model):\r\n def __init__(self, layers):\r\n super(Sequential, self).__init__()\r\n self.layers = layers\r\n\r\n def __call__(self, inputs):\r\n outputs = inputs\r\n for layer in self.layers:\r\n layer(outputs)\r\n outputs = layer.forward()\r\n return outputs\r\n\r\n def forward(self, inputs):\r\n return self(inputs)\r\n\r\n def back_propagate(self, y):\r\n derivatives = {\"dZ\": [], \"dW\": [], \"db\": []}\r\n\r\n last_weights = None\r\n for layer in reversed(self.layers[1:]):\r\n if last_weights is None:\r\n dZ = self.loss_object.derivative(y, layer.outputs)\r\n if layer.activation is not None:\r\n dZ *= layer.activation.derivative(layer.outputs)\r\n derivatives[\"dZ\"].append(dZ)\r\n else:\r\n dZ = last_weights\r\n if layer.activation is not None:\r\n dZ *= layer.activation.derivative(layer.outputs)\r\n derivatives[\"dZ\"].append(dZ)\r\n derivatives[\"dW\"].append(np.dot(layer.inputs.T, dZ))\r\n derivatives[\"db\"].append(np.sum(dZ, axis=0))\r\n last_weights = np.dot(dZ, layer.weights)\r\n\r\n for layer, dW, db in zip(\r\n self.layers[1:], reversed(derivatives[\"dW\"]), reversed(derivatives[\"db\"])\r\n ):\r\n layer.weights -= self.optimizer.learning_rate * dW.T\r\n layer.bias -= self.optimizer.learning_rate * db.T\r\n\r\n def fit(self, train_x, train_y, epochs, intial_epoch=0, validation_data=None):\r\n if not self.compiled:\r\n raise Exception(\"Model must be compiled before calling fit\")\r\n\r\n if isinstance(train_x, np.ndarray):\r\n pass\r\n elif isinstance(train_x, list):\r\n train_x = np.array(train_x)\r\n else:\r\n raise ValueError(\"Examples must be python list or numpy array\")\r\n\r\n if isinstance(train_y, np.ndarray):\r\n pass\r\n elif isinstance(train_y, list):\r\n train_y = np.array(train_y)\r\n else:\r\n raise ValueError(\"Labels must be python list or numpy array\")\r\n\r\n for epoch in range(epochs):\r\n print(f\"Epoch {epoch+1}/{epochs}\")\r\n y_pred = self.forward(train_x)\r\n loss = self.loss_object.loss(train_y, y_pred)\r\n if train_y.shape != y_pred.shape:\r\n train_y = train_y.reshape(y_pred.shape)\r\n print(\"Loss:\", loss)\r\n self.back_propagate(train_y)\r\n\r\n"
]
| [
[
"numpy.dot",
"numpy.array",
"numpy.sum",
"numpy.random.seed"
]
]
|
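The per-layer update inside back_propagate above reduces to two NumPy reductions; restated in isolation with illustrative shapes:

    import numpy as np

    X = np.random.rand(4, 3)   # inputs seen by one dense layer (batch of 4)
    dZ = np.random.rand(4, 2)  # error signal at that layer's outputs
    dW = np.dot(X.T, dZ)       # shape (3, 2); applied as weights -= lr * dW.T
    db = np.sum(dZ, axis=0)    # shape (2,); applied as bias -= lr * db.T
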
aibhleog/Keck-Visiting-Scholar | [
"79205d94a97b4d452edae0599182f1e7c1bcd969"
]
| [
"engineering_time/test_cross_correlations.py"
]
| [
"#!/usr/bin/env python\n\nimport image_registration as ir # github.com/keflavich/image_registration\nimport astropy.io.fits as fits\nimport numpy as np\nd0 = fits.getdata('test.fits')\n\n# this test just makes sure that the cross_correlation_shifts is working\n# correctly for the MOSFIRE data\ndef test_cross_correlation_shifts():\n\tcorr = ir.cross_correlation_shifts(d0,d0)\n\tassert round(corr[0],1) == 0.0 and round(corr[1],1) == 0.0, \"Running cross-correlation on the same object should return an xshift and yshift that are essentially zero.\"\n\n\n# NEXT TEST\n# making fake data with NaNs and a stationary star\nfake = np.zeros((50,50))\nfake[10:14] = np.nan # simulating row of masked out signal\nfake[40:43,40:43] = 3 # stationary star\n\nfake1 = np.zeros((50,50))\nfake1[12:16] = np.nan # simulating row of masked out signal\nfake1[40:43,40:43] = 3 # stationary star\n \ndef test_NaNs_not_counted():\n corr = ir.cross_correlation_shifts(fake,fake1)\n assert round(corr[0],1) == 0.0 and round(corr[1],1) == 0.0, \"Running cross-correlation on fake data with different rows of NaNs has no effect. NaNs should not have an impact.\"\n\n \n# NEXT TEST\n# making fake data with NaNs and a shifted star\nfake2 = np.zeros((50,50))\nfake2[10:14] = np.nan # simulating row of masked out signal\nfake2[40:43,40:43] = 3 # shifted star\n\nfake3 = np.zeros((50,50))\nfake3[10:14] = np.nan # simulating row of masked out signal\nfake3[38:41,35:38] = 3 # shifted star\n\ndef test_star_NaNs():\n corr = ir.cross_correlation_shifts(fake2,fake3)\n assert round(corr[0]) == -5 and round(corr[1]) == -2, \"Running cross-correlation on fake data with same rows of NaNs and shifted star should have an effect. NaNs should not have an impact.\""
]
| [
[
"numpy.zeros"
]
]
|
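The sign convention these tests assert (the returned (xshift, yshift) locates the second image relative to the first) can be checked on synthetic data:

    import numpy as np
    import image_registration as ir

    ref = np.zeros((50, 50))
    ref[20:23, 20:23] = 3.0                              # small bright source
    moved = np.roll(np.roll(ref, 2, axis=0), 5, axis=1)  # down 2 rows, right 5 columns
    xshift, yshift = ir.cross_correlation_shifts(ref, moved)
    # round(xshift) == 5 and round(yshift) == 2
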
cgosmeyer/photutils_plus | [
"9d97dc77c8df5302667b1aa6566df31c88467fbb"
]
| [
"photutils_plus/phot_tools.py"
]
| [
"\n\"\"\"\nModule for Python photomtery assisting functions.\n\nAuthor: \n\n C.M. Gosmeyer, March 2016\n\"\"\"\n\n\nimport getpass\nimport numpy as np\nimport os\nimport platform \nimport shutil\nimport socket\nimport time\n\nfrom astropy.io import ascii\nfrom astropy.io import fits\nfrom collections import OrderedDict\nfrom photutils_plus.meanclip import meanclip\n\n\n#-------------------------------------------------------------------------------# \n\ndef append_data_to_header(filename):\n \"\"\"Appends data to header.\n \n Parameters\n ----------\n filename : string\n Name of the file.\n\n \"\"\"\n with open(filename, \"a\") as header_file:\n data_file = open(filename + '.temp', \"r\")\n data_list = data_file.readlines()\n data_file.close()\n header_file.write('\\n')\n for line in data_list:\n if '#' not in line:\n header_file.write(line)\n\n # Delete the temp file.\n os.remove(filename+'.temp')\n\n#-------------------------------------------------------------------------------#\n\ndef circular_mask(arr_shape, r, x_offset=0, y_offset=0):\n \"\"\"Generates circular mask for 2D image.\n \n Parameters\n ----------\n arr_shape : tuple of int\n Shape of the array to use the mask.\n r : int\n Radius of the mask in pixels.\n x_offset, y_offset : int or float, optional\n Mask offset relative to image center.\n\n Returns\n -------\n Numpy indices of the mask, rounded to nearest integer.\n \n References\n ----------\n http://mail.scipy.org/pipermail/numpy-discussion/2011-January/054470.html\n \"\"\"\n assert len(arr_shape) == 2, 'Image is not 2-D'\n \n ny, nx = arr_shape\n assert nx > 1 and ny > 1, 'Image is too small'\n \n assert isinstance(r, (int, long)) and r > 0, 'Radius must be int > 0'\n \n xcen = np.round(0.5 * nx - 0.5 + x_offset).astype('int')\n ycen = np.round(0.5 * ny - 0.5 + y_offset).astype('int')\n \n x1, x2 = xcen - r, xcen + r\n y1, y2 = ycen - r, ycen + r\n\n assert y1 >= 0 and y2 < ny and x1 >= 0 and x2 < nx, \\\n 'Mask falls outside image bounds'\n \n y, x = np.ogrid[-r:r, -r:r]\n i = np.where(x**2 + y**2 <= r**2)\n \n a = np.zeros(arr_shape).astype('bool')\n a[y1:y2, x1:x2][i] = True\n \n return np.where(a)\n\n\n#-------------------------------------------------------------------------------# \n\ndef multiple_source_mask(arr_shape, r, xcoords, ycoords):\n \"\"\" Returns an array of True and Falses.\n The True are are where the (square!) masks are applied.\n Includes edge-detection!\n\n Parameters\n ----------\n arr_shape : array\n The width and height of the image array, [w,h].\n r : float or integer\n The radius of the mask.\n xcoords : array\n Array of the x positions.\n ycoords : array\n Array of the y positions.\n\n Returns\n -------\n Array of booleans.\n\n \"\"\"\n ny, nx = arr_shape\n\n bool_array = np.zeros(arr_shape).astype('bool')\n\n for xc, yc in zip(xcoords, ycoords):\n x1, x2 = xc-r, xc+r\n y1, y2 = yc-r, xc+r\n\n # Make sure mask falls within the image. If it doesn't, trim.\n if y1 <= 0:\n y1 = 1\n elif y2 > ny:\n y2 = ny-1\n elif x1 <= 0:\n x1 = 1\n elif x2 > nx: \n x2 = nx-1\n \n bool_array[y1:y2, x1:x2] = True\n\n return np.where(bool_array)\n\n#-------------------------------------------------------------------------------# \n\ndef meanclip_bkgrd(imagename, ext=0, backmethod='mean', xcoords=[], ycoords=[],\n naxis1='', naxis2='',detector='', maskrad='', multiple_sources=False):\n \"\"\" After masking out sources and image border, calculates\n the mean clipped background.\n\n Based off of D. 
Hammer's script from \n `detectors.uvis_contam.ptscr_photom_uvis.run_daophot_uvis`\n \"\"\"\n # Read in FITS file.\n if '.fits' in imagename:\n # Create temporary image for bckgrd measurement that masks sources \n # out to 80 pixels (assign a very low number).\n tmp_imagename = imagename+'.back.fits'\n shutil.copy(imagename, tmp_imagename)\n hdulist = fits.open(tmp_imagename, mode='update')\n naxis1 = hdulist[0].header['naxis1']\n naxis2 = hdulist[0].header['naxis2']\n detector = hdulist[0].header['detector']\n maskim = hdulist[ext].data\n else:\n maskim = imagename\n\n\n if detector == 'IR' and maskrad == '':\n maskrad = 30\n elif detector == 'UVIS' and maskrad == '':\n maskrad = 80\n \n if multiple_sources:\n maskim[multiple_source_mask(maskim.shape, maskrad, xcoords, ycoords)] = -99999.0\n else:\n for xc, yc in zip(xcoords, ycoords):\n maskim[circular_mask(maskim.shape, maskrad, x_offset=(xc-naxis1/2.0), \\\n y_offset=(yc-naxis2/2.0))] = -99999.0\n\n # Also mask out sources with zero effective exposure \n # [WE ELIMINATE PIXELS WITHIN 20 OF IMAGE BORDER].\n maskim[:,0:20] = -99999.0\n maskim[:,-20:] = -99999.0\n maskim[0:20,:] = -99999.0\n maskim[-20:,:] = -99999.0\n\n # Generate initial guess for lower/upper limits (use 10 sigma).\n fmaskim = np.ndarray.flatten(maskim)\n llim = -100\n ulim = 10000.0\n init_median,init_rms = meanclip(fmaskim[(fmaskim > llim) & \\\n (fmaskim < ulim)], maxiter=7, \\\n return_median=1)\n llim = init_median - 10.0*init_rms\n ulim = init_median + 10.0*init_rms\n\n # Measure background and rms.\n\n if backmethod.lower() == 'mean':\n back,backrms=meanclip(fmaskim[(fmaskim > llim) & \\\n (fmaskim < ulim)], maxiter=7)\n elif backmethod.lower() == 'median':\n back,backrms = meanclip(fmaskim[(fmaskim > llim) & \\\n (fmaskim < ulim)], maxiter=7, \n return_median=1)\n elif backmethod.lower() == 'mode':\n backmean,backrms = meanclip(fmaskim[(fmaskim > llim) & \\\n (fmaskim < ulim)], maxiter=7)\n nbins = np.ceil(80.0/(0.1*backrms))\n cc,bb,pp = pylab.hist(fmaskim[(fmaskim > llim) & \\\n (fmaskim < ulim)], log=True, bins=nbins, \\\n range=(-40.0,40.0))\n back = bb[cc.argmax()] + (bb.max()-bb.min())/(2.0*(len(bb)-1))\n else:\n raise Exception('Background statistical method {} is not' + \\\n ' covered in our case list.'.format(backmethod))\n\n return back, backrms\n\n\n#-------------------------------------------------------------------------------# \n\ndef read_in_photfile(filename, isheader=True):\n \"\"\" Reads in my custom photometry file, created in \n :func:`write_out_photfile`.\n\n Parameters\n ----------\n filename : string\n Name of the file.\n isheader : {True, False}\n Set to True if the file contains a header.\n \"\"\"\n data = ascii.read(filename)\n\n if isheader:\n header = ascii.read(data.meta['comments'], delimiter='\\t',\n format='no_header', names=['key', 'val', 'units'])\n else:\n header=None\n\n return data, header\n\n\n#-------------------------------------------------------------------------------# \n\ndef parse_phot_header(header):\n \"\"\" Parses my photometry file header.\n\n Parameters\n ----------\n header : dictionary\n The header of photometry file.\n \"\"\"\n keys = header['key']\n vals = header['val']\n \n parsed_header = {}\n\n for key, val in zip(keys, vals):\n parsed_header[key.lower()] = val\n\n\n return parsed_header\n\n\n#-------------------------------------------------------------------------------# \n\ndef write_out_photfile(fileoutname, tab, current_params={}, func_name='', \n header=False, verbose=False):\n \"\"\" Writes output 
photometry file with all parameters listed at \n top in comments.\n\n Parameters\n ----------\n fileoutname : string\n Name of the output photometry file.\n tab : astropy.Table\n Table of the photometry outputs.\n current_params : dictionary\n The parameters used in the photometry function: ap_radii, \n centroid, etc.\n func_name : string\n Name of the photometry function used. \n header : {True, False}\n Set to True if header (and, presumably, the file) already exists.\n verbose : {True, False}\n Set to True for verbose mode.\n\n References\n ----------\n Fetching a function's parameters.\n http://stackoverflow.com/questions/582056/getting-list-of-parameter-names-inside-python-function\n\n Astropy ascii.read. See especially 'Comments and metadata' section.\n http://docs.astropy.org/en/stable/io/ascii/read.html\n\n Astropy ascii.write can't append. drrr\n https://github.com/astropy/astropy/issues/3684\n\n \"\"\"\n # If already have a header, just insert it. If not, create it.\n\n if not header:\n # Order the current_params dictionary\n current_params = OrderedDict(sorted(current_params.items(), key=lambda t: t[0]))\n\n # Create logistic values to be inserted at top of file's header.\n python_version = platform.python_version() # or sys.version\n user = getpass.getuser()\n host = socket.gethostname()\n date = time.strftime(\"%Y-%m-%d\")\n hms = time.strftime(\"%H:%M:%S\")\n\n # Define lists of logistics.\n logistic_keys = ['PYTHON', 'USER ', 'HOST ', 'DATE ', 'TIME ', 'FUNCTION']\n logistic_values = [python_version, user, host, date, hms, func_name]\n logistic_units = ['version', 'name', 'computer', 'yyyy-mm-dd', 'hh:mm:ss', 'name']\n\n\n # Open the file. \n open_file = open(fileoutname, 'w')\n if verbose:\n print(\"Opening {} to be written...\".format(open_file))\n\n # First write logistics.\n for key, val, unit in zip(logistic_keys, logistic_values, logistic_units):\n open_file.write(\"# {} \\t\\t {} \\t\\t {}\\n\".format(key, val, unit) )\n open_file.write(\"#\\n\")\n\n # Second write the finder function's parameters. \n units = np.arange(len(current_params.keys()))\n for key, val, unit in zip(current_params.keys(), current_params.values(), units):\n #print(\"# {} \\t\\t {} \\t\\t {}\\n\".format(key.upper(), val, unit))\n open_file.write(\"# {} \\t\\t {} \\t\\t {}\\n\".format(key.upper(), val, unit) )\n\n open_file.write(\"#\\n\")\n\n else:\n # If input a header, write it to file.\n # Open the file. \n open_file = open(fileoutname, 'w')\n for i in range(len(header[header.colnames[0]])):\n row_str = '# '\n for col in header[i]:\n row_str += str(col) + \"\\t\"\n row_str += \"\\n\"\n open_file.write(row_str)\n\n\n # Write out the column names of the table.\n colname_str = ''\n if verbose:\n print(\"printing colnames...\")\n for colname in tab.colnames:\n colname_str += colname + \"\\t\"\n if verbose:\n print(colname_str)\n colname_str += \"\\n\"\n\n open_file.write(colname_str)\n\n # Finally write out the table, row by row.\n if verbose:\n print('printing table row by row...')\n for i in range(len(tab[tab.colnames[0]])):\n row_str = ''\n for col in tab[i]:\n col = str(col).split(' pix')[0] # one day unit started appearing after all the coords. drr astropy tables drr\n row_str += str(col) + \"\\t\"\n row_str += \"\\n\"\n if verbose:\n print(row_str)\n open_file.write(row_str)\n\n # Close the file.\n open_file.close()\n \n\n"
]
| [
[
"numpy.ndarray.flatten",
"numpy.round",
"numpy.ceil",
"numpy.where",
"numpy.zeros"
]
]
|
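circular_mask above checks isinstance(r, (int, long)), which ties it to Python 2; under Python 3 the geometry it computes can be sketched directly (centered mask, no offsets, values illustrative):

    import numpy as np

    arr_shape, r = (100, 100), 10
    ny, nx = arr_shape
    ycen, xcen = ny // 2, nx // 2           # image center for even dimensions
    y, x = np.ogrid[-r:r, -r:r]
    inside = np.where(x**2 + y**2 <= r**2)  # offsets falling within the circle
    mask = np.zeros(arr_shape, dtype=bool)
    mask[ycen - r:ycen + r, xcen - r:xcen + r][inside] = True
    idx = np.where(mask)                    # same index tuple circular_mask returns

Note also that multiple_source_mask computes y1, y2 = yc-r, xc+r, where yc+r looks intended for y2.
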
Seb-Good/haifa-net | [
"d16bc8a6317778cb459ab752fcbc679912567ef9"
]
| [
"haifanet/networks/haifanet_v1.py"
]
| [
"\"\"\"\nhaifanet_v1.py\n--------------\nThis module provides a class and methods for building a convolutional neural network with tensorflow.\nBy: Sebastian D. Goodfellow, Ph.D., 2018\n\"\"\"\n\n# Compatibility imports\nfrom __future__ import absolute_import, division, print_function\n\n# 3rd party imports\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.metrics import f1_score\n\n# Local imports\nfrom haifanet.train.data_generator import DataGenerator\nfrom haifanet.networks.layers import fc_layer, conv_layer, dropout_layer, print_output_shape, max_pool_layer\n\n\nclass HaifaNetV1(object):\n\n \"\"\"\n Build the forward propagation computational graph for a WavNet inspired deep neural network.\n \"\"\"\n\n def __init__(self, length, channels, classes, seed=0):\n\n # Set input parameters\n self.length = length\n self.channels = channels\n self.classes = classes\n self.seed = seed\n\n def inference(self, input_layer, reuse, is_training, name, print_shape=True):\n \"\"\"Forward propagation of computational graph.\"\"\"\n # Check input layer dimensions\n assert input_layer.shape[1] == self.length\n assert input_layer.shape[2] == self.channels\n\n # Define a scope for reusing the variables\n with tf.variable_scope(name, reuse=reuse):\n\n # Set variables\n kernel_size = 3\n conv_filts = 128\n res_filts = 128\n skip_filts = 128\n skips = list()\n\n # Print shape\n print_output_shape(layer_name='input', net=input_layer, print_shape=print_shape)\n\n \"\"\"Block Series 1\"\"\"\n # --- Layer 1 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_1'\n\n # Set layer scope\n with tf.variable_scope(layer_name):\n\n # Convolution\n net = conv_layer(input_layer=input_layer, kernel_size=kernel_size, strides=1, dilation_rate=1,\n filters=res_filts, padding='SAME', activation=None, use_bias=False,\n name=layer_name + '_conv', seed=self.seed)\n\n # Max pool\n net = max_pool_layer(input_layer=net, pool_size=3, strides=2, padding='SAME',\n name=layer_name + '_maxpool')\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=net, print_shape=print_shape)\n\n # --- Layer 2 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_2'\n\n # Compute block\n outputs = self._residual_block(input_layer=net, kernel_size=kernel_size, layer_name=layer_name,\n conv_filts=conv_filts, res_filts=res_filts, skip_filts=skip_filts,\n dilation_rate=2, res=True, skip=True)\n\n # Collect skip\n skips.append(outputs['skip'])\n\n # Print shape\n print_output_shape(layer_name=layer_name + '_res', net=outputs['res'], print_shape=print_shape)\n print_output_shape(layer_name=layer_name + '_skip', net=outputs['skip'], print_shape=print_shape)\n\n # --- Layer 3 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_3'\n\n # Compute block\n outputs = self._residual_block(input_layer=outputs['res'], kernel_size=kernel_size, layer_name=layer_name,\n conv_filts=conv_filts, res_filts=res_filts, skip_filts=skip_filts,\n dilation_rate=4, res=True, skip=True)\n\n # Collect skip\n skips.append(outputs['skip'])\n\n # Print shape\n print_output_shape(layer_name=layer_name + '_res', net=outputs['res'], print_shape=print_shape)\n print_output_shape(layer_name=layer_name + '_skip', net=outputs['skip'], print_shape=print_shape)\n\n # --- Layer 4 (Convolution) 
------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_4'\n\n # Compute block\n outputs = self._residual_block(input_layer=outputs['res'], kernel_size=kernel_size, layer_name=layer_name,\n conv_filts=conv_filts, res_filts=res_filts, skip_filts=skip_filts,\n dilation_rate=8, res=True, skip=True)\n\n # Collect skip\n skips.append(outputs['skip'])\n\n # Print shape\n print_output_shape(layer_name=layer_name + '_res', net=outputs['res'], print_shape=print_shape)\n print_output_shape(layer_name=layer_name + '_skip', net=outputs['skip'], print_shape=print_shape)\n\n # --- Layer 5 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_5'\n\n # Compute block\n outputs = self._residual_block(input_layer=outputs['res'], kernel_size=kernel_size, layer_name=layer_name,\n conv_filts=conv_filts, res_filts=res_filts, skip_filts=skip_filts,\n dilation_rate=16, res=True, skip=True)\n\n # Collect skip\n skips.append(outputs['skip'])\n\n # Print shape\n print_output_shape(layer_name=layer_name + '_res', net=outputs['res'], print_shape=print_shape)\n print_output_shape(layer_name=layer_name + '_skip', net=outputs['skip'], print_shape=print_shape)\n\n # --- Layer 6 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_6'\n\n # Compute block\n outputs = self._residual_block(input_layer=outputs['res'], kernel_size=kernel_size, layer_name=layer_name,\n conv_filts=conv_filts, res_filts=res_filts, skip_filts=skip_filts,\n dilation_rate=32, res=True, skip=True)\n\n # Collect skip\n skips.append(outputs['skip'])\n\n # Print shape\n print_output_shape(layer_name=layer_name + '_res', net=outputs['res'], print_shape=print_shape)\n print_output_shape(layer_name=layer_name + '_skip', net=outputs['skip'], print_shape=print_shape)\n\n # --- Layer 7 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_7'\n\n # Compute block\n outputs = self._residual_block(input_layer=outputs['res'], kernel_size=kernel_size, layer_name=layer_name,\n conv_filts=conv_filts, res_filts=res_filts, skip_filts=skip_filts,\n dilation_rate=64, res=True, skip=True)\n\n # Collect skip\n skips.append(outputs['skip'])\n\n # Print shape\n print_output_shape(layer_name=layer_name + '_res', net=outputs['res'], print_shape=print_shape)\n print_output_shape(layer_name=layer_name + '_skip', net=outputs['skip'], print_shape=print_shape)\n\n # --- Layer 8 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_8'\n\n # Compute block\n outputs = self._residual_block(input_layer=outputs['res'], kernel_size=kernel_size, layer_name=layer_name,\n conv_filts=conv_filts, res_filts=res_filts, skip_filts=skip_filts,\n dilation_rate=128, res=True, skip=True)\n\n # Collect skip\n skips.append(outputs['skip'])\n\n # Print shape\n print_output_shape(layer_name=layer_name + '_res', net=outputs['res'], print_shape=print_shape)\n print_output_shape(layer_name=layer_name + '_skip', net=outputs['skip'], print_shape=print_shape)\n\n # --- Layer 9 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_9'\n\n # Compute block\n outputs = self._residual_block(input_layer=outputs['res'], kernel_size=kernel_size, layer_name=layer_name,\n 
conv_filts=conv_filts, res_filts=res_filts, skip_filts=skip_filts,\n dilation_rate=256, res=True, skip=True)\n\n # Collect skip\n skips.append(outputs['skip'])\n\n # Print shape\n print_output_shape(layer_name=layer_name + '_res', net=outputs['res'], print_shape=print_shape)\n print_output_shape(layer_name=layer_name + '_skip', net=outputs['skip'], print_shape=print_shape)\n\n # --- Layer 10 (Convolution) ----------------------------------------------------------------------------- #\n\n # Set name\n layer_name = 'layer_10'\n\n # Compute block\n outputs = self._residual_block(input_layer=outputs['res'], kernel_size=kernel_size, layer_name=layer_name,\n conv_filts=conv_filts, res_filts=res_filts, skip_filts=skip_filts,\n dilation_rate=512, res=False, skip=True)\n\n # Collect skip\n skips.append(outputs['skip'])\n\n # Print shape\n print_output_shape(layer_name=layer_name + '_skip', net=outputs['skip'], print_shape=print_shape)\n\n # Add all skips to res output\n with tf.variable_scope('skips'):\n output = tf.add_n(inputs=skips, name='add_skips')\n\n # Print shape\n print_output_shape(layer_name='output_skip_addition', net=output, print_shape=print_shape)\n\n # Activation\n with tf.variable_scope('relu') as scope:\n output = tf.nn.relu(output, name=scope.name)\n\n # Dropout\n output = dropout_layer(input_layer=output, drop_rate=0.3, seed=self.seed,\n training=is_training, name='dropout1')\n\n # Convolution\n output = conv_layer(input_layer=output, kernel_size=kernel_size, strides=1, dilation_rate=1,\n filters=256, padding='SAME', activation=tf.nn.relu, use_bias=False,\n name='conv1', seed=self.seed)\n\n # Dropout\n output = dropout_layer(input_layer=output, drop_rate=0.3, seed=self.seed,\n training=is_training, name='dropout1')\n\n # Print shape\n print_output_shape(layer_name='output_conv1', net=output, print_shape=print_shape)\n\n # Convolution\n output = conv_layer(input_layer=output, kernel_size=kernel_size, strides=1, dilation_rate=1,\n filters=512, padding='SAME', activation=tf.nn.relu, use_bias=False,\n name='conv2', seed=self.seed)\n\n # Dropout\n output = dropout_layer(input_layer=output, drop_rate=0.3, seed=self.seed,\n training=is_training, name='dropout1')\n\n # Print shape\n print_output_shape(layer_name='output_conv2', net=output, print_shape=print_shape)\n\n \"\"\"Network Output\"\"\"\n # --- Global Average Pooling Layer ----------------------------------------------------------------------- #\n\n # Set name\n layer_name = 'gap'\n\n # Set layer scope\n with tf.variable_scope(layer_name):\n\n # Reduce mean along dimension 1\n gap = tf.reduce_mean(input_tensor=output, axis=1)\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=gap, print_shape=print_shape)\n\n # --- Softmax Layer -------------------------------------------------------------------------------------- #\n\n # Set name\n layer_name = 'logits'\n\n # Softmax activation\n logits = fc_layer(input_layer=gap, neurons=self.classes, activation=None, use_bias=False,\n name=layer_name, seed=self.seed)\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=logits, print_shape=print_shape)\n\n # Compute Class Activation Maps\n cams = self._get_cams(net=output, is_training=is_training)\n\n return logits, cams\n\n def _residual_block(self, input_layer, kernel_size, layer_name, conv_filts, res_filts,\n skip_filts, dilation_rate, res=True, skip=True):\n \"\"\"Wavenet residual block.\"\"\"\n # Set layer scope\n with tf.variable_scope(layer_name):\n\n # Outputs dictionary\n outputs = dict()\n\n # Convolution 
tanh\n conv_filt = conv_layer(input_layer=input_layer, kernel_size=kernel_size, strides=1,\n dilation_rate=dilation_rate, filters=conv_filts, padding='SAME',\n activation=tf.nn.tanh, use_bias=False, name=layer_name + '_conv_filt',\n seed=self.seed)\n\n # Convolution sigmoid\n conv_gate = conv_layer(input_layer=input_layer, kernel_size=kernel_size, strides=1,\n dilation_rate=dilation_rate, filters=conv_filts, padding='SAME',\n activation=tf.nn.sigmoid, use_bias=False, name=layer_name + '_conv_gate',\n seed=self.seed)\n\n # Combine activations\n with tf.variable_scope('gate') as scope:\n activation = tf.multiply(conv_filt, conv_gate, name=scope.name)\n\n # Residual\n if res:\n # Convolution\n outputs['res'] = conv_layer(input_layer=activation, kernel_size=1, strides=1,\n dilation_rate=dilation_rate, filters=res_filts, padding='SAME',\n activation=None, use_bias=False, name=layer_name + '_conv_res',\n seed=self.seed)\n\n # Add identity\n outputs['res'] = tf.add(outputs['res'], input_layer, name=layer_name + '_add_identity')\n\n # Skip\n if skip:\n # Convolution\n outputs['skip'] = conv_layer(input_layer=activation, kernel_size=1, strides=1,\n dilation_rate=dilation_rate, filters=skip_filts, padding='SAME',\n activation=None, use_bias=False, name=layer_name + '_conv_skip',\n seed=self.seed)\n\n return outputs\n\n def _get_cams(self, net, is_training):\n \"\"\"Collect class activation maps (CAMs).\"\"\"\n # Empty list for class activation maps\n cams = list()\n\n # Compute class activation map\n # with tf.variable_scope('cam', reuse=tf.AUTO_REUSE):\n if is_training is not None:\n for label in range(self.classes):\n cams.append(self._compute_cam(net=net, label=label))\n\n return tf.concat(cams, axis=2)\n\n def _compute_cam(self, net, label):\n \"\"\"Compute class activation map (CAM) for specified label.\"\"\"\n # Compute logits weights\n weights = self._get_logit_weights(net=net, label=label)\n\n # Compute class activation map\n cam = tf.matmul(net, weights)\n\n return cam\n\n def _get_logit_weights(self, net, label):\n \"\"\"Get logits weights for specified label.\"\"\"\n # Get number of filters in the final output\n num_filters = int(net.shape[-1])\n\n with tf.variable_scope('logits', reuse=True):\n weights = tf.gather(tf.transpose(tf.get_variable('kernel')), label)\n weights = tf.reshape(weights, [-1, num_filters, 1])\n\n # Reshape weights\n weights = self._reshape_logit_weights(net=net, weights=weights)\n\n return weights\n\n @staticmethod\n def _reshape_logit_weights(net, weights):\n \"\"\"Reshape logits shapes to batch size for multiplication with net output.\"\"\"\n return tf.tile(input=weights, multiples=[tf.shape(net)[0], 1, 1])\n\n def create_placeholders(self):\n \"\"\"Creates place holders: waveform and label.\"\"\"\n with tf.variable_scope('waveform') as scope:\n waveform = tf.placeholder(dtype=tf.float32, shape=[None, self.length, self.channels], name=scope.name)\n\n with tf.variable_scope('label') as scope:\n label = tf.placeholder(dtype=tf.int32, shape=[None], name=scope.name)\n\n return waveform, label\n\n def create_generator(self, path, mode, batch_size):\n \"\"\"Create data generator graph operation.\"\"\"\n return DataGenerator(path=path, mode=mode, shape=[self.length, self.channels],\n batch_size=batch_size, prefetch_buffer=1500, seed=0, num_parallel_calls=32)\n\n @staticmethod\n def compute_accuracy(logits, labels):\n \"\"\"Computes the model accuracy for set of logits and labels.\"\"\"\n with tf.variable_scope('accuracy'):\n return 
tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, axis=1), tf.cast(labels, tf.int64)), 'float'))\n\n def compute_f1(self, logits, labels):\n \"\"\"Computes the model f1 score for set of logits and labels.\"\"\"\n with tf.variable_scope('f1'):\n\n # Get prediction\n predictions = tf.cast(tf.argmax(logits, axis=1), tf.int32)\n\n # Get label\n labels = tf.cast(labels, tf.int32)\n\n return tf.py_func(func=self._compute_f1, inp=[predictions, labels], Tout=[tf.float64])\n\n @staticmethod\n def _compute_f1(predictions, labels):\n \"\"\"Compute the mean f1 score.\"\"\"\n return np.mean(f1_score(labels, predictions, labels=[0, 1, 2, 3], average=None)[0:3])\n"
]
| [
[
"tensorflow.nn.relu",
"tensorflow.matmul",
"tensorflow.multiply",
"tensorflow.concat",
"tensorflow.get_variable",
"tensorflow.reduce_mean",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.add",
"sklearn.metrics.f1_score",
"tensorflow.variable_scope",
"tensorflow.argmax",
"tensorflow.add_n",
"tensorflow.py_func"
]
]
|
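A minimal TF1-style sketch of building the graph defined above (the input length and class count are illustrative placeholders, not values from this record):

    import tensorflow as tf
    from haifanet.networks.haifanet_v1 import HaifaNetV1

    net = HaifaNetV1(length=18000, channels=1, classes=4)
    waveform, label = net.create_placeholders()
    is_training = tf.placeholder(tf.bool, name='is_training')
    logits, cams = net.inference(waveform, reuse=False, is_training=is_training,
                                 name='haifanet', print_shape=False)
    accuracy = net.compute_accuracy(logits, label)
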
xpertdev/NeoFinRL | [
"e36958fd832c9ef972b0b6d742ebf5c4f4d14957"
]
| [
"finrl_meta/data_processors/processor_tusharepro.py"
]
| [
"\nfrom email.policy import default\nimport numpy as np\nimport pandas as pd\nimport tushare as ts\nfrom tqdm import tqdm\nfrom stockstats import StockDataFrame as Sdf\nimport stockstats\nfrom finrl_meta.data_processors.basic_processor import BasicProcessor\nfrom typing import List\nimport time\nimport copy\nimport warnings\nfrom talib.abstract import CCI, DX, MACD, RSI\nfrom copy import deepcopy\nwarnings.filterwarnings(\"ignore\")\n\nclass TushareProProcessor(BasicProcessor):\n \"\"\"Provides methods for retrieving daily stock data from tusharepro API\n Attributes\n ----------\n start_date : str\n start date of the data\n end_date : str\n end date of the data\n ticker_list : list\n a list of stock tickers \n token : str\n get from https://waditu.com/ after registration\n adj: str\n Whether to use adjusted closing price. Default is None. \n If you want to use forward adjusted closing price or 前复权. pleses use 'qfq'\n If you want to use backward adjusted closing price or 后复权. pleses use 'hfq'\n Methods\n -------\n download_data()\n Fetches data from tusharepro API\n \n \"\"\"\n def __init__(self, data_source: str, **kwargs):\n BasicProcessor.__init__(self, data_source, **kwargs)\n if 'token' not in kwargs.keys() :\n raise ValueError(\"pleses input token!\")\n self.token=kwargs[\"token\"]\n if 'adj' in kwargs.keys() :\n self.adj=kwargs[\"adj\"]\n print(f\"Using {self.adj} method\")\n else:\n self.adj=None\n \n \n def get_data(self,id) -> pd.DataFrame: \n dfb = ts.pro_bar(ts_code=id, start_date=self.start,end_date=self.end,adj=self.adj)\n #df1 = ts.pro_bar(ts_code=id, start_date=self.start_date,end_date='20180101')\n #dfb=pd.concat([df, df1], ignore_index=True)\n #print(dfb.shape)\n return dfb\n\n def download_data(self, ticker_list: List[str], start_date: str, end_date: str, time_interval: str) \\\n -> pd.DataFrame:\n \"\"\"Fetches data from tusharepro API\n Parameters\n ----------\n Returns\n -------\n `pd.DataFrame`\n 7 columns: A tick symbol, date, open, high, low, close and volume \n for the specified stock ticker\n \"\"\"\n self.ticker_list = ticker_list\n self.start = start_date\n self.end = end_date\n self.time_interval = time_interval\n\n if self.time_interval!=\"1D\":\n raise ValueError('not supported currently')\n \n ts.set_token(self.token)\n \n self.df=pd.DataFrame()\n for i in tqdm(self.ticker_list,total=len(self.ticker_list)):\n df_temp=self.get_data(i)\n self.df=self.df.append(df_temp)\n #print(\"{} ok\".format(i))\n time.sleep(0.25)\n \n self.df.columns=['tic','date','open','high','low','close','pre_close','change','pct_chg','volume','amount']\n self.df = self.df.sort_values(by=['date','tic']).reset_index(drop=True)\n \n df=self.df[['tic', 'date' , 'open' , 'high' , 'low' , 'close' , 'volume' ]]\n df[\"date\"]= pd.to_datetime(df[\"date\"],format=\"%Y%m%d\")\n df[\"day\"] = df[\"date\"].dt.dayofweek \n df[\"date\"] = df.date.apply(lambda x: x.strftime(\"%Y-%m-%d\"))\n \n df = df.dropna()\n df = df.sort_values(by=['date','tic']).reset_index(drop=True)\n\n print(\"Shape of DataFrame: \", df.shape)\n\n return df\n\n def clean_data(self, df: pd.DataFrame) -> pd.DataFrame:\n dfc=copy.deepcopy(df)\n \n dfcode=pd.DataFrame(columns=['tic'])\n dfdate=pd.DataFrame(columns=['date'])\n\n dfcode.tic=dfc.tic.unique()\n \n if \"time\" in dfc.columns.values.tolist():\n dfc = dfc.rename(columns={'time': 'date'})\n \n dfdate.date=dfc.date.unique()\n dfdate.sort_values(by=\"date\",ascending=False,ignore_index=True,inplace=True)\n \n # the old pandas may not support pd.merge(how=\"cross\")\n try: 
\n df1=pd.merge(dfcode,dfdate,how=\"cross\")\n except:\n print(\"Please wait for a few seconds...\")\n df1=pd.DataFrame(columns=[\"tic\",\"date\"])\n for i in range(dfcode.shape[0]):\n for j in range(dfdate.shape[0]):\n df1=df1.append(pd.DataFrame(data={\"tic\":dfcode.iat[i,0], \"date\":dfdate.iat[j,0]},index=[(i+1)*(j+1)-1]))\n \n df2=pd.merge(df1,dfc,how=\"left\",on=[\"tic\",\"date\"])\n \n\n # back fill missing data then front fill\n df3=pd.DataFrame(columns=df2.columns)\n for i in self.ticker_list:\n df4=df2[df2.tic==i].fillna(method=\"bfill\").fillna(method=\"ffill\")\n df3=pd.concat([df3, df4], ignore_index=True)\n\n df3=df3.fillna(0)\n\n # reshape dataframe\n df3 = df3.sort_values(by=['date','tic']).reset_index(drop=True)\n\n print(\"Shape of DataFrame: \", df3.shape)\n\n return df3\n\n def add_technical_indicator(self, data: pd.DataFrame, tech_indicator_list: List[str], use_stockstats: bool=True) \\\n -> pd.DataFrame:\n \"\"\"\n calculate technical indicators\n use stockstats/talib package to add technical inidactors\n :param data: (df) pandas dataframe\n :return: (df) pandas dataframe\n \"\"\"\n df = data.copy()\n if \"date\" in df.columns.values.tolist():\n df = df.rename(columns={'date': 'time'})\n \n if self.data_source == \"ccxt\":\n df = df.rename(columns={'index': 'time'})\n\n # df = df.reset_index(drop=False)\n # df = df.drop(columns=[\"level_1\"])\n # df = df.rename(columns={\"level_0\": \"tic\", \"date\": \"time\"})\n if use_stockstats: # use stockstats\n stock = stockstats.StockDataFrame.retype(df.copy())\n unique_ticker = stock.tic.unique()\n #print(unique_ticker)\n for indicator in tech_indicator_list:\n indicator_df = pd.DataFrame()\n for i in range(len(unique_ticker)):\n try:\n temp_indicator = stock[stock.tic == unique_ticker[i]][indicator]\n temp_indicator = pd.DataFrame(temp_indicator)\n temp_indicator[\"tic\"] = unique_ticker[i]\n temp_indicator[\"time\"] = df[df.tic == unique_ticker[i]][\n \"time\"\n ].to_list()\n indicator_df = indicator_df.append(\n temp_indicator, ignore_index=True\n )\n except Exception as e:\n print(e)\n #print(indicator_df)\n df = df.merge(\n indicator_df[[\"tic\", \"time\", indicator]], on=[\"tic\", \"time\"], how=\"left\"\n )\n else: # use talib\n final_df = pd.DataFrame()\n for i in df.tic.unique():\n tic_df = df[df.tic == i]\n tic_df['macd'], tic_df['macd_signal'], tic_df['macd_hist'] = MACD(tic_df['close'], fastperiod=12,\n slowperiod=26, signalperiod=9)\n tic_df['rsi'] = RSI(tic_df['close'], timeperiod=14)\n tic_df['cci'] = CCI(tic_df['high'], tic_df['low'], tic_df['close'], timeperiod=14)\n tic_df['dx'] = DX(tic_df['high'], tic_df['low'], tic_df['close'], timeperiod=14)\n final_df = final_df.append(tic_df)\n df = final_df\n\n df = df.sort_values(by=[\"time\", \"tic\"])\n df = df.rename(columns={'time': 'date'}) # 1/11 added by hx\n df = df.dropna()\n print(\"Succesfully add technical indicators\")\n return df\n\n def get_trading_days(self, start: str, end: str) -> List[str]:\n print('not supported currently!')\n return ['not supported currently!']\n \n def add_turbulence(self, data: pd.DataFrame) \\\n -> pd.DataFrame:\n print('not supported currently!')\n return pd.DataFrame(['not supported currently!'])\n\n def calculate_turbulence(self, data: pd.DataFrame, time_period: int = 252) \\\n -> pd.DataFrame:\n print('not supported currently!')\n return pd.DataFrame(['not supported currently!'])\n \n def add_vix(self, data: pd.DataFrame) \\\n -> pd.DataFrame:\n print('not supported currently!')\n return pd.DataFrame(['not supported 
\n\n\nimport tushare as ts\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nclass ReturnPlotter:\n \"\"\"\n An easy-to-use plotting tool to plot cumulative returns over time.\n Baseline supports equal weighting (default) and any stock you want to use for comparison.\n \"\"\"\n def __init__(self, df_account_value, df_trade, start_date, end_date):\n self.start = start_date\n self.end = end_date\n self.trade = df_trade\n self.df_account_value = df_account_value\n\n def get_baseline(self, ticket):\n df = ts.get_hist_data(ticket, start=self.start, end=self.end)\n df.loc[:,'dt']=df.index\n df.index=range(len(df))\n df.sort_values(axis=0,by='dt',ascending=True,inplace=True)\n df[\"date\"] = pd.to_datetime(df[\"dt\"],format='%Y-%m-%d')\n return df\n\n def plot(self, baseline_ticket=None):\n \"\"\"\n Plot cumulative returns over time.\n use baseline_ticket to specify the stock you want to use for comparison\n (default: equal-weighted returns)\n \"\"\"\n baseline_label = \"Equal-weight portfolio\"\n tic2label = {\"399300\": \"CSI 300 Index\", \"000016\": \"SSE 50 Index\"}\n if baseline_ticket:\n # use the specified ticker as the baseline\n baseline_df = self.get_baseline(baseline_ticket)\n baseline_df = baseline_df[baseline_df.dt != \"2020-06-26\"] # ours don't have date==\"2020-06-26\"\n baseline = baseline_df.close.tolist()\n if baseline_ticket in tic2label.keys():\n baseline_label = tic2label[baseline_ticket]\n else:\n baseline_label = baseline_ticket\n else:\n # equal weighting\n all_date = self.trade.date.unique().tolist()\n baseline = []\n for day in all_date:\n day_close = self.trade[self.trade[\"date\"]==day].close.tolist()\n avg_close = sum(day_close)/len(day_close)\n baseline.append(avg_close)\n\n ours = self.df_account_value.account_value.tolist()\n ours = self.pct(ours)\n baseline = self.pct(baseline)\n\n days_per_tick = 60 # you should scale this variable according to the total trading days\n time = list(range(len(ours)))\n datetimes = self.df_account_value.date.tolist()\n ticks = []\n for t, tick in zip(time, datetimes):\n if t % days_per_tick == 0: ticks.append(tick)\n\n plt.title(\"Cumulative Returns\")\n plt.plot(time, ours, label=\"DDPG Agent\", color=\"green\")\n plt.plot(time, baseline, label=baseline_label, color=\"grey\")\n plt.xticks([i*days_per_tick for i in range(len(ticks))], ticks, fontsize=7)\n\n plt.xlabel(\"Date\")\n plt.ylabel(\"Cumulative Return\")\n\n plt.legend()\n plt.show()\n\n def plot_all(self):\n baseline_label = \"Equal-weight portfolio\"\n tic2label = {\"399300\": \"CSI 300 Index\", \"000016\": \"SSE 50 Index\"}\n\n # 399300\n baseline_ticket = \"399300\"\n baseline_df = self.get_baseline(baseline_ticket)\n baseline_df = baseline_df[baseline_df.dt != \"2020-06-26\"] # ours don't have date==\"2020-06-26\"\n baseline_300 = baseline_df.close.tolist()\n baseline_label_300 = tic2label[baseline_ticket]\n\n # 000016\n baseline_ticket = \"000016\"\n baseline_df = self.get_baseline(baseline_ticket)\n baseline_df = baseline_df[baseline_df.dt != \"2020-06-26\"] # ours don't have date==\"2020-06-26\"\n baseline_50 = baseline_df.close.tolist()\n baseline_label_50 = tic2label[baseline_ticket]\n\n # equal weighting\n all_date = self.trade.date.unique().tolist()\n baseline_equal_weight = []\n for day in all_date:\n day_close = self.trade[self.trade[\"date\"]==day].close.tolist()\n avg_close = sum(day_close)/len(day_close)\n baseline_equal_weight.append(avg_close)\n\n ours = self.df_account_value.account_value.tolist()\n\n ours = self.pct(ours)\n baseline_300 = self.pct(baseline_300)\n baseline_50 = self.pct(baseline_50)\n baseline_equal_weight = self.pct(baseline_equal_weight)\n\n days_per_tick = 60 # you should scale this variable according to the total trading days\n time = list(range(len(ours)))\n datetimes = self.df_account_value.date.tolist()\n ticks = []\n for t, tick in zip(time, datetimes):\n if t % days_per_tick == 0: ticks.append(tick)\n\n plt.title(\"Cumulative Returns\")\n plt.plot(time, ours, label=\"DDPG Agent\", color=\"darkorange\")\n plt.plot(time, baseline_equal_weight, label=baseline_label, color=\"cornflowerblue\") # equal weight\n plt.plot(time, baseline_300, label=baseline_label_300, color=\"lightgreen\") # 399300\n plt.plot(time, baseline_50, label=baseline_label_50, color=\"silver\") # 000016\n plt.xlabel(\"Date\")\n plt.ylabel(\"Cumulative Return\")\n\n plt.xticks([i*days_per_tick for i in range(len(ticks))], ticks, fontsize=7)\n plt.legend()\n plt.show()\n\n def pct(self, l):\n \"\"\"Normalize a series by its first value (cumulative return relative to the start).\"\"\"\n base = l[0]\n return [x/base for x in l]\n\n def get_return(self, df, value_col_name=\"account_value\"):\n df = deepcopy(df)\n df[\"daily_return\"] = df[value_col_name].pct_change(1)\n df[\"date\"] = pd.to_datetime(df[\"date\"],format='%Y-%m-%d')\n df.set_index(\"date\", inplace=True, drop=True)\n df.index = df.index.tz_localize(\"UTC\")\n return pd.Series(df[\"daily_return\"], index=df.index)"
]
| [
[
"matplotlib.pyplot.legend",
"pandas.merge",
"pandas.to_datetime",
"pandas.concat",
"pandas.Series",
"matplotlib.pyplot.title",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
]
|
PaccMann/paccmann_polymer | [
"3b39536fcde2f71ff76f822849f094489907e58b"
]
| [
"paccmann_polymer/topologically_regularized_models/experiments/cora/run_vae.py"
]
| [
"\"\"\"Most of the code of this script has been adapted from the original repo\nof pytorch-geometric: \nhttps://github.com/rusty1s/pytorch_geometric/blob/master/examples/infomax.py\n\"\"\"\nimport os\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch_geometric.datasets import Planetoid\nfrom torch_geometric.nn import GCNConv, DeepGraphInfomax\nfrom paccmann_polymer.topologically_regularized_models.baselines.cora.vae \\\n import VAE, GCVAE\n\nimport networkx as nx\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom torch_geometric.utils import k_hop_subgraph\nfrom torch_geometric.data import (\n Data, ClusterData, NeighborSampler, DataLoader, ClusterLoader\n)\nfrom paccmann_polymer.topologically_regularized_models.baselines.cora.utils \\\n import load_data\n\nimport argparse\n\n\ndef train(model, loader, optimizer, writer):\n model.train()\n # loader = NeighborSampler(data.edge_index, sizes=[10])\n # loader = DataLoader(subsampled_data, batch_size=1)\n for _iter, batch in enumerate(loader):\n optimizer.zero_grad()\n recon, mu, logvar, z = model(batch.x)\n\n # Compute distances\n graph = nx.from_edgelist(batch.edge_index.T.tolist())\n graph.add_nodes_from(list(range(len(batch.x))))\n dists = nx.floyd_warshall_numpy(graph)\n dists[np.isinf(dists)] = 0\n\n loss, (BCE, KLD,\n LRG) = model.loss(recon, batch.x, mu, logvar, z, dists)\n loss.backward()\n optimizer.step()\n writer.add_scalar('loss', loss.item(), epoch * len(loader) + _iter)\n writer.add_scalar('loss_rec', BCE.item(), epoch * len(loader) + _iter)\n writer.add_scalar('loss_KL', KLD.item(), epoch * len(loader) + _iter)\n writer.add_scalar(\n 'loss_graph', LRG.item(),\n epoch * len(loader) + _iter\n )\n\n return loss.item()\n\n\ndef test(model, data):\n model.eval()\n _, mu, _, z = model(data.x)\n acc = model.test(\n mu[data.train_mask],\n data.y[data.train_mask],\n mu[data.test_mask],\n data.y[data.test_mask],\n max_iter=150\n )\n return acc\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Cora dataset on VAE')\n parser.add_argument(\n 'dataset',\n type=str,\n choices=['Cora', 'CiteSeer', 'PubMed'],\n help='Which dataset to use'\n )\n\n args = parser.parse_args()\n dataset_name = args.dataset\n\n for name in ['gc-vae', 'gc-vae-strong']:\n graph_loss_contrib = {'vae': 0, 'gc-vae': 1, 'gc-vae-strong': 10}[name]\n\n writer = SummaryWriter(f'logs_{dataset_name}/{name}')\n\n path = os.path.join('.', 'data', dataset_name)\n dataset = Planetoid(path, dataset_name)\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = GCVAE(dataset.num_features,\n graph_scale=graph_loss_contrib).to(device)\n\n data = dataset[0].to(device)\n batched_data = load_data(data)\n loader = DataLoader(batched_data)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n\n for epoch in range(1, 301):\n loss = train(model, loader, optimizer, writer)\n print('Epoch: {:03d}, Loss: {:.4f}'.format(epoch, loss))\n acc = test(model, data)\n print(f'{dataset_name}\\t{name}\\tAccuracy: {acc}')\n"
]
| [
[
"numpy.isinf",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available"
]
]
|
imsb-uke/podometric_u_net | [
"a33afcc186d618889df73c7ab2941dfbb63574ac"
]
| [
"network/image_generator.py"
]
| [
"import os\nimport numpy as np\nimport random\nfrom skimage.io import imread, imsave, imshow, show\nfrom skimage.transform import resize, rescale, rotate, warp, SimilarityTransform\nfrom skimage.exposure import rescale_intensity, equalize_hist\nfrom skimage.util import crop\nimport time\n\nfrom network.dataset.image_loading import load_image, load_mask\nfrom network.dataset.image_preprocessing import preprocess_image\n\n\n# def data_preprocessing(image, imagename, mask, maskname, target_img_shape, number_msk_channels, rescale,\n# contrast_stretch, histogram_equalization, do_data_augm, **data_augm_args):\n# # resize\n# # Resize the image if the image axis are larger than target_img_shape[0], ...[1]\n# if image.shape[0] == image.shape[1] and image.shape[0] > target_img_shape[0]:\n# # Scale the image down by a factor s\n# s = float(target_img_shape[0] / image.shape[0])\n# print(\"scale factor: \", s)\n# image = resize(image, (target_img_shape[0], target_img_shape[1]), anti_aliasing=True, preserve_range=True)\n# elif image.shape[0] > target_img_shape[0] or image.shape[1] > target_img_shape[1]:\n# # Find the larger axis. Find the factor s to scale to x_dim.\n# # Apply it for both dimensions\n# if image.shape[0] > image.shape[1]:\n# s = float(target_img_shape[0] / image.shape[0])\n# image = resize(image, (target_img_shape[0], round(s * image.shape[1])), preserve_range=True)\n# else:\n# s = float(target_img_shape[1] / image.shape[1])\n# print(\"scale factor: \", s)\n# image = resize(image, (round(s * image.shape[0]), target_img_shape[1]), preserve_range=True)\n#\n# # same thing for masks\n# if mask.shape[0] == mask.shape[1] and mask.shape[0] > target_img_shape[0]:\n# s = float(target_img_shape[0] / mask.shape[0])\n# if len(mask.shape) == 4:\n# mask = mask[:, :, :, 0]\n# mask = resize(mask, (target_img_shape[0], target_img_shape[1], mask.shape[2]), anti_aliasing=True, preserve_range=True)\n# elif mask.shape[0] > target_img_shape[0] or mask.shape[1] > target_img_shape[1]:\n# if mask.shape[0] > mask.shape[1]:\n# s = float(target_img_shape[0] / mask.shape[0])\n# if len(mask.shape) == 4:\n# mask = mask[:, :, :, 0]\n# mask = resize(mask, (target_img_shape[0], round(s * mask.shape[1]), mask.shape[2]), preserve_range=True)\n# else:\n# s = float(target_img_shape[1] / mask.shape[1])\n# # BUG: ValueError: len(output_shape) cannot be smaller than the image dimensions\n# # 1021, 1024 vs (2029, 2034, 2, 3)\n# if len(mask.shape) == 4:\n# mask = mask[:, :, :, 0]\n# mask = resize(mask, (round(s * mask.shape[0]), target_img_shape[1], mask.shape[2]), preserve_range=True)\n# elif len(mask.shape) == 4:\n# mask = mask[:, :, :, 0]\n#\n#\n# # Fill the border regions with 0 and put the image to the middle\n# if image.shape[0] < target_img_shape[0] or image.shape[1] < target_img_shape[1]:\n# new_image = np.zeros((target_img_shape[0], target_img_shape[1], target_img_shape[2]))\n# x_pad = int((target_img_shape[0] - image.shape[0]) / 2)\n# y_pad = int((target_img_shape[1] - image.shape[1]) / 2)\n# # print(x_pad,y_pad)\n# paste(new_image, image, (x_pad, y_pad))\n# image = new_image\n# # z-dim is the number of msk channels\n# # new_image = np.zeros((target_img_shape[0], target_img_shape[1], number_msk_channels))\n# # paste(new_image, mask, (x_pad, y_pad))\n# # mask = new_image\n#\n# if mask.shape[0] < target_img_shape[0] or mask.shape[1] < target_img_shape[1]:\n# new_image = np.zeros((target_img_shape[0], target_img_shape[1], mask.shape[2]))\n# # WTH soll das hier sein? 
Zweimal genau das gleiche, also wird x_pad immer 0\n# # wenn target_img_hsape with new_image.shape verglichen wird => muss mask shape sein\n# x_pad = int((target_img_shape[0] - mask.shape[0]) / 2)\n# y_pad = int((target_img_shape[1] - mask.shape[1]) / 2)\n# # print(x_pad,y_pad)\n# paste(new_image, mask, (x_pad, y_pad))\n# mask = new_image\n#\n# # rescale\n# if rescale == \"min_max\":\n# # Norm image values to 0 to 1\n# smooth = 1.\n# image = (image - np.min(image) + smooth) / (np.max(image) + np.min(image) + smooth)\n# mask = mask / 255\n# else:\n# # Norm image values according to the data format\n# image = image / rescale\n# mask = mask / 255\n#\n# # contrast stretching\n# if contrast_stretch:\n# image = contrast_stretch_image(image)\n#\n# # histogram equalization\n# if histogram_equalization:\n# image = histo_equalize_image(image)\n#\n# # data_augm\n# if do_data_augm:\n# image, mask = data_augmentation(image, imagename, mask, maskname, **data_augm_args)\n# return image, mask\n#\n#\n# def data_preprocessing_unsupervised(image, imagename, target_img_shape, number_msk_channels, rescale,\n# contrast_stretch, histogram_equalization, do_data_augm, **data_augm_args):\n# # resize\n# # Resize the image if the image axis are larger than target_img_shape[0], ...[1]\n# if image.shape[0] == image.shape[1] and image.shape[0] > target_img_shape[0]:\n# # Scale the image down by a factor s\n# s = float(target_img_shape[0] / image.shape[0])\n# print(\"scale factor: \", s)\n# image = resize(image, (target_img_shape[0], target_img_shape[1]), anti_aliasing=True, preserve_range=True)\n# elif image.shape[0] > target_img_shape[0] or image.shape[1] > target_img_shape[1]:\n# # Find the larger axis. Find the factor s to scale to x_dim.\n# # Apply it for both dimensions\n# if image.shape[0] > image.shape[1]:\n# s = float(target_img_shape[0] / image.shape[0])\n# print(\"scale factor: \", s)\n# image = resize(image, (target_img_shape[0], round(s * image.shape[1])), preserve_range=True)\n# else:\n# s = float(target_img_shape[1] / image.shape[1])\n# print(\"scale factor: \", s)\n# try:\n# image = resize(image, (round(s * image.shape[0]), target_img_shape[1]), preserve_range=True)\n# except TypeError:\n# print('TypeError: int object is not subscriptable')\n# print('The type for image is: ' + str(type(image)))\n# print('The type for image.size is: ' + str(type(image.size)))\n# print('The type for image.shape is: ' + str(type(image.shape)))\n# print('The type for target_img_shape is: ' + str(type(target_img_shape)))\n# print('The type for s is: ' + str(type(s)))\n# # Fill the border regions with 0 and put the image to the middle\n# if image.shape[0] < target_img_shape[0] or image.shape[1] < target_img_shape[1]:\n# new_image = np.zeros((target_img_shape[0], target_img_shape[1], target_img_shape[2]))\n# x_pad = int((target_img_shape[0] - new_image.shape[0]) / 2)\n# y_pad = int((target_img_shape[1] - new_image.shape[1]) / 2)\n# # print(x_pad,y_pad)\n# paste(new_image, image, (x_pad, y_pad))\n# image = new_image\n#\n# # rescale\n# if rescale == \"min_max\":\n# # Norm image values to 0 to 1\n# smooth = 1.\n# image = (image - np.min(image) + smooth) / (np.max(image) + np.min(image) + smooth)\n# else:\n# # Norm image values according to the data format\n# image = image / rescale\n#\n# # contrast stretching\n# if contrast_stretch:\n# image = contrast_stretch_image(image)\n#\n# # histogram equalization\n# if histogram_equalization:\n# image = histo_equalize_image(image)\n#\n# # data_augm\n# if do_data_augm:\n# raise 
ValueError(\"The function data_augmentation has not been implemented for the unsupervised task.\"\n# \"Do you need it?\")\n# # image, mask = data_augmentation(image, imagename, mask, maskname, **data_augm_args)\n# return image\n#\n#\n# def contrast_stretch_image(img, perc=99.8):\n# # Apply it on each channel\n# for channel in range(0, img.shape[2]):\n# p_lower, p_upper = np.percentile(img[:, :, channel], (100 - perc, perc))\n# img[:, :, channel] = rescale_intensity(img[:, :, channel], in_range=(p_lower, p_upper))\n# return img\n#\n#\n# def histo_equalize_image(img):\n# for channel in range(0, img.shape[2]):\n# img[:, :, channel] = equalize_hist(img[:, :, channel])\n# return img\n#\n#\n# def shifting(image, x_shift, y_shift):\n# tform = SimilarityTransform(translation=(x_shift, y_shift))\n# shifted_image = warp(image, tform, mode='constant', cval=0)\n# return shifted_image\n#\n#\n# def flipping(image, flip_horizontal, flip_vertical):\n# # Do horizontal and/or vertical flipping\n# if flip_horizontal:\n# image = np.flip(image, axis=1)\n# if flip_vertical:\n# image = np.flip(image, axis=0)\n# return image\n#\n#\n# def rotation(image, rotation_degree):\n# image = rotate(image, rotation_degree)\n# return image\n#\n#\n# def zoom(image, x_dim, y_dim, zooming_factor):\n# if len(image.shape) == 3:\n# rescaled_image = rescale(image, zooming_factor, mode='reflect', anti_aliasing=True, multichannel=True)\n# else:\n# rescaled_image = rescale(image, zooming_factor, mode='reflect', anti_aliasing=True, multichannel=False)\n# if zooming_factor > 1:\n# left = round((rescaled_image.shape[0] - x_dim) / 2)\n# right = left + x_dim\n# upper = round((rescaled_image.shape[1] - y_dim) / 2)\n# lower = upper + y_dim\n# cropped_image = rescaled_image[upper:lower, left:right]\n# else:\n# left = round((x_dim - rescaled_image.shape[0]) / 2)\n# right = left + rescaled_image.shape[0]\n# upper = round((y_dim - rescaled_image.shape[1]) / 2)\n# lower = upper + rescaled_image.shape[1]\n# cropped_image = np.zeros(image.shape)\n# if len(image.shape) == 2:\n# cropped_image[upper:lower, left:right] = rescaled_image\n# else:\n# cropped_image[upper:lower, left:right, :] = rescaled_image\n# return cropped_image\n#\n#\n# def zoom_resize(image, x_dim, y_dim, zooming_factor):\n# if zooming_factor > 1:\n# resized_image = resize(image, (round(zooming_factor * x_dim), round(zooming_factor * y_dim)),\n# anti_aliasing=True, preserve_range=True)\n# # print(round(zooming_factor * x_dim))\n# # print(round(zooming_factor * y_dim))\n# left = round((round(zooming_factor * x_dim) - x_dim) / 2)\n# upper = round((round(zooming_factor * y_dim) - y_dim) / 2)\n# right = left + x_dim\n# lower = upper + y_dim\n# cropped_image = resized_image[upper:lower, left:right]\n# else:\n# resized_image = resize(image, (round(zooming_factor * x_dim), round(zooming_factor * y_dim)),\n# anti_aliasing=True, preserve_range=True)\n# # print(round(zooming_factor * x_dim))\n# # print(round(zooming_factor * y_dim))\n# left = round((x_dim - round(zooming_factor * x_dim)) / 2)\n# upper = round((y_dim - round(zooming_factor * y_dim)) / 2)\n# right = left + round(zooming_factor * x_dim)\n# lower = upper + round(zooming_factor * y_dim)\n# # print(upper, lower, left, right)\n# cropped_image = np.zeros(image.shape)\n# if len(image.shape) == 2:\n# cropped_image[upper:lower, left:right] = resized_image\n# else:\n# cropped_image[upper:lower, left:right, :] = resized_image\n# return cropped_image\n#\n#\n# def signal_reduction(image, channel, signal_reduction_factor):\n# if channel == 
0:\n# image[:, :, 0] = signal_reduction_factor * image[:, :, 0]\n# if channel == 1:\n# image[:, :, 1] = signal_reduction_factor * image[:, :, 1]\n# if channel == 2:\n# image[:, :, 2] = signal_reduction_factor * image[:, :, 2]\n# return image\n#\n#\n# def data_augmentation(image, imagename, mask, maskname, verbose, width_shift_range, height_shift_range,\n# flip_horizontal, flip_vertical, rotation_range,\n# zooming_range, c0_signal_min, c0_signal_max,\n# c1_signal_min, c1_signal_max, c2_signal_min,\n# c2_signal_max, c2_reduce_podo_signal, augm_data_path, save_augm_data, plot_augm_data):\n# # Get Arguments for the data augmentation functions\n# x_dim, y_dim = image.shape[0], image.shape[1]\n#\n# image_orig = image\n# mask_orig = mask\n#\n# # Do shifting if\n# if width_shift_range or height_shift_range:\n# shift_range_x = int(width_shift_range * x_dim)\n# shift_range_y = int(height_shift_range * y_dim)\n# x_shift = random.randint(-shift_range_x, shift_range_x + 1)\n# y_shift = random.randint(-shift_range_y, shift_range_y + 1)\n# if verbose:\n# print(\"Data augmentation: Shifting by x: \", x_shift, \" and y: \", y_shift)\n# image = shifting(image, x_shift, y_shift)\n# mask = shifting(mask, x_shift, y_shift)\n#\n# # Do flipping if\n# if flip_horizontal:\n# if random.randint(0, 1):\n# if verbose:\n# print(\"Data augmentation: horizontal (=left right) flip\")\n# image = flipping(image, flip_horizontal, 0)\n# mask = flipping(mask, flip_horizontal, 0)\n# if flip_vertical:\n# if random.randint(0, 1):\n# if verbose:\n# print(\"Data augmentation: verical (=top bottom) flip\")\n# image = flipping(image, 0, flip_vertical)\n# mask = flipping(mask, 0, flip_vertical)\n#\n# # Do rotation if\n# if rotation_range:\n# rotation_degree = (random.random() * 2 * rotation_range) - rotation_range\n# if verbose:\n# print(\"Data augmentation: Rotation by: \", rotation_degree)\n# image = rotation(image, rotation_degree)\n# mask = rotation(mask, rotation_degree)\n#\n# # Do zoom if\n# if zooming_range:\n# zooming_factor = (random.random() * 2 * zooming_range) - zooming_range + 1\n# if verbose:\n# print(\"Data augmentation: Zoom by: \", zooming_factor)\n# image = zoom(image, x_dim, y_dim, zooming_factor)\n# mask = zoom(mask, x_dim, y_dim, zooming_factor)\n#\n# # Do signal reduction if\n# if c0_signal_min < 1:\n# if c0_signal_min != c0_signal_max:\n# signal_reduction_factor = (random.random() + (c0_signal_min / (c0_signal_max - c0_signal_min))) * (\n# c0_signal_max - c0_signal_min)\n# else:\n# signal_reduction_factor = c0_signal_min\n# if verbose:\n# print(\"Data augmentation: Signal reduction by: \", signal_reduction_factor)\n# image = signal_reduction(image, 0, signal_reduction_factor)\n#\n# if c1_signal_min < 1:\n# if c1_signal_min != c1_signal_max:\n# signal_reduction_factor = (random.random() + (c1_signal_min / (c1_signal_max - c1_signal_min))) * (\n# c1_signal_max - c1_signal_min)\n# else:\n# signal_reduction_factor = c1_signal_min\n# if verbose:\n# print(\"Data augmentation: Signal reduction by: \", signal_reduction_factor)\n# image = signal_reduction(image, 1, signal_reduction_factor)\n#\n# if c2_signal_min < 1:\n# if c2_signal_min != c2_signal_max:\n# signal_reduction_factor = (random.random() + (c2_signal_min / (c2_signal_max - c2_signal_min))) * (\n# c2_signal_max - c2_signal_min)\n# else:\n# signal_reduction_factor = c2_signal_min\n# if verbose:\n# print(\"Data augmentation: Signal reduction by: \", signal_reduction_factor)\n# image = signal_reduction(image, 2, signal_reduction_factor)\n#\n# if 
c2_reduce_podo_signal:\n# image_podo = image[:, :, 2]\n# if len(mask.shape) == 2:\n# image_podo[(image_podo * (mask)) > 0.5] = image_podo[(image_podo * (mask)) > 0.5] * 0.75\n# else:\n# image_podo[(image_podo * (mask[:, :, 1])) > 0.5] = image_podo[(image_podo * (mask[:, :, 1])) > 0.5] * 0.75\n# image[:, :, 2] = image_podo\n#\n# # Save augmented images if\n# if save_augm_data:\n# timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n# image_out = (255 * image).astype(np.uint8)\n# imsave(os.path.join(os.path.join(augm_data_path, \"images\"), \"augm_\" + timestr + \"_\" + imagename), image_out)\n# for i in range(mask.shape[2]):\n# mask_out = (255 * mask[:, :, i]).astype(np.uint8)\n# imsave(os.path.join(os.path.join(augm_data_path, \"masks\"), \"augm_\" + timestr + \"_\" + maskname[i]), mask_out)\n#\n# # Plot augmented images if\n# if plot_augm_data:\n# # Image before augmentation\n# imshow(image_orig)\n# show()\n# # Image after augmentation\n# imshow(image)\n# show()\n#\n# # Future extentions:\n# # Do elastic deformation if this is necessary\n#\n# return image, mask\n\n\n# Image generator function\ndef image_generator(image_path, image_format, mask_foldername, mask_suffix, batch_size, data_percentage, shuffle,\n **data_gen_args):\n all_image_list = []\n for path in image_path:\n image_list = [_ for _ in os.listdir(path) if _.endswith(image_format)]\n full_image_list = [path + x for x in image_list]\n all_image_list.extend(full_image_list)\n all_image_list.sort()\n print(all_image_list)\n # mask_list = os.listdir(mask_path)\n # mask_list.sort()\n\n print(len(all_image_list))\n\n if data_percentage < 1:\n # The following loop is only important for the reduction to also work with pre-augmented data\n suffix_list = []\n for img_name in all_image_list:\n if img_name.find('ANCA') != -1:\n suffix = img_name[img_name.find('ANCA'):]\n suffix_list.append(suffix)\n elif img_name.find('HNE') != -1:\n suffix = img_name[img_name.find('HNE'):]\n suffix_list.append(suffix)\n elif img_name.find('control') != -1:\n suffix = img_name[img_name.find('control'):]\n suffix_list.append(suffix)\n suffix_set = set(suffix_list)\n rand_ind_subset = random.sample(range(len(suffix_set)), int(data_percentage * len(suffix_set)))\n # suffix_sublist = list(suffix_set)[rand_ind_subset]\n suffix_sublist = [list(suffix_set)[index] for index in rand_ind_subset]\n\n # new_image_list = []\n # for img_name in all_image_list:\n # for suffix in suffix_sublist:\n # img_name = [s for s in all_image_list if suffix in s]\n # # if img_name.find(suffix):\n # new_image_list.append(img_name)\n new_image_list = [s for s in all_image_list if any(suffix in s for suffix in suffix_sublist)]\n\n all_image_list = new_image_list\n\n print(len(all_image_list))\n\n # Supervised mode:\n if mask_suffix is not \"unsupervised\":\n i = 0\n while True:\n batch_images = []\n batch_masks = []\n\n if shuffle:\n batch_paths = np.random.choice(a=all_image_list, size=batch_size)\n else:\n batch_paths = [] # np.ndarray(shape=(1, batch_size))\n for j in range(batch_size):\n batch_paths.append(all_image_list[i])\n i = i + 1\n if i == len(all_image_list):\n i = 0\n break\n\n # print(\"Elements in batch path:\",batch_paths)\n for image_pathname in batch_paths:\n image = load_image(image_pathname)\n # Open masks and put them in one array\n maskfolder = os.path.join(os.path.split(os.path.split(image_pathname)[0])[0], mask_foldername + \"/\")\n # print(maskfolder)\n # print(image_pathname)\n maskname = []\n maskname.append(os.path.split(image_pathname)[1][0:-4] + mask_suffix[0])\n 
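# load the first mask, then stack any further mask channels along axis 2\n 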
first_mask = load_mask(os.path.join(maskfolder, maskname[0]))\n mask = first_mask\n mask = np.expand_dims(mask, axis=2)\n for k in range(1, len(mask_suffix)):\n maskname.append(os.path.split(image_pathname)[1][0:-4] + mask_suffix[k])\n further_mask = load_mask(os.path.join(maskfolder, maskname[k]))\n further_mask = np.expand_dims(further_mask, axis=2)\n mask = np.concatenate((mask, further_mask), axis=2)\n\n print(\"\")\n print(image_pathname)\n imagename = os.path.split(image_pathname)[1]\n print(maskname)\n # Do preprocessing with normalisation and augmentation\n image, mask = preprocess_image(image, imagename, mask, maskname, **data_gen_args)\n\n batch_images += [image]\n batch_masks += [mask]\n\n batch_x = np.array(batch_images)\n batch_y = np.array(batch_masks)\n\n print(batch_x.shape)\n print(batch_y.shape)\n # print(\"end of batch\")\n yield (batch_x, batch_y)\n\n # Unsupervised mode:\n elif mask_suffix is \"unsupervised\":\n # Set mask and maskname so that its boolean value will be false (see more in the function data_preprocessing)\n mask = None\n maskname = None\n i = 0\n while True:\n batch_images = []\n\n if shuffle:\n batch_paths = np.random.choice(a=all_image_list, size=batch_size)\n else:\n batch_paths = [] # np.ndarray(shape=(1, batch_size))\n for j in range(batch_size):\n batch_paths.append(all_image_list[i])\n i = i + 1\n if i == len(all_image_list):\n i = 0\n break\n\n # print(\"Elements in batch path:\",batch_paths)\n for image_pathname in batch_paths:\n image = load_image(image_pathname)\n\n print(\"\")\n print(image_pathname)\n imagename = os.path.split(image_pathname)[1]\n # Do preprocessing with normalisation and augmentation\n image = preprocess_image(image, imagename, mask=None, maskname=None, **data_gen_args)\n\n batch_images += [image]\n\n batch_x = np.array(batch_images)\n\n print(batch_x.shape)\n # print(\"end of batch\")\n yield batch_x\n\n\nif __name__ == \"__main__\":\n # Run script\n from config.config import Config\n\n\n class TrainingConfig(Config):\n # GPUs and IMAGES_PER_GPU needs to be configured here!\n GPUs = ''\n IMAGES_PER_GPU = 4\n\n ZOOMING_RANGE = 0.5 # high value to check if it works\n\n WIDTH_SHIFT_RANGE = 0.5\n HEIGHT_SHIFT_RANGE = 0.5\n FLIP_HORIZONTAL = True\n FLIP_VERTICAL = True\n ROTATION_RANGE = 45\n\n SEGMENTATION_TASK = 'all' # 'glomerulus' #'all'#'podocytes'\n\n # NAMES_CLASSES = ['glomerulus', 'podocytes']\n MASK_SUFFIXES = ['_mask_kuh.png', '_mask_rad.png']\n\n SUPERVISED_MODE = False\n CONTRAST_STRETCHING = True\n HISTOGRAM_EQUALIZATION = True\n\n\n cfg = TrainingConfig()\n\n # Select the masks\n if cfg.SEGMENTATION_TASK == 'glomerulus':\n mask_suffix = []\n mask_suffix.append(cfg.MASK_SUFFIXES[0])\n elif cfg.SEGMENTATION_TASK == 'podocytes':\n mask_suffix = []\n mask_suffix.append(cfg.MASK_SUFFIXES[1])\n elif cfg.SEGMENTATION_TASK == 'all':\n mask_suffix = cfg.MASK_SUFFIXES\n\n data_path = ['/data/Dataset1/tif/']\n img_train_full = []\n for path in data_path:\n img_train_full.append(os.path.join(path, 'test/images/'))\n\n data_gen_args = dict(shuffle=False, # True,\n target_img_shape=cfg.TARGET_IMG_SHAPE,\n number_msk_channels=cfg.NUMBER_MSK_CHANNELS * cfg.NUM_OUTPUT_CH,\n rescale=cfg.RESCALE,\n contrast_stretch=cfg.CONTRAST_STRETCHING,\n histogram_equalization=cfg.HISTOGRAM_EQUALIZATION,\n do_data_augm=False, # True,\n verbose=False,\n width_shift_range=0.1,\n height_shift_range=0.1,\n flip_horizontal=True,\n flip_vertical=True,\n rotation_range=45,\n zooming_range=0.1,\n # signal_min and signal_max must be in [0,1], signal_max 
must be greater than signal_min\n c0_signal_min=1,\n c0_signal_max=1,\n c1_signal_min=1,\n c1_signal_max=1,\n c2_signal_min=1,\n c2_signal_max=1,\n c2_reduce_podo_signal=False,\n augm_data_path=0, # Enter a path here if augmented data shall be saved\n save_augm_data=False,\n plot_augm_data=False)\n\n if cfg.SUPERVISED_MODE:\n train_gen = image_generator(img_train_full, cfg.IMAGE_FORMAT, cfg.MASK_FOLDERNAME, mask_suffix, cfg.BATCH_SIZE,\n 0.1, **data_gen_args)\n else:\n train_gen = image_generator(img_train_full, cfg.IMAGE_FORMAT, cfg.MASK_FOLDERNAME, \"unsupervised\",\n cfg.BATCH_SIZE, 0.1, **data_gen_args)\n next(train_gen)\n"
]
| [
[
"numpy.concatenate",
"numpy.array",
"numpy.expand_dims",
"numpy.random.choice"
]
]
|
ArtistVan1/Robot_control | [
"ab942c4774dba9bd874811e954b2ff0251233e4a"
]
| [
"test2.py"
]
| [
"import matplotlib.pyplot as plt\r\nimport csv\r\nimport numpy as np\r\n\r\n\r\ndata = []\r\nwith open('C:\\\\Users\\\\ytjun\\\\Desktop\\\\kubo\\\\kubo\\\\data\\\\data0.csv','r') as csvfile:\r\n reader = csv.reader(csvfile)\r\n for row in reader:\r\n data.append(row)\r\ndatas = np.array(data,dtype=\"float\")\r\nprint(datas.shape)\r\ndatas1 = np.zeros([160,160],dtype=\"float\")\r\nprint(datas1)\r\n[rows,cols] = datas.shape\r\n\r\nfor i in range(rows):\r\n for j in range(cols):\r\n if(datas[i,j]>0):\r\n datas[i,j] =255\r\n datas1[i,j] = datas[i,j]\r\n# x = np.empty([rows,16], dtype = \"float\")\r\n# #print(datas)\r\n# for i in range(49):\r\n# if(i+ % 3 ==0 ):\r\n# for j in range(15):\r\n# x[:,j] = datas[:,i-1]\r\n# print(x.shape)\r\n# np.savetxt('new1.csv', x, delimiter = ',')\r\nprint(datas.shape)\r\nprint(datas1.shape)\r\nplt.imshow(datas1)\r\nplt.show()\r\n\r\nimport scipy.misc\r\nscipy.misc.imsave('C:\\\\Users\\\\ytjun\\\\Desktop\\\\kubo\\\\kubo\\\\test\\\\t2\\\\1.jpg', datas1)\r\nimg = plt.imread(\"C:\\\\Users\\\\ytjun\\\\Desktop\\\\kubo\\\\kubo\\\\test\\\\t2\\\\1.jpg\")\r\nplt.imshow(img)\r\nplt.show()\r\n\r\n\r\n"
]
| [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.imread",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show"
]
]
|
Gopal-Dahale/deep-sym-math | [
"7618b1e667d0f392a92872490f7aadf520dfdf4a"
]
| [
"deep_sym_math/lit_models/base.py"
]
| [
"import pytorch_lightning as pl\nimport torch\nimport torchmetrics\nimport torch.nn.functional as F\n\nOPTIMIZER = \"Adam\"\nLR = 1e-4\nONE_CYCLE_TOTAL_STEPS = 100\n\n\nclass Accuracy(torchmetrics.Metric):\n \"\"\"Accuracy Metric with a hack.\"\"\"\n\n def __init__(self, dist_sync_on_step=False):\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n self.add_state(\"n_valid\", default=torch.zeros(1000, dtype=torch.long)) # pylint: disable=not-callable\n self.add_state(\"n_total\", default=torch.zeros(1000, dtype=torch.long)) # pylint: disable=not-callable\n\n def update(self, target, pred_mask, scores, len_target, nb_ops):\n # Update metric states\n # Correct outputs per sequence / valid top-1 predictions\n t = torch.zeros_like(pred_mask).type_as(pred_mask)\n t[pred_mask] += scores.max(1)[1] == target\n valid = (t.sum(0) == len_target - 1).long()\n\n # Stats\n self.n_valid.index_add_(-1, nb_ops, valid)\n self.n_total.index_add_(-1, nb_ops, torch.ones_like(nb_ops))\n\n def compute(self):\n # Compute final result\n _n_valid = self.n_valid.sum().item()\n _n_total = self.n_total.sum().item()\n return _n_valid / _n_total\n\n\nclass BaseLitModel(pl.LightningModule):\n\n def __init__(self, model):\n super().__init__()\n self.model = model\n self.args = {}\n optimizer = self.args.get(\"optimizer\", OPTIMIZER)\n self.optimizer_class = getattr(torch.optim, optimizer)\n self.lr = self.args.get(\"lr\", LR)\n self.one_cycle_max_lr = self.args.get(\"one_cycle_max_lr\", None)\n self.one_cycle_total_steps = self.args.get(\"one_cycle_total_steps\",\n ONE_CYCLE_TOTAL_STEPS)\n self.val_acc = Accuracy()\n self.test_acc = Accuracy()\n\n def configure_optimizers(self):\n optimizer = self.optimizer_class(self.parameters(), lr=self.lr)\n if self.one_cycle_max_lr is None:\n return optimizer\n scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer=optimizer,\n max_lr=self.one_cycle_max_lr,\n total_steps=self.one_cycle_total_steps)\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": scheduler,\n \"monitor\": \"val_loss\"\n }\n\n def forward(self, x):\n return x\n\n def training_step(self, batch, batch_idx): # pylint: disable=unused-argument\n (x, len_x), (y, len_y), _ = batch\n\n # Target words to predict\n alen = torch.arange(len_y.max(), dtype=torch.long).type_as(len_y)\n\n # Do not predict anything given the last target word\n pred_mask = alen[:, None] < len_y[None] - 1\n\n y_masked = y[1:].masked_select(pred_mask[:-1])\n assert len(y_masked) == (len_y - 1).sum().item()\n\n # Forward / Loss\n logits = self.model(x, len_x, y, len_y)\n _, train_loss = self.loss_fn(logits, y_masked, pred_mask)\n self.log(\"train_loss\", train_loss, on_step=False, on_epoch=True)\n return train_loss\n\n def validation_step(self, batch, batch_idx): # pylint: disable=unused-argument\n\n (x, len_x), (y, len_y), nb_ops = batch\n\n # Target words to predict\n alen = torch.arange(len_y.max(), dtype=torch.long).type_as(len_y)\n\n # Do not predict anything given the last target word\n pred_mask = alen[:, None] < len_y[None] - 1\n y_masked = y[1:].masked_select(pred_mask[:-1])\n assert len(y_masked) == (len_y - 1).sum().item()\n\n # Forward / Loss\n logits = self.model(x, len_x, y, len_y)\n scores, val_loss = self.loss_fn(logits, y_masked, pred_mask)\n self.log(\"val_loss\",\n val_loss,\n on_step=False,\n on_epoch=True,\n prog_bar=True)\n\n acc = self.val_acc(y_masked, pred_mask, scores, len_y, nb_ops)\n self.log(\"val_acc\", acc, on_step=False, on_epoch=True, prog_bar=True)\n\n def test_step(self, batch, batch_idx): # pylint: 
disable=unused-argument\n (x, len_x), (y, len_y), nb_ops = batch\n\n # Target words to predict\n alen = torch.arange(len_y.max(), dtype=torch.long).type_as(x)\n\n # Do not predict anything given the last target word\n pred_mask = alen[:, None].type_as(alen) < len_y[None] - 1\n\n y_masked = y[1:].masked_select(pred_mask[:-1])\n assert len(y_masked) == (len_y - 1).sum().item()\n\n # Forward / Loss\n logits = self.model(x, len_x, y, len_y)\n scores, test_loss = self.loss_fn(logits, y_masked, pred_mask)\n self.log(\"test_loss\",\n test_loss,\n on_step=False,\n on_epoch=True,\n prog_bar=True)\n acc = self.test_acc(y_masked, pred_mask, scores, len_y, nb_ops)\n self.log(\"test_acc\", acc, on_step=False, on_epoch=True, prog_bar=True)\n\n def loss_fn(self, logits, y, pred_mask):\n x = logits[pred_mask.unsqueeze(-1).expand_as(logits)].view(\n -1, self.model.dim)\n assert (y == self.model.pad_index).sum().item() == 0\n scores = self.model.dec_proj(x).view(-1, self.model.n_words)\n loss = F.cross_entropy(scores, y, reduction='mean')\n return scores, loss"
]
| [
[
"torch.optim.lr_scheduler.OneCycleLR",
"torch.zeros",
"torch.nn.functional.cross_entropy",
"torch.zeros_like",
"torch.ones_like"
]
]
|
tulcod/PyBRML | [
"55357ef60e394d7146af037dbfcab97f7b29d44b"
]
| [
"brml/ismember.py"
]
| [
"#!/usr/bin/env python\n\n\"\"\"\nsame as ismember() in MATLAB\ntf: TRUE or FALSE\nindex: the index for each A in B\nA[tf] == B[index]\n\"\"\"\nimport numpy as np\n\ndef ismember(a, b):\n #print(\"judge ismember......\")\n#FIXME: data format needed to be unified\n aa = a\n a = np.array(a)\n b = np.array(b)\n #print(\"a:\", a)\n #print(\"b:\", b)\n #print(\"unifying the format......\")\n #print(\"a.ndim =\", a.ndim)\n #print(\"b.ndim =\", b.ndim)\n if a.ndim != b.ndim:\n a = np.array([aa])\n #print(\"a:\", a)\n #print(\"b:\", b)\n tf = np.in1d(a,b) # for newer versions of numpy(v1.4+)\n # tf = np.array([i in b for i in a]) # for older versions of numpy\n u = np.unique(a[tf])\n index = np.array([(np.where(b == i))[0][-1] if t else 0 for i,t in zip(a,tf)])\n return tf, index\n"
]
| [
[
"numpy.in1d",
"numpy.array",
"numpy.where",
"numpy.unique"
]
]
|
franneck94/UdemyTF | [
"23d287b9f30ecb409a41f85f8154174c2a34ad03"
]
| [
"Chapter5_DNN/Chapter5_3_MNISTClassification/mnistKerasOverUnderfitting.py"
]
| [
"from typing import Tuple\n\nimport numpy as np\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.initializers import Constant\nfrom tensorflow.keras.initializers import TruncatedNormal\nfrom tensorflow.keras.layers import Activation\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.optimizers import SGD\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.utils import to_categorical\n\nfrom tf_utils.plotting import display_convergence_error\nfrom tf_utils.plotting import display_convergence_acc\n\n\ndef get_dataset(\n num_features: int, num_classes: int\n) -> Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n x_train = x_train.reshape(-1, num_features).astype(np.float32)[:5000]\n x_test = x_test.reshape(-1, num_features).astype(np.float32)\n\n y_train = to_categorical(\n y_train, num_classes=num_classes, dtype=np.float32\n )[:5000]\n y_test = to_categorical(y_test, num_classes=num_classes, dtype=np.float32)\n\n print(f\"x_train shape: {x_train.shape}\")\n print(f\"y_train shape: {y_train.shape}\")\n print(f\"x_test shape: {x_test.shape}\")\n print(f\"y_test shape: {y_test.shape}\")\n\n return (x_train, y_train), (x_test, y_test)\n\n\ndef build_model(num_features: int, num_classes: int) -> Sequential:\n init_w = TruncatedNormal(mean=0.0, stddev=0.01)\n init_b = Constant(value=0.0)\n\n model = Sequential()\n model.add(\n Dense(\n units=500,\n kernel_initializer=init_w,\n bias_initializer=init_b,\n input_shape=(num_features,),\n )\n )\n model.add(Activation(\"relu\"))\n model.add(\n Dense(units=500, kernel_initializer=init_w, bias_initializer=init_b)\n )\n model.add(Activation(\"relu\"))\n model.add(\n Dense(units=500, kernel_initializer=init_w, bias_initializer=init_b)\n )\n model.add(Activation(\"relu\"))\n model.add(\n Dense(units=500, kernel_initializer=init_w, bias_initializer=init_b)\n )\n model.add(Activation(\"relu\"))\n model.add(\n Dense(units=500, kernel_initializer=init_w, bias_initializer=init_b)\n )\n model.add(Activation(\"relu\"))\n model.add(\n Dense(units=500, kernel_initializer=init_w, bias_initializer=init_b)\n )\n model.add(Activation(\"relu\"))\n model.add(\n Dense(units=500, kernel_initializer=init_w, bias_initializer=init_b)\n )\n model.add(Activation(\"relu\"))\n model.add(\n Dense(units=500, kernel_initializer=init_w, bias_initializer=init_b)\n )\n model.add(Activation(\"relu\"))\n model.add(\n Dense(\n units=num_classes,\n kernel_initializer=init_w,\n bias_initializer=init_b,\n )\n )\n model.add(Activation(\"softmax\"))\n model.summary()\n\n return model\n\n\ndef main() -> None:\n num_features = 784\n num_classes = 10\n\n (x_train, y_train), (x_test, y_test) = get_dataset(\n num_features, num_classes\n )\n\n model = build_model(num_features, num_classes)\n\n opt = RMSprop(learning_rate=0.001)\n\n model.compile(\n loss=\"categorical_crossentropy\", optimizer=opt, metrics=[\"accuracy\"]\n )\n\n history = model.fit(\n x=x_train,\n y=y_train,\n epochs=200,\n batch_size=256,\n verbose=1,\n validation_data=(x_test, y_test),\n )\n\n scores = model.evaluate(x=x_test, y=y_test, verbose=0)\n print(f\"Scores on test set: {scores}\")\n\n train_losses = history.history['loss']\n val_losses = history.history['val_loss']\n train_accuracy = history.history['accuracy']\n val_accuracy = history.history['val_accuracy']\n\n 
display_convergence_error(train_losses, val_losses)\n display_convergence_acc(train_accuracy, val_accuracy)\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"tensorflow.keras.initializers.Constant",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.optimizers.RMSprop",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.keras.initializers.TruncatedNormal",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.utils.to_categorical"
]
]
|
go-jugo/ml_event_prediction_trainer | [
"0d644b737afdef078ad5b6fc2b7e2549b964b56f"
]
| [
"data_cleansing/one_hot_encode_categories.py"
]
| [
"import pandas as pd\r\nimport dask.dataframe as dd\r\nfrom ..monitoring.time_it import timing\r\nfrom ..tools.dask_repartition import dask_repartition\r\nfrom ..logger import get_logger\r\n\r\nlogger = get_logger(__name__.split(\".\", 1)[-1])\r\n\r\n\r\n@timing\r\ndef one_hot_encode_categories(df, errorcode_col, v_dask=True):\r\n\r\n non_numeric_columns_list = list(df.select_dtypes(exclude=['number', 'datetime']).columns)\r\n\r\n df_non_numeric = df[non_numeric_columns_list].astype(str)\r\n if v_dask:\r\n df_non_numeric = df_non_numeric.categorize()\r\n if len(df_non_numeric.columns) != 0:\r\n df_dummy = dd.get_dummies(df_non_numeric, prefix_sep='.', prefix=non_numeric_columns_list)\r\n df = df.drop(columns=non_numeric_columns_list)\r\n if v_dask:\r\n df = dd.concat([df, df_dummy], axis=1)\r\n else:\r\n df = pd.concat([df, df_dummy], axis=1)\r\n logger.debug('Number of Columns for one hot encoding : ' + str(len(non_numeric_columns_list)))\r\n\r\n df = dask_repartition(df)\r\n\r\n return df"
]
| [
[
"pandas.concat"
]
]
|
gcgs1/phonopy | [
"6a194a2d9514646b61a5f87168107d4c6b0d570d"
]
| [
"test/phonopy/structure/test_cell.py"
]
| [
"import unittest\n\nimport os\nimport numpy as np\nfrom phonopy.structure.atoms import PhonopyAtoms as Atoms\nfrom phonopy.structure.cells import get_supercell\nfrom phonopy.interface.phonopy_yaml import get_unitcell_from_phonopy_yaml\n\ndata_dir = os.path.dirname(os.path.abspath(__file__))\n\nclass TestSupercell(unittest.TestCase):\n\n def setUp(self):\n self._cells = []\n symbols = ['Si'] * 2 + ['O'] * 4\n lattice = [[4.65, 0, 0],\n [0, 4.75, 0],\n [0, 0, 3.25]]\n points = [[0.0, 0.0, 0.0],\n [0.5, 0.5, 0.5],\n [0.3, 0.3, 0.0],\n [0.7, 0.7, 0.0],\n [0.2, 0.8, 0.5],\n [0.8, 0.2, 0.5]]\n\n self._cells.append(Atoms(cell=lattice,\n scaled_positions=points,\n symbols=symbols))\n\n symbols = ['Si'] * 2\n lattice = [[0, 2.73, 2.73],\n [2.73, 0, 2.73],\n [2.73, 2.73, 0]]\n points = [[0.75, 0.75, 0.75],\n [0.5, 0.5, 0.5]]\n\n self._cells.append(Atoms(cell=lattice,\n scaled_positions=points,\n symbols=symbols))\n\n self._smats = []\n self._smats.append(np.diag([1, 2, 3]))\n self._smats.append([[-1, 1, 1],\n [ 1,-1, 1],\n [ 1, 1,-1]])\n\n self._fnames = (\"SiO2-123.yaml\", \"Si-conv.yaml\")\n\n def tearDown(self):\n pass\n\n def test_get_supercell(self):\n for i, (cell, smat, fname) in enumerate(zip(self._cells,\n self._smats,\n self._fnames)):\n scell = get_supercell(cell, smat)\n scell_yaml = get_unitcell_from_phonopy_yaml(os.path.join(data_dir,\n fname))\n np.testing.assert_allclose(scell.get_cell(), scell_yaml.get_cell(),\n atol=1e-5)\n pos = scell.get_scaled_positions()\n pos -= np.rint(pos)\n pos_yaml = scell_yaml.get_scaled_positions()\n pos_yaml -= np.rint(pos_yaml)\n np.testing.assert_allclose(pos, pos_yaml, atol=1e-5)\n np.testing.assert_array_equal(scell.get_atomic_numbers(),\n scell_yaml.get_atomic_numbers())\n np.testing.assert_allclose(scell.get_masses(),\n scell_yaml.get_masses(),\n atol=1e-5)\n\n\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestSupercell)\n unittest.TextTestRunner(verbosity=2).run(suite)\n"
]
| [
[
"numpy.diag",
"numpy.rint",
"numpy.testing.assert_allclose"
]
]
|
AnjaliRuban/babyai | [
"882feaa6e388ef2274ccebf0ec15c58966fc37ba"
]
| [
"babyai/rl/algos/cpv_reward.py"
]
| [
"import torch\nimport revtok\nimport numpy as np\nfrom torch import nn\nfrom vocab import Vocab\nimport matplotlib.pyplot as plt\nimport pdb\nfrom torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence\n\nclass CPV(nn.Module):\n def __init__(self, primed_model='models/cpv_model.pth'):\n super().__init__()\n\n self.pad = 0\n self.seg = 1\n\n self.device = torch.device('cuda')\n primed_model = torch.load(primed_model, map_location=self.device)\n self.args = primed_model['args']\n self.vocab = primed_model['vocab']\n\n self.img_shape = 7 * 7 * 3\n\n self.embed = nn.Embedding(len(self.vocab), self.args.demb)\n self.linear = nn.Linear(self.args.demb, self.img_shape)\n self.enc = nn.LSTM(self.img_shape, self.args.dhid, bidirectional=True, batch_first=True)\n self.to(self.device)\n\n self.load_state_dict(primed_model['model'], strict=False)\n\n def encoder(self, batch, batch_size, h_0=None, c_0=None):\n '''\n Encodes a stacked tensor.\n '''\n\n if h_0 is None or c_0 is None:\n h_0 = torch.zeros(2, batch_size, self.args.dhid).type(torch.float).to(self.device) # -> 2 x B x H\n c_0 = torch.zeros(2, batch_size, self.args.dhid).type(torch.float).to(self.device) # -> 2 x B x H\n out, (h, c) = self.enc(batch, (h_0, c_0)) # -> 2 x B x H\n\n hid_sum = torch.sum(h, dim=0) # -> B x H\n\n return hid_sum, h, c\n\n def forward(self, high, context, target, high_lens, context_lens, target_lens):\n '''\n\n '''\n\n B = context.shape[0]\n\n ### High ###\n high = self.embed(high) # -> B x M x D\n high = self.linear(high) # -> B x M x 147\n high = pack_padded_sequence(high, high_lens, batch_first=True, enforce_sorted=False)\n high, _, _ = self.encoder(high, B) # -> B x H\n\n ### Context ###\n context = pack_padded_sequence(context, context_lens, batch_first=True, enforce_sorted=False)\n context, h, c = self.encoder(context)\n\n ### Target ###\n packed_target = pack_padded_sequence(target, target_lens, batch_first=True, enforce_sorted=False)\n target, _, _ = self.encoder(packed_target, B)\n\n ### Full Trajectory ###\n trajectory, _, _ = self.encoder(packed_target, B, h, c)\n\n ### Combinations ###\n output = {}\n output[\"H * C\"] = torch.matmul(high, torch.transpose(context, 0, 1)) # -> B x B\n output[\"<H, C>\"] = torch.bmm(high.reshape(B, 1, -1), context.reshape(B, -1, 1)).squeeze() # -> B\n output[\"<H, T>\"] = torch.bmm(high.reshape(B, 1, -1), target.reshape(B, -1, 1)).squeeze() # -> B\n output[\"<H, N>\"] = torch.bmm(high.reshape(B, 1, -1), trajectory.reshape(B, -1, 1)).squeeze() # -> B\n output[\"<H, C + T>\"] = torch.bmm(high.reshape(B, 1, -1), (context + target).reshape(B, -1, 1)).squeeze() # -> B\n output[\"norm(H)\"] = torch.norm(high, dim=1) # -> B\n output[\"norm(C)\"] = torch.norm(context, dim=1) # -> B\n output[\"norm(T)\"] = torch.norm(target, dim=1) # -> B\n output[\"norm(N)\"] = torch.norm(trajectory, dim=1) # -> B\n output[\"cos(H, N)\"] = F.cosine_similarity(high, trajectory) # -> B\n\n return output\n\n def compute_similarity(self, high, context, high_lens, context_lens): \n \"\"\"\n Compute similarity between a high level instruction \n and a trajectory segment. 
\n \"\"\"\n\n B = context.shape[0]\n\n ### High ###\n high = self.embed(high) # -> B x M x D\n high = self.linear(high) # -> B x M x 147\n high = pack_padded_sequence(high, high_lens, batch_first=True, enforce_sorted=False)\n high, _, _ = self.encoder(high, B) # -> B x H\n\n ### Context ###\n context, _ = self.enc(context)\n dir1, dir2 = torch.split(context, context.shape[-1] // 2, dim=-1)\n context = dir2 + dir1\n\n ### Combinations ###\n dot_prod = torch.bmm(context, high.view(B, high.shape[1], 1)).squeeze() # -> B x M\n norms = torch.norm(high, dim=1).view((64, 1)).expand(-1, dot_prod.shape[1])\n\n # Similarity between high and current trajectory normalized by the high's norm. \n sim = dot_prod / norms\n \n return sim\n\n def remove_spaces(self, s):\n cs = ' '.join(s.split())\n return cs\n\n def remove_spaces_and_lower(self, s):\n cs = self.remove_spaces(s)\n cs = cs.lower()\n return cs\n\n def calculate_reward(self, all_obs):\n\n # Unpack values from input. \n high = [o['mission'] for o in all_obs[0]]\n\n obs = []\n for i in range(len(all_obs[0])):\n obs.append([o[i]['image'] for o in all_obs])\n\n # Tokenize highs. \n high = [revtok.tokenize(self.remove_spaces_and_lower(h)) for h in high] # -> M\n high = [self.vocab.word2index([w.strip().lower() if w.strip().lower() in self.vocab.to_dict()['index2word'] else '<<pad>>' for w in h]) for h in high] # -> M\n\n # Put on device. \n high = torch.tensor(high, dtype=torch.long)\n high = high.reshape(len(high), -1).to(self.device) # -> B x M\n high_len = high.bool().byte().sum(dim=1).view(-1,).to(self.device)\n\n traj = torch.tensor(obs, dtype=torch.float).view(len(obs), len(obs[0]), self.img_shape).to(self.device) # B X M X 147\n traj_len = torch.full((traj.shape[0],), traj.shape[1]).long().to(self.device)\n\n # Compute CPV reward with new observation incorporated. \n with torch.no_grad(): \n self.eval()\n sims = self.compute_similarity(high, traj, high_len, traj_len)\n\n # Potential-based reward is delta in similarity between previous and current trajectory. \n reward = sims[:,1:] - sims[:,:-1]\n reward = torch.cat([torch.zeros((reward.shape[0],1), dtype=torch.float).to(self.device), reward], dim=1)\n\n return reward.detach()\n"
]
| [
[
"torch.norm",
"torch.transpose",
"torch.full",
"torch.nn.LSTM",
"torch.load",
"torch.zeros",
"torch.sum",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.tensor",
"torch.nn.Linear",
"torch.no_grad",
"torch.split",
"torch.device"
]
]
|
stefanh-it/pyaudiodsptools | [
"3836117b02b43acb14488d7c8b3718a270c527be"
]
| [
"pyAudioDspTools/EffectEQ3BandFFT.py"
]
| [
"import numpy\nfrom .config import chunk_size, sampling_rate\n#import matplotlib.pyplot as pyplot\n\n\n\"\"\"########################################################################################\nCreating a 3 Band FFT Equalizer class/device.\nInit Parameters: \n lowshelf_frequency: Shelving frequency in Hertz [float] or [int] (for example 400.0)\n lowshelf_db: Gain in decibel [float] or [int] (for example 3.0 or -3.0)\n highshelf_frequency: Shelving frequency in Hertz [float] or [int] (for example 400 or 400.0)\n highshelf_db: Gain in decibel [float] or [int] (for example 3.0 or -3.0)\n\napplyfilter\n Applies the filter to a 44100Hz/32 bit float signal of your choice.\n Should operate with values between -1.0 and 1.0\n \nThis class introduces latency equal to the value of chunk_size. \nOptimal operation with chunk_size=512\n###########################################################################################\"\"\"\n\n\nclass CreateEQ3BandFFT:\n \"\"\"Creating a 3Band FFT EQ audio-effect class/device.\n\n Can be used to manipulate frequencies in your audio numpy-array.\n Is the faster one, the slower, non FFT based one being CreateEQ3Band.\n Is NOT overloaded with basic settings.\n This class introduces latency equal to config.chunk_size.\n\n Parameters\n ----------\n lowshelf_frequency : int or float\n Sets the frequency of the lowshelf-band in Hertz.\n lowshelf_db : int or float\n Increase or decrease the lows in decibel.\n midband_frequency : int or float\n Sets the frequency of the mid-band in Hertz. Has a fixed Q.\n midband_db : int or float\n Increase or decrease the selected mids in decibel.\n highshelf_frequency : int or float\n Sets the frequency of the highshelf-band in Hertz.\n highshelf_db : int or float\n Increase or decrease the highs in decibel.\n\n \"\"\"\n def __init__(self,lowshelf_frequency,lowshelf_db,midband_frequency,midband_db,highshelf_frequency,highshelf_db):\n #Basic\n #chunk_size = chunk_size\n self.fS = sampling_rate # Sampling rate.\n\n #Highshelf Properties\n self.fH_highshelf = highshelf_frequency\n self.highshelf_db = highshelf_db\n\n #Lowshelf Properties\n self.fH_lowshelf = lowshelf_frequency\n self.lowshelf_db = lowshelf_db\n\n #Lowshelf Properties\n self.fH_midband = midband_frequency\n self.midband_db = midband_db\n\n #Setting Kaiser-Windows properties\n self.filter_length = (chunk_size//2)-1 # Filter length, must be odd.\n self.array_slice_value_start = chunk_size + (self.filter_length // 2)\n self.array_slice_value_end = chunk_size - (self.filter_length // 2)\n\n\n ################ Create Lowcut (Finally becomes Highshelf) Sinc Filter and FFT ##################\n # Compute sinc filter.\n self.sinc_filter_highshelf = numpy.sinc(\n 2 * (self.fH_highshelf-self.fH_highshelf/4) / self.fS * (numpy.arange(self.filter_length) - (self.filter_length - 1) / 2))\n\n # Apply window.\n self.sinc_filter_highshelf *= numpy.kaiser(self.filter_length,6.0)\n\n # Normalize to get unity gain.\n self.sinc_filter_highshelf /= numpy.sum(self.sinc_filter_highshelf)\n\n #Spectral inversion to make lowcut out of highcut\n self.sinc_filter_highshelf = -self.sinc_filter_highshelf\n self.sinc_filter_highshelf[(self.filter_length - 1) // 2] += 1\n\n\n #Zero Padding the Sinc Filter to the length of the input array for easier processing\n #You don't need to use numpy.convolve when input-array and sinc-filter array are the same lenght, just multiply\n self.sinc_filter_highshelf = numpy.append(self.sinc_filter_highshelf, numpy.zeros(chunk_size - self.filter_length + 1))\n 
self.sinc_filter_highshelf = numpy.append(self.sinc_filter_highshelf, numpy.zeros(((len(self.sinc_filter_highshelf) * 2) - 3)))\n self.sinc_filter_highshelf = numpy.fft.fft(self.sinc_filter_highshelf)\n\n\n ################ Create Highcut (Finally becomes Lowshelf) Sinc Filter and FFT ##################\n # Compute sinc filter.\n self.sinc_filter_lowshelf = numpy.sinc(\n 2 * (self.fH_lowshelf + self.fH_lowshelf/4) / self.fS * (numpy.arange(self.filter_length) - (self.filter_length - 1) / 2))\n\n # Apply window.\n self.sinc_filter_lowshelf *= numpy.kaiser(self.filter_length,6.0)\n\n # Normalize to get unity gain.\n self.sinc_filter_lowshelf /= numpy.sum(self.sinc_filter_lowshelf)\n\n #Zero Padding the Sinc Filter to the length of the input array\n self.sinc_filter_lowshelf = numpy.append(self.sinc_filter_lowshelf, numpy.zeros(chunk_size - self.filter_length + 1))\n self.sinc_filter_lowshelf = numpy.append(self.sinc_filter_lowshelf, numpy.zeros(((len(self.sinc_filter_lowshelf) * 2) - 3)))\n self.sinc_filter_lowshelf = numpy.fft.fft(self.sinc_filter_lowshelf)\n\n\n ################ Create Midband (Lowpass+Highpass) Sinc Filter and FFT ##################\n # Compute sinc filter.\n self.sinc_filter_mid_lowpass = numpy.sinc(\n 2 * (self.fH_midband+self.fH_midband/4) / self.fS * (numpy.arange(self.filter_length) - (self.filter_length - 1) / 2))\n\n # Apply window.\n self.sinc_filter_mid_lowpass *= numpy.kaiser(self.filter_length,6.0)\n\n # Normalize to get unity gain.\n self.sinc_filter_mid_lowpass /= numpy.sum(self.sinc_filter_mid_lowpass)\n\n # Compute sinc filter.\n self.sinc_filter_mid_highpass = numpy.sinc(\n 2 * (self.fH_midband-self.fH_midband/4) / self.fS * (numpy.arange(self.filter_length) - (self.filter_length - 1) / 2))\n\n # Apply window.\n self.sinc_filter_mid_highpass *= numpy.kaiser(self.filter_length,6.0)\n\n # Normalize to get unity gain.\n self.sinc_filter_mid_highpass /= numpy.sum(self.sinc_filter_mid_highpass)\n\n #Spectral inversion to make highpass\n self.sinc_filter_mid_highpass = -self.sinc_filter_mid_highpass\n self.sinc_filter_mid_highpass[(self.filter_length - 1) // 2] += 1\n\n #Zero Padding the Sinc Filter to the length of the input array\n self.sinc_filter_mid_lowpass = numpy.append(self.sinc_filter_mid_lowpass, numpy.zeros(chunk_size - self.filter_length + 1))\n self.sinc_filter_mid_lowpass = numpy.append(self.sinc_filter_mid_lowpass, numpy.zeros(((len(self.sinc_filter_mid_lowpass) * 2) - 3)))\n self.sinc_filter_mid_lowpass = numpy.fft.fft(self.sinc_filter_mid_lowpass)\n\n #Zero Padding the Sinc Filter to the length of the input array\n self.sinc_filter_mid_highpass = numpy.append(self.sinc_filter_mid_highpass, numpy.zeros(chunk_size - self.filter_length + 1))\n self.sinc_filter_mid_highpass = numpy.append(self.sinc_filter_mid_highpass, numpy.zeros(((len(self.sinc_filter_mid_highpass) * 2) - 3)))\n self.sinc_filter_mid_highpass = numpy.fft.fft(self.sinc_filter_mid_highpass)\n\n\n\n #Initializing arrays\n self.filtered_signal = numpy.zeros(chunk_size * 3)\n self.original_signal = numpy.zeros(chunk_size * 3)\n self.float32_array_input_1 = numpy.zeros(chunk_size)\n self.float32_array_input_2 = numpy.zeros(chunk_size)\n self.float32_array_input_3 = numpy.zeros(chunk_size)\n self.cut_size = numpy.int16((self.filter_length - 1) / 2)\n\n\n def apply(self, float32_array_input):\n \"\"\"Applying the 3 Band FFT EQ to a numpy-array.\n\n Parameters\n ----------\n float32_array_input : float\n The array, which the effect should be applied on.\n\n Returns\n -------\n float\n The 
processed array, should be the exact same size as the input array\n\n \"\"\"\n #Loading new chunk and replacing old ones\n self.float32_array_input_3 = self.float32_array_input_2\n self.float32_array_input_2 = self.float32_array_input_1\n self.float32_array_input_1 = float32_array_input\n\n self.original_signal = numpy.concatenate(\n (self.float32_array_input_3,self.float32_array_input_2,self.float32_array_input_1),axis=None)\n\n #FFT for transforming samples from time-domain to frequency-domain\n signal_fft = numpy.fft.fft(self.original_signal)\n\n #Highshelf processing\n filtered_signal_highshelf = signal_fft * self.sinc_filter_highshelf #applying sinc filter\n\n #Lowshelf processing FFT\n filtered_signal_lowshelf = signal_fft * self.sinc_filter_lowshelf #applying sinc filter\n\n #Midband processing\n filtered_signal_midband = signal_fft * (self.sinc_filter_mid_highpass * self.sinc_filter_mid_lowpass)#applying lowpass\n #filtered_signal_midband = filtered_signal_midband*(1/self.fS)\n #filtered_signal_midband = filtered_signal_midband * self.sinc_filter_mid_highpass #applying highpass to just get mid\n\n #Highshelf processing Time-Domain\n filtered_signal_highshelf = numpy.fft.ifft(filtered_signal_highshelf)\n filtered_signal_highshelf = filtered_signal_highshelf[self.array_slice_value_start:-self.array_slice_value_end]\n filtered_signal_highshelf = (filtered_signal_highshelf*(10**(self.highshelf_db/20))) - filtered_signal_highshelf\n\n #Lowshelf processing Time-Domain\n filtered_signal_lowshelf = numpy.fft.ifft(filtered_signal_lowshelf)\n filtered_signal_lowshelf = filtered_signal_lowshelf[self.array_slice_value_start:-self.array_slice_value_end]\n filtered_signal_lowshelf = (filtered_signal_lowshelf*(10**(self.lowshelf_db/20))) - filtered_signal_lowshelf\n\n #Midband processing Time-Domain\n filtered_signal_midband = numpy.fft.ifft(filtered_signal_midband)\n filtered_signal_midband = filtered_signal_midband[self.array_slice_value_start:-self.array_slice_value_end]\n filtered_signal_midband = (filtered_signal_midband*(10**(self.midband_db/20))) - filtered_signal_midband\n\n\n #Mixing signals\n float_array_output = filtered_signal_midband + self.float32_array_input_2 + filtered_signal_lowshelf + filtered_signal_highshelf\n\n return float_array_output.real.astype('float32')\n\n\n #xf = numpy.linspace(0.0, 1.0 / (2.0 * (1.0/44100.0)), 768)\n #fig, ax = pyplot.subplots()\n\n #ax.plot(xf, 2.0 / 1536 * numpy.abs(filtered_signal_highshelf[:1536 // 2]))\n #ax.plot(xf, 2.0 / 1536 * numpy.abs(signal_fft[:1536 // 2]))\n #pyplot.show()\n\n\n\n #xf = numpy.linspace(0.0, 1.0 / (2.0 * (1.0/44100.0)), 768)\n #fig, ax = pyplot.subplots()\n\n #ax.plot(xf, 2.0 / 1536 * numpy.abs(add_signal_lowshelf[:1536 // 2]))\n #ax.plot(xf, 2.0 / 1536 * numpy.abs(filtered_signal[:1536 // 2]))\n #pyplot.show()\n # pyplot.plot(filtered_signal_highshelf)\n # pyplot.plot(self.float32_array_input_2)\n # pyplot.show()\n"
]
| [
[
"numpy.kaiser",
"numpy.fft.fft",
"numpy.arange",
"numpy.int16",
"numpy.concatenate",
"numpy.fft.ifft",
"numpy.zeros",
"numpy.sum"
]
]
|
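Note: the EQ record above builds each band from a windowed-sinc prototype: a sinc lowpass kernel, a Kaiser window, unity-gain normalization, and, for the lowcut/highshelf path, spectral inversion. A minimal sketch of that recipe, with an assumed 44100 Hz rate and an illustrative cutoff (neither value is taken from the record's config):

import numpy as np

fs = 44100      # assumed sampling rate
cutoff = 400.0  # illustrative cutoff in Hz
n = 255         # filter length, must be odd

# Windowed-sinc lowpass centred on (n - 1) / 2.
taps = np.sinc(2 * cutoff / fs * (np.arange(n) - (n - 1) / 2))
taps *= np.kaiser(n, 6.0)  # Kaiser window tames the ripple
taps /= np.sum(taps)       # unity gain at DC

# Spectral inversion turns the lowpass into a highpass.
hp = -taps
hp[(n - 1) // 2] += 1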
yisheng822/faiss | [
"e5fa6cf58b436450c129bf5724e26b45eae267c1"
]
| [
"tests/test_fast_scan.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport unittest\nimport time\n\nimport numpy as np\nimport faiss\n\nfrom faiss.contrib import datasets\nimport platform\n\n\nclass TestCompileOptions(unittest.TestCase):\n\n def test_compile_options(self):\n options = faiss.get_compile_options()\n options = options.split(' ')\n for option in options:\n assert option in ['AVX2', 'NEON', 'GENERIC', 'OPTIMIZE']\n\n\nclass TestSearch(unittest.TestCase):\n\n def test_PQ4_accuracy(self):\n ds = datasets.SyntheticDataset(32, 2000, 5000, 1000)\n\n index_gt = faiss.IndexFlatL2(32)\n index_gt.add(ds.get_database())\n Dref, Iref = index_gt.search(ds.get_queries(), 10)\n\n index = faiss.index_factory(32, 'PQ16x4fs')\n index.train(ds.get_train())\n index.add(ds.get_database())\n Da, Ia = index.search(ds.get_queries(), 10)\n\n nq = Iref.shape[0]\n recall_at_1 = (Iref[:, 0] == Ia[:, 0]).sum() / nq\n assert recall_at_1 > 0.6\n # print(f'recall@1 = {recall_at_1:.3f}')\n\n\n # This is an experiment to see if we can catch performance\n # regressions. It runs 2 codes, one should be faster than the\n # other by a factor ~10 in opt mode. We check for a factor 5.\n # hopefully the jitter in executtion time will not produce\n # too many spurious test failures. Unoptimized timings are\n # not exploitable, hence the flag test on that as well.\n @unittest.skipUnless(\n 'AVX2' in faiss.get_compile_options() and\n \"OPTIMIZE\" in faiss.get_compile_options(),\n \"only test while building with avx2\")\n def test_PQ4_speed(self):\n ds = datasets.SyntheticDataset(32, 2000, 5000, 1000)\n xt = ds.get_train()\n xb = ds.get_database()\n xq = ds.get_queries()\n\n index = faiss.index_factory(32, 'PQ16x4')\n index.train(xt)\n index.add(xb)\n\n t0 = time.time()\n D1, I1 = index.search(xq, 10)\n t1 = time.time()\n pq_t = t1 - t0\n print('PQ16x4 search time:', pq_t)\n\n index2 = faiss.index_factory(32, 'PQ16x4fs')\n index2.train(xt)\n index2.add(xb)\n\n t0 = time.time()\n D2, I2 = index2.search(xq, 10)\n t1 = time.time()\n pqfs_t = t1 - t0\n print('PQ16x4fs search time:', pqfs_t)\n self.assertLess(pqfs_t * 5, pq_t)\n\n\nclass TestRounding(unittest.TestCase):\n\n def do_test_rounding(self, implem=4, metric=faiss.METRIC_L2):\n ds = datasets.SyntheticDataset(32, 2000, 5000, 200)\n\n index = faiss.index_factory(32, 'PQ16x4', metric)\n index.train(ds.get_train())\n index.add(ds.get_database())\n Dref, Iref = index.search(ds.get_queries(), 10)\n nq = Iref.shape[0]\n\n index2 = faiss.IndexPQFastScan(index)\n\n # simply repro normal search\n index2.implem = 2\n D2, I2 = index2.search(ds.get_queries(), 10)\n np.testing.assert_array_equal(I2, Iref)\n np.testing.assert_array_equal(D2, Dref)\n\n # rounded LUT with correction\n index2.implem = implem\n D4, I4 = index2.search(ds.get_queries(), 10)\n # check accuracy of indexes\n recalls = {}\n for rank in 1, 10:\n recalls[rank] = (Iref[:, :1] == I4[:, :rank]).sum() / nq\n\n min_r1 = 0.98 if metric == faiss.METRIC_INNER_PRODUCT else 0.99\n self.assertGreater(recalls[1], min_r1)\n self.assertGreater(recalls[10], 0.995)\n # check accuracy of distances\n # err3 = ((D3 - D2) ** 2).sum()\n err4 = ((D4 - D2) ** 2).sum()\n nf = (D2 ** 2).sum()\n self.assertLess(err4, nf * 1e-4)\n\n def test_implem_4(self):\n self.do_test_rounding(4)\n\n def test_implem_4_ip(self):\n self.do_test_rounding(4, faiss.METRIC_INNER_PRODUCT)\n\n def test_implem_12(self):\n 
self.do_test_rounding(12)\n\n def test_implem_12_ip(self):\n self.do_test_rounding(12, faiss.METRIC_INNER_PRODUCT)\n\n def test_implem_14(self):\n self.do_test_rounding(14)\n\n def test_implem_14_ip(self):\n self.do_test_rounding(14, faiss.METRIC_INNER_PRODUCT)\n\n#########################################################\n# Kernel unit test\n#########################################################\n\n\n\ndef reference_accu(codes, LUT):\n nq, nsp, is_16 = LUT.shape\n nb, nsp_2 = codes.shape\n assert is_16 == 16\n assert nsp_2 == nsp // 2\n accu = np.zeros((nq, nb), 'uint16')\n for i in range(nq):\n for j in range(nb):\n a = np.uint16(0)\n for sp in range(0, nsp, 2):\n c = codes[j, sp // 2]\n a += LUT[i, sp , c & 15].astype('uint16')\n a += LUT[i, sp + 1, c >> 4].astype('uint16')\n accu[i, j] = a\n return accu\n\n\n# disabled because the function to write to mem is not implemented currently\nclass ThisIsNotATestLoop5: # (unittest.TestCase):\n\n def do_loop5_kernel(self, nq, bb):\n \"\"\" unit test for the accumulation kernel \"\"\"\n nb = bb * 32 # database size\n nsp = 24 # number of sub-quantizers\n\n rs = np.random.RandomState(123)\n codes = rs.randint(256, size=(nb, nsp // 2)).astype('uint8')\n LUT = rs.randint(256, size=(nq, nsp, 16)).astype('uint8')\n accu_ref = reference_accu(codes, LUT)\n\n def to_A(x):\n return faiss.array_to_AlignedTable(x.ravel())\n\n sp = faiss.swig_ptr\n\n LUT_a = faiss.AlignedTableUint8(LUT.size)\n faiss.pq4_pack_LUT(\n nq, nsp, sp(LUT),\n LUT_a.get()\n )\n\n codes_a = faiss.AlignedTableUint8(codes.size)\n faiss.pq4_pack_codes(\n sp(codes),\n nb, nsp, nb, nb, nsp,\n codes_a.get()\n )\n\n accu_a = faiss.AlignedTableUint16(nq * nb)\n accu_a.clear()\n faiss.loop5_kernel_accumulate_1_block_to_mem(\n nq, nb, nsp, codes_a.get(), LUT_a.get(), accu_a.get()\n )\n accu = faiss.AlignedTable_to_array(accu_a).reshape(nq, nb)\n np.testing.assert_array_equal(accu_ref, accu)\n\n def test_11(self):\n self.do_loop5_kernel(1, 1)\n\n def test_21(self):\n self.do_loop5_kernel(2, 1)\n\n def test_12(self):\n self.do_loop5_kernel(1, 2)\n\n def test_22(self):\n self.do_loop5_kernel(2, 2)\n\n\n\n\n\n##########################################################\n# Tests for various IndexPQFastScan implementations\n##########################################################\n\ndef verify_with_draws(testcase, Dref, Iref, Dnew, Inew):\n \"\"\" verify a list of results where there are draws in the distances (because\n they are integer). 
\"\"\"\n np.testing.assert_array_almost_equal(Dref, Dnew, decimal=5)\n # here we have to be careful because of draws\n for i in range(len(Iref)):\n if np.all(Iref[i] == Inew[i]): # easy case\n continue\n # we can deduce nothing about the latest line\n skip_dis = Dref[i, -1]\n for dis in np.unique(Dref):\n if dis == skip_dis:\n continue\n mask = Dref[i, :] == dis\n testcase.assertEqual(set(Iref[i, mask]), set(Inew[i, mask]))\n\n\nclass TestImplems(unittest.TestCase):\n\n def __init__(self, *args):\n unittest.TestCase.__init__(self, *args)\n self.cache = {}\n self.k = 10\n\n def get_index(self, d, metric):\n if (d, metric) not in self.cache:\n ds = datasets.SyntheticDataset(d, 1000, 2000, 200)\n target_size = d // 2\n index = faiss.index_factory(d, 'PQ%dx4' % target_size, metric)\n index.train(ds.get_train())\n index.add(ds.get_database())\n\n index2 = faiss.IndexPQFastScan(index)\n # uint8 LUT but no SIMD\n index2.implem = 4\n Dref, Iref = index2.search(ds.get_queries(), 10)\n\n self.cache[(d, metric)] = (ds, index, Dref, Iref)\n\n return self.cache[(d, metric)]\n\n def do_with_params(self, d, params, metric=faiss.METRIC_L2):\n ds, index, Dref, Iref = self.get_index(d, metric)\n\n index2 = self.build_fast_scan_index(index, params)\n\n Dnew, Inew = index2.search(ds.get_queries(), self.k)\n\n Dref = Dref[:, :self.k]\n Iref = Iref[:, :self.k]\n\n verify_with_draws(self, Dref, Iref, Dnew, Inew)\n\n\n def build_fast_scan_index(self, index, params):\n index2 = faiss.IndexPQFastScan(index)\n index2.implem = 5\n return index2\n\n\n\nclass TestImplem12(TestImplems):\n\n def build_fast_scan_index(self, index, qbs):\n index2 = faiss.IndexPQFastScan(index)\n index2.qbs = qbs\n index2.implem = 12\n return index2\n\n def test_qbs7(self):\n self.do_with_params(32, 0x223)\n\n def test_qbs7b(self):\n self.do_with_params(32, 0x133)\n\n def test_qbs6(self):\n self.do_with_params(32, 0x33)\n\n def test_qbs6_ip(self):\n self.do_with_params(32, 0x33, faiss.METRIC_INNER_PRODUCT)\n\n def test_qbs6b(self):\n # test codepath where qbs is not known at compile time\n self.do_with_params(32, 0x1113)\n\n def test_qbs6_odd_dim(self):\n self.do_with_params(30, 0x33)\n\n\n\n\nclass TestImplem13(TestImplems):\n\n def build_fast_scan_index(self, index, qbs):\n index2 = faiss.IndexPQFastScan(index)\n index2.qbs = qbs\n index2.implem = 13\n return index2\n\n def test_qbs7(self):\n self.do_with_params(32, 0x223)\n\n def test_qbs7_k1(self):\n self.k = 1\n self.do_with_params(32, 0x223)\n\n\nclass TestImplem14(TestImplems):\n\n def build_fast_scan_index(self, index, params):\n qbs, bbs = params\n index2 = faiss.IndexPQFastScan(index, bbs)\n index2.qbs = qbs\n index2.implem = 14\n return index2\n\n def test_1_32(self):\n self.do_with_params(32, (1, 32))\n\n def test_1_64(self):\n self.do_with_params(32, (1, 64))\n\n def test_2_32(self):\n self.do_with_params(32, (2, 32))\n\n def test_2_64(self):\n self.do_with_params(32, (2, 64))\n\n def test_qbs_1_32_k1(self):\n self.k = 1\n self.do_with_params(32, (1, 32))\n\n def test_qbs_1_64_k1(self):\n self.k = 1\n self.do_with_params(32, (1, 64))\n\n def test_1_32_odd_dim(self):\n self.do_with_params(30, (1, 32))\n\n def test_1_64_odd_dim(self):\n self.do_with_params(30, (1, 64))\n\n\nclass TestImplem15(TestImplems):\n\n def build_fast_scan_index(self, index, params):\n qbs, bbs = params\n index2 = faiss.IndexPQFastScan(index, bbs)\n index2.qbs = qbs\n index2.implem = 15\n return index2\n\n def test_1_32(self):\n self.do_with_params(32, (1, 32))\n\n def test_2_64(self):\n 
self.do_with_params(32, (2, 64))\n\nclass TestAdd(unittest.TestCase):\n\n def do_test_add(self, d, bbs):\n\n ds = datasets.SyntheticDataset(d, 2000, 5000, 200)\n\n index = faiss.index_factory(d, f'PQ{d//2}x4np')\n index.train(ds.get_train())\n\n xb = ds.get_database()\n index.add(xb[:1235])\n\n index2 = faiss.IndexPQFastScan(index, bbs)\n index2.add(xb[1235:])\n new_codes = faiss.AlignedTable_to_array(index2.codes)\n\n index.add(xb[1235:])\n index3 = faiss.IndexPQFastScan(index, bbs)\n ref_codes = faiss.AlignedTable_to_array(index3.codes)\n self.assertEqual(index3.ntotal, index2.ntotal)\n\n np.testing.assert_array_equal(ref_codes, new_codes)\n\n def test_add(self):\n self.do_test_add(32, 32)\n\n def test_add_bbs64(self):\n self.do_test_add(32, 64)\n\n def test_add_odd_d(self):\n self.do_test_add(30, 64)\n\n def test_constructor(self):\n d = 32\n ds = datasets.SyntheticDataset(d, 2000, 5000, 200)\n\n index = faiss.index_factory(d, f'PQ{d//2}x4np')\n index.train(ds.get_train())\n index.add(ds.get_database())\n Dref, Iref = index.search(ds.get_queries(), 10)\n nq = Iref.shape[0]\n\n index2 = faiss.IndexPQFastScan(d, d // 2, 4)\n index2.train(ds.get_train())\n index2.add(ds.get_database())\n Dnew, Inew = index2.search(ds.get_queries(), 10)\n\n recall_at_1 = (Iref[:, 0] == Inew[:, 0]).sum() / nq\n\n self.assertGreater(recall_at_1, 0.99)\n\n data = faiss.serialize_index(index2)\n index3 = faiss.deserialize_index(data)\n\n self.assertEqual(index2.implem, index3.implem)\n\n D3, I3 = index3.search(ds.get_queries(), 10)\n np.testing.assert_array_equal(D3, Dnew)\n np.testing.assert_array_equal(I3, Inew)\n"
]
| [
[
"numpy.unique",
"numpy.testing.assert_array_equal",
"numpy.all",
"numpy.uint16",
"numpy.random.RandomState",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal"
]
]
|
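Note: the tests above drive faiss's 4-bit fast-scan PQ codes through the index factory. A minimal usage sketch along the same lines, with random vectors standing in for the SyntheticDataset helper used in the record:

import numpy as np
import faiss

d = 32
xt = np.random.rand(2000, d).astype('float32')  # training vectors
xb = np.random.rand(5000, d).astype('float32')  # database
xq = np.random.rand(100, d).astype('float32')   # queries

index = faiss.index_factory(d, 'PQ16x4fs')  # 16 sub-quantizers, 4-bit codes, fast-scan layout
index.train(xt)
index.add(xb)
D, I = index.search(xq, 10)  # approximate top-10 neighbours per query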
satvikshukla/ds-demo | [
"40f3d1c4d2b85df84ade5667aa05bea49641611a"
]
| [
"plot.py"
]
| [
"import time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom tqdm import trange\n\n\ndef get_data():\n df = pd.read_csv(\n 'https://query.data.world/s/ydtlqistcr56h7xx36ltobnvtsazvt')\n print(df.head())\n # print(df.describe())\n print(df.shape)\n print(df.dtypes)\n return df\n\n\ndef make_plots(df: pd.DataFrame):\n # histogram\n plt.hist(df['AveragePrice'])\n # plt.show()\n plt.close()\n\n # bar plot\n plt.bar(list(set(df['type'].values)), list(\n df.groupby('type')['AveragePrice'].agg('sum').values))\n plt.show()\n plt.close()\n\n\ndef make_more_plots(df: pd.DataFrame):\n\n sns.barplot(list(set(df['type'].values)), list(\n df.groupby('type')['AveragePrice'].agg('sum').values))\n # plt.show()\n plt.close()\n\n sns.scatterplot(x='Total Volume', y='AveragePrice', hue='type', data=df)\n # plt.show()\n plt.close()\n\n sns.pairplot(df.iloc[:, 8:11], palette=\"husl\", height=5.5)\n plt.show()\n plt.close()\n\n g = sns.catplot('AveragePrice', 'region', data=df,\n hue='year',\n palette='Blues',\n kind='point',\n join=False\n )\n plt.show()\n plt.close()\n\n rs = np.random.RandomState(11)\n x = rs.gamma(2, size=1000)\n y = -.5 * x + rs.normal(size=1000)\n sns.jointplot(x, y, kind=\"hex\", color=\"#4CB391\")\n plt.show()\n plt.close()\n\n\ndef make_basic_plots():\n\n # scatter plot\n x = np.random.randn(1000)\n y = np.random.randn(1000)\n plt.scatter(x, y, label='scatter plot')\n plt.legend()\n plt.xlabel('some x')\n plt.ylabel('some y')\n # plt.show()\n plt.close()\n\n # line plot\n x = np.linspace(0, 1000, 100)\n err = 100 * np.random.randn(100)\n y1 = 9.0 * x + 2.1 + err\n y2 = 7.2 * x + 9.8 + err\n plt.plot(x, y1, label='y1')\n plt.plot(x, y2, label='y2')\n plt.xlabel('some x')\n plt.ylabel('some y')\n plt.title('x vs y')\n plt.legend()\n # plt.show()\n plt.close()\n\n\ndef bonus(df: pd.DataFrame):\n print(dataframe.head())\n months = {'01': 'Jan', '02': 'Feb', '03': 'Mar', '04': 'Apr', '05': 'May', '06': 'Jun',\n '07': 'Jul', '08': 'Aug', '09': 'Sep', '10': 'Oct', '11': 'Nov', '12': 'Dec'}\n dataframe['months'] = df.apply(lambda val: months[val['Date'][5:7]], axis=1)\n print(df.head())\n\n\ndef tqq():\n sum = 0\n for _ in trange(10):\n sum += 1\n time.sleep(1.2)\n print('sum is: {}'.format(sum))\n\n\nif __name__ == '__main__':\n dataframe = get_data()\n\n # matplotlib\n # make_basic_plots()\n # make_plots(dataframe)\n\n # seaborn\n # make_more_plots(dataframe)\n\n # bonus(dataframe)\n tqq()\n pass\n"
]
| [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"numpy.linspace",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"numpy.random.RandomState",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
]
|
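Note: plot.py above repeatedly feeds pandas groupby aggregates into matplotlib. A minimal sketch of that pattern on a hypothetical frame (the column names mirror the avocado dataset used in the record):

import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame({'type': ['conventional', 'organic', 'organic'],
                   'AveragePrice': [1.1, 1.6, 1.8]})
totals = df.groupby('type')['AveragePrice'].sum()  # one bar per category
plt.bar(totals.index, totals.values)
plt.show()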
ThisIsPIRI/python-tools | [
"1dea3d44bd4259b02b2c7e78ca09e1b072711ece"
]
| [
"randomImage.py"
]
| [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport struct\n\ndef writeImage(mat, width, height):\n\twriteAs = 'c'\n\twith open(\"test.ppm\", 'wb') as f:\n\t\tf.write(bytes(f\"P6\\n{width} {height}\\n255\\n\", 'utf-8'))\n\t\tfor i in np.nditer(mat, order='C'):\n\t\t\tf.write(struct.pack(writeAs, bytes([i])))\n\ndef allRandom(width, height):\n\tformat = 'c' #set to 'i' to repeat one R, one G, one B column and one empty column\n\trandom.seed()\n\twith open(\"test.ppm\", 'wb') as f:\n\t\tf.write(bytes(f\"P6\\n{width} {height}\\n255\\n\", 'utf-8'))\n\t\tfor i in range(width * height):\n\t\t\tf.write(struct.pack(format, bytes([random.randint(0, 255)])))\n\t\t\tf.write(struct.pack(format, bytes([random.randint(0, 255)])))\n\t\t\tf.write(struct.pack(format, bytes([random.randint(0, 255)])))\n\ndef prevRandom(width, height, vertical=False, zigzag=False, dotsDivisor=100, showProcess=True):\n\t\"\"\"Returns an np.ndarray of dimension (height, width, 3) representing an image of lines on which randomly and smoothly changing colors appear.\n\tvertical -- Whether to draw vertical lines instead of horizontal ones.\n\tzigzag -- Whether to continue the initial line in zigzag instead of making a new one for every row of pixels.\n\tdotsDivisor -- n in 1/n, the probability of a completely new color(dot) spawning instead of a slight diversion from the last one.\n\tWill disable dots if less than or equal to 0.\n\tshowProcess -- Whether to show the process live through pyplot.\"\"\"\n\trandom.seed()\n\t\n\tmainAxis = width if vertical else height\n\tsubAxis = height if vertical else width\n\tmat = np.empty((mainAxis, subAxis, 3), dtype=np.uint8)\n\t\n\tif showProcess:\n\t\tplt.ion()\n\t\tplt.figure(1)\n\t\timgPlot = plt.imshow(mat)\n\t\t\n\tfor i in range(mainAxis):\n\t\tgoingReverse = zigzag and i % 2 == 1\n\t\tr = reversed(range(subAxis)) if goingReverse else range(subAxis)\n\t\tfor j in r:\n\t\t\t#Use a completely new color on dotsDivisor hit or a non-zigzag line start\n\t\t\tif (dotsDivisor > 0 and random.randrange(0, dotsDivisor) == 0) or (j == 0 and not zigzag):\n\t\t\t\tfor k in range(3):\n\t\t\t\t\tmat[i,j,k] = random.randint(0, 255)\n\t\t\t#Use the color one pixel above at new zigzag line start\n\t\t\telif zigzag and (j == ((subAxis - 1) if goingReverse else 0)): # i will never be 0 if above if is False and zigzag is True\n\t\t\t\tfor k in range(3):\n\t\t\t\t\tmat[i,j,k] = max(0, min(255, mat[i-1,j,k] + random.randint(-20, 20)))\n\t\t\telse:\n\t\t\t\tfor k in range(3):\n\t\t\t\t\tmat[i,j,k] = max(0, min(255, mat[i,j+(1 if goingReverse else -1),k] + random.randint(-20, 20)))\n\t\t\t\t\t\n\t\tif showProcess:\n\t\t\timgPlot.set_data(mat)\n\t\t\tplt.draw()\n\t\t\tplt.pause(0.001)\n\n\tif showProcess:\n\t\tplt.close()\n\treturn np.transpose(mat, (1, 0, 2)) if vertical else mat\n\ndef neighborRandom(width, height, dotsDivisor=100):\n\t\"\"\"Returns an np.ndarray of dimension (height, width, 3) representing an image with pastel-like textures and colors.\n\tSet dotsDivisor to a negative number or 0 to disable random dots appearing throughout the image.\"\"\"\n\tformat = 'c'\n\trandom.seed()\n\tmat = np.empty((height, width, 3), dtype=np.uint8)\n\t\n\tfor i in range(height):\n\t\tfor j in range(width):\n\t\t\tif (dotsDivisor > 0 and random.randrange(0, dotsDivisor) == 0) or (i == 0 and j == 0):\n\t\t\t\tfor k in range(3):\n\t\t\t\t\tmat[i,j,k] = random.randint(0, 255)\n\t\t\telse:\n\t\t\t\tbase = None\n\t\t\t\tif i == 0:\n\t\t\t\t\tbase = mat[i][j - 1]\n\t\t\t\telif j == 
0:\n\t\t\t\t\tbase = mat[i - 1][j]\n\t\t\t\telse:\n\t\t\t\t\tbase = ((mat[i][j - 1] + mat[i - 1][j]) / 2).astype(np.int32) # Slightly faster than list comprehension.\n\t\t\t\t# TODO: fix a small number of dots appearing even when disabled\n\t\t\t\tfor k in range(3):\n\t\t\t\t\tmat[i,j,k] = max(0, min(255, base[k] + random.randint(-20, 20)))\t\t\t\n\treturn mat\n\ndef main():\n\twidth, height = [int(x) for x in input(\"Width and height(separated by a space): \").split()]\n\tdivisor = 100 if input(\"dots?(y/n): \") == 'y' else -1\n\tif input(\"2d?(y/n): \") == 'y':\n\t\twriteImage(neighborRandom(width, height, divisor), width, height)\n\telse:\n\t\twriteImage(prevRandom(width, height, vertical=(input(\"vertical?(y/n): \") == 'y'),\n\t\t\tzigzag=(input(\"zigzag?(y/n): \") == 'y'),\n\t\t\tdotsDivisor=divisor, showProcess=(input(\"show process?(y/n): \") == 'y')), width, height)\n\nif __name__ == \"__main__\":\n\tmain()\n"
]
| [
[
"matplotlib.pyplot.imshow",
"numpy.nditer",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.close",
"numpy.transpose",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause",
"numpy.empty",
"matplotlib.pyplot.figure"
]
]
|
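Note: randomImage.py above serializes RGB arrays as binary PPM (P6) one packed byte at a time. A compact sketch of the same file format that writes a whole uint8 array in one call; write_ppm is a hypothetical helper, not part of the record:

import numpy as np

def write_ppm(path, mat):
    # mat: uint8 array of shape (height, width, 3), RGB row-major
    h, w, _ = mat.shape
    with open(path, 'wb') as f:
        f.write(bytes(f"P6\n{w} {h}\n255\n", 'utf-8'))  # P6 header: magic, size, maxval
        f.write(mat.tobytes())                          # raw interleaved RGB samples

write_ppm('test.ppm', np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8))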
izumiya-keisuke/pytorch-example | [
"3fa9a531bccc9ce95e0e0e8222de24ed1af35e34"
]
| [
"pytorch_example/lightning/train.py"
]
| [
"\"\"\"\nCopyright 2022 Keisuke Izumiya\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom typing import Union\n\nimport torch.cuda\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom pytorch_lightning import LightningDataModule, LightningModule, Trainer\nfrom torch import Tensor\n\nfrom .data import DataModel\n\n\nclass Model(LightningModule):\n def __init__(self) -> None:\n super().__init__()\n\n self._module: nn.Module = self._make_model()\n\n def forward(self, x: Tensor) -> Tensor:\n return self._module(x)\n\n def configure_optimizers(self) -> optim.Optimizer:\n return optim.Adadelta(self.parameters())\n\n def training_step(self, batch: tuple[Tensor, Tensor], idx: int) -> Tensor:\n predict: Tensor = self(batch[0])\n loss: Tensor = self._calc_loss(predict, batch[1])\n return loss\n\n def training_epoch_end(self, outputs: list[Tensor]) -> None:\n loss_sum: Union[float, Tensor] = 0.0\n for output in outputs:\n output: dict[str, Tensor]\n\n loss_sum += output[\"loss\"]\n\n self.log(\"loss/val\", loss_sum / len(outputs))\n\n def validation_step(self, batch: tuple[Tensor, Tensor], idx: int) -> dict[str, Tensor]:\n predict: Tensor = self(batch[0])\n loss: Tensor = self._calc_loss(predict, batch[1])\n acc: Tensor = self._calc_acc(predict, batch[1])\n return {\"loss\": loss, \"acc\": acc}\n\n def validation_epoch_end(self, outputs: list[dict[str, Tensor]]) -> None:\n loss_sum: Union[float, Tensor] = 0.0\n acc_sum: Union[float, Tensor] = 0.0\n for output in outputs:\n output: dict[str, Tensor]\n\n loss_sum += output[\"loss\"]\n acc_sum += output[\"acc\"]\n\n self.log(\"loss/val\", loss_sum / len(outputs))\n self.log(\"acc/val\", acc_sum / len(outputs))\n\n def test_step(self, batch: tuple[Tensor, Tensor], idx: int) -> Tensor:\n predict: Tensor = self(batch[0])\n acc: Tensor = self._calc_acc(predict, batch[1])\n return acc\n\n def test_epoch_end(self, outputs: list[Tensor]) -> None:\n self.log(\"acc/test\", sum(outputs) / len(outputs))\n\n @staticmethod\n def _make_model() -> nn.Module:\n mid_dim: int = 128\n return nn.Sequential(\n nn.Flatten(),\n nn.Linear(1 * 28 * 28, mid_dim),\n nn.BatchNorm1d(mid_dim),\n nn.ReLU(),\n nn.Linear(mid_dim, 10),\n )\n\n @staticmethod\n def _calc_loss(predict: Tensor, label: Tensor) -> Tensor:\n return F.cross_entropy(predict, label)\n\n @staticmethod\n def _calc_acc(predict: Tensor, label: Tensor) -> Tensor:\n return (predict.argmax(-1) == label).sum() / label.shape[0]\n\n\ndef train() -> None:\n data_model: LightningDataModule = DataModel()\n model: LightningModule = Model()\n\n total_epochs: int = 10\n trainer: Trainer = Trainer(max_epochs=total_epochs, gpus=torch.cuda.device_count())\n trainer.fit(model, data_model)\n trainer.test(dataloaders=data_model)\n"
]
| [
[
"torch.nn.BatchNorm1d",
"torch.nn.functional.cross_entropy",
"torch.nn.Flatten",
"torch.nn.Linear",
"torch.nn.ReLU"
]
]
|
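Note: the Lightning record above follows the standard LightningModule contract (training_step returns the loss, configure_optimizers returns the optimizer, Trainer drives the loop). A stripped-down sketch of that contract, assuming a similar pytorch_lightning version and using a random TensorDataset instead of the record's DataModel:

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
from pytorch_lightning import LightningModule, Trainer

class TinyModel(LightningModule):
    def __init__(self):
        super().__init__()
        self.net = nn.Linear(28 * 28, 10)

    def training_step(self, batch, idx):
        x, y = batch
        return F.cross_entropy(self.net(x), y)  # Lightning backpropagates the returned loss

    def configure_optimizers(self):
        return torch.optim.Adadelta(self.parameters())

data = DataLoader(TensorDataset(torch.randn(64, 28 * 28),
                                torch.randint(0, 10, (64,))),
                  batch_size=8)
Trainer(max_epochs=1).fit(TinyModel(), data)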
TeamPMG/CuteGirlsGenerator-Organized- | [
"22c684ab144ec77c14b6860cb87ad4974d24f7c8"
]
| [
"models/beta/model2web.py"
]
| [
"import numpy as np\nimport chainer\nfrom chainer import Chain, Variable\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import serializers,cuda\n\nimport math\nimport os\nimport glob\nimport pickle\nfrom PIL import Image\n\nNUMBER_OF_TAG = 1539\nxp = np\n\n#network\n\nclass EqualizedConv2d(chainer.Chain):\n def __init__(self, in_dim, out_dim,ksize=3,stride=1,pad=1):\n w = chainer.initializers.Normal(1.0) # equalized learning rate\n self.inv_c = np.sqrt(2.0/(in_dim*ksize**2))\n super(EqualizedConv2d,self).__init__(\n normalize=normalize(),\n )\n with self.init_scope():\n self.c1=L.Convolution2D(in_dim, out_dim, ksize, stride, pad, initialW=w)\n def __call__(self,x,normalize = True,dropout = False):\n h = self.c1(x * self.inv_c)\n\n if normalize:\n h = self.normalize(h)\n\n if dropout:\n F.dropout(h)\n return h\n\nclass EqualizedLinear(chainer.Chain):\n def __init__(self, in_dim, out_dim):\n w = chainer.initializers.Normal(1.0) # equalized learning rate\n self.inv_c = np.sqrt(2.0/(in_dim))\n super(EqualizedLinear,self).__init__(\n )\n with self.init_scope():\n self.l1=L.Linear(in_dim, out_dim, initialW=w)\n def __call__(self,x):\n h = self.l1(x * self.inv_c)\n return h\n\nclass g_block(chainer.Chain):\n def __init__(self, in_dim, out_dim):\n super(g_block,self).__init__(\n normalize=normalize(),\n dc1=EqualizedConv2d(in_dim, out_dim, 3, stride=1, pad=1),\n dc2=EqualizedConv2d(out_dim, out_dim, 3, stride=1, pad=1),\n to_RGB=EqualizedConv2d(out_dim, 3, 1, stride=1, pad=0),\n cr1 = crop(),\n )\n def __call__(self,x,to_rgb=True):\n h = F.unpooling_2d(x, 2, 2, 0, outsize=(x.shape[2]*2, x.shape[3]*2))\n h_r = h\n h = F.leaky_relu(self.dc1(h))\n h = F.leaky_relu(self.dc2(h))\n if to_rgb:\n #h = F.tanh(self.to_RGB(h))\n h = self.to_RGB(h,False)\n h = h + h_r\n h = self.cr1(h)\n return h\n\nclass normalize(chainer.Chain):\n def __init__(self):\n super(normalize,self).__init__(\n )\n def __call__(self,x):\n eps = 1e-8\n mean=F.mean(x ** 2,axis=1,keepdims=True)\n mean=F.sqrt(mean + eps)\n mean = F.broadcast_to(mean, (x.shape))\n h = x / mean\n return x\nclass crop(chainer.Chain):\n def __init__(self):\n super(crop,self).__init__()\n def __call__(self,h):\n h = F.minimum(h,xp.ones(h.shape).astype(np.float32))\n h = F.maximum(h,-1 * xp.ones(h.shape).astype(np.float32))\n\n return h\n\nclass generator(chainer.Chain):\n def __init__(self, width, height, z_size):\n super(generator,self).__init__(\n normalize=normalize(),\n c0 = EqualizedConv2d(z_size, 512, 4, stride=1, pad=3),\n c1 = EqualizedConv2d(512, 512, 3, stride=1, pad=1),\n\n b0=g_block(3,512),\n b1=g_block(3,512),\n b2=g_block(3,256),\n b3=g_block(3,128),\n b4=g_block(3,64),\n b5=g_block(3,32),\n b6=g_block(3,16),\n\n to_RGB=EqualizedConv2d(512, 3, 1, stride=1, pad=0),\n cr1 = crop()\n )\n\n def __call__(self, noise, tag,depth,alpha):\n h = F.reshape(noise,(len(noise),-1,1,1))\n h = F.leaky_relu(self.c0(h))\n h = F.leaky_relu(self.c1(h))\n h = self.to_RGB(h,False)\n\n for i in range(depth):\n h = getattr(self, \"b%d\" % i)(h,True)\n\n #h = getattr(self, \"b%d\" % (depth-1))(h,True)\n\n\n h = self.cr1(h)\n return h\n\n\ndef combine_images(generated_images):\n total = generated_images.shape[0]\n cols = int(math.sqrt(total))\n rows = math.ceil(float(total)/cols)\n width, height = generated_images.shape[1:3]\n combined_image = np.zeros((width*cols, height*rows,3),\n dtype=generated_images.dtype)\n #coreturn combined_image\n\n for index, image in enumerate(generated_images):\n i = index % cols\n j = int(index/cols)\n 
combined_image[width*i:width*(i+1), height*j:height*(j+1),0:3] = image[:,:,0:3 ]\n return combined_image\n\ndef save_images(images,file_name):\n print(images.shape)\n Image.fromarray(images.astype(np.uint8))\\\n .save(\"%s.png\" % (file_name))\n\ndef save_generated_image(image,name):\n Imag=combine_images(image)\n save_images(Imag,name)\n\nz_size = 128\n\nnoise=np.random.normal(0, 0.5, [32,z_size])\n\nclass normalize2(chainer.Chain):\n def __init__(self):\n super(normalize2,self).__init__(\n )\n def __call__(self,x):\n eps = 1e-8\n mean=F.mean(x ** 2,axis=1,keepdims=True)\n mean=(mean + eps) ** 0.5\n mean = F.broadcast_to(mean, (x.shape))\n h = x / mean\n return h\n\nclass EqualizedConv2d2(chainer.Chain):\n def __init__(self, in_dim, out_dim,ksize=3,stride=1,pad=1):\n w = chainer.initializers.Normal(1.0) # equalized learning rate\n self.inv_c = np.sqrt(2.0/(in_dim*ksize**2))\n super(EqualizedConv2d2,self).__init__(\n normalize=normalize2(),\n )\n with self.init_scope():\n self.c1=L.Convolution2D(in_dim, out_dim, ksize, stride, pad, initialW=w)\n def __call__(self,x,normalize = True,dropout = False):\n h = self.c1(x * self.inv_c)\n\n if normalize:\n h = self.normalize(h)\n\n if dropout:\n F.dropout(h)\n return h\n\nclass g_block2(chainer.Chain):\n def __init__(self, in_dim, out_dim):\n super(g_block2,self).__init__(\n normalize=normalize2(),\n dc1=EqualizedConv2d2(in_dim, out_dim, 3, stride=1, pad=1),\n dc2=EqualizedConv2d2(out_dim, out_dim, 3, stride=1, pad=1),\n to_RGB=EqualizedConv2d2(out_dim, 3, 1, stride=1, pad=0),\n )\n def __call__(self,x,to_rgb=False):\n h = F.unpooling_2d(x, 2, 2, 0, outsize=(x.shape[2]*2, x.shape[3]*2))\n h = F.leaky_relu(self.dc1(h))\n h = F.leaky_relu(self.dc2(h))\n if to_rgb:\n #h = F.tanh(self.to_RGB(h))\n h = self.to_RGB(h,False)\n return h\n\nclass generator2(chainer.Chain):\n def __init__(self, width, height, z_size):\n dims = [64, 64, 64, 64, 64, 64, 64, 64]\n super(generator2,self).__init__(\n normalize=normalize2(),\n c0 = EqualizedConv2d2(z_size, dims[0], 4, stride=1, pad=3),\n c1 = EqualizedConv2d2(dims[0], dims[0], 3, stride=1, pad=1),\n\n b0=g_block2(dims[0],dims[1]),\n b1=g_block2(dims[1],dims[2]),\n b2=g_block2(dims[2],dims[3]),\n b3=g_block2(dims[3],dims[4]),\n b4=g_block2(dims[4],dims[5]),\n b5=g_block2(dims[5],dims[6]),\n b6=g_block2(dims[6],dims[7]),\n\n to_RGB=EqualizedConv2d2(dims[0], 3, 1, stride=1, pad=0),\n\n )\n\n def __call__(self, noise, tag,depth,alpha):\n h = F.reshape(noise,(len(noise),-1,1,1))\n h = F.leaky_relu(self.c0(h,False))\n h = F.leaky_relu(self.c1(h))\n\n for i in range(depth-1):\n h = getattr(self, \"b%d\" % i)(h)\n\n if 0<depth and alpha < 1:\n h2 = getattr(self, \"b%d\" % (depth-1))(h,True)\n if depth==1:\n #h = F.tanh(self.to_RGB(h))\n h = self.to_RGB(h,False)\n else:\n h = getattr(self, \"b%d\" % (depth-2)).to_RGB(h,False)\n h=F.unpooling_2d(h, 2, 2, 0, outsize=(2*h.shape[2], 2*h.shape[3]))\n\n h=h*(1.0-alpha)+h2*alpha\n elif depth == 0:\n #h = F.tanh(self.to_RGB(h))\n h = self.to_RGB(h,False)\n else:\n h = getattr(self, \"b%d\" % (depth-1))(h,True)\n\n h = F.minimum(h,xp.ones(h.shape).astype(np.float32))\n #h = F.maximum(h,-1 * xp.ones(h.shape).astype(np.float32))\n\n return h\n\nz_size = 128\ng2 = generator2(512, 512, z_size)\nserializers.load_npz(\"generator03.model\", g2)\ng = generator(512, 512, z_size)\nserializers.load_npz(\"generator_SRB.model\", g)\nx = chainer.Variable(np.zeros((1,z_size), dtype=np.float32))\ny = g2(x,0,5,1)\ny = g.b5(y)\ny = F.average_pooling_2d(y, (1,2), 
(1,2))\n\n\n\nnoise=np.random.normal(0, 0.5, [1,z_size]).astype(np.float32)\nimage = g2(noise,np.zeros(NUMBER_OF_TAG),5,1)\nimage = g.b5(image)\nimage = F.average_pooling_2d(image, (1,2), (1,2))\nimage = image.data[0]\nimage = image.transpose(1,2,0)\nsave_images((image * 127.5)+127.5,\"test\")\n\n\nfrom webdnn.frontend.chainer import ChainerConverter\nfrom webdnn.backend import generate_descriptor\nfrom webdnn.util import flags\n\ngraph = ChainerConverter().convert([x], [y])\n\nflags.optimize.REPLACE_SCALAR_OPERATOR = 0\nflags.optimize.OPTIMIZE_MEMORY_ALLOCATION=0\nexec_info = generate_descriptor(\"webassembly\", graph) # also \"webassembly\", \"webgl\", \"fallback\" are available.\nexec_info.save(\"./output\")\n"
]
| [
[
"numpy.random.normal",
"numpy.zeros",
"numpy.sqrt"
]
]
|
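Note: the PGGAN-style generator above uses equalized learning rates: weights stay at unit variance and the layer input is rescaled at every forward pass by the He constant sqrt(2 / fan_in), the inv_c factor in the record. A numpy sketch of the idea for a plain linear layer (dimensions are illustrative):

import numpy as np

in_dim, out_dim = 128, 64
inv_c = np.sqrt(2.0 / in_dim)  # He scale for a dense layer; the conv version uses 2 / (in_dim * ksize**2)

rng = np.random.default_rng(0)
w = rng.normal(0.0, 1.0, (out_dim, in_dim))  # weights drawn from N(0, 1), never rescaled
x = rng.normal(0.0, 1.0, (4, in_dim))
y = (x * inv_c) @ w.T  # scaling applied at call time == equalized learning rate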
Singularity-AI/FasterRCNN | [
"6ade7f987b64a2f7f5307ea024d1e3e764628fda"
]
| [
"lib/model/rpn/proposal_layer.py"
]
| [
"from __future__ import absolute_import\n# --------------------------------------------------------\n# Faster R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick and Sean Bell\n# --------------------------------------------------------\n# --------------------------------------------------------\n# Reorganized and modified by Jianwei Yang and Jiasen Lu\n# --------------------------------------------------------\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport math\nimport yaml\nfrom model.utils.config import cfg\nfrom .generate_anchors import generate_anchors\nfrom .bbox_transform import bbox_transform_inv, clip_boxes, clip_boxes_batch\nfrom torchvision.ops import nms\nimport pdb\n\nDEBUG = False\n\nclass _ProposalLayer(nn.Module):\n \"\"\"\n Outputs object detection proposals by applying estimated bounding-box\n transformations to a set of regular boxes (called \"anchors\").\n \"\"\"\n\n def __init__(self, feat_stride, scales, ratios):\n super(_ProposalLayer, self).__init__()\n\n self._feat_stride = feat_stride\n self._anchors = torch.from_numpy(generate_anchors(scales=np.array(scales),\n ratios=np.array(ratios))).float()\n self._num_anchors = self._anchors.size(0)\n\n # rois blob: holds R regions of interest, each is a 5-tuple\n # (n, x1, y1, x2, y2) specifying an image batch index n and a\n # rectangle (x1, y1, x2, y2)\n # top[0].reshape(1, 5)\n #\n # # scores blob: holds scores for R regions of interest\n # if len(top) > 1:\n # top[1].reshape(1, 1, 1, 1)\n\n def forward(self, input):\n\n # Algorithm:\n #\n # for each (H, W) location i\n # generate A anchor boxes centered on cell i\n # apply predicted bbox deltas at cell i to each of the A anchors\n # clip predicted boxes to image\n # remove predicted boxes with either height or width < threshold\n # sort all (proposal, score) pairs by score from highest to lowest\n # take top pre_nms_topN proposals before NMS\n # apply NMS with threshold 0.7 to remaining proposals\n # take after_nms_topN proposals after NMS\n # return the top proposals (-> RoIs top, scores top)\n\n\n # the first set of _num_anchors channels are bg probs\n # the second set are the fg probs\n scores = input[0][:, self._num_anchors:, :, :]\n bbox_deltas = input[1]\n im_info = input[2]\n cfg_key = input[3]\n\n pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N\n post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N\n nms_thresh = cfg[cfg_key].RPN_NMS_THRESH\n min_size = cfg[cfg_key].RPN_MIN_SIZE\n\n batch_size = bbox_deltas.size(0)\n\n feat_height, feat_width = scores.size(2), scores.size(3)\n shift_x = np.arange(0, feat_width) * self._feat_stride\n shift_y = np.arange(0, feat_height) * self._feat_stride\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n shifts = torch.from_numpy(np.vstack((shift_x.ravel(), shift_y.ravel(),\n shift_x.ravel(), shift_y.ravel())).transpose())\n shifts = shifts.contiguous().type_as(scores).float()\n\n A = self._num_anchors\n K = shifts.size(0)\n\n self._anchors = self._anchors.type_as(scores)\n # anchors = self._anchors.view(1, A, 4) + shifts.view(1, K, 4).permute(1, 0, 2).contiguous()\n anchors = self._anchors.view(1, A, 4) + shifts.view(K, 1, 4)\n anchors = anchors.view(1, K * A, 4).expand(batch_size, K * A, 4)\n\n # Transpose and reshape predicted bbox transformations to get them\n # into the same order as the anchors:\n\n bbox_deltas = bbox_deltas.permute(0, 2, 3, 1).contiguous()\n bbox_deltas = bbox_deltas.view(batch_size, -1, 4)\n\n # Same story for the 
scores:\n scores = scores.permute(0, 2, 3, 1).contiguous()\n scores = scores.view(batch_size, -1)\n\n # Convert anchors into proposals via bbox transformations\n proposals = bbox_transform_inv(anchors, bbox_deltas, batch_size)\n\n # 2. clip predicted boxes to image\n proposals = clip_boxes(proposals, im_info, batch_size)\n # proposals = clip_boxes_batch(proposals, im_info, batch_size)\n\n # assign the score to 0 if it's non keep.\n # keep = self._filter_boxes(proposals, min_size * im_info[:, 2])\n\n # trim keep index to make it euqal over batch\n # keep_idx = torch.cat(tuple(keep_idx), 0)\n\n # scores_keep = scores.view(-1)[keep_idx].view(batch_size, trim_size)\n # proposals_keep = proposals.view(-1, 4)[keep_idx, :].contiguous().view(batch_size, trim_size, 4)\n\n # _, order = torch.sort(scores_keep, 1, True)\n\n scores_keep = scores\n proposals_keep = proposals\n _, order = torch.sort(scores_keep, 1, True)\n\n output = scores.new(batch_size, post_nms_topN, 5).zero_()\n for i in range(batch_size):\n # # 3. remove predicted boxes with either height or width < threshold\n # # (NOTE: convert min_size to input image scale stored in im_info[2])\n proposals_single = proposals_keep[i]\n scores_single = scores_keep[i]\n\n # # 4. sort all (proposal, score) pairs by score from highest to lowest\n # # 5. take top pre_nms_topN (e.g. 6000)\n order_single = order[i]\n\n if pre_nms_topN > 0 and pre_nms_topN < scores_keep.numel():\n order_single = order_single[:pre_nms_topN]\n\n proposals_single = proposals_single[order_single, :]\n scores_single = scores_single[order_single].view(-1,1)\n\n # 6. apply nms (e.g. threshold = 0.7)\n # 7. take after_nms_topN (e.g. 300)\n # 8. return the top proposals (-> RoIs top)\n keep_idx_i = nms(proposals_single, scores_single.squeeze(1), nms_thresh)\n keep_idx_i = keep_idx_i.long().view(-1)\n\n if post_nms_topN > 0:\n keep_idx_i = keep_idx_i[:post_nms_topN]\n proposals_single = proposals_single[keep_idx_i, :]\n scores_single = scores_single[keep_idx_i, :]\n\n # padding 0 at the end.\n num_proposal = proposals_single.size(0)\n output[i,:,0] = i\n output[i,:num_proposal,1:] = proposals_single\n\n return output\n\n def backward(self, top, propagate_down, bottom):\n \"\"\"This layer does not propagate gradients.\"\"\"\n pass\n\n def reshape(self, bottom, top):\n \"\"\"Reshaping happens during the call to forward.\"\"\"\n pass\n\n def _filter_boxes(self, boxes, min_size):\n \"\"\"Remove all boxes with any side smaller than min_size.\"\"\"\n ws = boxes[:, :, 2] - boxes[:, :, 0] + 1\n hs = boxes[:, :, 3] - boxes[:, :, 1] + 1\n keep = ((ws >= min_size.view(-1,1).expand_as(ws)) & (hs >= min_size.view(-1,1).expand_as(hs)))\n return keep\n"
]
| [
[
"numpy.arange",
"numpy.array",
"numpy.meshgrid",
"torch.sort"
]
]
|
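Note: proposal_layer.py above expands A base anchors across a K-cell feature grid by broadcasting the anchors against per-cell pixel shifts. A numpy sketch of that expansion with a hypothetical 2x2 grid, stride 16, and two base boxes:

import numpy as np

feat_stride = 16
anchors = np.array([[-8., -8., 8., 8.],
                    [-16., -16., 16., 16.]])  # A = 2 base boxes, (x1, y1, x2, y2)

shift_x, shift_y = np.meshgrid(np.arange(2) * feat_stride,
                               np.arange(2) * feat_stride)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                    shift_x.ravel(), shift_y.ravel())).transpose()  # K = 4 cell offsets

all_anchors = (anchors.reshape(1, -1, 4) +
               shifts.reshape(-1, 1, 4)).reshape(-1, 4)  # (K * A, 4) boxes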
YQ-Wang/mlflow | [
"5508e6784afe0442705471bf4c10cd1ca13bafe3"
]
| [
"tests/pyfunc/test_model_export_with_loader_module_and_data_path.py"
]
| [
"import os\nimport pickle\nimport yaml\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport sklearn.datasets\nimport sklearn.linear_model\nimport sklearn.neighbors\n\nimport mlflow\nimport mlflow.pyfunc\nfrom mlflow.pyfunc import PyFuncModel\nimport mlflow.pyfunc.model\nimport mlflow.sklearn\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.models import Model, infer_signature, ModelSignature\nfrom mlflow.models.utils import _read_example\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.types import Schema, ColSpec, TensorSpec\nfrom mlflow.utils.environment import _mlflow_conda_env\nfrom mlflow.utils.file_utils import TempDir\nfrom mlflow.utils.model_utils import _get_flavor_configuration\n\n\nclass TestModel(object):\n @staticmethod\n def predict(pdf):\n return pdf\n\n\ndef _load_pyfunc(path):\n with open(path, \"rb\") as f:\n return pickle.load(f, encoding=\"latin1\") # pylint: disable=unexpected-keyword-arg\n\n\[email protected]\ndef pyfunc_custom_env_file(tmpdir):\n conda_env = os.path.join(str(tmpdir), \"conda_env.yml\")\n _mlflow_conda_env(\n conda_env,\n additional_pip_deps=[\n \"scikit-learn\",\n \"pytest\",\n \"cloudpickle\",\n \"-e \" + os.path.dirname(mlflow.__path__[0]),\n ],\n )\n return conda_env\n\n\[email protected]\ndef pyfunc_custom_env_dict():\n return _mlflow_conda_env(\n additional_pip_deps=[\n \"scikit-learn\",\n \"pytest\",\n \"cloudpickle\",\n \"-e \" + os.path.dirname(mlflow.__path__[0]),\n ],\n )\n\n\[email protected](scope=\"module\")\ndef iris_data():\n iris = sklearn.datasets.load_iris()\n x = iris.data[:, :2]\n y = iris.target\n return x, y\n\n\[email protected](scope=\"module\")\ndef sklearn_knn_model(iris_data):\n x, y = iris_data\n knn_model = sklearn.neighbors.KNeighborsClassifier()\n knn_model.fit(x, y)\n return knn_model\n\n\[email protected]\ndef model_path(tmpdir):\n return os.path.join(str(tmpdir), \"model\")\n\n\[email protected]\ndef test_model_save_load(sklearn_knn_model, iris_data, tmpdir, model_path):\n sk_model_path = os.path.join(str(tmpdir), \"knn.pkl\")\n with open(sk_model_path, \"wb\") as f:\n pickle.dump(sklearn_knn_model, f)\n\n model_config = Model(run_id=\"test\", artifact_path=\"testtest\")\n mlflow.pyfunc.save_model(\n path=model_path,\n data_path=sk_model_path,\n loader_module=os.path.basename(__file__)[:-3],\n code_path=[__file__],\n mlflow_model=model_config,\n )\n\n reloaded_model_config = Model.load(os.path.join(model_path, \"MLmodel\"))\n assert model_config.__dict__ == reloaded_model_config.__dict__\n assert mlflow.pyfunc.FLAVOR_NAME in reloaded_model_config.flavors\n assert mlflow.pyfunc.PY_VERSION in reloaded_model_config.flavors[mlflow.pyfunc.FLAVOR_NAME]\n reloaded_model = mlflow.pyfunc.load_pyfunc(model_path)\n np.testing.assert_array_equal(\n sklearn_knn_model.predict(iris_data[0]), reloaded_model.predict(iris_data[0])\n )\n\n\[email protected]\ndef test_signature_and_examples_are_saved_correctly(sklearn_knn_model, iris_data):\n data = iris_data\n signature_ = infer_signature(*data)\n example_ = data[0][\n :3,\n ]\n for signature in (None, signature_):\n for example in (None, example_):\n with TempDir() as tmp:\n with open(tmp.path(\"skmodel\"), \"wb\") as f:\n pickle.dump(sklearn_knn_model, f)\n path = tmp.path(\"model\")\n mlflow.pyfunc.save_model(\n path=path,\n data_path=tmp.path(\"skmodel\"),\n loader_module=os.path.basename(__file__)[:-3],\n code_path=[__file__],\n signature=signature,\n input_example=example,\n )\n mlflow_model = Model.load(path)\n assert 
signature == mlflow_model.signature\n if example is None:\n assert mlflow_model.saved_input_example_info is None\n else:\n assert np.array_equal(_read_example(mlflow_model, path), example)\n\n\ndef test_column_schema_enforcement():\n m = Model()\n input_schema = Schema(\n [\n ColSpec(\"integer\", \"a\"),\n ColSpec(\"long\", \"b\"),\n ColSpec(\"float\", \"c\"),\n ColSpec(\"double\", \"d\"),\n ColSpec(\"boolean\", \"e\"),\n ColSpec(\"string\", \"g\"),\n ColSpec(\"binary\", \"f\"),\n ColSpec(\"datetime\", \"h\"),\n ]\n )\n m.signature = ModelSignature(inputs=input_schema)\n pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())\n pdf = pd.DataFrame(\n data=[[1, 2, 3, 4, True, \"x\", bytes([1]), \"2021-01-01 00:00:00.1234567\"]],\n columns=[\"b\", \"d\", \"a\", \"c\", \"e\", \"g\", \"f\", \"h\"],\n dtype=np.object,\n )\n pdf[\"a\"] = pdf[\"a\"].astype(np.int32)\n pdf[\"b\"] = pdf[\"b\"].astype(np.int64)\n pdf[\"c\"] = pdf[\"c\"].astype(np.float32)\n pdf[\"d\"] = pdf[\"d\"].astype(np.float64)\n pdf[\"h\"] = pdf[\"h\"].astype(np.datetime64)\n # test that missing column raises\n with pytest.raises(MlflowException) as ex:\n res = pyfunc_model.predict(pdf[[\"b\", \"d\", \"a\", \"e\", \"g\", \"f\", \"h\"]])\n assert \"Model is missing inputs\" in str(ex)\n\n # test that extra column is ignored\n pdf[\"x\"] = 1\n\n # test that columns are reordered, extra column is ignored\n res = pyfunc_model.predict(pdf)\n assert all((res == pdf[input_schema.input_names()]).all())\n\n expected_types = dict(zip(input_schema.input_names(), input_schema.pandas_types()))\n # MLflow datetime type in input_schema does not encode precision, so add it for assertions\n expected_types[\"h\"] = np.dtype(\"datetime64[ns]\")\n actual_types = res.dtypes.to_dict()\n assert expected_types == actual_types\n\n # Test conversions\n # 1. long -> integer raises\n pdf[\"a\"] = pdf[\"a\"].astype(np.int64)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Incompatible input types\" in str(ex)\n pdf[\"a\"] = pdf[\"a\"].astype(np.int32)\n # 2. integer -> long works\n pdf[\"b\"] = pdf[\"b\"].astype(np.int32)\n res = pyfunc_model.predict(pdf)\n assert all((res == pdf[input_schema.input_names()]).all())\n assert res.dtypes.to_dict() == expected_types\n pdf[\"b\"] = pdf[\"b\"].astype(np.int64)\n\n # 3. unsigned int -> long works\n pdf[\"b\"] = pdf[\"b\"].astype(np.uint32)\n res = pyfunc_model.predict(pdf)\n assert all((res == pdf[input_schema.input_names()]).all())\n assert res.dtypes.to_dict() == expected_types\n pdf[\"b\"] = pdf[\"b\"].astype(np.int64)\n\n # 4. unsigned int -> int raises\n pdf[\"a\"] = pdf[\"a\"].astype(np.uint32)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Incompatible input types\" in str(ex)\n pdf[\"a\"] = pdf[\"a\"].astype(np.int32)\n\n # 5. double -> float raises\n pdf[\"c\"] = pdf[\"c\"].astype(np.float64)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Incompatible input types\" in str(ex)\n pdf[\"c\"] = pdf[\"c\"].astype(np.float32)\n\n # 6. 
float -> double works, double -> float does not\n pdf[\"d\"] = pdf[\"d\"].astype(np.float32)\n res = pyfunc_model.predict(pdf)\n assert res.dtypes.to_dict() == expected_types\n assert \"Incompatible input types\" in str(ex)\n pdf[\"d\"] = pdf[\"d\"].astype(np.float64)\n pdf[\"c\"] = pdf[\"c\"].astype(np.float64)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Incompatible input types\" in str(ex)\n pdf[\"c\"] = pdf[\"c\"].astype(np.float32)\n\n # 7. int -> float raises\n pdf[\"c\"] = pdf[\"c\"].astype(np.int32)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Incompatible input types\" in str(ex)\n pdf[\"c\"] = pdf[\"c\"].astype(np.float32)\n\n # 8. int -> double works\n pdf[\"d\"] = pdf[\"d\"].astype(np.int32)\n pyfunc_model.predict(pdf)\n assert all((res == pdf[input_schema.input_names()]).all())\n assert res.dtypes.to_dict() == expected_types\n\n # 9. long -> double raises\n pdf[\"d\"] = pdf[\"d\"].astype(np.int64)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Incompatible input types\" in str(ex)\n pdf[\"d\"] = pdf[\"d\"].astype(np.float64)\n\n # 10. any float -> any int raises\n pdf[\"a\"] = pdf[\"a\"].astype(np.float32)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Incompatible input types\" in str(ex)\n # 10. any float -> any int raises\n pdf[\"a\"] = pdf[\"a\"].astype(np.float64)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Incompatible input types\" in str(ex)\n pdf[\"a\"] = pdf[\"a\"].astype(np.int32)\n pdf[\"b\"] = pdf[\"b\"].astype(np.float64)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Incompatible input types\" in str(ex)\n pdf[\"b\"] = pdf[\"b\"].astype(np.int64)\n\n pdf[\"b\"] = pdf[\"b\"].astype(np.float64)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n pdf[\"b\"] = pdf[\"b\"].astype(np.int64)\n assert \"Incompatible input types\" in str(ex)\n\n # 11. objects work\n pdf[\"b\"] = pdf[\"b\"].astype(np.object)\n pdf[\"d\"] = pdf[\"d\"].astype(np.object)\n pdf[\"e\"] = pdf[\"e\"].astype(np.object)\n pdf[\"f\"] = pdf[\"f\"].astype(np.object)\n pdf[\"g\"] = pdf[\"g\"].astype(np.object)\n res = pyfunc_model.predict(pdf)\n assert res.dtypes.to_dict() == expected_types\n\n # 12. datetime64[D] (date only) -> datetime64[x] works\n pdf[\"h\"] = pdf[\"h\"].astype(\"datetime64[D]\")\n res = pyfunc_model.predict(pdf)\n assert res.dtypes.to_dict() == expected_types\n pdf[\"h\"] = pdf[\"h\"].astype(\"datetime64[s]\")\n\n # 13. np.ndarrays can be converted to dataframe but have no columns\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf.values)\n assert \"Model is missing inputs\" in str(ex)\n\n # 14. dictionaries of str -> list/nparray work\n arr = np.array([1, 2, 3])\n d = {\n \"a\": arr.astype(\"int32\"),\n \"b\": arr.astype(\"int64\"),\n \"c\": arr.astype(\"float32\"),\n \"d\": arr.astype(\"float64\"),\n \"e\": [True, False, True],\n \"g\": [\"a\", \"b\", \"c\"],\n \"f\": [bytes(0), bytes(1), bytes(1)],\n \"h\": np.array([\"2020-01-01\", \"2020-02-02\", \"2020-03-03\"], dtype=np.datetime64),\n }\n res = pyfunc_model.predict(d)\n assert res.dtypes.to_dict() == expected_types\n\n # 15. 
dictionaries of str -> list[list] fail\n d = {\n \"a\": [arr.astype(\"int32\")],\n \"b\": [arr.astype(\"int64\")],\n \"c\": [arr.astype(\"float32\")],\n \"d\": [arr.astype(\"float64\")],\n \"e\": [[True, False, True]],\n \"g\": [[\"a\", \"b\", \"c\"]],\n \"f\": [[bytes(0), bytes(1), bytes(1)]],\n \"h\": [np.array([\"2020-01-01\", \"2020-02-02\", \"2020-03-03\"], dtype=np.datetime64)],\n }\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(d)\n assert \"Incompatible input types\" in str(ex)\n\n # 16. conversion to dataframe fails\n d = {\n \"a\": [1],\n \"b\": [1, 2],\n \"c\": [1, 2, 3],\n }\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(d)\n assert \"This model contains a column-based signature, which suggests a DataFrame input.\" in str(\n ex\n )\n\n\ndef _compare_exact_tensor_dict_input(d1, d2):\n \"\"\"Return whether two dicts of np arrays are exactly equal\"\"\"\n if d1.keys() != d2.keys():\n return False\n return all(np.array_equal(d1[key], d2[key]) for key in d1)\n\n\ndef test_tensor_multi_named_schema_enforcement():\n m = Model()\n input_schema = Schema(\n [\n TensorSpec(np.dtype(np.uint64), (-1, 5), \"a\"),\n TensorSpec(np.dtype(np.short), (-1, 2), \"b\"),\n TensorSpec(np.dtype(np.float32), (2, -1, 2), \"c\"),\n ]\n )\n m.signature = ModelSignature(inputs=input_schema)\n pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())\n inp = {\n \"a\": np.array([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1]], dtype=np.uint64),\n \"b\": np.array([[0, 0], [1, 1], [2, 2]], dtype=np.short),\n \"c\": np.array([[[0, 0], [1, 1]], [[2, 2], [3, 3]]], dtype=np.float32),\n }\n\n # test that missing column raises\n inp1 = {k: v for k, v in inp.items()}\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(inp1.pop(\"b\"))\n assert \"Model is missing inputs\" in str(ex)\n\n # test that extra column is ignored\n inp2 = {k: v for k, v in inp.items()}\n inp2[\"x\"] = 1\n\n # test that extra column is removed\n res = pyfunc_model.predict(inp2)\n assert res == {k: v for k, v in inp.items() if k in {\"a\", \"b\", \"c\"}}\n expected_types = dict(zip(input_schema.input_names(), input_schema.input_types()))\n actual_types = {k: v.dtype for k, v in res.items()}\n assert expected_types == actual_types\n\n # test that variable axes are supported\n inp3 = {\n \"a\": np.array([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2]], dtype=np.uint64),\n \"b\": np.array([[0, 0], [1, 1]], dtype=np.short),\n \"c\": np.array([[[0, 0]], [[2, 2]]], dtype=np.float32),\n }\n res = pyfunc_model.predict(inp3)\n assert _compare_exact_tensor_dict_input(res, inp3)\n expected_types = dict(zip(input_schema.input_names(), input_schema.input_types()))\n actual_types = {k: v.dtype for k, v in res.items()}\n assert expected_types == actual_types\n\n # test that type casting is not supported\n inp4 = {k: v for k, v in inp.items()}\n inp4[\"a\"] = inp4[\"a\"].astype(np.int32)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(inp4)\n assert \"dtype of input int32 does not match expected dtype uint64\" in str(ex)\n\n # test wrong shape\n inp5 = {\n \"a\": np.array([[0, 0, 0, 0]], dtype=np.uint),\n \"b\": np.array([[0, 0], [1, 1]], dtype=np.short),\n \"c\": np.array([[[0, 0]]], dtype=np.float32),\n }\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(inp5)\n assert \"Shape of input (1, 4) does not match expected shape (-1, 5)\" in str(ex)\n\n # test non-dictionary input\n inp6 = [\n np.array([[0, 0, 0, 0, 0]], dtype=np.uint64),\n np.array([[0, 0], [1, 1]], 
dtype=np.short),\n np.array([[[0, 0]]], dtype=np.float32),\n ]\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(inp6)\n assert \"Model is missing inputs ['a', 'b', 'c'].\" in str(ex)\n\n # test empty ndarray does not work\n inp7 = {k: v for k, v in inp.items()}\n inp7[\"a\"] = np.array([])\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(inp7)\n assert \"Shape of input (0,) does not match expected shape\" in str(ex)\n\n # test dictionary of str -> list does not work\n inp8 = {k: list(v) for k, v in inp.items()}\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(inp8)\n assert \"This model contains a tensor-based model signature with input names\" in str(ex)\n assert (\n \"suggests a dictionary input mapping input name to a numpy array, but a dict\"\n \" with value type <class 'list'> was found\"\n ) in str(ex)\n\n # test dataframe input fails at shape enforcement\n pdf = pd.DataFrame(data=[[1, 2, 3]], columns=[\"a\", \"b\", \"c\"],)\n pdf[\"a\"] = pdf[\"a\"].astype(np.uint64)\n pdf[\"b\"] = pdf[\"b\"].astype(np.short)\n pdf[\"c\"] = pdf[\"c\"].astype(np.float32)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Shape of input (1,) does not match expected shape (-1, 5)\" in str(ex)\n\n\ndef test_schema_enforcement_single_named_tensor_schema():\n m = Model()\n input_schema = Schema([TensorSpec(np.dtype(np.uint64), (-1, 2), \"a\")])\n m.signature = ModelSignature(inputs=input_schema)\n pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())\n inp = {\n \"a\": np.array([[0, 0], [1, 1]], dtype=np.uint64),\n }\n\n # sanity test that dictionary with correct input works\n res = pyfunc_model.predict(inp)\n assert res == inp\n expected_types = dict(zip(input_schema.input_names(), input_schema.input_types()))\n actual_types = {k: v.dtype for k, v in res.items()}\n assert expected_types == actual_types\n\n # test single np.ndarray input works and is converted to dictionary\n res = pyfunc_model.predict(inp[\"a\"])\n assert res == inp\n expected_types = dict(zip(input_schema.input_names(), input_schema.input_types()))\n actual_types = {k: v.dtype for k, v in res.items()}\n assert expected_types == actual_types\n\n # test list does not work\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict([[0, 0], [1, 1]])\n assert \"Model is missing inputs ['a']\" in str(ex)\n\n\ndef test_schema_enforcement_named_tensor_schema_1d():\n m = Model()\n input_schema = Schema(\n [TensorSpec(np.dtype(np.uint64), (-1,), \"a\"), TensorSpec(np.dtype(np.float32), (-1,), \"b\")]\n )\n m.signature = ModelSignature(inputs=input_schema)\n pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())\n pdf = pd.DataFrame(data=[[0, 0], [1, 1]], columns=[\"a\", \"b\"])\n pdf[\"a\"] = pdf[\"a\"].astype(np.uint64)\n pdf[\"b\"] = pdf[\"a\"].astype(np.float32)\n d_inp = {\n \"a\": np.array(pdf[\"a\"], dtype=np.uint64),\n \"b\": np.array(pdf[\"b\"], dtype=np.float32),\n }\n\n # test dataframe input works for 1d tensor specs and input is converted to dict\n res = pyfunc_model.predict(pdf)\n assert _compare_exact_tensor_dict_input(res, d_inp)\n expected_types = dict(zip(input_schema.input_names(), input_schema.input_types()))\n actual_types = {k: v.dtype for k, v in res.items()}\n assert expected_types == actual_types\n\n # test that dictionary works too\n res = pyfunc_model.predict(d_inp)\n assert res == d_inp\n expected_types = dict(zip(input_schema.input_names(), input_schema.input_types()))\n actual_types = {k: v.dtype for 
k, v in res.items()}\n assert expected_types == actual_types\n\n\ndef test_missing_value_hint_is_displayed_when_it_should():\n m = Model()\n input_schema = Schema([ColSpec(\"integer\", \"a\")])\n m.signature = ModelSignature(inputs=input_schema)\n pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())\n pdf = pd.DataFrame(data=[[1], [None]], columns=[\"a\"],)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n hint = \"Hint: the type mismatch is likely caused by missing values.\"\n assert \"Incompatible input types\" in str(ex.value.message)\n assert hint in str(ex.value.message)\n pdf = pd.DataFrame(data=[[1.5], [None]], columns=[\"a\"],)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Incompatible input types\" in str(ex)\n assert hint not in str(ex.value.message)\n pdf = pd.DataFrame(data=[[1], [2]], columns=[\"a\"], dtype=np.float64)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Incompatible input types\" in str(ex.value.message)\n assert hint not in str(ex.value.message)\n\n\ndef test_column_schema_enforcement_no_col_names():\n m = Model()\n input_schema = Schema([ColSpec(\"double\"), ColSpec(\"double\"), ColSpec(\"double\")])\n m.signature = ModelSignature(inputs=input_schema)\n pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())\n test_data = [[1.0, 2.0, 3.0]]\n\n # Can call with just a list\n assert pyfunc_model.predict(test_data).equals(pd.DataFrame(test_data))\n\n # Or can call with a DataFrame without column names\n assert pyfunc_model.predict(pd.DataFrame(test_data)).equals(pd.DataFrame(test_data))\n\n # # Or can call with a np.ndarray\n assert pyfunc_model.predict(pd.DataFrame(test_data).values).equals(pd.DataFrame(test_data))\n\n # Or with column names!\n pdf = pd.DataFrame(data=test_data, columns=[\"a\", \"b\", \"c\"])\n assert pyfunc_model.predict(pdf).equals(pdf)\n\n # Must provide the right number of arguments\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict([[1.0, 2.0]])\n assert \"the provided value only has 2 inputs.\" in str(ex)\n\n # Must provide the right types\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict([[1, 2, 3]])\n assert \"Can not safely convert int64 to float64\" in str(ex)\n\n # Can only provide data type that can be converted to dataframe...\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(set([1, 2, 3]))\n assert \"Expected input to be DataFrame or list. Found: set\" in str(ex)\n\n # 9. 
dictionaries of str -> list/nparray work\n d = {\"a\": [1.0], \"b\": [2.0], \"c\": [3.0]}\n assert pyfunc_model.predict(d).equals(pd.DataFrame(d))\n\n\ndef test_tensor_schema_enforcement_no_col_names():\n m = Model()\n input_schema = Schema([TensorSpec(np.dtype(np.float32), (-1, 3))])\n m.signature = ModelSignature(inputs=input_schema)\n pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())\n test_data = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32)\n\n # Can call with numpy array of correct shape\n assert np.array_equal(pyfunc_model.predict(test_data), test_data)\n\n # Or can call with a dataframe\n assert np.array_equal(pyfunc_model.predict(pd.DataFrame(test_data)), test_data)\n\n # Can not call with a list\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n assert \"This model contains a tensor-based model signature with no input names\" in str(ex)\n\n # Can not call with a dict\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict({\"blah\": test_data})\n assert \"This model contains a tensor-based model signature with no input names\" in str(ex)\n\n # Can not call with a np.ndarray of a wrong shape\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(np.array([[1.0, 2.0], [4.0, 5.0]]))\n assert \"Shape of input (2, 2) does not match expected shape (-1, 3)\" in str(ex)\n\n # Can not call with a np.ndarray of a wrong type\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(test_data.astype(np.uint32))\n assert \"dtype of input uint32 does not match expected dtype float32\" in str(ex)\n\n # Can call with a np.ndarray with more elements along variable axis\n test_data2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=np.float32)\n assert np.array_equal(pyfunc_model.predict(test_data2), test_data2)\n\n # Can not call with an empty ndarray\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(np.ndarray([]))\n assert \"Shape of input () does not match expected shape (-1, 3)\" in str(ex)\n\n\[email protected]\ndef test_model_log_load(sklearn_knn_model, iris_data, tmpdir):\n sk_model_path = os.path.join(str(tmpdir), \"knn.pkl\")\n with open(sk_model_path, \"wb\") as f:\n pickle.dump(sklearn_knn_model, f)\n\n pyfunc_artifact_path = \"pyfunc_model\"\n with mlflow.start_run():\n mlflow.pyfunc.log_model(\n artifact_path=pyfunc_artifact_path,\n data_path=sk_model_path,\n loader_module=os.path.basename(__file__)[:-3],\n code_path=[__file__],\n )\n pyfunc_model_path = _download_artifact_from_uri(\n \"runs:/{run_id}/{artifact_path}\".format(\n run_id=mlflow.active_run().info.run_id, artifact_path=pyfunc_artifact_path\n )\n )\n\n model_config = Model.load(os.path.join(pyfunc_model_path, \"MLmodel\"))\n assert mlflow.pyfunc.FLAVOR_NAME in model_config.flavors\n assert mlflow.pyfunc.PY_VERSION in model_config.flavors[mlflow.pyfunc.FLAVOR_NAME]\n reloaded_model = mlflow.pyfunc.load_pyfunc(pyfunc_model_path)\n assert model_config.to_yaml() == reloaded_model.metadata.to_yaml()\n np.testing.assert_array_equal(\n sklearn_knn_model.predict(iris_data[0]), reloaded_model.predict(iris_data[0])\n )\n\n\[email protected]\ndef test_model_log_load_no_active_run(sklearn_knn_model, iris_data, tmpdir):\n sk_model_path = os.path.join(str(tmpdir), \"knn.pkl\")\n with open(sk_model_path, \"wb\") as f:\n pickle.dump(sklearn_knn_model, f)\n\n pyfunc_artifact_path = \"pyfunc_model\"\n assert mlflow.active_run() is None\n mlflow.pyfunc.log_model(\n 
artifact_path=pyfunc_artifact_path,\n data_path=sk_model_path,\n loader_module=os.path.basename(__file__)[:-3],\n code_path=[__file__],\n )\n pyfunc_model_path = _download_artifact_from_uri(\n \"runs:/{run_id}/{artifact_path}\".format(\n run_id=mlflow.active_run().info.run_id, artifact_path=pyfunc_artifact_path\n )\n )\n\n model_config = Model.load(os.path.join(pyfunc_model_path, \"MLmodel\"))\n assert mlflow.pyfunc.FLAVOR_NAME in model_config.flavors\n assert mlflow.pyfunc.PY_VERSION in model_config.flavors[mlflow.pyfunc.FLAVOR_NAME]\n reloaded_model = mlflow.pyfunc.load_pyfunc(pyfunc_model_path)\n np.testing.assert_array_equal(\n sklearn_knn_model.predict(iris_data[0]), reloaded_model.predict(iris_data[0])\n )\n mlflow.end_run()\n\n\[email protected]\ndef test_save_model_with_unsupported_argument_combinations_throws_exception(model_path):\n with pytest.raises(MlflowException) as exc_info:\n mlflow.pyfunc.save_model(path=model_path, data_path=\"/path/to/data\")\n assert \"Either `loader_module` or `python_model` must be specified\" in str(exc_info)\n\n\[email protected]\ndef test_log_model_with_unsupported_argument_combinations_throws_exception():\n with mlflow.start_run(), pytest.raises(MlflowException) as exc_info:\n mlflow.pyfunc.log_model(artifact_path=\"pyfunc_model\", data_path=\"/path/to/data\")\n assert \"Either `loader_module` or `python_model` must be specified\" in str(exc_info)\n\n\[email protected]\ndef test_log_model_persists_specified_conda_env_file_in_mlflow_model_directory(\n sklearn_knn_model, tmpdir, pyfunc_custom_env_file\n):\n sk_model_path = os.path.join(str(tmpdir), \"knn.pkl\")\n with open(sk_model_path, \"wb\") as f:\n pickle.dump(sklearn_knn_model, f)\n\n pyfunc_artifact_path = \"pyfunc_model\"\n with mlflow.start_run():\n mlflow.pyfunc.log_model(\n artifact_path=pyfunc_artifact_path,\n data_path=sk_model_path,\n loader_module=os.path.basename(__file__)[:-3],\n code_path=[__file__],\n conda_env=pyfunc_custom_env_file,\n )\n run_id = mlflow.active_run().info.run_id\n\n pyfunc_model_path = _download_artifact_from_uri(\n \"runs:/{run_id}/{artifact_path}\".format(run_id=run_id, artifact_path=pyfunc_artifact_path)\n )\n\n pyfunc_conf = _get_flavor_configuration(\n model_path=pyfunc_model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME\n )\n saved_conda_env_path = os.path.join(pyfunc_model_path, pyfunc_conf[mlflow.pyfunc.ENV])\n assert os.path.exists(saved_conda_env_path)\n assert saved_conda_env_path != pyfunc_custom_env_file\n\n with open(pyfunc_custom_env_file, \"r\") as f:\n pyfunc_custom_env_parsed = yaml.safe_load(f)\n with open(saved_conda_env_path, \"r\") as f:\n saved_conda_env_parsed = yaml.safe_load(f)\n assert saved_conda_env_parsed == pyfunc_custom_env_parsed\n\n\[email protected]\ndef test_log_model_persists_specified_conda_env_dict_in_mlflow_model_directory(\n sklearn_knn_model, tmpdir, pyfunc_custom_env_dict\n):\n sk_model_path = os.path.join(str(tmpdir), \"knn.pkl\")\n with open(sk_model_path, \"wb\") as f:\n pickle.dump(sklearn_knn_model, f)\n\n pyfunc_artifact_path = \"pyfunc_model\"\n with mlflow.start_run():\n mlflow.pyfunc.log_model(\n artifact_path=pyfunc_artifact_path,\n data_path=sk_model_path,\n loader_module=os.path.basename(__file__)[:-3],\n code_path=[__file__],\n conda_env=pyfunc_custom_env_dict,\n )\n run_id = mlflow.active_run().info.run_id\n\n pyfunc_model_path = _download_artifact_from_uri(\n \"runs:/{run_id}/{artifact_path}\".format(run_id=run_id, artifact_path=pyfunc_artifact_path)\n )\n\n pyfunc_conf = _get_flavor_configuration(\n 
model_path=pyfunc_model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME\n )\n saved_conda_env_path = os.path.join(pyfunc_model_path, pyfunc_conf[mlflow.pyfunc.ENV])\n assert os.path.exists(saved_conda_env_path)\n\n with open(saved_conda_env_path, \"r\") as f:\n saved_conda_env_parsed = yaml.safe_load(f)\n assert saved_conda_env_parsed == pyfunc_custom_env_dict\n\n\[email protected]\ndef test_log_model_without_specified_conda_env_uses_default_env_with_expected_dependencies(\n sklearn_knn_model, tmpdir\n):\n sk_model_path = os.path.join(str(tmpdir), \"knn.pkl\")\n with open(sk_model_path, \"wb\") as f:\n pickle.dump(sklearn_knn_model, f)\n\n pyfunc_artifact_path = \"pyfunc_model\"\n with mlflow.start_run():\n mlflow.pyfunc.log_model(\n artifact_path=pyfunc_artifact_path,\n data_path=sk_model_path,\n loader_module=os.path.basename(__file__)[:-3],\n code_path=[__file__],\n )\n run_id = mlflow.active_run().info.run_id\n\n pyfunc_model_path = _download_artifact_from_uri(\n \"runs:/{run_id}/{artifact_path}\".format(run_id=run_id, artifact_path=pyfunc_artifact_path)\n )\n\n pyfunc_conf = _get_flavor_configuration(\n model_path=pyfunc_model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME\n )\n conda_env_path = os.path.join(pyfunc_model_path, pyfunc_conf[mlflow.pyfunc.ENV])\n with open(conda_env_path, \"r\") as f:\n conda_env = yaml.safe_load(f)\n\n assert conda_env == mlflow.pyfunc.model.get_default_conda_env()\n"
]
| [
[
"numpy.array_equal",
"numpy.ndarray",
"numpy.dtype",
"pandas.DataFrame",
"numpy.array"
]
]
|
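The record above is a pytest suite for MLflow's pyfunc flavor: it checks that tensor-based model signatures (a Schema of TensorSpec entries) are enforced at predict time, that models round-trip through log_model/load_pyfunc, and that conda environments are persisted alongside the model. A minimal, self-contained sketch of the signature-enforcement behaviour those tests assert (assuming mlflow and numpy are installed; the `EchoModel` class is a hypothetical stand-in for illustration, not code from the record):

```python
# Sketch of tensor-based signature enforcement, assuming mlflow and numpy
# are installed. EchoModel is a hypothetical toy model, not from the record.
import numpy as np
import mlflow
import mlflow.pyfunc
from mlflow.models import ModelSignature
from mlflow.types import Schema, TensorSpec


class EchoModel(mlflow.pyfunc.PythonModel):
    # returns its input unchanged, so the effect of schema enforcement
    # on the input is directly observable in the output
    def predict(self, context, model_input):
        return model_input


# one unnamed float32 tensor input of shape (-1, 3), mirroring the tests above
signature = ModelSignature(inputs=Schema([TensorSpec(np.dtype(np.float32), (-1, 3))]))

with mlflow.start_run():
    mlflow.pyfunc.log_model("echo", python_model=EchoModel(), signature=signature)
    model_uri = "runs:/{}/echo".format(mlflow.active_run().info.run_id)

model = mlflow.pyfunc.load_model(model_uri)

# conforming input passes through unchanged
print(model.predict(np.zeros((2, 3), dtype=np.float32)))

# a wrong shape (or dtype) raises MlflowException, as the assertions above check
try:
    model.predict(np.zeros((2, 2), dtype=np.float32))
except Exception as exc:
    print(type(exc).__name__, exc)
```

Conforming float32 input of shape (n, 3) is returned as-is, while a mismatched shape or dtype is rejected before the model's predict runs — exactly the contract the `test_tensor_schema_enforcement_no_col_names` assertions exercise.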
Sepidak/spikeGUI | [
"25ae60160308c0a34e7180f3e39a1c4dc6aad708"
]
| [
"analysis_guis/threads/thread_workers.py"
]
| [
"# module import\r\nimport gc\r\nimport os\r\nimport copy\r\nimport random\r\nimport platform\r\nimport numpy as np\r\nimport pickle as p\r\nimport pandas as pd\r\nimport multiprocessing as mp\r\nfrom numpy.matlib import repmat\r\n\r\n# scipy module imports\r\nfrom scipy.stats import norm, linregress\r\nfrom scipy.spatial.distance import *\r\nfrom scipy.interpolate import PchipInterpolator as pchip\r\nfrom scipy.interpolate import InterpolatedUnivariateSpline as IUS\r\nfrom scipy.interpolate import interp1d\r\nfrom scipy.optimize import curve_fit\r\nfrom scipy.signal import periodogram, hamming, boxcar, find_peaks\r\n\r\n# sklearn module imports\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\r\n\r\n# statsmodels module imports\r\nfrom statsmodels.nonparametric.smoothers_lowess import lowess\r\n\r\n# pyqt5 module import\r\nfrom PyQt5.QtCore import QThread, pyqtSignal\r\n\r\n# custom module imports\r\nimport analysis_guis.common_func as cf\r\nimport analysis_guis.calc_functions as cfcn\r\nimport analysis_guis.rotational_analysis as rot\r\nfrom analysis_guis.dialogs.rotation_filter import RotationFilteredData\r\nfrom analysis_guis.cluster_read import ClusterRead\r\nfrom probez.spike_handling import spike_io\r\n\r\n# other parameters\r\ndcopy = copy.deepcopy\r\ndefault_dir_file = os.path.join(os.getcwd(), 'default_dir.p')\r\ninterp_arr = lambda xi, y: np.vstack([interp1d(np.linspace(0, 1, len(x)), x, kind='nearest')(xi) for x in y])\r\ncell_perm_ind = lambda n_cell_tot, n_cell: np.sort(np.random.permutation(n_cell_tot)[:n_cell])\r\nset_sf_cell_perm = lambda spd_sf, n_pool, n_cell: [x[:, :, cell_perm_ind(n_pool, n_cell)] for x in spd_sf]\r\ngrp_expt_indices = lambda i_expt0: [np.where(i_expt0 == i)[0] for i in np.unique(i_expt0)]\r\n\r\n# lambda function declarations\r\nlin_func = lambda x, a: a * x\r\n\r\n########################################################################################################################\r\n########################################################################################################################\r\n\r\n\r\nclass WorkerThread(QThread):\r\n # creates the signal object\r\n work_started = pyqtSignal()\r\n work_progress = pyqtSignal(str, float)\r\n work_finished = pyqtSignal(object)\r\n work_error = pyqtSignal(str, str)\r\n work_plot = pyqtSignal(object)\r\n\r\n def __init__(self, parent=None, main_gui=None):\r\n # creates the worker object\r\n super(WorkerThread, self).__init__(parent)\r\n\r\n self.update_pbar = True\r\n self.is_running = False\r\n self.forced_quit = False\r\n self.sub_job = None\r\n self.is_ok = True\r\n self.data = None\r\n\r\n # other initialisations\r\n self.main_gui = main_gui\r\n self.thread_job_primary = None\r\n self.thread_job_secondary = None\r\n self.thread_job_para = None\r\n\r\n def set_worker_func_type(self, thread_job_primary, thread_job_secondary=None, thread_job_para=None):\r\n '''\r\n\r\n :param func_type:\r\n :return:\r\n '''\r\n\r\n # updates the worker primary/secondary job type and parameters\r\n self.thread_job_primary = thread_job_primary\r\n self.thread_job_secondary = thread_job_secondary\r\n self.thread_job_para = thread_job_para\r\n\r\n def run(self):\r\n '''\r\n\r\n :return:\r\n '''\r\n\r\n # initialisations\r\n w_prog, w_err = self.work_progress, self.work_error\r\n\r\n # updates the running/forced quit flagsv\r\n self.is_running = True\r\n self.forced_quit = False\r\n self.is_ok = True\r\n\r\n # updates the running 
parameter and enables the progress group parameters\r\n self.work_started.emit()\r\n\r\n # runs the job based on the type\r\n thread_data = None\r\n if self.thread_job_primary == 'init_data_file':\r\n # case is initialising the data file\r\n self.init_cluster_data()\r\n\r\n elif self.thread_job_primary == 'init_pool_object':\r\n # case is initialising the pool worker object\r\n thread_data = self.init_pool_worker()\r\n\r\n ##################################\r\n #### DATA I/O FUNCTIONS ####\r\n ##################################\r\n\r\n elif self.thread_job_primary == 'load_data_files':\r\n # case is loading the data files\r\n thread_data = self.load_data_file()\r\n\r\n elif self.thread_job_primary == 'save_multi_expt_file':\r\n # retrieves the parameters\r\n data, out_info = self.thread_job_para[0], self.thread_job_para[1]\r\n\r\n # case is loading the data files\r\n thread_data = self.save_multi_expt_file(data, out_info)\r\n\r\n elif self.thread_job_primary == 'save_multi_comp_file':\r\n # retrieves the parameters\r\n data, out_info = self.thread_job_para[0], self.thread_job_para[1]\r\n\r\n # case is loading the data files\r\n thread_data = self.save_multi_comp_file(data, out_info)\r\n\r\n elif self.thread_job_primary == 'run_calc_func':\r\n # case is the calculation functions\r\n calc_para, plot_para = self.thread_job_para[0], self.thread_job_para[1]\r\n data, pool, g_para = self.thread_job_para[2], self.thread_job_para[3], self.thread_job_para[4]\r\n\r\n ################################################\r\n #### CLUSTER CLASSIFICATION FUNCTIONS ####\r\n ################################################\r\n\r\n if self.thread_job_secondary == 'Fixed/Free Cluster Matching':\r\n\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['clust'])\r\n\r\n # case is determining the cluster matches\r\n self.det_cluster_matches(data, calc_para, w_prog)\r\n\r\n elif self.thread_job_secondary == 'Cluster Cross-Correlogram':\r\n # case is the cc-gram type determinations\r\n thread_data = self.calc_ccgram_types(calc_para, data.cluster)\r\n\r\n ######################################\r\n #### AHV ANALYSIS FUNCTIONS ####\r\n ######################################\r\n\r\n elif ' (Fixed)' in self.thread_job_secondary or \\\r\n (self.thread_job_secondary == 'Correlation Significance Overlap'):\r\n\r\n # ensures the smoothing window is an odd integer (if smoothing)\r\n if calc_para['is_smooth']:\r\n if calc_para['n_smooth'] % 2 != 1:\r\n # if not, then output an error message to screen\r\n e_str = 'The median smoothing filter window span must be an odd integer.'\r\n w_err.emit(e_str, 'Incorrect Smoothing Window Span')\r\n\r\n # sets the error flag and exits the function\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n # initialises the rotation filter class object (if not already set)\r\n if plot_para['rot_filt'] is None:\r\n plot_para['rot_filt'] = cf.init_rotation_filter_data(False)\r\n\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['vel', 'vel_sf_fix'], other_para=False)\r\n\r\n # calculates the shuffled kinematic spiking frequencies\r\n cfcn.calc_binned_kinemetic_spike_freq(data, plot_para, dcopy(calc_para), w_prog, roc_calc=False)\r\n cfcn.calc_shuffled_kinematic_spike_freq(data, dcopy(calc_para), w_prog)\r\n\r\n # runs any specific additional function\r\n fit_func = ['Correlation Comparison (Fixed)',\r\n 'Correlation Fit Parameters 
(Fixed)',\r\n 'Individual Cell Correlation (Fixed)']\r\n if self.thread_job_secondary in fit_func:\r\n # case is the correlation fit parameters\r\n self.calc_corr_fit_para(data, plot_para, dcopy(calc_para), w_prog)\r\n\r\n elif (' (Freely Moving)' in self.thread_job_secondary):\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['vel_sf_free'], other_para=False)\r\n\r\n # updates the bin velocity\r\n data.rotation.vel_bin_corr = calc_para['vel_bin']\r\n\r\n elif 'Fixed/Free Spiking Correlation' in self.thread_job_secondary:\r\n\r\n # determines if the freely moving data file has been loaded\r\n if not hasattr(data.externd, 'free_data'):\r\n # if the data-file has not been loaded then output an error to screen and exit\r\n e_str = 'The freely moving spiking frequency/statistics data file must be loaded ' \\\r\n 'before being able to run this function.\\n\\nPlease load this data file and try again.'\r\n w_err.emit(e_str, 'Freely Moving Data Missing?')\r\n\r\n # exits the function with an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['ff_corr', 'vel'], other_para=False)\r\n\r\n # calculates the shuffled kinematic spiking frequencies\r\n cfcn.calc_binned_kinemetic_spike_freq(data, plot_para, calc_para, w_prog, roc_calc=False, use_raw=True)\r\n\r\n # calculates the fixed/free correlations (if not already set)\r\n if not data.comp.ff_corr.is_set:\r\n self.calc_fix_free_correlation(data, calc_para, w_prog)\r\n\r\n ################################################\r\n #### FREELY MOVING ANALYSIS FUNCTIONS ####\r\n ################################################\r\n\r\n elif self.thread_job_secondary == 'Freely Moving Cell Fit Residual':\r\n\r\n # ensures the calculation fields are\r\n self.calc_cell_fit_residual(data, calc_para, w_prog)\r\n\r\n ######################################\r\n #### EYE TRACKING FUNCTIONS ####\r\n ######################################\r\n\r\n elif self.thread_job_secondary in ['Eye Movement Event Signals']:\r\n\r\n # check to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['eye_track'])\r\n\r\n # calculates the eye-tracking metrics (if not calculated)\r\n if len(data.externd.eye_track.t_evnt) == 0:\r\n self.calc_eye_track_metrics(data, calc_para, w_prog)\r\n\r\n elif 'Eye Movement Correlation' in self.thread_job_secondary:\r\n\r\n # check to see if any parameters have been altered/\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['eye_track'])\r\n\r\n # calculates the eye-tracking metrics (if not calculated)\r\n if len(data.externd.eye_track.t_evnt) == 0:\r\n self.calc_eye_track_metrics(data, calc_para, w_prog)\r\n\r\n # calculates the eye-tracking metrics\r\n if len(data.externd.eye_track.t_sp_h) == 0:\r\n self.calc_eye_track_corr(data, calc_para, w_prog)\r\n\r\n ######################################\r\n #### ROC ANALYSIS FUNCTIONS ####\r\n ######################################\r\n\r\n elif self.thread_job_secondary == 'Direction ROC Curves (Single Cell)':\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['condition'])\r\n\r\n # case is the shuffled cluster distances\r\n if not self.calc_cond_roc_curves(data, pool, calc_para, plot_para, g_para, False, 100.):\r\n self.is_ok = False\r\n 
self.work_finished.emit(thread_data)\r\n return\r\n\r\n elif self.thread_job_secondary == 'Direction ROC Curves (Whole Experiment)':\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['condition', 'phase'])\r\n\r\n # calculates the phase roc-curves for each cell\r\n if not self.calc_cond_roc_curves(data, pool, calc_para, plot_para, g_para, False, 33.):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n # calculates the phase roc curve/significance values\r\n self.calc_phase_roc_curves(data, calc_para, 66.)\r\n self.calc_phase_roc_significance(calc_para, g_para, data, pool, 100.)\r\n\r\n elif self.thread_job_secondary in ['Direction ROC AUC Histograms',\r\n 'Direction ROC Spiking Rate Heatmap']:\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['condition'])\r\n\r\n # calculates the phase roc-curves for each cell\r\n if not self.calc_cond_roc_curves(data, pool, calc_para, plot_para, g_para, True, 100., True):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n elif 'Velocity ROC Curves' in self.thread_job_secondary:\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['vel'], other_para=True)\r\n\r\n # calculates the binned kinematic spike frequencies\r\n cfcn.calc_binned_kinemetic_spike_freq(data, plot_para, calc_para, w_prog)\r\n self.calc_kinematic_roc_curves(data, pool, calc_para, g_para, 50.)\r\n\r\n elif self.thread_job_secondary == 'Velocity ROC Significance':\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['vel'], other_para=True)\r\n\r\n # calculates the binned kinematic spike frequencies\r\n cfcn.calc_binned_kinemetic_spike_freq(data, plot_para, calc_para, w_prog)\r\n\r\n # calculates the kinematic roc curves and their significance\r\n self.calc_kinematic_roc_curves(data, pool, calc_para, g_para, 0.)\r\n self.calc_kinematic_roc_significance(data, calc_para, g_para)\r\n\r\n elif self.thread_job_secondary == 'Condition ROC Curve Comparison':\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['phase'])\r\n\r\n # calculates the phase roc-curves for each cell\r\n if not self.calc_cond_roc_curves(data, pool, calc_para, plot_para, g_para, True, 33.):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n # calculates the phase roc curve/significance values\r\n self.calc_phase_roc_curves(data, calc_para, 66.)\r\n self.calc_phase_roc_significance(calc_para, g_para, data, pool, 100.)\r\n\r\n elif self.thread_job_secondary == 'Direction ROC Significance':\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['condition', 'phase'])\r\n\r\n # calculates the phase roc-curves for each cell\r\n if not self.calc_cond_roc_curves(data, pool, calc_para, plot_para, g_para, True, 33.,\r\n force_black_calc=True):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n # calculates the phase roc curve/significance values\r\n self.calc_phase_roc_curves(data, calc_para, 66.)\r\n self.calc_phase_roc_significance(calc_para, g_para, data, pool, 100.)\r\n\r\n if cf.det_valid_vis_expt(data, True):\r\n if not self.calc_dirsel_group_types(data, pool, calc_para, plot_para, g_para):\r\n 
self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n ###############################################\r\n #### COMBINED ANALYSIS LDA FUNCTIONS ####\r\n ###############################################\r\n\r\n elif self.thread_job_secondary == 'Rotation/Visual Stimuli Response Statistics':\r\n # calculates the phase roc curve/significance values\r\n self.calc_phase_roc_curves(data, calc_para, 50.)\r\n\r\n # calculates the direction/selection group types\r\n if not self.calc_dirsel_group_types(data, pool, calc_para, plot_para, g_para):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n\r\n elif self.thread_job_secondary == 'Combined Direction ROC Curves (Whole Experiment)':\r\n # checks that the conditions are correct for running the function\r\n if not self.check_combined_conditions(calc_para, plot_para):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['condition', 'phase', 'visual'])\r\n\r\n # initisalises the rotational filter (if not initialised already)\r\n if plot_para['rot_filt'] is None:\r\n plot_para['rot_filt'] = cf.init_rotation_filter_data(False)\r\n\r\n # adds motordrifting (if the visual expt type)\r\n _plot_para, _calc_para = dcopy(plot_para), dcopy(calc_para)\r\n if calc_para['vis_expt_type'] == 'MotorDrifting':\r\n _plot_para['rot_filt']['t_type'].append('MotorDrifting')\r\n\r\n # resets the flags to use the full rotation/visual phases\r\n _calc_para['use_full_rot'], _calc_para['use_full_vis'] = True, True\r\n\r\n # calculates the phase roc-curves for each cell\r\n if not self.calc_cond_roc_curves(data, pool, _calc_para, _plot_para, g_para, False, 33.):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n # calculates the phase roc curve/significance values\r\n self.calc_phase_roc_curves(data, _calc_para, 66.)\r\n if (calc_para['vis_expt_type'] == 'UniformDrifting') and \\\r\n (calc_para['grp_stype'] != 'Wilcoxon Paired Test'):\r\n # sets up the visual rotation filter\r\n r_filt_v = cf.init_rotation_filter_data(False)\r\n r_filt_v['t_type'], r_filt_v['is_ud'], r_filt_v['t_cycle'] = ['UniformDrifting'], [True], ['15']\r\n\r\n # retrieves the visual filter object\r\n plot_exp_name, plot_all_expt = plot_para['plot_exp_name'], plot_para['plot_all_expt']\r\n r_obj_vis, ind_type = cf.split_unidrift_phases(data, r_filt_v, None, plot_exp_name, plot_all_expt,\r\n 'Whole Experiment', 2.)\r\n\r\n # calculates the full uniform-drifting curves\r\n self.calc_ud_roc_curves(data, r_obj_vis, ind_type, 66.)\r\n\r\n # calculates the direction selection types\r\n if not self.calc_dirsel_group_types(data, pool, _calc_para, _plot_para, g_para):\r\n self.is_ok = False\r\n\r\n # calculates the partial roc curves\r\n self.calc_partial_roc_curves(data, calc_para, plot_para, 66.)\r\n\r\n elif self.thread_job_secondary in ['Normalised Kinematic Spiking Frequency']:\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['vel'], other_para=False)\r\n\r\n # calculates the binned kinematic spike frequencies\r\n cfcn.calc_binned_kinemetic_spike_freq(data, plot_para, calc_para, w_prog, roc_calc=False)\r\n\r\n ######################################################\r\n #### DEPTH-BASED SPIKING ANALYSIS FUNCTIONS ####\r\n ######################################################\r\n\r\n elif self.thread_job_secondary == 'Depth 
Spiking Rate Comparison':\r\n # make a copy of the plotting/calculation parameters\r\n _plot_para, _calc_para, r_data = dcopy(plot_para), dcopy(calc_para), data.depth\r\n _plot_para['plot_exp_name'] = None\r\n\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['condition', 'phase', 'visual'])\r\n\r\n # reduces the data clusters to only include the RSPd/RSPg cells\r\n _data = cfcn.get_rsp_reduced_clusters(data)\r\n\r\n # calculates the phase roc-curves for each cell\r\n if not self.calc_cond_roc_curves(_data, pool, _calc_para, _plot_para, g_para, True,\r\n 33., r_data=r_data, force_black_calc=True):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n # calculates the phase roc curve/significance values\r\n self.calc_phase_roc_curves(_data, _calc_para, 66., r_data=r_data)\r\n\r\n ############################################\r\n #### SPIKING FREQUENCY CALCULATION ####\r\n ############################################\r\n\r\n # initialisations\r\n r_filt = _plot_para['rot_filt']\r\n r_data.ch_depth, r_data.ch_region, r_data.ch_layer = \\\r\n cfcn.get_channel_depths_tt(_data._cluster, r_filt['t_type'])\r\n t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)\r\n\r\n # rotation filtered object calculation\r\n r_obj_rot = RotationFilteredData(_data, r_filt, None, None, True, 'Whole Experiment', False,\r\n t_ofs=t_ofs, t_phase=t_phase)\r\n\r\n # calculates the individual trial/mean spiking rates and sets up the plot/stats arrays\r\n sp_f0_rot, sp_f_rot = cf.calc_phase_spike_freq(r_obj_rot)\r\n s_plt, _, sf_stats, ind = cf.setup_spike_freq_plot_arrays(r_obj_rot, sp_f0_rot, sp_f_rot, None, 3)\r\n r_data.plt, r_data.stats, r_data.ind, r_data.r_filt = s_plt, sf_stats, ind, dcopy(r_filt)\r\n\r\n elif self.thread_job_secondary == 'Depth Spiking Rate Comparison (Multi-Sensory)':\r\n # checks that the conditions are correct for running the function\r\n if not self.check_combined_conditions(calc_para, plot_para):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n else:\r\n # otherwise, make a copy of the plotting/calculation parameters\r\n _plot_para, _calc_para, r_data = dcopy(plot_para), dcopy(calc_para), data.depth\r\n _plot_para['plot_exp_name'], r_filt = None, _plot_para['rot_filt']\r\n t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)\r\n\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['condition', 'phase', 'visual'])\r\n\r\n # adds motordrifting (if it is the visual expt type)\r\n if calc_para['vis_expt_type'] == 'MotorDrifting':\r\n _plot_para['rot_filt']['t_type'].append('MotorDrifting')\r\n\r\n # reduces the data clusters to only include the RSPd/RSPg cells\r\n _data = cfcn.get_rsp_reduced_clusters(data)\r\n\r\n # calculates the phase roc-curves for each cell\r\n if not self.calc_cond_roc_curves(_data, pool, _calc_para, _plot_para, g_para, False, 33., r_data=r_data):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n # calculates the phase roc curve/significance values\r\n self.calc_phase_roc_curves(_data, _calc_para, 66., r_data=r_data)\r\n if (calc_para['vis_expt_type'] == 'UniformDrifting'):\r\n # sets up the visual rotation filter\r\n r_filt_v = cf.init_rotation_filter_data(False)\r\n r_filt_v['t_type'], r_filt_v['is_ud'], r_filt_v['t_cycle'] = ['UniformDrifting'], [True], ['15']\r\n\r\n # retrieves the visual filter object\r\n r_obj_vis, ind_type = 
cf.split_unidrift_phases(_data, r_filt_v, None, None, True,\r\n 'Whole Experiment', 2., t_phase, t_ofs)\r\n\r\n # calculates the full uniform-drifting curves\r\n self.calc_ud_roc_curves(_data, r_obj_vis, ind_type, 66., r_data=r_data)\r\n\r\n # calculates the individual trial/mean spiking rates and sets up the plot/stats arrays\r\n sp_f0, sp_f = cf.calc_phase_spike_freq(r_obj_vis)\r\n s_plt, _, sf_stats, ind = cf.setup_spike_freq_plot_arrays(r_obj_vis, sp_f0, sp_f, ind_type, 2)\r\n r_data.plt_vms, r_data.stats_vms, r_data.ind_vms = s_plt, sf_stats, ind, r_filt_v\r\n r_data.r_filt_vms = dcopy(r_filt_v)\r\n else:\r\n # resets the uniform drifting fields\r\n r_data.plt_vms, r_data.stats_vms, r_data.ind_vms, r_data.r_filt_vms = None, None, None, None\r\n\r\n ############################################\r\n #### SPIKING FREQUENCY CALCULATION ####\r\n ############################################\r\n\r\n # rotation filtered object calculation\r\n r_obj_rot = RotationFilteredData(_data, r_filt, None, None, True, 'Whole Experiment', False,\r\n t_phase=t_phase, t_ofs=t_ofs)\r\n r_data.ch_depth_ms, r_data.ch_region_ms, r_data.ch_layer_ms = \\\r\n cfcn.get_channel_depths_tt(_data._cluster, r_filt['t_type'])\r\n\r\n # calculates the individual trial/mean spiking rates and sets up the plot/stats arrays\r\n sp_f0_rot, sp_f_rot = cf.calc_phase_spike_freq(r_obj_rot)\r\n s_plt, _, sf_stats, ind = cf.setup_spike_freq_plot_arrays(r_obj_rot, sp_f0_rot, sp_f_rot, None, 3)\r\n r_data.plt_rms, r_data.stats_rms, r_data.ind_rms = s_plt, sf_stats, ind\r\n r_data.r_filt_rms = dcopy(r_filt)\r\n\r\n ##########################################################\r\n #### ROTATION DISCRIMINATION ANALYSIS FUNCTIONS ####\r\n ##########################################################\r\n\r\n elif self.thread_job_secondary == 'Rotation Direction LDA':\r\n # if the solver parameter have not been set, then initalise them\r\n d_data = data.discrim.dir\r\n\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=d_data)\r\n\r\n # sets up the lda values\r\n r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, d_data,\r\n w_prog, w_err=w_err)\r\n if status == 0:\r\n # if there was an error in the calculations, then return an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n elif status == 2:\r\n # if an update in the calculations is required, then run the rotation LDA analysis\r\n if not cfcn.run_rot_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max,\r\n d_data=d_data, w_prog=w_prog):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n elif self.thread_job_secondary == 'Temporal Duration/Offset LDA':\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.temp)\r\n\r\n # if the temporal data parameters have changed/has not been initialised then calculate the values\r\n if data.discrim.temp.lda is None:\r\n # checks to see if any base LDA calculation parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.temp)\r\n\r\n # sets up the important arrays for the LDA\r\n r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.temp,\r\n w_prog, w_err=w_err)\r\n if status == 0:\r\n # if there was an error in the calculations, then return an error flag\r\n 
self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n # if an update in the calculations is required, then run the temporal LDA analysis\r\n if status == 2:\r\n if not self.run_temporal_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max):\r\n # if there was an error in the calculations, then return an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n elif self.thread_job_secondary == 'Individual LDA':\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.indiv)\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.dir)\r\n\r\n # sets up the important arrays for the LDA\r\n r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.dir,\r\n w_prog, True, w_err=w_err)\r\n if status == 0:\r\n # if there was an error in the calculations, then return an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n elif status == 2:\r\n # if an update in the calculations is required, then run the rotation LDA analysis\r\n if not cfcn.run_rot_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max,\r\n d_data=data.discrim.dir, w_prog=w_prog):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n # if the individual data parameters have changed/has not been initialised then calculate the values\r\n if data.discrim.indiv.lda is None:\r\n # runs the individual LDA\r\n if not self.run_individual_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max):\r\n # if there was an error in the calculations, then return an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n elif self.thread_job_secondary == 'Shuffled LDA':\r\n # checks to see if any parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.shuffle)\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.dir)\r\n\r\n # sets up the important arrays for the LDA\r\n r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.dir,\r\n w_prog, True, w_err=w_err)\r\n if status == 0:\r\n # if there was an error in the calculations, then return an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n elif status == 2:\r\n # if an update in the calculations is required, then run the rotation LDA analysis\r\n if not cfcn.run_rot_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max,\r\n d_data=data.discrim.dir, w_prog=w_prog):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n # runs the shuffled LDA\r\n if not self.run_shuffled_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max):\r\n # if there was an error in the calculations, then return an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n elif self.thread_job_secondary == 'Pooled Neuron LDA':\r\n # resets the minimum cell count and checks if the pooled parameters have been altered\r\n # calc_para['lda_para']['n_cell_min'] = calc_para['n_cell_min']\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.part)\r\n\r\n # if the pooled data parameters have changed/has not been initialised then calculate the values\r\n if data.discrim.part.lda is None:\r\n # 
checks to see if any base LDA calculation parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.dir)\r\n\r\n # sets up the important arrays for the LDA\r\n r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.dir,\r\n w_prog, True, w_err=w_err)\r\n if not calc_para['pool_expt']:\r\n if status == 0:\r\n # if there was an error in the calculations, then return an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n # elif status == 2:\r\n # # if an update in the calculations is required, then run the rotation LDA analysis\r\n # if not cfcn.run_rot_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max,\r\n # d_data=data.discrim.dir, w_prog=w_prog):\r\n # self.is_ok = False\r\n # self.work_finished.emit(thread_data)\r\n # return\r\n\r\n # runs the partial LDA\r\n if not self.run_pooled_lda(pool, data, calc_para, r_filt, i_expt, i_cell, n_trial_max):\r\n # if there was an error in the calculations, then return an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n elif self.thread_job_secondary == 'Individual Cell Accuracy Filtered LDA':\r\n # check to see if the individual LDA calculations have been performed\r\n if data.discrim.indiv.lda is None:\r\n # if the individual LDA has not been run, then output an error to screen\r\n e_str = 'The Individual LDA must be run first before this analysis can be performed'\r\n w_err.emit(e_str, 'Missing Individual LDA Data')\r\n\r\n # sets the ok flag to false and exit the function\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n #\r\n _calc_para = dcopy(calc_para)\r\n _calc_para['comp_cond'] = dcopy(data.discrim.indiv.ttype)\r\n\r\n #########################################\r\n #### ROTATION LDA CALCULATIONS ####\r\n #########################################\r\n\r\n # sets the min/max accuracy values\r\n _calc_para['lda_para']['y_acc_min'] = 0\r\n _calc_para['lda_para']['y_acc_max'] = 100\r\n\r\n # checks to see if any base LDA calculation parameters have been altered\r\n self.check_altered_para(data, _calc_para, g_para, ['lda'], other_para=data.discrim.dir)\r\n\r\n # sets up the important arrays for the LDA\r\n r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, _calc_para, data.discrim.dir,\r\n w_prog, True, w_err=w_err)\r\n if status == 0:\r\n # if there was an error in the calculations, then return an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n elif status == 2:\r\n # if an update in the calculations is required, then run the rotation LDA analysis\r\n if not cfcn.run_rot_lda(data, _calc_para, r_filt, i_expt, i_cell, n_trial_max,\r\n d_data=data.discrim.dir, w_prog=w_prog, pW=50.):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n #########################################\r\n #### FILTERED LDA CALCULATIONS ####\r\n #########################################\r\n\r\n # sets the min/max accuracy values\r\n _calc_para['lda_para']['y_acc_min'] = _calc_para['y_acc_min']\r\n _calc_para['lda_para']['y_acc_max'] = _calc_para['y_acc_max']\r\n\r\n # checks to see if any base LDA calculation parameters have been altered\r\n self.check_altered_para(data, _calc_para, g_para, ['lda'], other_para=data.discrim.filt)\r\n\r\n # sets up the important arrays for the LDA\r\n r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, _calc_para, 
data.discrim.filt,\r\n w_prog, True, w_err=w_err)\r\n if status == 0:\r\n # if there was an error in the calculations, then return an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n elif status == 2:\r\n # if an update in the calculations is required, then run the rotation LDA analysis\r\n if not cfcn.run_rot_lda(data, _calc_para, r_filt, i_expt, i_cell, n_trial_max,\r\n d_data=data.discrim.filt, w_prog=w_prog, pW=50., pW0=50.):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n else:\r\n # otherwise, update the calculation parameters\r\n data.discrim.filt.yaccmn = _calc_para['y_acc_min']\r\n data.discrim.filt.yaccmx = _calc_para['y_acc_max']\r\n\r\n elif self.thread_job_secondary == 'LDA Group Weightings':\r\n # checks to see if the data class as changed parameters\r\n d_data, w_prog = data.discrim.wght, self.work_progress\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=d_data)\r\n\r\n # sets up the lda values\r\n r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, d_data,\r\n w_prog, w_err=w_err)\r\n if status == 0:\r\n # if there was an error in the calculations, then return an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n elif status == 2:\r\n # if an update in the calculations is required, then run the rotation LDA analysis\r\n if not self.run_wght_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n #######################################################\r\n #### SPEED DISCRIMINATION ANALYSIS FUNCTIONS ####\r\n #######################################################\r\n\r\n elif self.thread_job_secondary == 'Speed LDA Accuracy':\r\n # checks to see if any base LDA calculation parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.spdacc)\r\n\r\n # if the pooled data parameters have changed/has not been initialised then calculate the values\r\n if data.discrim.spdc.lda is None:\r\n\r\n # sets up the important arrays for the LDA\r\n r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.spdacc,\r\n w_prog, True, w_err=w_err)\r\n if status == 0:\r\n # if there was an error in the calculations, then return an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n elif status == 2:\r\n if not self.run_speed_lda_accuracy(data, calc_para, r_filt, i_expt, i_cell, n_trial_max, w_prog):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n elif self.thread_job_secondary == 'Speed LDA Comparison (Individual Experiments)':\r\n # checks to see if any base LDA calculation parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.spdc)\r\n\r\n # if the pooled data parameters have changed/has not been initialised then calculate the values\r\n if data.discrim.spdc.lda is None:\r\n\r\n # sets up the important arrays for the LDA\r\n r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.spdc,\r\n w_prog, True, w_err=w_err)\r\n if status == 0:\r\n # if there was an error in the calculations, then return an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n elif status == 2:\r\n # if an update in the calculations is required, then run 
the rotation LDA analysis\r\n if not self.run_kinematic_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max, w_prog):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n elif self.thread_job_secondary == 'Speed LDA Comparison (Pooled Experiments)':\r\n # checks to see if any base LDA calculation parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.spdcp)\r\n\r\n # if the pooled data parameters have changed/has not been initialised then calculate the values\r\n if data.discrim.spdcp.lda is None:\r\n\r\n # sets up the important arrays for the LDA\r\n r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.spdcp,\r\n w_prog, True, w_err=w_err)\r\n if status == 0:\r\n # if there was an error in the calculations, then return an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n # elif status == 2:/\r\n\r\n # if an update in the calculations is required, then run the rotation LDA analysis\r\n if not self.run_pooled_kinematic_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max,\r\n w_prog):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n # # calculates the psychometric curves\r\n # w_prog.emit('Calculating Pyschometric Curves', 100.)\r\n # cfcn.calc_all_psychometric_curves(data.discrim.spdcp, float(calc_para['vel_bin']), calc_para['use_all'])\r\n\r\n elif self.thread_job_secondary == 'Velocity Direction Discrimination LDA':\r\n # checks to see if any base LDA calculation parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.spddir)\r\n\r\n # if the pooled data parameters have changed/has not been initialised then calculate the values\r\n if data.discrim.spddir.lda is None:\r\n\r\n # sets up the important arrays for the LDA\r\n r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.spddir,\r\n w_prog, True, w_err=w_err)\r\n if status == 0:\r\n # if there was an error in the calculations, then return an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n elif status == 2:\r\n if not self.run_speed_dir_lda_accuracy(data, calc_para, r_filt, i_expt, i_cell,\r\n n_trial_max, w_prog):\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n #######################################\r\n #### MISCELLANEOUS FUNCTIONS ####\r\n #######################################\r\n\r\n elif self.thread_job_secondary == 'Velocity Multilinear Regression Dataframe Output':\r\n # checks to see if any base spiking frequency dataframe parameters have been altered\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['spikedf'], other_para=data.spikedf)\r\n\r\n # checks to see if the overlap duration is less than the time bin size\r\n if calc_para['t_over'] >= calc_para['bin_sz']:\r\n # if not, then output an error to screen\r\n e_str = 'Bin Overlap Duration must be less than the Time Bin Size.\\n' \\\r\n 'Reset these parameters before running this function.'\r\n w_err.emit(e_str, 'Incorrect Function Parameters')\r\n\r\n # exits the function with an error flag\r\n self.is_ok = False\r\n self.work_finished.emit(thread_data)\r\n return\r\n\r\n # only continue if the spiking frequency dataframe has not been set up\r\n if not data.spikedf.is_set:\r\n self.setup_spiking_freq_dataframe(data, calc_para)\r\n\r\n elif 
self.thread_job_secondary == 'Autocorrelogram Theta Index Calculations':\r\n # case to see if any parameters have changed\r\n self.check_altered_para(data, calc_para, plot_para, g_para, ['theta'], other_para=data.theta_index)\r\n\r\n # only continue if the theta index dataframe has not been setup\r\n if not data.theta_index.is_set:\r\n self.calc_auto_ccgram_fft(data, calc_para)\r\n\r\n ###############################\r\n #### OTHER FUNCTIONS ####\r\n ###############################\r\n\r\n elif self.thread_job_secondary == 'Shuffled Cluster Distances':\r\n # case is the shuffled cluster distances\r\n thread_data = self.calc_shuffled_cluster_dist(calc_para, data.cluster)\r\n\r\n elif self.thread_job_primary == 'update_plot':\r\n pass\r\n\r\n # emits the finished work signal\r\n self.work_finished.emit(thread_data)\r\n\r\n ############################################\r\n #### THREAD CALCULATION FUNCTIONS ####\r\n ############################################\r\n\r\n def load_data_file(self):\r\n '''\r\n\r\n :param exp_file:\r\n :return:\r\n '''\r\n\r\n # retrieves the job parameters\r\n load_dlg, loaded_exp, is_multi = self.thread_job_para[0], self.thread_job_para[1], self.thread_job_para[2]\r\n if not np.any([not x in loaded_exp for x in load_dlg.exp_name]):\r\n # if there are no new experiments to load, then exit the function\r\n return None\r\n else:\r\n n_file = len(load_dlg.exp_files)\r\n dpw, p_rlx, data = 1.0 / n_file, 0.05, []\r\n _, f_extn = os.path.splitext(load_dlg.exp_files[0])\r\n\r\n #\r\n for i_file in range(n_file):\r\n if not self.is_running:\r\n # if the user cancelled, then exit\r\n return None\r\n else:\r\n # updates the progress bar string\r\n p_str, pw0 = 'Loading File {0} of {1}'.format(i_file+1, n_file), i_file / n_file\r\n self.work_progress.emit(p_str, 100.0 * pw0)\r\n\r\n # sets the experiment file and name\r\n if load_dlg.exp_name[i_file] not in loaded_exp:\r\n # loads the data from the data file\r\n with open(load_dlg.exp_files[i_file], 'rb') as fp:\r\n data_nw = p.load(fp)\r\n\r\n # setting of other fields\r\n if isinstance(data_nw, dict):\r\n data_nw['expFile'] = load_dlg.exp_files[i_file]\r\n\r\n # re-calculates the signal features (single experiment only)\r\n if f_extn == '.cdata':\r\n if np.shape(data_nw['sigFeat'])[1] == 5:\r\n # memory allocation for the signal features\r\n xi = np.array(range(data_nw['nPts']))\r\n sFeat = np.zeros((data_nw['nC'], 2))\r\n\r\n for i in range(data_nw['nC']):\r\n # creates the piecewise-polynomial of the mean signal\r\n pp, t_max = pchip(xi, data_nw['vMu'][:, i]), data_nw['sigFeat'][i, 2]\r\n t_min = np.argmin(data_nw['vMu'][int(t_max):, i]) + t_max\r\n v_max_2 = data_nw['vMu'][int(t_max), i] / 2.0\r\n v_min = np.min(data_nw['vMu'][int(t_max):, i])\r\n v_half = data_nw['vMu'][int(data_nw['sigFeat'][i, 1]), i] / 2.0\r\n\r\n ##################################################\r\n #### POST-STIMULI SPIKE HALF-WIDTH TIME ####\r\n ##################################################\r\n\r\n # determines the point/voltage of the pmaximum proceding the minimum\r\n bnd_1 = [(data_nw['sigFeat'][i, 0], data_nw['sigFeat'][i, 1])]\r\n bnd_2 = [(data_nw['sigFeat'][i, 1], data_nw['sigFeat'][i, 2])]\r\n bnd_3 = [(data_nw['sigFeat'][i, 2], t_min)]\r\n\r\n # determines the location of the half-width points\r\n t_hw1_lo = cfcn.opt_time_to_y0((pp, v_half), bnd_1)\r\n t_hw1_hi = cfcn.opt_time_to_y0((pp, v_half), bnd_2)\r\n t_hw2_lo = cfcn.opt_time_to_y0((pp, v_max_2), bnd_2)\r\n t_hw2_hi = cfcn.opt_time_to_y0((pp, v_max_2), bnd_3)\r\n t_rlx = 
cfcn.opt_time_to_y0((pp, v_min + p_rlx * (v_max_2 - v_min)), bnd_3)\r\n\r\n # determine if it is feasible to find the 2nd peak half-width point\r\n if (t_hw2_hi is None) or (t_rlx is None):\r\n # if not, then linearly extrapolate past the end point of the signal\r\n xi2 = np.array(range(2*xi[-1]))\r\n ppL = IUS(xi, data_nw['vMu'][:, i], k=1)\r\n\r\n # determines the half-width/relaxtion time from the extrapolated signal\r\n bnd_4 = [(data_nw['sigFeat'][i, 2], xi2[-1])]\r\n t_hw2_hi = cfcn.opt_time_to_y0((ppL, v_max_2), bnd_4)\r\n t_rlx = cfcn.opt_time_to_y0((ppL, v_min + p_rlx * (v_max_2 - v_min)), bnd_4)\r\n\r\n # calculates the new signal features\r\n data_nw['sigFeat'][i, 3] = t_hw1_lo\r\n data_nw['sigFeat'][i, 4] = t_hw1_hi\r\n sFeat[i, 0] = t_hw2_hi - t_hw2_lo\r\n sFeat[i, 1] = t_rlx - t_max\r\n\r\n # concatenates the new signal feature date\r\n data_nw['sigFeat'] = np.concatenate((data_nw['sigFeat'], sFeat), axis=1)\r\n\r\n # sets the cell cluster include indices (if not already set)\r\n if 'clInclude' not in data_nw['expInfo']:\r\n data_nw['expInfo']['clInclude'] = np.ones(data_nw['nC'], dtype=bool)\r\n\r\n # appends the new data dictionary to the overall data list\r\n data.append(data_nw)\r\n\r\n # appends the current filename to the data dictionary and returns the object\r\n return data\r\n\r\n def save_multi_expt_file(self, data, out_info):\r\n '''\r\n\r\n :return:\r\n '''\r\n\r\n # updates the progressbar\r\n self.work_progress.emit('Saving Data To File...', 50.0)\r\n\r\n # sets the file extension (based on the data type)\r\n if hasattr(data.comp, 'data'):\r\n f_extn = 'mdata' if len(data.comp.data) == 0 else 'mcomp'\r\n else:\r\n f_extn = 'mdata'\r\n\r\n # sets the output file name\r\n out_file = os.path.join(out_info['inputDir'], '{0}.{1}'.format(out_info['dataName'], f_extn))\r\n\r\n # outputs the data to file\r\n with open(out_file, 'wb') as fw:\r\n p.dump(data, fw)\r\n\r\n # updates the progressbar\r\n self.work_progress.emit('Data Save Complete!', 100.0)\r\n\r\n def save_multi_comp_file(self, data, out_info):\r\n '''\r\n\r\n :return:\r\n '''\r\n\r\n # updates the progressbar\r\n self.work_progress.emit('Saving Data To File...', 50.0)\r\n\r\n # memory allocation\r\n n_file = len(out_info['exptName'])\r\n\r\n # sets the output file name\r\n out_file = os.path.join(out_info['inputDir'], '{0}.mcomp'.format(out_info['dataName']))\r\n\r\n # output data file\r\n data_out = {\r\n 'data': np.empty((n_file, 2), dtype=object),\r\n 'c_data': np.empty(n_file, dtype=object),\r\n 'ff_corr': data.comp.ff_corr if hasattr(data.comp, 'ff_corr') else None,\r\n 'f_data': data.externd.free_data if hasattr(data.externd, 'free_data') else None\r\n }\r\n\r\n for i_file in range(n_file):\r\n # retrieves the index of the data field corresponding to the current experiment\r\n fix_file = out_info['exptName'][i_file].split('/')[0]\r\n i_comp = cf.det_comp_dataset_index(data.comp.data, fix_file)\r\n\r\n # creates the multi-experiment data file based on the type\r\n data_out['c_data'][i_file] = data.comp.data[i_comp]\r\n data_out['data'][i_file, 0], data_out['data'][i_file, 1] = \\\r\n cf.get_comp_datasets(data, c_data=data_out['c_data'][i_file], is_full=True)\r\n\r\n # outputs the data to file\r\n with open(out_file, 'wb') as fw:\r\n p.dump(data_out, fw)\r\n\r\n # updates the progressbar\r\n self.work_progress.emit('Data Save Complete!', 100.0)\r\n\r\n def init_pool_worker(self):\r\n '''\r\n\r\n :return:\r\n '''\r\n\r\n # creates the pool worker object\r\n p = mp.Pool(int(np.floor(1.5 * 
mp.cpu_count())))\r\n\r\n # returns the object\r\n return p\r\n\r\n def init_cluster_data(self):\r\n '''\r\n\r\n :return:\r\n '''\r\n\r\n def map_cluster_depths():\r\n '''\r\n\r\n :param cluster_depth:\r\n :return:\r\n '''\r\n\r\n # retrieves the cluster depths from the spike I/O class object\r\n cluster_depth = sp_io.get_cluster_depths(cluster_ids)\r\n\r\n # sets the mapped cluster depths based on the file type\r\n if (exp_info['dmapFile'] is None) or (len(exp_info['dmapFile']) == 0):\r\n # no map is given so return the original depth values\r\n return cluster_depth, None\r\n else:\r\n # otherwise, map the cluster depth values from the probe to actual values\r\n data = np.array(pd.read_csv(exp_info['dmapFile']))\r\n if np.size(data, axis=1) < 4:\r\n # if the mapping file is not correct, then output an error to screen\r\n e_str = 'Channel mapping file does not have the correct format.\\n\\n' \\\r\n 'Re-select a valid file before attempting to initialise the combined data files.'\r\n self.work_error.emit(e_str, 'Invalid Channel Mapping File')\r\n\r\n # return none values indicating the error\r\n return None, None\r\n else:\r\n # otherwise, return the mapped channel depths and the other mapping values\r\n return np.array([data[data[:, 1] == x, 0][0] for x in cluster_depth]), data[:, :4]\r\n\r\n # retrieves the job parameters\r\n exp_info, out_name, g_para = self.thread_job_para[0], self.thread_job_para[1], self.thread_job_para[2]\r\n\r\n # sets the global parameters\r\n n_hist = int(g_para['n_hist'])\r\n n_spike = int(g_para['n_spike'])\r\n cluster_ids = None\r\n\r\n # retrieves the spike I/O data and sets the cluster IDs based on the cluster type\r\n sp_io = spike_io.SpikeIo(exp_info['srcDir'], exp_info['traceFile'], int(exp_info['nChan']))\r\n if exp_info['clusterType'] == 'Good':\r\n # case is the good clusters\r\n if hasattr(sp_io, 'good_cluster_ids'):\r\n cluster_ids = sp_io.good_cluster_ids\r\n elif exp_info['clusterType'] == 'MUA':\r\n # case is the multi-unit clusters\r\n if hasattr(sp_io, 'MUA_cluster_ids'):\r\n cluster_ids = sp_io.MUA_cluster_ids\r\n\r\n if cluster_ids is None:\r\n e_str = 'Cluster group file is missing? 
Please re-run with cluster-group file in the source data directory'\r\n self.work_error.emit(e_str, 'Cluster Group File Missing!')\r\n return\r\n\r\n # retrieves the clusters spike data and channel depths\r\n self.work_progress.emit('Reshaping Cluster Data...', 0.0)\r\n clusters = [ClusterRead(sp_io, cid) for cid in cluster_ids]\r\n\r\n # determines the channel depths mapping\r\n depth, channel_map_data = map_cluster_depths()\r\n if depth is None:\r\n # if the file has an incorrect format, then exit the function\r\n return\r\n\r\n # determines if the mapping values were set correctly\r\n if channel_map_data is not None:\r\n # if so, then determine the region/recording layers\r\n y_coords = channel_map_data[:, 3]\r\n depthLo, depthHi = np.array(exp_info['depthLo']).astype(int), np.array(exp_info['depthHi']).astype(int)\r\n indD = np.array([next((i for i in range(len(depthHi)) if x <= depthHi[i]), len(depthHi)-1) for x in y_coords])\r\n chRegion = np.array(exp_info['regionName'])[indD][depth.astype(int)]\r\n chLayer = np.array(exp_info['recordLayer'])[indD][depth.astype(int)]\r\n\r\n else:\r\n # otherwise, return N/A for the region/recording layers\r\n chRegion, chLayer = ['N/A'] * len(clusters), ['N/A'] * len(clusters)\r\n depthLo, depthHi = None, None\r\n\r\n # sets the signal point-wise/ISI bin vectors\r\n xi_pts_H = np.linspace(-200, 100, n_hist + 1)\r\n xi_isi_H = np.linspace(0, 1000, n_hist + 1)\r\n\r\n # creates the recording/experimental information sub-dictionaries\r\n expInfo = {'name': exp_info['expName'], 'date': exp_info['expDate'], 'cond': exp_info['expCond'],\r\n 'type': exp_info['expType'], 'sex': exp_info['expSex'], 'age': exp_info['expAge'],\r\n 'probe': exp_info['expProbe'], 'lesion': exp_info['lesionType'], 'channel_map': channel_map_data,\r\n 'cluster_type': exp_info['clusterType'], 'other_info': exp_info['otherInfo'],\r\n 'record_state': exp_info['recordState'], 'record_coord': exp_info['recordCoord'],\r\n 'depth_lo': depthLo, 'depth_hi': depthHi}\r\n\r\n # memory allocation\r\n pW0, pW1, nFeat = 20.0, 60.0, 5\r\n nC, nSample = len(clusters), np.size(sp_io.traces, axis=0)\r\n sFreq, vGain = float(exp_info['sFreq']), float(exp_info['vGain'])\r\n\r\n # sets the data file dictionary object\r\n A = {\r\n 'vSpike': np.empty(nC, dtype=object), 'tSpike': np.empty(nC, dtype=object),\r\n 'vMu': None, 'vSD': None, 'ccGram': None, 'ccGramXi': None, 'sigFeat': np.zeros((nC, nFeat)),\r\n 'clustID': cluster_ids, 'expInfo': expInfo, 'chDepth': depth, 'chRegion': chRegion, 'chLayer': chLayer,\r\n 'sFreq': sFreq, 'nC': nC, 'nPts': None, 'tExp': nSample / sFreq, 'vGain': vGain,\r\n 'isiHist': np.empty(nC, dtype=object), 'isiHistX': xi_isi_H,\r\n 'ptsHist': np.empty(nC, dtype=object), 'ptsHistX': xi_pts_H,\r\n 'rotInfo': None,\r\n }\r\n\r\n # sets up the rotation analysis data dictionary\r\n A['rotInfo'] = rot.load_rot_analysis_data(A, exp_info, sp_io, w_prog=self.work_progress, pW0=pW0)\r\n\r\n # sets up the sub-job flags\r\n self.sub_job = np.zeros(nC, dtype=bool)\r\n\r\n # retrieves the cluster data\r\n for i, c in enumerate(clusters):\r\n if not self.is_running:\r\n # if the user cancelled, then exit the function\r\n return\r\n else:\r\n # updates the main gui progressnbar\r\n pW = pW0 + pW1 * (i + 1) / nC\r\n self.work_progress.emit('Processing Cluster {0} of {1}'.format(i + 1, nC), pW)\r\n\r\n ###################################################\r\n #### DATA RETRIEVAL & MEMORY ALLOCATIONS ####\r\n ###################################################\r\n\r\n # retrieves the spike 
voltage/timing\r\n v_spike = c.channel_waveforms\r\n t_spike = 1000.0 * sp_io.get_spike_times_in_cluster(cluster_ids[i]) / sFreq\r\n\r\n # memory allocation (only for the first cluster)\r\n if i == 0:\r\n A['nPts'] = np.size(v_spike, axis=0)\r\n A['vMu'] = np.zeros((A['nPts'], nC), dtype=float)\r\n A['vSD'] = np.zeros((A['nPts'], nC), dtype=float)\r\n xi = np.array(range(A['nPts']))\r\n\r\n ###############################################\r\n #### MAIN METRIC CALCULATION/STORAGE ####\r\n ###############################################\r\n\r\n # sets the values into the final array\r\n A['vSpike'][i] = v_spike[:, :n_spike] * vGain\r\n A['tSpike'][i] = t_spike[:np.size(v_spike, axis=1)]\r\n\r\n # calculates the mean/standard deviation of the voltage spikes\r\n A['vMu'][:, i] = np.mean(v_spike, axis=1) * vGain\r\n A['vSD'][:, i] = np.std(v_spike, axis=1) * vGain\r\n\r\n ######################################\r\n #### HISTOGRAM CALCULATIONS ####\r\n ######################################\r\n\r\n # calculates the point-wise histograms\r\n A['ptsHist'][i] = np.zeros((A['nPts'], n_hist), dtype=int)\r\n for iPts in range(A['nPts']):\r\n H = np.histogram(v_spike[iPts, :], bins=xi_pts_H)\r\n A['ptsHist'][i][iPts, :] = H[0]\r\n\r\n # calculates the ISI histograms\r\n dT = np.diff(A['tSpike'][i])\r\n dT = dT[dT <= xi_isi_H[-1]]\r\n H_isi = np.histogram(dT, bins=xi_isi_H, range=(xi_isi_H[0], xi_isi_H[-1]))\r\n A['isiHist'][i] = H_isi[0]\r\n\r\n ###########################################\r\n #### SIGNAL FEATURE CALCULATIONS ####\r\n ###########################################\r\n\r\n # creates the piecewise-polynomial of the mean signal\r\n pp = pchip(xi, A['vMu'][:, i])\r\n\r\n # determines the point/voltage of the pmaximum proceding the minimum\r\n i_min = np.argmin(A['vMu'][:, i])\r\n i_max1 = np.argmax(A['vMu'][:i_min, i])\r\n i_max2 = np.argmax(A['vMu'][i_min:, i]) + i_min\r\n\r\n # determines the location of the half-width points\r\n v_half = (min(pp(i_max1), pp(i_max2)) + pp(i_min)) / 2.0\r\n t_lo = cfcn.opt_time_to_y0((pp, v_half), [(i_max1, i_min)])\r\n t_hi = cfcn.opt_time_to_y0((pp, v_half), [(i_min, i_max2)])\r\n\r\n # sets the signal features into the final array\r\n A['sigFeat'][i, :] = [i_max1, i_min, i_max2, t_lo, t_hi]\r\n\r\n # memory garbage collection\r\n gc.collect()\r\n\r\n ######################################################\r\n #### CLUSTER CROSS-CORRELOGRAM CALCULATIONS ####\r\n ######################################################\r\n\r\n # memory allocation\r\n win_size = 50\r\n\r\n # calculates the cross-correlation between each signal from each cluster\r\n for i_row in range(nC):\r\n if not self.is_running:\r\n # if the user cancelled, then exit the function\r\n return\r\n else:\r\n # updates the main gui progressbar\r\n pW = (pW0 + pW1) + (100.0 - (pW0 + pW1)) * (i_row + 1) / (nC + 1)\r\n self.work_progress.emit('Calculating CC-Grams...', pW)\r\n\r\n # calculates the cross-correlograms between each of the other clusters\r\n for j_row in range(nC):\r\n if (i_row == 0) and (j_row == 0):\r\n # case is the first cluster so allocate memory and set the time bin array\r\n ccGram, A['ccGramXi'] = cfcn.calc_ccgram(A['tSpike'][i_row], A['tSpike'][j_row], win_size)\r\n A['ccGram'] = np.zeros((nC, nC, len(ccGram)))\r\n A['ccGram'][i_row, j_row, :] = ccGram\r\n else:\r\n # otherwise, set the new values directly into the array\r\n A['ccGram'][i_row, j_row, :], _ = cfcn.calc_ccgram(A['tSpike'][i_row], A['tSpike'][j_row], win_size)\r\n\r\n #################################\r\n #### 
FINAL DATA OUTPUT ####\r\n #################################\r\n\r\n # dumps the cluster data to file\r\n self.work_progress.emit('Outputting Data To File...', 99.0)\r\n cf.save_single_file(out_name, A)\r\n\r\n ##########################################\r\n #### CLUSTER MATCHING FUNCTIONS ####\r\n ##########################################\r\n\r\n def det_cluster_matches(self, data, calc_para, w_prog):\r\n '''\r\n\r\n :param exp_name:\r\n :param comp_dlg:\r\n :return:\r\n '''\r\n\r\n # retrieves the comparison dataset\r\n i_comp = cf.det_comp_dataset_index(data.comp.data, calc_para['calc_comp'])\r\n c_data, data.comp.last_comp = data.comp.data[i_comp], i_comp\r\n\r\n # if there is no further calculation necessary, then exit the function\r\n if c_data.is_set:\r\n return\r\n\r\n # updates the cluster matching parameters\r\n c_data.is_set = True\r\n c_data.d_max = calc_para['d_max']\r\n c_data.r_max = calc_para['r_max']\r\n c_data.sig_corr_min = calc_para['sig_corr_min']\r\n c_data.isi_corr_min = calc_para['isi_corr_min']\r\n c_data.sig_diff_max = calc_para['sig_diff_max']\r\n c_data.sig_feat_min = calc_para['sig_feat_min']\r\n c_data.w_sig_feat = calc_para['w_sig_feat']\r\n c_data.w_sig_comp = calc_para['w_sig_comp']\r\n c_data.w_isi = calc_para['w_isi']\r\n\r\n # retrieves the fixed/free cluster dataframes\r\n data_fix, data_free = cf.get_comp_datasets(data, c_data=c_data, is_full=True)\r\n\r\n def det_overall_cluster_matches(is_feas, D):\r\n '''\r\n\r\n :param data_fix:\r\n :param data_free:\r\n :param D:\r\n :return:\r\n '''\r\n\r\n # calculates the pair-wise SS distances between each the fixed/free mean signals\r\n iDsort, n_rows = np.argsort(D.T, axis=None), np.size(D, axis=0)\r\n\r\n # memory allocation\r\n isFix = np.zeros(data_fix['nC'], dtype=bool)\r\n isFree = np.zeros(data_free['nC'], dtype=bool)\r\n i_match = -np.ones(data_fix['nC'], dtype=int)\r\n\r\n # determines the overall unique\r\n for i in range(len(iDsort)):\r\n # determines the indices of the next best match\r\n iR, iC = cfcn.ind2sub(n_rows, iDsort[i])\r\n if not (isFix[iR] or isFree[iC]) and is_feas[iR, iC]:\r\n # if there is not already a match, then update the match arrays\r\n i_match[iR] = iC\r\n isFix[iR], isFree[iC] = True, True\r\n if all(isFix) or all(isFree):\r\n # if all matches are found, then exit the loop\r\n break\r\n\r\n # returns the final match array\r\n return i_match\r\n\r\n def det_cluster_matches_old(c_data, is_feas, d_depth):\r\n '''\r\n\r\n :param data_fix:\r\n :param data_free:\r\n :return:\r\n '''\r\n\r\n # parameters\r\n z_max = 1.0\r\n\r\n # calculates the inter-signal euclidean distances\r\n DD = cdist(data_fix['vMu'].T, data_free['vMu'].T)\r\n\r\n # determines the matches based on the signal euclidean distances\r\n c_data.i_match_old = det_overall_cluster_matches(is_feas, DD)\r\n\r\n # calculates the correlation coefficients between the best matching signals\r\n for i in range(data_fix['nC']):\r\n # calculation of the z-scores\r\n i_match = c_data.i_match_old[i]\r\n if i_match >= 0:\r\n # z-score calculations\r\n dW = data_fix['vMu'][:, i] - data_free['vMu'][:, i_match]\r\n c_data.z_score[:, i] = np.divide(dW, data_fix['vSD'][:, i])\r\n\r\n # calculates the correlation coefficient\r\n CC = np.corrcoef(data_fix['vMu'][:, i], data_free['vMu'][:, i_match])\r\n c_data.sig_corr_old[i] = CC[0, 1]\r\n c_data.sig_diff_old[i] = DD[i, i_match]\r\n c_data.d_depth_old[i] = d_depth[i, i_match]\r\n\r\n # sets the acceptance flag. 
for a cluster to be accepted, the following must be true:\r\n # * the maximum absolute z-score must be < z_max\r\n # * the correlation coefficient between the fixed/free signals must be > sig_corr_min\r\n c_data.is_accept_old[i] = np.max(np.abs(c_data.z_score[:, i])) < z_max and \\\r\n c_data.sig_corr_old[i] > c_data.sig_corr_min\r\n else:\r\n # sets NaN values for all the single value metrics\r\n c_data.sig_corr[i] = np.nan\r\n c_data.d_depth_old[i] = np.nan\r\n\r\n # ensures the group is rejected\r\n c_data.is_accept_old[i] = False\r\n\r\n def det_cluster_matches_new(c_data, is_feas, d_depth, r_spike, w_prog):\r\n '''\r\n\r\n :param data_fix:\r\n :param data_free:\r\n :return:\r\n '''\r\n\r\n # parameters\r\n pW = 100.0 / 7.0\r\n\r\n # memory allocation\r\n signal_metrics = np.zeros((data_fix['nC'], data_free['nC'], 4))\r\n isi_metrics = np.zeros((data_fix['nC'], data_free['nC'], 3))\r\n isi_metrics_norm = np.zeros((data_fix['nC'], data_free['nC'], 3))\r\n total_metrics = np.zeros((data_fix['nC'], data_free['nC'], 3))\r\n\r\n # initialises the comparison data object\r\n w_prog.emit('Calculating Signal DTW Indices', pW)\r\n c_data = cfcn.calc_dtw_indices(c_data, data_fix, data_free, is_feas)\r\n\r\n # calculates the signal feature metrics\r\n w_prog.emit('Calculating Signal Feature Metrics', 2.0 * pW)\r\n signal_feat = cfcn.calc_signal_feature_diff(data_fix, data_free, is_feas)\r\n\r\n # calculates the signal direct matching metrics\r\n w_prog.emit('Calculating Signal Comparison Metrics', 3.0 * pW)\r\n cc_dtw, dd_dtw, dtw_scale = \\\r\n cfcn.calc_signal_corr(c_data.i_dtw, data_fix, data_free, is_feas)\r\n\r\n signal_metrics[:, :, 0] = cc_dtw\r\n signal_metrics[:, :, 1] = 1.0 - dd_dtw\r\n signal_metrics[:, :, 2] = dtw_scale\r\n signal_metrics[:, :, 3] = \\\r\n cfcn.calc_signal_hist_metrics(data_fix, data_free, is_feas, cfcn.calc_hist_intersect, max_norm=True)\r\n\r\n # calculates the ISI histogram metrics\r\n w_prog.emit('Calculating ISI Histogram Comparison Metrics', 4.0 * pW)\r\n isi_metrics[:, :, 0], isi_metrics_norm[:, :, 0] = \\\r\n cfcn.calc_isi_corr(data_fix, data_free, is_feas)\r\n isi_metrics[:, :, 1], isi_metrics_norm[:, :, 1] = \\\r\n cfcn.calc_isi_hist_metrics(data_fix, data_free, is_feas, cfcn.calc_hist_intersect, max_norm=True)\r\n # isi_metrics[:, :, 2], isi_metrics_norm[:, :, 2] = \\\r\n # cfcn.calc_isi_hist_metrics(data_fix, data_free, is_feas, cfcn.calc_wasserstein, max_norm=False)\r\n # isi_metrics[:, :, 3], isi_metrics_norm[:, :, 3] = \\\r\n # cfcn.calc_isi_hist_metrics(data_fix, data_free, is_feas, cfcn.calc_bhattacharyya, max_norm=True)\r\n\r\n # sets the isi relative spiking rate metrics\r\n isi_metrics[:, :, 2] = np.nan\r\n for i_row in range(np.size(r_spike, axis=0)):\r\n isi_metrics[i_row, is_feas[i_row, :], 2] = r_spike[i_row, is_feas[i_row, :]]\r\n isi_metrics_norm[:, :, 2] = cfcn.norm_array_rows(isi_metrics[:, :, 2], max_norm=False)\r\n\r\n # calculates the array euclidean distances (over all measures/clusters)\r\n weight_array = [c_data.w_sig_feat, c_data.w_sig_comp, c_data.w_isi]\r\n total_metrics[:, :, 0] = cfcn.calc_array_euclidean(signal_feat)\r\n total_metrics[:, :, 1] = cfcn.calc_array_euclidean(signal_metrics)\r\n total_metrics[:, :, 2] = cfcn.calc_array_euclidean(isi_metrics_norm)\r\n total_metrics_mean = cfcn.calc_weighted_mean(total_metrics, W=weight_array)\r\n\r\n # determines the unique overall cluster matches\r\n w_prog.emit('Determining Overall Cluster Matches', 5.0 * pW)\r\n c_data.i_match = det_overall_cluster_matches(is_feas, 
-total_metrics_mean)\r\n\r\n # matches which are from different regions are to be removed\r\n ii = np.where(c_data.i_match >= 0)[0]\r\n same_region = data_fix['chRegion'][ii] == data_free['chRegion'][c_data.i_match[ii]]\r\n c_data.i_match[ii[~same_region]] = -1\r\n\r\n # calculates the correlation coefficients between the best matching signals\r\n w_prog.emit('Setting Final Match Metrics', 6.0 * pW)\r\n for i in range(data_fix['nC']):\r\n # calculation of the z-scores\r\n i_match = c_data.i_match[i]\r\n if i_match >= 0:\r\n # sets the signal feature metrics\r\n c_data.match_intersect[:, i] = cfcn.calc_single_hist_metric(data_fix, data_free, i, i_match,\r\n True, cfcn.calc_hist_intersect)\r\n c_data.match_wasserstain[:, i] = cfcn.calc_single_hist_metric(data_fix, data_free, i,\r\n i_match, True, cfcn.calc_wasserstein)\r\n c_data.match_bhattacharyya[:, i] = cfcn.calc_single_hist_metric(data_fix, data_free, i,\r\n i_match, True, cfcn.calc_bhattacharyya)\r\n\r\n # sets the signal difference metrics\r\n c_data.d_depth[i] = d_depth[i, i_match]\r\n c_data.dtw_scale[i] = dtw_scale[i, i_match]\r\n c_data.sig_corr[i] = cc_dtw[i, i_match]\r\n c_data.sig_diff[i] = max(0.0, 1 - dd_dtw[i, i_match])\r\n c_data.sig_intersect[i] = signal_metrics[i, i_match, 2]\r\n\r\n # sets the isi metrics\r\n c_data.isi_corr[i] = isi_metrics[i, i_match, 0]\r\n c_data.isi_intersect[i] = isi_metrics[i, i_match, 1]\r\n\r\n # sets the total match metrics\r\n c_data.signal_feat[i, :] = signal_feat[i, i_match, :]\r\n c_data.total_metrics[i, :] = total_metrics[i, i_match, :]\r\n c_data.total_metrics_mean[i] = total_metrics_mean[i, i_match]\r\n\r\n # sets the acceptance flag. for a cluster to be accepted, the following must be true:\r\n # * the ISI correlation coefficient must be > isi_corr_min\r\n # * the signal correlation coefficient must be > sig_corr_min\r\n # * the inter-signal euclidean distance must be < sig_diff_max\r\n # * all signal feature metric similarity scores must be > sig_feat_min\r\n c_data.is_accept[i] = (c_data.isi_corr[i] > c_data.isi_corr_min) and \\\r\n (c_data.sig_corr[i] > c_data.sig_corr_min) and \\\r\n (c_data.sig_diff[i] > (1 - c_data.sig_diff_max)) and \\\r\n (np.all(c_data.signal_feat[i, :] > c_data.sig_feat_min))\r\n else:\r\n # sets NaN values for all the single value metrics\r\n c_data.d_depth[i] = np.nan\r\n c_data.dtw_scale[i] = np.nan\r\n c_data.sig_corr[i] = np.nan\r\n c_data.sig_diff[i] = np.nan\r\n c_data.sig_intersect[i] = np.nan\r\n c_data.isi_corr[i] = np.nan\r\n c_data.isi_intersect[i] = np.nan\r\n c_data.signal_feat[i, :] = np.nan\r\n c_data.total_metrics[i, :] = np.nan\r\n c_data.total_metrics_mean[i] = np.nan\r\n\r\n # ensures the group is rejected\r\n c_data.is_accept[i] = False\r\n\r\n # determines the number of spikes\r\n n_spike_fix = [len(x) / data_fix['tExp'] for x in data_fix['tSpike']]\r\n n_spike_free = [len(x) / data_free['tExp'] for x in data_free['tSpike']]\r\n\r\n # calculates the relative spiking rates (note - ratios are coverted so that they are all > 1)\r\n r_spike = np.divide(repmat(n_spike_fix, data_free['nC'], 1).T,\r\n repmat(n_spike_free, data_fix['nC'], 1))\r\n r_spike[r_spike < 1] = 1 / r_spike[r_spike < 1]\r\n\r\n # calculates the pair-wise distances between the fixed/free probe depths\r\n d_depth = np.abs(np.subtract(repmat(data_fix['chDepth'], data_free['nC'], 1).T,\r\n repmat(data_free['chDepth'], data_fix['nC'], 1)))\r\n\r\n # determines the feasible fixed/free cluster groupings such that:\r\n # 1) the channel depth has to be <= d_max\r\n # 2) the 
relative spiking rates between clusters is <= r_max\r\n is_feas = np.logical_and(r_spike <= c_data.r_max, d_depth <= c_data.d_max)\r\n\r\n # determines the cluster matches from the old/new methods\r\n det_cluster_matches_old(c_data, is_feas, d_depth)\r\n det_cluster_matches_new(c_data, is_feas, d_depth, r_spike, w_prog)\r\n\r\n def calc_ccgram_types(self, calc_para, data):\r\n '''\r\n\r\n :return:\r\n '''\r\n\r\n # determines the indices of the experiment to be analysed\r\n if calc_para['calc_all_expt']:\r\n # case is all experiments are to be analysed\r\n i_expt = list(range(len(data)))\r\n else:\r\n # case is a single experiment is being analysed\r\n i_expt = [cf.get_expt_index(calc_para['calc_exp_name'], data)]\r\n\r\n # memory allocation\r\n d_copy = copy.deepcopy\r\n A, B, C = np.empty(len(i_expt), dtype=object), [[] for _ in range(5)], [[] for _ in range(4)]\r\n c_type, t_dur, t_event, ci_lo, ci_hi, ccG_T = d_copy(A), d_copy(A), d_copy(A), d_copy(A), d_copy(A), d_copy(A)\r\n\r\n #\r\n for i_ex in i_expt:\r\n # sets the experiment ID info based on the number of experiments being analysed\r\n if len(i_expt) == 1:\r\n # only one experiment is being analysed\r\n expt_id = None\r\n else:\r\n # multiple experiments are being analysed\r\n expt_id = [(i_ex+1), len(i_expt)]\r\n\r\n # retrieves the cluster information\r\n t_dur[i_ex], t_event[i_ex] = d_copy(C), d_copy(C)\r\n c_type[i_ex], ci_lo[i_ex], ci_hi[i_ex], ccG_T[i_ex] = d_copy(B), d_copy(B), d_copy(B), d_copy(B)\r\n ccG, ccG_xi, t_spike = data[i_ex]['ccGram'], data[i_ex]['ccGramXi'], data[i_ex]['tSpike']\r\n\r\n c_id = data[i_ex]['clustID']\r\n\r\n # runs the cc-gram type calculation function\r\n c_type0, t_dur[i_ex], t_event[i_ex], ci_hi0, ci_lo0, ccG_T0 = cfcn.calc_ccgram_types(\r\n ccG, ccG_xi, t_spike, calc_para=calc_para, expt_id=expt_id, w_prog=self.work_progress, c_id=c_id)\r\n\r\n # sets the final values into their respective groupings\r\n for i in range(5):\r\n # sets the final type values and lower/upper bound confidence interval signals\r\n if len(c_type0[i]):\r\n #\r\n c_type[i_ex][i] = np.vstack(c_type0[i])\r\n\r\n # sorts the values by the reference cluster index\r\n i_sort = np.lexsort((c_type[i_ex][i][:, 1], c_type[i_ex][i][:, 0]))\r\n c_type[i_ex][i] = c_type[i_ex][i][i_sort, :]\r\n\r\n # reorders the duration/timing of the events (if they exist)\r\n if i < len(t_dur[i_ex]):\r\n t_dur[i_ex][i] = np.array(t_dur[i_ex][i])[i_sort]\r\n t_event[i_ex][i] = np.array(t_event[i_ex][i])[i_sort]\r\n\r\n ci_lo[i_ex][i] = (np.vstack(ci_lo0[i]).T)[:, i_sort]\r\n ci_hi[i_ex][i] = (np.vstack(ci_hi0[i]).T)[:, i_sort]\r\n ccG_T[i_ex][i] = (np.vstack(ccG_T0[i]).T)[:, i_sort]\r\n\r\n # returns the data as a dictionary\r\n return {'c_type': c_type, 't_dur': t_dur, 't_event': t_event,\r\n 'ci_lo': ci_lo, 'ci_hi': ci_hi, 'ccG_T': ccG_T, 'calc_para': calc_para}\r\n\r\n def calc_shuffled_cluster_dist(self, calc_para, data):\r\n '''\r\n\r\n :return:\r\n '''\r\n\r\n # FINISH ME!\r\n pass\r\n\r\n ##########################################\r\n #### CLUSTER MATCHING FUNCTIONS ####\r\n ##########################################\r\n\r\n def calc_fix_free_correlation(self, data, calc_para, w_prog):\r\n '''\r\n\r\n :param data:\r\n :param plot_para:\r\n :param calc_para:\r\n :param w_prog:\r\n :return:\r\n '''\r\n\r\n # initialisations\r\n i_bin = ['5', '10'].index(calc_para['vel_bin'])\r\n tt_key = {'DARK1': 'Black', 'DARK': 'Black', 'LIGHT1': 'Uniform', 'LIGHT2': 'Uniform'}\r\n f_data, r_data, ff_corr = data.externd.free_data, data.rotation, 
data.comp.ff_corr\r\n n_bin = 2 * int(f_data.v_max / float(calc_para['vel_bin']))\r\n\r\n # determines matching experiment index and fix-to-free cell index arrays\r\n i_expt, f2f_map = cf.det_matching_fix_free_cells(data, apply_filter=False)\r\n\r\n # determines the global indices for each file\r\n nC = [len(x) for x in r_data.r_obj_kine.clust_ind[0]]\r\n ind_g = [np.arange(i0, i0 + n) for i0, n in zip(np.cumsum([0] + nC)[:-1], nC)]\r\n\r\n # memory allocation\r\n n_file, t_type = len(i_expt), f_data.t_type\r\n nan_bin = np.nan * np.ones(n_bin)\r\n ff_corr.sf_fix = np.empty((n_file, len(t_type)), dtype=object)\r\n ff_corr.sf_free = np.empty((n_file, len(t_type)), dtype=object)\r\n ff_corr.sf_corr = np.empty((n_file, len(t_type)), dtype=object)\r\n ff_corr.sf_corr_sh = np.empty((n_file, len(t_type)), dtype=object)\r\n ff_corr.sf_corr_sig = np.empty((n_file, len(t_type)), dtype=object)\r\n ff_corr.sf_grad = np.empty((n_file, len(t_type)), dtype=object)\r\n ff_corr.clust_id = np.empty(n_file, dtype=object)\r\n ff_corr.ind_g = np.empty(n_file, dtype=object)\r\n\r\n # sets the velocity spiking rates (depending on calculation type)\r\n if r_data.is_equal_time:\r\n # case is resampled spiking times\r\n vel_sf = dcopy(r_data.vel_sf_rs)\r\n else:\r\n # case is non-resampled spiking times\r\n vel_sf = dcopy(r_data.vel_sf)\r\n\r\n # loops through each external data file retrieving the spike frequency data and calculating correlations\r\n n_cell_tot, i_cell_tot = np.sum(np.array(nC)[i_expt]), 0\r\n for i_file in range(n_file):\r\n # initialisations for the current external data file\r\n ind_nw = ind_g[i_expt[i_file]]\r\n i_f2f = f2f_map[i_file][:, 1]\r\n s_freq = dcopy(f_data.s_freq[i_file][i_bin, :])\r\n\r\n # retrieves the spiking frequency data between the matched fixed/free cells for the current experiment\r\n for i_tt, tt in enumerate(t_type):\r\n # sets the fixed/free spiking frequency values\r\n ff_corr.sf_fix[i_file, i_tt] = np.nanmean(vel_sf[tt_key[tt]][:, :, ind_nw], axis=0).T\r\n ff_corr.sf_free[i_file, i_tt] = np.vstack([s_freq[i_tt][ii] if ii >= 0 else nan_bin for ii in i_f2f])\r\n\r\n # sets the cluster ID values\r\n is_ok = i_f2f >= 0\r\n i_expt_fix = cf.get_global_expt_index(data, data.comp.data[i_expt[i_file]])\r\n fix_clust_id = np.array(data._cluster[i_expt_fix]['clustID'])[is_ok]\r\n free_clust_id = np.array(data.externd.free_data.cell_id[i_file])[f2f_map[i_file][is_ok, 1]]\r\n ff_corr.clust_id[i_file] = np.vstack((fix_clust_id, free_clust_id)).T\r\n ff_corr.ind_g[i_file] = ind_nw\r\n\r\n # removes any spiking frequency data for where there is no matching data\r\n cfcn.calc_shuffled_sf_corr(ff_corr, i_file, calc_para, [i_cell_tot, n_cell_tot], w_prog)\r\n\r\n # increments the progressbar counter\r\n i_cell_tot += len(ind_nw)\r\n\r\n # sets the parameter values\r\n ff_corr.vel_bin = int(calc_para['vel_bin'])\r\n ff_corr.n_shuffle_corr = calc_para['n_shuffle']\r\n ff_corr.split_vel = int(calc_para['split_vel'])\r\n ff_corr.is_set = True\r\n\r\n ######################################\r\n #### EYE TRACKING FUNCTIONS ####\r\n ######################################\r\n\r\n def calc_eye_track_metrics(self, data, calc_para, w_prog):\r\n '''\r\n\r\n :param data:\r\n :param calc_para:\r\n :param w_prog:\r\n :return:\r\n '''\r\n\r\n def calc_position_diff(p0, dt, calc_para):\r\n '''\r\n\r\n :param p:\r\n :param dt:\r\n :param calc_para:\r\n :return:\r\n '''\r\n\r\n # retrieves the position values and calculates the rolling difference\r\n is_ok, n_frm = ~p0.isna(), p0.shape[0]\r\n\r\n # 
calculates the mid-point derivative values\r\n dp0 = p0.rolling(window=3, center=True).apply(lambda x: (x[2] - x[0]) / 2)\r\n\r\n # calculates the end-point derivative values (for the first/last valid values)\r\n i_ok = np.where(is_ok)[0]\r\n i0, i1 = i_ok[0], i_ok[-1]\r\n dp0.iloc[i0] = sum(np.multiply([-3, 4, -1], np.array(p0.iloc[i0:i0+3]).astype(float))) / 2\r\n dp0.iloc[i1] = sum(np.multiply([ 3, -4, 1], np.array(p0.iloc[i1-3:i1]).astype(float))) / 2\r\n\r\n # calculates the rolling median\r\n if calc_para['use_med_filt']:\r\n dp0_med = dp0.rolling(window=3, center=True).median()\r\n else:\r\n dp0_med = dp0\r\n\r\n # converts pd dataframes to float np-arrays (sets any NaN derivative values to zero)\r\n p = np.array(p0).astype(float)\r\n dp = np.array(dp0_med).astype(float) / (1000. * dt)\r\n dp[~is_ok] = 0\r\n\r\n # removes any outliers (regions where the derivative is greater than dp_max)\r\n i_grp = cf.get_index_groups(np.abs(dp) > calc_para['dp_max'])\r\n for ig in cf.expand_index_groups(i_grp, 2, n_frm):\r\n dp[ig], p[ig] = 0, np.nan\r\n\r\n # removes the baseline component (if required)\r\n if calc_para['rmv_baseline']:\r\n w_frm = 70 / n_frm\r\n dp_bl = lowess(dp, np.arange(n_frm), w_frm, return_sorted=False)\r\n dp -= dp_bl\r\n\r\n # returns the derivative array\r\n return dp - np.nanmean(dp), p\r\n\r\n def det_movement_events(p_pos, dp_pos, calc_para, n_pre, n_post, t_frm):\r\n '''\r\n\r\n :param dp_pos:\r\n :return:\r\n '''\r\n\r\n def get_event_sig_seg(p_pos, i_grp0, n_pre, n_post, n_frm):\r\n '''\r\n\r\n :param p_pos:\r\n :param i_grp0:\r\n :param n_frm:\r\n :return:\r\n '''\r\n\r\n def get_sig_seg(y_sig, i_grp0, n_pp, n_frm=None):\r\n '''\r\n\r\n :param dp_pos:\r\n :param i_grp0:\r\n :param n_frm:\r\n :return:\r\n '''\r\n\r\n if n_frm is None:\r\n # case is the signal values preceding the onset point\r\n return list(y_sig[max(0, (i_grp0 - n_pp)):(i_grp0 + 1)])\r\n else:\r\n # case is the signal values proceding the onset point\r\n return list(y_sig[(i_grp0 + 1):min(n_frm - 1, i_grp0 + (1 + n_pp))])\r\n\r\n return np.array(get_sig_seg(p_pos, i_grp0, n_pre) + get_sig_seg(p_pos, i_grp0, n_post, n_frm))\r\n\r\n # initialisations\r\n n_frm, i_ofs = len(t_frm), 1\r\n t_evnt, y_evnt = [], []\r\n n_sd, dp_max, n_event_win = calc_para['n_sd'], calc_para['dp_max'], n_pre + n_post + 1\r\n\r\n # thresholds the position derivative values\r\n b_arr, sgn_arr = np.abs(dp_pos) >= np.nanstd(dp_pos) * n_sd, np.sign(dp_pos)\r\n if np.any(b_arr):\r\n # if there are any derivative values greater than threshold, then determine the index groups of the\r\n # continguous points that are greater than threshold. 
from this determine the max absolute amplitudes within\r\n                # these groups and the start indices of each group\r\n                i_grp = cf.get_index_groups(b_arr)\r\n                grp_mx, i_grp0 = [np.max(np.abs(dp_pos[x])) for x in i_grp], np.array([(x[0] - i_ofs) for x in i_grp])\r\n\r\n                # determines the groups that are within the event window (and have a position derivative less than the\r\n                # maximum derivative parameter value, dp_max)\r\n                di_grp0 = np.diff(i_grp0)\r\n                is_ok = np.array([(x >= n_pre) and (x <= (n_frm - n_post)) for x in i_grp0])\r\n                for ig in np.where(di_grp0 < n_event_win)[0]:\r\n                    if sgn_arr[i_grp0[ig]] * sgn_arr[i_grp0[ig + 1]] < 0:\r\n                        # if the thresholded groups have differing derivative signs, then ignore both groups\r\n                        is_ok[ig:ig+2] = False\r\n                    else:\r\n                        # otherwise, remove the thresholded group with the lower amplitude peak\r\n                        is_ok[ig + (grp_mx[ig] > grp_mx[ig + 1])] = False\r\n\r\n                # memory allocation\r\n                n_evnt = len(is_ok)\r\n                t_evnt0, y_evnt0 = np.zeros(n_evnt), np.zeros((n_evnt, n_event_win))\r\n\r\n                # removes the ignored contiguous groups\r\n                for i in range(n_evnt):\r\n                    if is_ok[i]:\r\n                        y_evnt_nw = get_event_sig_seg(p_pos, i_grp0[i], n_pre, n_post, n_frm)\r\n                        if not np.any(np.isnan(y_evnt_nw)):\r\n                            y_evnt0[i, :], t_evnt0[i] = y_evnt_nw, t_frm[i_grp0[i]]\r\n                        else:\r\n                            is_ok[i] = False\r\n\r\n                # removes the events flagged as invalid\r\n                t_evnt0, y_evnt0 = t_evnt0[is_ok], y_evnt0[is_ok]\r\n\r\n                # appends the time stamps of the events for both eye movement types\r\n                i_sgn = np.array([int(sgn_arr[x + i_ofs] > 0) for x in i_grp0[is_ok]])\r\n                t_evnt.append([t_evnt0[i_sgn == i] for i in range(2)])\r\n\r\n                # sets the sub-signal/mean sub-signal values for both eye movement types\r\n                y_evnt_tmp = [y_evnt0[i_sgn == i, :] for i in range(2)]\r\n                y_evnt.append([np.subtract(x, x[:, n_pre][:, None]) if len(x) else [] for x in y_evnt_tmp])\r\n\r\n            else:\r\n                # if no event, then set empty time/signal events for both types\r\n                t_evnt.append([[], []])\r\n                y_evnt.append([[], []])\r\n\r\n            # returns the event time/signal arrays\r\n            return t_evnt, y_evnt\r\n\r\n        # retrieves the eye-tracking class object\r\n        et_class = data.externd.eye_track\r\n        n_file = len(et_class.et_data)\r\n\r\n        # sets the pre/post event duration\r\n        n_pre, n_post = calc_para['n_pre'], calc_para['n_post']\r\n\r\n        # memory allocation\r\n        dt = 1 / et_class.fps\r\n        A = np.empty(n_file, dtype=object)\r\n        et_class.t_evnt, et_class.y_evnt = dcopy(A), dcopy(A)\r\n        et_class.t_type = list(np.unique(cf.flat_list([x.t_type for x in et_class.et_data])))\r\n\r\n        # loops through each of the files calculating the eye-movement events\r\n        for i_file, et_d in enumerate(et_class.et_data):\r\n            # updates the progress bar string\r\n            w_str = 'Detecting Movement Events (Expt {0} of {1})'.format(i_file + 1, n_file)\r\n\r\n            # memory allocation\r\n            n_tt = len(et_d.t_type)\r\n            B = np.empty(len(et_class.t_type), dtype=object)\r\n            et_class.t_evnt[i_file], et_class.y_evnt[i_file] = dcopy(B), dcopy(B)\r\n\r\n            # loops through each of the trial types calculating the eye-movement events\r\n            for i_tt in range(n_tt):\r\n                # updates the progress-bar\r\n                w_prog.emit(w_str, 100. * ((i_file * n_tt + i_tt) / (n_tt * n_file)))
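\r\n\r\n                # NOTE (editor sketch, not original code): each trial type below runs the same\r\n                # pipeline: optional 3-frame rolling median -> position derivative -> event\r\n                # detection at n_sd standard deviations (see det_movement_events above), i.e.:\r\n                #     b_arr = np.abs(dp) >= np.nanstd(dp) * calc_para['n_sd']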
\r\n\r\n                # retrieves the position values\r\n                p0 = dcopy(et_d.p_pos[i_tt])\r\n                if calc_para['use_med_filt']:\r\n                    # calculates the rolling median (if required)\r\n                    p0 = p0.rolling(window=3, center=True).median()\r\n\r\n                # calculates the position difference values\r\n                dp, p = calc_position_diff(p0, dt, calc_para)\r\n\r\n                # calculates the events/signal sub-segments for all events\r\n                j_tt = et_class.t_type.index(et_class.et_data[i_file].t_type[i_tt])\r\n                t_frm = np.arange(len(p)) / et_class.fps\r\n                tt, yy = det_movement_events(p, dp, calc_para, n_pre, n_post, t_frm)\r\n                et_class.t_evnt[i_file][j_tt], et_class.y_evnt[i_file][j_tt] = tt[0], yy[0]\r\n\r\n        #######################################\r\n        ####    HOUSE-KEEPING EXERCISES    ####\r\n        #######################################\r\n\r\n        # updates the calculation parameters\r\n        et_class.use_med_filt = calc_para['use_med_filt']\r\n        et_class.rmv_baseline = calc_para['rmv_baseline']\r\n        et_class.dp_max = calc_para['dp_max']\r\n        et_class.n_sd = calc_para['n_sd']\r\n        et_class.n_pre = calc_para['n_pre']\r\n        et_class.n_post = calc_para['n_post']\r\n        et_class.is_set = True\r\n\r\n    def calc_eye_track_corr(self, data, calc_para, w_prog):\r\n        '''\r\n\r\n        :param data:\r\n        :param calc_para:\r\n        :param w_prog:\r\n        :return:\r\n        '''\r\n\r\n        def get_trial_group_start_time(r_info, tt_c0):\r\n            '''\r\n\r\n            :param r_info:\r\n            :param tt_c0:\r\n            :return:\r\n            '''\r\n\r\n            def get_expt_time_span(ind0, i_type):\r\n                '''\r\n\r\n                :param ind0:\r\n                :param i_type:\r\n                :return:\r\n                '''\r\n\r\n                if i_type == 0:\r\n                    # returns the first trial index\r\n                    return ind0[0]\r\n                else:\r\n                    # determines the 2nd order difference in the trial start times\r\n                    dind0 = np.zeros(len(ind0), dtype=int)\r\n                    dind0[2:] = np.diff(ind0, 2)\r\n\r\n                    # determines the start of the 2nd trial grouping (the large jump in start times)\r\n                    i_diff = np.where(np.abs(dind0) > 1e10)[0]\r\n                    return ind0[i_diff[0]]\r\n\r\n            # sets the trial type (removes any extra indices at the end of the trial type string)\r\n            i_type = int(tt_c0[-1] == '2')\r\n            tt = tt_c0 if (i_type == 0) else tt_c0[:-1]\r\n\r\n            # retrieves the start time of the trial grouping\r\n            return get_expt_time_span(r_info['wfm_para'][tt]['ind0'], i_type)\r\n\r\n        def get_grouping_spike_times(t_sp, t_exp, t0):\r\n            '''\r\n\r\n            :param t_sp:\r\n            :param t_exp:\r\n            :param t0:\r\n            :return:\r\n            '''\r\n\r\n            # memory allocation\r\n            n_cell = len(t_sp)\r\n            t_sp_h = np.zeros((n_cell, len(t_exp)))\r\n\r\n            # calculates the time spiking histograms (for each cell) downsampled to that of the eye-tracking analysis\r\n            for i_cell in range(n_cell):\r\n                # retrieves the spike times for the current cell\r\n                t_sp_tmp = t_sp[i_cell] / 1000\r\n                t_sp_grp = t_sp_tmp[np.logical_and(t_sp_tmp >= t0, t_sp_tmp <= t0 + t_exp[-1])] - t0\r\n\r\n                # calculates the spike time histogram (time bins are set for the eye-tracking analysis)\r\n                t_sp_h[i_cell, 1:] = np.histogram(t_sp_grp, bins=t_exp)[0]\r\n\r\n            # returns the histogram arrays\r\n            return t_sp_h\r\n\r\n        def get_event_spike_times(t_sp_h, t_evnt, dt_et, calc_para):\r\n            '''\r\n\r\n            :param t_sp_h:\r\n            :param t_evnt:\r\n            :param dt_et:\r\n            :param calc_para:\r\n            :return:\r\n            '''\r\n\r\n            # memory allocation\r\n            n_cell, n_frm = np.shape(t_sp_h)\r\n            sp_evnt = np.empty(len(t_evnt), dtype=object)\r\n\r\n            # sets the pre/post event duration\r\n            n_pre, n_post = calc_para['n_pre'], calc_para['n_post']\r\n            n_pts = n_pre + n_post + 1\r\n\r\n            # retrieves the spike time events for each eye-movement type\r\n            for i in range(len(t_evnt)):\r\n                # sets the indices of the events (ensures all frames are within that of the eye-tracking analysis)\r\n                
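# NOTE (editor sketch, not original code): onset times in t_evnt are in seconds,\r\n                # so dividing by dt_et (the frame period, 1 / fps) recovers frame indices; e.g.\r\n                # an event at t = 2.0s with fps = 40 maps to frame round(2.0 * 40) = 80\r\n                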
i_evnt = np.round(t_evnt[i] / dt_et).astype(int)\r\n i_evnt = i_evnt[np.logical_and((i_evnt - n_pre) >= 0, (i_evnt + n_post) < n_frm)]\r\n\r\n # memory allocation for eye-movement type\r\n n_evnt = len(t_evnt[i])\r\n sp_evnt[i] = np.zeros((n_evnt, n_pts, n_cell))\r\n\r\n # retrieves the spike time histogram values over each cell/eye-movement event\r\n for j in range(n_evnt):\r\n i_rng = np.arange(i_evnt[j] - n_pre, i_evnt[j] + n_post + 1)\r\n sp_evnt[i][j, :, :] = t_sp_h[:, i_rng].T\r\n\r\n # returns the array\r\n return sp_evnt\r\n\r\n # initialisations and memory allocation\r\n et_class = data.externd.eye_track\r\n exp_file = [cf.extract_file_name(x['expFile']) for x in data.cluster]\r\n n_exp, dt_et = et_class.n_file, 1. / et_class.fps\r\n\r\n # memory allocation\r\n A = np.empty(n_exp, dtype=object)\r\n t_sp_h, sp_evnt, y_corr, p_corr = dcopy(A), dcopy(A), dcopy(A), dcopy(A)\r\n\r\n # loops through each experiment calculating the spiking rate/eye movement correlations\r\n for i_exp, et_d in enumerate(et_class.et_data):\r\n # initialisations\r\n n_tt, pw0 = len(et_d.t_type), 1 / n_exp\r\n\r\n # memory allocation\r\n B = np.empty(n_tt, dtype=object)\r\n t_sp_h[i_exp], sp_evnt[i_exp], y_corr[i_exp], p_corr[i_exp] = dcopy(B), dcopy(B), dcopy(B), dcopy(B)\r\n\r\n # retrieves the rotation info of the corresponding expt\r\n c = data._cluster[cf.det_likely_filename_match(exp_file, et_class.exp_name[i_exp])]\r\n r_info, dt_c, t_sp_c = c['rotInfo'], 1. / c['sFreq'], c['tSpike']\r\n\r\n # loops through each trial type calculating the correlations\r\n for i_tt, tt in enumerate(et_d.t_type):\r\n # updates the progressbar\r\n tt_c = tt.capitalize()\r\n w_str = 'Calculating Correlations (Expt {0}/{1} - {2})'.format(i_tt + 1, n_tt, tt_c)\r\n w_prog.emit(w_str, 100. 
* (pw0 + (i_tt / n_tt)))\r\n\r\n # sets the time vector over the eye-tracking analysis\r\n j_tt = et_class.t_type.index(et_class.et_data[i_exp].t_type[i_tt])\r\n t_exp = np.arange(len(et_d.p_pos[j_tt])) * dt_et\r\n\r\n # retrieves the spike times over the duration of the eye tracking analysis\r\n t0 = get_trial_group_start_time(r_info, tt_c) * dt_c\r\n t_sp_h[i_exp][j_tt] = get_grouping_spike_times(t_sp_c, t_exp, t0)\r\n\r\n # retrieves the spike times traces surrounding the times of the eye movement\r\n t_evnt = et_class.t_evnt[i_exp][j_tt]\r\n sp_evnt[i_exp][j_tt] = get_event_spike_times(t_sp_h[i_exp][j_tt], t_evnt, dt_et, calc_para)\r\n\r\n # calculates the correlations between each cell and the eye movement events\r\n y_evnt = et_class.y_evnt[i_exp][j_tt]\r\n y_corr[i_exp][j_tt], p_corr[i_exp][j_tt] = cfcn.calc_event_correlation(y_evnt, sp_evnt[i_exp][j_tt])\r\n\r\n #######################################\r\n #### HOUSE-KEEPING EXERCISES ####\r\n #######################################\r\n\r\n # sets the arrays into the eye-tracking class object\r\n data.externd.eye_track.t_sp_h = t_sp_h\r\n data.externd.eye_track.sp_evnt = sp_evnt\r\n data.externd.eye_track.y_corr = y_corr\r\n data.externd.eye_track.p_corr = p_corr\r\n\r\n # final update of the progressbar\r\n w_prog.emit('Correlation Calculations Complete!', 100.)\r\n\r\n ######################################\r\n #### AHV ANALYSIS FUNCTIONS ####\r\n ######################################\r\n\r\n def calc_corr_fit_para(self, data, plot_para, calc_para, w_prog):\r\n '''\r\n\r\n :param data:\r\n :param plot_para:\r\n :param calc_para:\r\n :param w_prog:\r\n :return:\r\n '''\r\n\r\n def calc_sf_lin_para(xi, sf, peak_hz, err_type):\r\n '''\r\n\r\n :param sf:\r\n :return:\r\n '''\r\n\r\n # memory allocation\r\n n_cell = np.shape(sf)[0]\r\n sf_slope, sf_int = np.zeros(n_cell), np.zeros(n_cell)\r\n sf_err = np.zeros(n_cell)\r\n\r\n # calculates the linear parameters for each cell\r\n for i_cell in range(n_cell):\r\n # slope/intercept calculation\r\n sf_calc = sf[i_cell]\r\n l_fit = linregress(xi, sf_calc / peak_hz[i_cell])\r\n sf_slope[i_cell], sf_int[i_cell] = l_fit.slope, l_fit.intercept\r\n\r\n # error calculation\r\n dsf_calc = (sf_calc - sf_calc[0])\r\n dsf_max = np.max(np.abs(dsf_calc))\r\n\r\n if (dsf_max > 0) and (err_type is not None):\r\n if err_type == 'Covariance':\r\n _, pcov = curve_fit(lin_func, xi, dsf_calc / dsf_max)\r\n sf_err[i_cell] = np.sqrt(pcov[0][0])\r\n\r\n elif err_type == 'Sum-of-Squares':\r\n p_fit_err = np.polyfit(xi, dsf_calc / dsf_max, 1, full=True)\r\n sf_err[i_cell] = p_fit_err[1][0]\r\n\r\n elif err_type == 'Standard Error':\r\n l_fit_err = linregress(xi, dsf_calc / dsf_max)\r\n sf_err[i_cell] = l_fit_err.stderr\r\n\r\n # returns the array\r\n return sf_slope, sf_int, sf_err\r\n\r\n # appends the fields to the rotation class object\r\n r_data = data.rotation\r\n if not hasattr(r_data, 'sf_fix_slope'):\r\n r_data.sf_fix_slope = None\r\n r_data.sf_fix_int = None\r\n r_data.sf_fix_err = None\r\n r_data.peak_hz_fix = None\r\n\r\n # applies the rotation filter to the dataset\r\n r_obj = RotationFilteredData(data, plot_para['rot_filt'], None, None, True, 'Whole Experiment', False)\r\n n_filt = r_obj.n_filt\r\n\r\n # determines the common cell indices for each filter types\r\n t_type_full = [x['t_type'][0] for x in r_obj.rot_filt_tot]\r\n i_cell_b, _ = cfcn.get_common_filtered_cell_indices(data, r_obj, t_type_full, True)\r\n\r\n # retrieves the spiking frequencies\r\n r_data = data.rotation\r\n sf = 
dcopy(r_data.vel_sf_mean)\r\n err_type = None if 'err_type' not in calc_para else calc_para['err_type']\r\n norm_sf = False if 'norm_sf' not in calc_para else calc_para['norm_sf']\r\n\r\n # sets up the velocity bin values\r\n v_max, v_bin = 80, r_data.vel_bin_corr\r\n xi_bin = np.arange(-v_max + v_bin / 2, v_max, v_bin)\r\n is_pos = xi_bin > 0\r\n n_bin = sum(is_pos)\r\n\r\n # memory allocation\r\n A = np.empty((2, n_filt), dtype=object)\r\n sf_slope, sf_int, sf_err, peak_hz = dcopy(A), dcopy(A), dcopy(A), np.empty(n_filt, dtype=object)\r\n\r\n if norm_sf:\r\n # for each filter type, calculate the linear fit parameters\r\n dsf_filt = np.empty(n_filt, dtype=object)\r\n peak_hz_filt = np.empty(n_filt, dtype=object)\r\n for i_filt, tt in enumerate(t_type_full):\r\n # calculates the slope/intercept values\r\n sf_filt = sf[tt][i_cell_b[i_filt], :]\r\n\r\n #\r\n sf_comb = [np.vstack(sf_filt[:, 0])[:, ::-1], np.vstack(sf_filt[:, 1])]\r\n dsf_filt[i_filt] = [sf - repmat(sf[:, 0], n_bin, 1).T for sf in sf_comb]\r\n\r\n # determines the peak frequency\r\n peak_hz_filt[i_filt] = np.max(np.abs(np.hstack((dsf_filt[i_filt][0], dsf_filt[i_filt][1]))), axis=1)\r\n\r\n # determines the peak spiking frequency across all conditions\r\n peak_hz = np.max(np.abs(np.vstack(peak_hz_filt)), axis=0)\r\n\r\n # for each filter type, calculate the linear fit parameters\r\n for i_filt, tt in enumerate(t_type_full):\r\n # updates the progress bar\r\n w_str = 'Linear Fit Calculations ({0})'.format(tt)\r\n w_prog.emit(w_str, 100. * i_filt / len(t_type_full))\r\n\r\n if norm_sf:\r\n # sets the positive/negative spiking frequencies\r\n sf_neg, sf_pos = dsf_filt[i_filt][0], dsf_filt[i_filt][1]\r\n\r\n else:\r\n # calculates the slope/intercept values\r\n sf_filt = sf[tt][i_cell_b[i_filt], :]\r\n\r\n # sets the positive/negative spiking frequencies\r\n sf_neg, sf_pos = np.vstack(sf_filt[:, 0])[:, ::-1], np.vstack(sf_filt[:, 1])\r\n peak_hz = np.ones(np.shape(sf_neg)[0])\r\n\r\n # calculates the spiking freuency slope, intercept and errors\r\n sf_slope[0, i_filt], sf_int[0, i_filt], sf_err[0, i_filt] = \\\r\n calc_sf_lin_para(xi_bin[is_pos], sf_neg, peak_hz, err_type)\r\n sf_slope[1, i_filt], sf_int[1, i_filt], sf_err[1, i_filt] = \\\r\n calc_sf_lin_para(xi_bin[is_pos], sf_pos, peak_hz, err_type)\r\n\r\n\r\n #######################################\r\n #### HOUSE-KEEPING EXERCISES ####\r\n #######################################\r\n\r\n # sets the class object fields\r\n r_data.sf_fix_slope = sf_slope\r\n r_data.sf_fix_int = sf_int\r\n r_data.sf_fix_err = sf_err\r\n r_data.r_obj_sf = r_obj\r\n r_data.peak_hz_fix = peak_hz\r\n\r\n #######################################\r\n #### FREELY MOVING FUNCTIONS ####\r\n #######################################\r\n\r\n def calc_cell_fit_residual(self, data, calc_para, w_prog):\r\n '''\r\n\r\n :param data:\r\n :param calc_para:\r\n :param w_prog:\r\n :return:\r\n '''\r\n\r\n def calc_cell_res_gain(xi, sf_split):\r\n '''\r\n\r\n :param sf_cell:\r\n :param xi:\r\n :param is_pos:\r\n :return:\r\n '''\r\n\r\n def calc_sf_res(xi, sf):\r\n '''\r\n\r\n :param xi:\r\n :param sf:\r\n :return:\r\n '''\r\n\r\n # fits a linear equation to the spiking frequencies\r\n l_fit = LinearRegression(fit_intercept=False).fit(xi, sf)\r\n # p_fit = np.polyfit(xi, sf, 1)\r\n\r\n # calculates the absolute residual values (normalising by the maximum spiking rate)\r\n return np.abs(l_fit.predict(xi) - sf)\r\n\r\n # memory allocation\r\n n_type = np.shape(sf_split)[1]\r\n sf_gain, sf_res = np.empty(n_type, 
dtype=object), np.empty(n_type, dtype=object)\r\n\r\n # calculates the overall spiking frequency maximum\r\n # sf_max = np.max([[np.max(y) for y in x] for x in sf_split])\r\n # if sf_max == 0:\r\n sf_max = np.max([[np.max(np.abs(y)) for y in x] for x in sf_split])\r\n\r\n # calculates/sets the residual/gain values for each direction/condition type\r\n for i_type in range(n_type):\r\n sf_gain[i_type] = np.array(cf.flat_list(sf_split[:, i_type]))\r\n sf_res[i_type] = np.array([calc_sf_res(xi, sf / np.max(np.abs(sf))) for sf in sf_split[:, i_type]]).flatten()\r\n\r\n # calculates the normalised absolute residuals from the linear fits to the spiking frequencies\r\n return sf_gain, sf_res, sf_max\r\n\r\n # initialisations\r\n f_data = data.externd.free_data\r\n\r\n # ensures the freely moving class calculation fields have been set (initialies them if they have not)\r\n if not hasattr(f_data, 'sf_gain'):\r\n setattr(f_data, 'sf_gain', None)\r\n setattr(f_data, 'sf_res', None)\r\n setattr(f_data, 'sf_vbin', None)\r\n setattr(f_data, 'sf_tt', None)\r\n setattr(f_data, 'sf_max', None)\r\n\r\n # initialisations\r\n t_type = ['DARK', calc_para['lcond_type']]\r\n v_bin, v_max = int(calc_para['vel_bin']), 80.\r\n i_bin = [5, 10].index(v_bin)\r\n i_tt = [list(f_data.t_type).index(tt) for tt in t_type]\r\n\r\n # sets up the velocity bin array\r\n xi = np.arange(-v_max + v_bin / 2, v_max, v_bin)\r\n\r\n # memory allocation\r\n n_type = len(t_type)\r\n A = np.empty(f_data.n_file, dtype=object)\r\n sf_res, sf_gain, sf_max = dcopy(A), dcopy(A), dcopy(A)\r\n\r\n ##########################################\r\n #### GAIN/RESIDUAL CALCULATIONS ####\r\n ##########################################\r\n\r\n # memory allocation and other initialisations\r\n is_pos = xi > 0\r\n n_bin, n_dir = int(len(xi) / 2), 2\r\n\r\n # retrieves the spiking frequencies for the velocity bin size\r\n sf_bin = [sf[i_bin] for sf in f_data.s_freq]\r\n\r\n # calculates the gain/residuals for each file\r\n for i_file in range(f_data.n_file):\r\n # updates the waitbar progress\r\n w_str = 'Gain/Residual Calculations ({0} of {1})'.format(i_file + 1, f_data.n_file)\r\n w_prog.emit(w_str, 100 * (i_file / f_data.n_file))\r\n\r\n # memory allocation\r\n n_cell = np.shape(sf_bin[i_file][0])[0]\r\n B = np.empty((n_cell, n_type), dtype=object)\r\n sf_res[i_file], sf_gain[i_file], sf_max[i_file] = dcopy(B), dcopy(B), np.zeros(n_cell)\r\n\r\n # calculates the gain/residuals for each cell/condition type\r\n for i_cell in range(n_cell):\r\n # memory allocation\r\n sf_split = np.empty((n_dir, n_type), dtype=object)\r\n\r\n # splits the spiking frequency into positive/negative velocities for each condition type\r\n for i_type in range(n_type):\r\n # retrieves the spiking frequency for the current cell/condition type and separates\r\n sf_cell = sf_bin[i_file][i_tt[i_type]][i_cell]\r\n sf_split0 = [sf_cell[~is_pos][::-1], sf_cell[is_pos]]\r\n\r\n # removes the first time bin from each direction\r\n for i_dir in range(n_dir):\r\n sf_split[i_dir, i_type] = sf_split0[i_dir] - sf_split0[i_dir][0]\r\n\r\n # calculates the gain/residual for condition type\r\n sf_gain[i_file][i_cell, :], sf_res[i_file][i_cell, :], sf_max[i_file][i_cell] = \\\r\n calc_cell_res_gain(xi[is_pos].reshape(-1, 1), sf_split)\r\n\r\n #######################################\r\n #### HOUSE-KEEPING EXERCISES ####\r\n #######################################\r\n\r\n # sets the class object fields\r\n f_data.sf_gain = sf_gain\r\n f_data.sf_res = sf_res\r\n f_data.sf_vbin = 
int(calc_para['vel_bin'])\r\n f_data.sf_tt = t_type\r\n f_data.sf_max = sf_max\r\n\r\n #########################################\r\n #### ROTATION LDA CALCULATIONS ####\r\n #########################################\r\n\r\n def run_temporal_lda(self, data, calc_para, r_filt, i_expt, i_cell, n_trial_max):\r\n '''\r\n\r\n :param data:\r\n :param calc_para:\r\n :param r_filt:\r\n :param i_expt:\r\n :param i_cell:\r\n :param n_trial_max:\r\n :return:\r\n '''\r\n\r\n # initialisations and memory allocation\r\n d_data, w_prog = data.discrim.temp, self.work_progress\r\n d_data.lda, d_data.y_acc = np.empty(2, dtype=object), np.empty(2, dtype=object)\r\n\r\n # retrieves the rotation phase duration\r\n r_obj = RotationFilteredData(data, r_filt, None, None, True, 'Whole Experiment', False)\r\n t_phase = r_obj.t_phase[0][0]\r\n\r\n ################################################\r\n #### DIFFERING PHASE LDA CALCULATIONS ####\r\n ################################################\r\n\r\n # creates a copy of the calculation parameters for the differing phase duration LDA calculations\r\n calc_para_phs = dcopy(calc_para)\r\n calc_para_phs['t_ofs_rot'] = 0\r\n\r\n # memory allocation\r\n dt_phs = np.arange(calc_para['dt_phase'], t_phase, calc_para['dt_phase'])\r\n d_data.lda[0], d_data.y_acc[0] = np.empty(len(dt_phs), dtype=object), np.empty(len(dt_phs), dtype=object)\r\n\r\n # loops through each of the phase discretisations calculating the LDA calculations\r\n n_phs = len(dt_phs)\r\n for i_phs in range(n_phs):\r\n # updates the progress bar\r\n w_str = 'Duration LDA Calculations (Group {0} of {1})'.format(i_phs + 1, n_phs)\r\n w_prog.emit(w_str, 50. * ((i_phs + 1)/ n_phs))\r\n\r\n # updates the phase duration parameter\r\n calc_para_phs['t_phase_rot'] = dt_phs[i_phs]\r\n\r\n # runs the rotation analysis for the current configuration\r\n result = cfcn.run_rot_lda(data, calc_para_phs, r_filt, i_expt, i_cell, n_trial_max)\r\n if isinstance(result, bool):\r\n # if there was an error, then return a false flag value\r\n return False\r\n else:\r\n # otherwise, store the lda/accuracy values\r\n d_data.lda[0][i_phs], d_data.y_acc[0][i_phs] = result[0], result[1]\r\n\r\n #################################################\r\n #### DIFFERING OFFSET LDA CALCULATIONS ####\r\n #################################################\r\n\r\n # creates a copy of the calculation parameters for the differing offset LDA calculations\r\n calc_para_ofs = dcopy(calc_para)\r\n calc_para_ofs['t_phase_rot'] = calc_para['t_phase_const']\r\n\r\n # sets the differing phase/offset value arrays\r\n dt_ofs = np.arange(0., t_phase - calc_para['t_phase_const'], calc_para['t_phase_const'])\r\n d_data.lda[1], d_data.y_acc[1] = np.empty(len(dt_ofs), dtype=object), np.empty(len(dt_ofs), dtype=object)\r\n\r\n # loops through each of the phase discretisations calculating the LDA calculations\r\n n_ofs = len(dt_ofs)\r\n for i_ofs in range(n_ofs):\r\n # updates the progress bar\r\n w_str = 'Offset LDA Calculations (Group {0} of {1})'.format(i_ofs + 1, n_ofs)\r\n w_prog.emit(w_str, 50. 
* (1 + ((i_ofs + 1) / n_ofs)))\r\n\r\n # updates the phase duration parameter\r\n calc_para_ofs['t_ofs_rot'] = dt_ofs[i_ofs]\r\n\r\n # runs the rotation analysis for the current configuration\r\n result = cfcn.run_rot_lda(data, calc_para_ofs, r_filt, i_expt, i_cell, n_trial_max)\r\n if isinstance(result, bool):\r\n # if there was an error, then return a false flag value\r\n return False\r\n else:\r\n # otherwise, store the lda/accuracy values\r\n d_data.lda[1][i_ofs], d_data.y_acc[1][i_ofs] = result[0], result[1]\r\n\r\n #######################################\r\n #### HOUSE KEEPING EXERCISES ####\r\n #######################################\r\n\r\n # retrieves the LDA solver parameter fields\r\n lda_para = calc_para['lda_para']\r\n\r\n # sets the solver parameters\r\n d_data.lda = 1\r\n d_data.exp_name = result[2]\r\n d_data.i_expt = i_expt\r\n d_data.i_cell = i_cell\r\n cfcn.set_lda_para(d_data, lda_para, r_filt, n_trial_max)\r\n d_data.lda_trial_type = cfcn.get_glob_para('lda_trial_type')\r\n\r\n # sets the other calculation parameters\r\n d_data.dt_phs = calc_para['dt_phase']\r\n d_data.dt_ofs = calc_para['dt_ofs']\r\n d_data.phs_const = calc_para['t_phase_const']\r\n\r\n # sets the other variables/parameters of interest\r\n d_data.xi_phs = dt_phs\r\n d_data.xi_ofs = dt_ofs\r\n\r\n # returns a true value indicating the calculations were successful\r\n return True\r\n\r\n def run_shuffled_lda(self, data, calc_para, r_filt, i_expt, i_cell, n_trial_max):\r\n '''\r\n\r\n :param data:\r\n :param calc_para:\r\n :param r_filt:00\r\n :param i_expt:\r\n :param i_cell:\r\n :param n_trial_max:\r\n :return:\r\n '''\r\n\r\n # initialisations and memory allocation\r\n d_data, w_prog = data.discrim.shuffle, self.work_progress\r\n if d_data.lda is not None:\r\n return True\r\n\r\n # retrieves the phase duration/offset values\r\n t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)\r\n if t_ofs is None:\r\n t_ofs, t_phase = 0, 3.5346\r\n\r\n ###############################################\r\n #### SHUFFLED TRIAL LDA CALCULATIONS ####\r\n ###############################################\r\n\r\n # creates a reduce data object and creates the rotation filter object\r\n n_ex, n_sh, n_cond = len(i_expt), calc_para['n_shuffle'], len(r_filt['t_type'])\r\n d_data.y_acc = np.empty((n_ex, n_cond + 1, n_sh), dtype=object)\r\n n_sp = np.empty((n_ex, n_sh), dtype=object)\r\n\r\n # runs the LDA for each of the shuffles\r\n for i_sh in range(n_sh):\r\n # updates the progressbar\r\n w_str = 'Shuffled Trial LDA (Shuffle #{0} of {1})'.format(i_sh + 1, n_sh)\r\n w_prog.emit(w_str, 100. 
* (i_sh / n_sh))\r\n\r\n            # runs the rotation analysis for the current configuration\r\n            result = cfcn.run_rot_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max, is_shuffle=True)\r\n            if isinstance(result, bool):\r\n                # if there was an error, then return a false flag value\r\n                return False\r\n            else:\r\n                # otherwise, store the lda/accuracy values\r\n                d_data.y_acc[:, :, i_sh], n_sp[:, i_sh] = result[1], result[3]\r\n                if i_sh == 0:\r\n                    # sets the experiment names (for the first shuffle only)\r\n                    d_data.exp_name = result[2]\r\n\r\n        #######################################\r\n        ####    HOUSE KEEPING EXERCISES    ####\r\n        #######################################\r\n\r\n        # retrieves the LDA solver parameter fields\r\n        lda_para = calc_para['lda_para']\r\n\r\n        # sets the solver parameters\r\n        d_data.lda = 1\r\n        d_data.i_expt = i_expt\r\n        d_data.i_cell = i_cell\r\n        cfcn.set_lda_para(d_data, lda_para, r_filt, n_trial_max)\r\n        d_data.lda_trial_type = cfcn.get_glob_para('lda_trial_type')\r\n\r\n        # sets the phase offset/duration parameters\r\n        d_data.tofs = t_ofs\r\n        d_data.tphase = t_phase\r\n        d_data.usefull = calc_para['use_full_rot']\r\n\r\n        # sets the other parameters\r\n        d_data.nshuffle = n_sh\r\n        # d_data.bsz = calc_para['b_sz']\r\n\r\n        # calculates the correlations\r\n        n_sp_tot = [np.dstack(x) for x in n_sp]\r\n        cfcn.calc_noise_correl(d_data, n_sp_tot)\r\n\r\n        # returns a true value indicating the calculations were successful\r\n        return True\r\n\r\n    def run_individual_lda(self, data, calc_para, r_filt, i_expt, i_cell, n_trial_max):\r\n        '''\r\n\r\n        :param data:\r\n        :param calc_para:\r\n        :param r_filt:\r\n        :param i_expt:\r\n        :param i_cell:\r\n        :param n_trial_max:\r\n        :return:\r\n        '''\r\n\r\n        # initialisations and memory allocation\r\n        d_data, w_prog = data.discrim.indiv, self.work_progress\r\n\r\n        # removes normalisation for the individual cell LDA calculations\r\n        _calc_para = dcopy(calc_para)\r\n        # _calc_para['lda_para']['is_norm'] = False\r\n\r\n        ################################################\r\n        ####    INDIVIDUAL CELL LDA CALCULATIONS    ####\r\n        ################################################\r\n\r\n        # memory allocation\r\n        n_ex = len(i_expt)\r\n        A = np.empty(n_ex, dtype=object)\r\n        d_data.y_acc, d_data.exp_name = dcopy(A), dcopy(A)\r\n        n_cell = [len(i_c) for i_c in i_cell]\r\n\r\n        # loops through each experiment running the single-cell LDA\r\n        for i_ex in range(n_ex):\r\n            # creates a copy of the accepted cell array for the analysis\r\n            _i_cell = np.zeros(n_cell[i_ex], dtype=bool)\r\n            _n_cell = np.sum(i_cell[i_ex])\r\n            d_data.y_acc[i_ex] = np.zeros((_n_cell, 1 + len(calc_para['lda_para']['comp_cond'])))\r\n\r\n            # runs the LDA analysis for each of the cells\r\n            for i, i_c in enumerate(np.where(i_cell[i_ex])[0]):\r\n                # updates the progressbar\r\n                w_str = 'Single Cell LDA (Cell {0}/{1}, Expt {2}/{3})'.format(i + 1, _n_cell, i_ex + 1, n_ex)\r\n                w_prog.emit(w_str, 100. * (i_ex + i / _n_cell) / n_ex)
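\r\n\r\n                # NOTE (editor sketch, not original code): the LDA below is run one cell at a\r\n                # time; _i_cell acts as a one-hot mask over the experiment's cells that is set\r\n                # before, and cleared after, each call, e.g. for cell index 2 of 5 cells:\r\n                #     _i_cell[:] = False; _i_cell[2] = True    # (illustrative only)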
* (i_ex + i / _n_cell) / n_ex)\r\n\r\n # sets the cell for analysis and runs the LDA\r\n _i_cell[i_c] = True\r\n results = cfcn.run_rot_lda(data, _calc_para, r_filt, [i_expt[i_ex]], [_i_cell], n_trial_max)\r\n if isinstance(results, bool):\r\n # if there was an error, then return a false flag value\r\n return False\r\n else:\r\n # otherwise, reset the cell boolean flag\r\n _i_cell[i_c] = False\r\n\r\n # stores the results from the single cell LDA\r\n d_data.y_acc[i_ex][i, :] = results[1]\r\n if i == 0:\r\n # if the first iteration, then store the experiment name\r\n d_data.exp_name[i_ex] = results[2]\r\n\r\n #######################################\r\n #### HOUSE KEEPING EXERCISES ####\r\n #######################################\r\n\r\n # retrieves the LDA solver parameter fields\r\n lda_para = calc_para['lda_para']\r\n t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)\r\n\r\n # sets the solver parameters\r\n d_data.lda = 1\r\n d_data.i_expt = i_expt\r\n d_data.i_cell = i_cell\r\n cfcn.set_lda_para(d_data, lda_para, r_filt, n_trial_max)\r\n d_data.lda_trial_type = cfcn.get_glob_para('lda_trial_type')\r\n\r\n # sets the phase offset/duration\r\n d_data.tofs = t_ofs\r\n d_data.tphase = t_phase\r\n d_data.usefull = calc_para['use_full_rot']\r\n\r\n # returns a true value indicating the calculations were successful\r\n return True\r\n\r\n def run_pooled_lda(self, pool, data, calc_para, r_filt, i_expt, i_cell, n_trial_max):\r\n '''\r\n\r\n :param pool:\r\n :param data:\r\n :param calc_para:\r\n :param r_filt:\r\n :param i_expt:\r\n :param i_cell:\r\n :param n_trial_max:\r\n :return:\r\n '''\r\n\r\n def run_pooled_lda_expt(data, calc_para, r_filt, i_expt0, i_cell0, n_trial_max, n_cell, n_sp0):\r\n '''\r\n\r\n :param data:\r\n :param calc_para:\r\n :param r_filt:\r\n :param i_expt0:\r\n :param i_cell0:\r\n :param n_trial_max:\r\n :param n_cell:\r\n :param n_sp0:\r\n :return:\r\n '''\r\n\r\n while 1:\r\n # sets the required number of cells for the LDA analysis\r\n if calc_para['pool_expt']:\r\n n_sp = n_sp0[:, np.random.permutation(np.size(n_sp0, axis=1))[:n_cell]]\r\n i_cell, i_expt = i_cell0, i_expt0\r\n\r\n else:\r\n i_cell = dcopy(i_cell0)\r\n is_keep = np.ones(len(i_expt0), dtype=bool)\r\n\r\n for i_ex in range(len(i_expt0)):\r\n # determines the original valid cells for the current experiment\r\n ii = np.where(i_cell0[i_ex])[0]\r\n if len(ii) < n_cell:\r\n is_keep[i_ex] = False\r\n continue\r\n\r\n # from these cells, set n_cell cells as being valid (for analysis purposes)\r\n i_cell[i_ex][:] = False\r\n i_cell[i_ex][ii[np.random.permutation(len(ii))][:n_cell]] = True\r\n\r\n # removes the experiments which did not have the min number of cells\r\n i_expt, i_cell, n_sp = i_expt0[is_keep], i_cell[is_keep], n_sp0\r\n\r\n # runs the LDA\r\n results = cfcn.run_rot_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max, n_sp0=n_sp)\r\n if not isinstance(results, bool):\r\n # if successful, then exit the loop\r\n break\r\n\r\n # returns the decoding accuracy values\r\n if calc_para['pool_expt']:\r\n return results[1]\r\n else:\r\n # retrieves the results from the LDA\r\n y_acc0 = results[1]\r\n\r\n # sets the values into the full accuracy array (NaNs for removed experiments)\r\n y_acc = np.nan * np.ones((len(is_keep), np.size(y_acc0, axis=1)))\r\n y_acc[is_keep, :] = y_acc0\r\n return y_acc\r\n\r\n # initialisations\r\n d_data = data.discrim.part\r\n w_prog, n_sp = self.work_progress, None\r\n\r\n #############################################\r\n #### PARTIAL CELL LDA CALCULATIONS ####\r\n #############################################\r\n\r\n # initialisations\r\n if 
calc_para['pool_expt']:\r\n # case is all experiments are pooled\r\n\r\n # initialisations and memory allocation\r\n ind_t, n_sp = np.arange(n_trial_max), []\r\n t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)\r\n\r\n # creates a reduce data object and creates the rotation filter object\r\n data_tmp = cfcn.reduce_cluster_data(data, i_expt, True)\r\n r_obj = RotationFilteredData(data_tmp, r_filt, None, None, True, 'Whole Experiment', False,\r\n t_ofs=t_ofs, t_phase=t_phase)\r\n\r\n # sets up the LDA data/group index arrays across each condition\r\n for i_filt in range(r_obj.n_filt):\r\n # retrieves the time spikes for the current filter/experiment, and then combines into a single\r\n # concatenated array. calculates the final spike counts over each cell/trial and appends to the\r\n # overall spike count array\r\n A = dcopy(r_obj.t_spike[i_filt])[:, ind_t, :]\r\n if r_obj.rot_filt['t_type'][i_filt] == 'MotorDrifting':\r\n # case is motordrifting (swap phases)\r\n t_sp_tmp = np.hstack((A[:, :, 2], A[:, :, 1]))\r\n else:\r\n # case is other experiment conditions\r\n t_sp_tmp = np.hstack((A[:, :, 1], A[:, :, 2]))\r\n\r\n # calculates the spike counts and appends them to the count array\r\n n_sp.append(np.vstack([np.array([len(y) for y in x]) for x in t_sp_tmp]))\r\n\r\n # combines the spike counts/group indices into the final combined arrays\r\n n_sp, n_expt, i_expt_lda = np.hstack(n_sp).T, 1, np.array([i_expt[0]])\r\n xi = cfcn.get_pool_cell_counts(data, calc_para['lda_para'], 1)\r\n\r\n # reduces the cells to the selected cell type\r\n _, _, i_cell0, _, _ = cfcn.setup_lda(data, {'lda_para': calc_para['lda_para']}, None)\r\n n_sp = n_sp[:, np.hstack(i_cell0)]\r\n i_cell = np.array([np.ones(np.size(n_sp, axis=1), dtype=bool)])\r\n\r\n else:\r\n # case is experiments are not pooled\r\n\r\n # initialisations\r\n # y_acc_d, n_expt = data.discrim.dir.y_acc, min([3, len(i_expt)])\r\n y_acc_d, n_expt, i_expt_lda = data.discrim.dir.y_acc, len(i_expt), i_expt\r\n\r\n # # retrieves the top n_expt experiments based on the base decoding accuracy\r\n # ii = np.sort(np.argsort(-np.prod(y_acc_d, axis=1))[:n_expt])\r\n # i_expt, i_cell = i_expt[ii], i_cell[ii]\r\n\r\n # determines the cell count (based on the minimum cell count over all valid experiments)\r\n n_cell_max = np.max([sum(x) for x in i_cell])\r\n xi = [x for x in cfcn.n_cell_pool1 if x <= n_cell_max]\r\n\r\n # memory allocation\r\n n_xi, n_sh, n_cond = len(xi), calc_para['n_shuffle'], len(r_filt['t_type'])\r\n d_data.y_acc = np.zeros((n_expt, n_cond + 1, n_xi, n_sh))\r\n\r\n # loops through each of the cell counts calculating the partial LDA\r\n for i_sh in range(n_sh):\r\n # updates the progressbar\r\n w_str = 'Pooling LDA Calculations (Shuffle {0} of {1})'.format(i_sh + 1, n_sh)\r\n w_prog.emit(w_str, 100. 
* (i_sh / n_sh))\r\n\r\n # # runs the analysis based on the operating system\r\n # if 'Windows' in platform.platform():\r\n # # case is Richard's local computer\r\n #\r\n # # initialisations and memory allocation\r\n # p_data = [[] for _ in range(n_xi)]\r\n # for i_xi in range(n_xi):\r\n # p_data[i_xi].append(data)\r\n # p_data[i_xi].append(calc_para)\r\n # p_data[i_xi].append(r_filt)\r\n # p_data[i_xi].append(i_expt)\r\n # p_data[i_xi].append(i_cell)\r\n # p_data[i_xi].append(n_trial_max)\r\n # p_data[i_xi].append(xi[i_xi])\r\n #\r\n # # runs the pool object to run the partial LDA\r\n # p_results = pool.map(cfcn.run_part_lda_pool, p_data)\r\n # for i_xi in range(n_xi):\r\n # j_xi = xi.index(p_results[i_xi][0])\r\n # d_data.y_acc[:, :, j_xi, i_sh] = p_results[i_xi][1]\r\n # else:\r\n\r\n # case is Subiculum\r\n\r\n # initialisations and memory allocation\r\n for i_xi in range(n_xi):\r\n d_data.y_acc[:, :, i_xi, i_sh] = run_pooled_lda_expt(\r\n data, calc_para, r_filt, i_expt_lda, dcopy(i_cell), n_trial_max, xi[i_xi], dcopy(n_sp)\r\n )\r\n\r\n #######################################\r\n #### HOUSE KEEPING EXERCISES ####\r\n #######################################\r\n\r\n # retrieves the LDA solver parameter fields\r\n lda_para = calc_para['lda_para']\r\n t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)\r\n\r\n # sets the solver parameters\r\n d_data.lda = 1\r\n d_data.i_expt = i_expt\r\n d_data.i_cell = i_cell\r\n cfcn.set_lda_para(d_data, lda_para, r_filt, n_trial_max, ignore_list=['n_cell_min'])\r\n d_data.lda_trial_type = cfcn.get_glob_para('lda_trial_type')\r\n\r\n # sets the phase offset/duration parameters\r\n d_data.tofs = t_ofs\r\n d_data.tphase = t_phase\r\n d_data.usefull = calc_para['use_full_rot']\r\n\r\n # sets the other parameters/arrays\r\n d_data.nshuffle = n_sh\r\n d_data.poolexpt = calc_para['pool_expt']\r\n d_data.xi = xi\r\n\r\n # returns a true value indicating the calculations were successful\r\n return True\r\n\r\n def run_wght_lda(self, data, calc_para, r_filt, i_expt, i_cell, n_trial_max):\r\n '''\r\n\r\n :param data:\r\n :param calc_para:\r\n :param r_filt:\r\n :param i_expt:\r\n :param i_cell:\r\n :param n_trial_max:\r\n :param d_data:\r\n :param w_prog:\r\n :return:\r\n '''\r\n\r\n # initialisations and memory allocation\r\n d_data, w_prog = data.discrim.wght, self.work_progress\r\n if d_data.lda is not None:\r\n # if no change, then exit flagging the calculations are already done\r\n return True\r\n else:\r\n lda_para = calc_para['lda_para']\r\n\r\n #######################################\r\n #### LDA WEIGHT CALCULATIONS ####\r\n #######################################\r\n\r\n # initialisations\r\n t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)\r\n n_ex, n_tt, n_t, _r_filt = len(i_expt), len(r_filt['t_type']), dcopy(n_trial_max), dcopy(r_filt)\r\n p_wt, p_wex, xi = 1 / n_tt, 1 / n_ex, np.linspace(0, 1, 101)\r\n p_w = p_wt * p_wex\r\n\r\n # memory allocation\r\n A, B, C = np.empty((n_ex, n_tt), dtype=object), np.empty(n_ex, dtype=object), np.empty(n_tt, dtype=object)\r\n c_ind, c_wght0 = dcopy(A), dcopy(A)\r\n c_wght, y_top, y_bot = dcopy(C), dcopy(C), dcopy(C)\r\n\r\n # reduces down the data cluster to the valid experiments\r\n data_tmp = cfcn.reduce_cluster_data(data, i_expt, True)\r\n\r\n # sets the LDA solver type\r\n lda = cfcn.setup_lda_solver(lda_para)\r\n\r\n # creates a reduced data object and creates the rotation filter object\r\n for i_tt, tt in enumerate(r_filt['t_type']):\r\n # retrieves the rotation filter for the current trial type\r\n 
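# note: lda.coef_ yields one weight per cell, so e.g. np.argsort(-np.abs(lda.coef_))[0] (as used below) orders the cells from most to least discriminative; the LDA is then re-run with the top/bottom weighted cells progressively removed\r\n 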
_r_filt['t_type'] = [tt]\r\n r_obj = RotationFilteredData(data_tmp, _r_filt, None, None, True, 'Whole Experiment', False,\r\n t_ofs=t_ofs, t_phase=t_phase)\r\n\r\n # memory allocation\r\n y_acc_bot, y_acc_top, c_wght_ex = dcopy(B), dcopy(B), dcopy(B)\r\n\r\n # calculates the cell weight scores for each experiment\r\n for i_ex in range(n_ex):\r\n # updates the progress bar\r\n w_str = 'Weighting LDA ({0}, Expt {1}/{2})'.format(tt, i_ex + 1, n_ex)\r\n p_w0 = p_wt * (i_tt + p_wex * i_ex)\r\n\r\n # retrieves the spike counts for the current experiment\r\n n_sp, i_grp = cfcn.setup_lda_spike_counts(r_obj, i_cell[i_ex], i_ex, n_t, return_all=False)\r\n\r\n try:\r\n # normalises the spike counts and fits the lda model\r\n n_sp_norm = cfcn.norm_spike_counts(n_sp, 2 * n_t, lda_para['is_norm'])\r\n lda.fit(n_sp_norm, i_grp)\r\n except:\r\n if w_prog is not None:\r\n e_str = 'There was an error running the LDA analysis with the current solver parameters. ' \\\r\n 'Either choose a different solver or alter the solver parameters before retrying'\r\n self.work_error.emit(e_str, 'LDA Analysis Error')\r\n return False\r\n\r\n # retrieves the coefficients from the LDA solver\r\n coef0 = dcopy(lda.coef_)\r\n coef0 /= np.max(np.abs(coef0))\r\n\r\n # sets the sorting indices and re-orders the weights\r\n c_ind[i_ex, i_tt] = np.argsort(-np.abs(coef0))[0]\r\n c_wght0[i_ex, i_tt] = coef0[0, c_ind[i_ex, i_tt]]\r\n n_sp = n_sp[:, c_ind[i_ex, i_tt]]\r\n\r\n # calculates the top/bottom removed cells lda performance\r\n y_acc_bot[i_ex] = cfcn.run_reducing_cell_lda(w_prog, lda, lda_para, n_sp, i_grp, p_w0, p_w/2, w_str, True)\r\n y_acc_top[i_ex] = cfcn.run_reducing_cell_lda(w_prog, lda, lda_para, n_sp, i_grp, p_w0+p_w/2, p_w/2, w_str)\r\n\r\n # calculates the interpolated bottom/top removed values\r\n c_wght[i_tt] = interp_arr(xi, np.abs(c_wght0[:, i_tt]))\r\n y_bot[i_tt], y_top[i_tt] = interp_arr(xi, y_acc_bot), interp_arr(xi, y_acc_top)\r\n\r\n #######################################\r\n #### HOUSE KEEPING EXERCISES ####\r\n #######################################\r\n\r\n # sets the solver parameters\r\n d_data.lda = 1\r\n d_data.i_expt = i_expt\r\n d_data.i_cell = i_cell\r\n cfcn.set_lda_para(d_data, lda_para, r_filt, n_trial_max)\r\n d_data.lda_trial_type = cfcn.get_glob_para('lda_trial_type')\r\n\r\n # sets the phase offset/duration parameters\r\n d_data.tofs = t_ofs\r\n d_data.tphase = t_phase\r\n d_data.usefull = calc_para['use_full_rot']\r\n\r\n # sets the other parameters\r\n d_data.xi = xi\r\n d_data.c_ind = c_ind\r\n d_data.c_wght = c_wght\r\n d_data.c_wght0 = c_wght0\r\n d_data.y_acc_bot = y_bot\r\n d_data.y_acc_top = y_top\r\n\r\n # returns a true value indicating the calculations were a success\r\n return True\r\n\r\n ##########################################\r\n #### KINEMATIC LDA CALCULATIONS ####\r\n ##########################################\r\n\r\n def run_speed_lda_accuracy(self, data, calc_para, r_filt, i_expt, i_cell, n_trial, w_prog):\r\n '''\r\n\r\n :param data:\r\n :param calc_para:\r\n :param r_filt:\r\n :param i_expt:\r\n :param i_cell:\r\n :param n_trial:\r\n :param w_prog:\r\n :return:\r\n '''\r\n\r\n # initialisations\r\n d_data = data.discrim.spdacc\r\n\r\n # reduces down the cluster data array\r\n _data = cfcn.reduce_cluster_data(data, i_expt, True)\r\n\r\n # sets up the kinematic LDA spiking frequency array\r\n w_prog.emit('Setting Up LDA Spiking Frequencies...', 0.)\r\n spd_sf, _r_filt = cfcn.setup_kinematic_lda_sf(_data, r_filt, calc_para, i_cell, n_trial, w_prog)\r\n\r\n # case is the normal kinematic LDA\r\n if not 
cfcn.run_full_kinematic_lda(_data, dcopy(spd_sf), calc_para, _r_filt, n_trial, w_prog, d_data):\r\n # if there was an error then exit with a false flag\r\n return False\r\n\r\n #######################################\r\n #### HOUSE-KEEPING EXERCISES ####\r\n #######################################\r\n\r\n # sets the lda values\r\n d_data.i_expt = i_expt\r\n d_data.i_cell = i_cell\r\n\r\n # returns a true value indicating success\r\n return True\r\n\r\n def run_kinematic_lda(self, data, calc_para, r_filt, i_expt, i_cell, n_trial, w_prog):\r\n '''\r\n\r\n :param calc_para:\r\n :param r_filt:\r\n :param i_expt:\r\n :param i_cell:\r\n :param n_trial:\r\n :param w_prog:\r\n :param d_data:\r\n :return:\r\n '''\r\n\r\n # initialisations\r\n d_data = data.discrim.spdc\r\n\r\n # reduces down the cluster data array\r\n _data = cfcn.reduce_cluster_data(data, i_expt, True)\r\n\r\n # sets up the kinematic LDA spiking frequency array\r\n w_prog.emit('Setting Up LDA Spiking Frequencies...', 0.)\r\n spd_sf, _r_filt = cfcn.setup_kinematic_lda_sf(_data, r_filt, calc_para, i_cell, n_trial, w_prog)\r\n\r\n # case is the normal kinematic LDA\r\n if not cfcn.run_kinematic_lda(_data, spd_sf, calc_para, _r_filt, n_trial, w_prog=w_prog, d_data=d_data):\r\n # if there was an error then exit with a false flag\r\n return False\r\n\r\n #######################################\r\n #### HOUSE-KEEPING EXERCISES ####\r\n #######################################\r\n\r\n # sets the lda values\r\n d_data.i_expt = i_expt\r\n d_data.i_cell = i_cell\r\n\r\n # returns a true value indicating success\r\n return True\r\n\r\n def run_pooled_kinematic_lda(self, data, calc_para, r_filt, i_expt, i_cell, n_trial, w_prog, r_data_type='rotation'):\r\n '''\r\n\r\n :param data:\r\n :param calc_para:\r\n :param r_filt:\r\n :param i_expt:\r\n :param i_cell:\r\n :param n_trial:\r\n :param w_prog:\r\n :return:\r\n '''\r\n\r\n # initialisations\r\n d_data = data.discrim.spdcp\r\n tt, lda_para, n_shuff = r_filt['t_type'], calc_para['lda_para'], calc_para['n_shuffle']\r\n\r\n ###########################################\r\n #### PRE-PROCESSING CALCULATIONS ####\r\n ###########################################\r\n\r\n # reduces down the cluster data array\r\n _data = cfcn.reduce_cluster_data(data, i_expt, True)\r\n\r\n # sets up the kinematic LDA spiking frequency array\r\n w_prog.emit('Setting Up LDA Spiking Frequencies...', 0.)\r\n spd_sf, _r_filt = cfcn.setup_kinematic_lda_sf(_data, r_filt, calc_para, i_cell, n_trial,\r\n w_prog, is_pooled=calc_para['pool_expt'])\r\n\r\n ##############################################\r\n #### POOLED NEURON LDA CALCULATIONS ####\r\n ##############################################\r\n\r\n # retrieves the rotation data class\r\n r_data = _data.rotation\r\n\r\n # determines the cell pool groupings\r\n if calc_para['pool_expt']:\r\n n_cell, is_keep = cfcn.get_pool_cell_counts(data, lda_para), []\r\n else:\r\n n_cell_ex = [sum(x) for x in i_cell]\r\n n_cell = [x for x in cfcn.n_cell_pool1 if x <= np.max(n_cell_ex)]\r\n\r\n # memory allocation\r\n n_cell_pool = n_cell[-1]\r\n n_ex = 1 if calc_para['pool_expt'] else len(i_cell)\r\n nC, n_tt, n_xi = len(n_cell), len(tt), len(r_data.spd_xi)\r\n y_acc = [np.nan * np.ones((n_shuff, n_xi, nC, n_ex)) for _ in range(n_tt)]\r\n\r\n # loops through each of the cell pool sizes, running the shuffled LDA\r\n for i_c, n_c in enumerate(n_cell):\r\n n_shuff_nw = n_shuff if (((i_c + 1) < nC) or (not calc_para['pool_expt'])) else 1\r\n for i_s in range(n_shuff_nw):\r\n # updates the progressbar\r\n w_str = 'Speed LDA (G:{0}/{1}, Sh:{2}/{3})'.format(i_c + 1, 
nC, i_s + 1, n_shuff_nw)\r\n pw0 = 100. * (i_c + (i_s / n_shuff_nw)) / nC\r\n\r\n while 1:\r\n # sets the new shuffled spiking frequency array (over all expt)\r\n if calc_para['pool_expt']:\r\n # case all cells are pooled over all experiments\r\n spd_sf_sh = [set_sf_cell_perm(dcopy(spd_sf), n_cell_pool, n_c)]\r\n\r\n else:\r\n # case all cells\r\n is_keep = np.array(n_cell_ex) >= n_c\r\n spd_sf_sh = [set_sf_cell_perm(x, n_ex, n_c) for x, n_ex, is_k in\r\n zip(dcopy(spd_sf), n_cell_ex, is_keep) if is_k]\r\n\r\n # runs the kinematic LDA on the new data\r\n n_ex_sh = 1 if calc_para['pool_expt'] else sum(is_keep)\r\n results = cfcn.run_kinematic_lda(_data, spd_sf_sh, calc_para, _r_filt, n_trial, w_prog=w_prog,\r\n w_str0=w_str, pw0=pw0)\r\n if not isinstance(results, bool):\r\n # if successful, then retrieve the accuracy values\r\n for i_tt in range(n_tt):\r\n for i_ex in range(n_ex_sh):\r\n y_acc[i_tt][i_s, :, i_c, i_ex] = results[0][i_ex, :, i_tt]\r\n\r\n # exits the loop\r\n break\r\n\r\n #######################################\r\n #### HOUSE-KEEPING EXERCISES ####\r\n #######################################\r\n\r\n # sets a copy of the lda parameters and updates the comparison conditions\r\n _lda_para = dcopy(lda_para)\r\n _lda_para['comp_cond'] = r_data.r_obj_kine.rot_filt['t_type']\r\n\r\n # sets the lda values\r\n d_data.lda = 1\r\n d_data.y_acc = y_acc\r\n d_data.i_expt = i_expt\r\n d_data.i_cell = i_cell\r\n d_data.n_cell = n_cell\r\n d_data.exp_name = [os.path.splitext(os.path.basename(x['expFile']))[0] for x in _data.cluster]\r\n d_data.lda_trial_type = cfcn.get_glob_para('lda_trial_type')\r\n\r\n # sets the rotation values\r\n d_data.spd_xi = r_data.spd_xi\r\n d_data.i_bin_spd = r_data.i_bin_spd\r\n\r\n # sets the solver parameters\r\n cfcn.set_lda_para(d_data, _lda_para, r_filt, n_trial)\r\n\r\n # sets the phase duration/offset parameters\r\n d_data.spd_xrng = calc_para['spd_x_rng']\r\n d_data.vel_bin = calc_para['vel_bin']\r\n d_data.n_sample = calc_para['n_sample']\r\n d_data.equal_time = calc_para['equal_time']\r\n d_data.nshuffle = calc_para['n_shuffle']\r\n d_data.poolexpt = calc_para['pool_expt']\r\n\r\n # returns a true value indicating success\r\n return True\r\n\r\n def run_speed_dir_lda_accuracy(self, data, calc_para, r_filt, i_expt, i_cell, n_trial, w_prog):\r\n '''\r\n\r\n :param calc_para:\r\n :param r_filt:\r\n :param i_expt:\r\n :param i_cell:\r\n :param n_trial_max:\r\n :param w_prog:\r\n :return:\r\n '''\r\n\r\n # initialisations\r\n d_data = data.discrim.spddir\r\n\r\n # reduces down the cluster data array\r\n _data = cfcn.reduce_cluster_data(data, i_expt, True)\r\n\r\n # sets up the kinematic LDA spiking frequency array\r\n w_prog.emit('Setting Up LDA Spiking Frequencies...', 0.)\r\n vel_sf, _r_filt = cfcn.setup_kinematic_lda_sf(_data, r_filt, calc_para, i_cell, n_trial, w_prog, use_spd=False)\r\n\r\n # case is the normal kinematic LDA\r\n if not cfcn.run_vel_dir_lda(_data, dcopy(vel_sf), calc_para, _r_filt, n_trial, w_prog, d_data):\r\n # if there was an error then exit with a false flag\r\n return False\r\n\r\n #######################################\r\n #### HOUSE-KEEPING EXERCISES ####\r\n #######################################\r\n\r\n # sets the lda values\r\n d_data.i_expt = i_expt\r\n d_data.i_cell = i_cell\r\n\r\n # returns a true value indicating success\r\n return True\r\n\r\n ######################################\r\n #### ROC CURVE CALCULATIONS ####\r\n ######################################\r\n\r\n def calc_partial_roc_curves(self, data, calc_para, 
plot_para, pW, r_data=None):\r\n '''\r\n\r\n :param data:\r\n :param calc_para:\r\n :param plot_para:\r\n :param pW:\r\n :return:\r\n '''\r\n\r\n # initialises the RotationData class object (if not provided)\r\n if r_data is None:\r\n r_data = data.rotation\r\n\r\n # memory allocation\r\n r_data.part_roc, r_data.part_roc_xy, r_data.part_roc_auc = {}, {}, {}\r\n\r\n # initialises the rotational filter (if not initialised already)\r\n if plot_para['rot_filt'] is None:\r\n plot_para['rot_filt'] = cf.init_rotation_filter_data(False)\r\n\r\n # calculates the partial roc curves for each of the trial conditions\r\n for tt in plot_para['rot_filt']['t_type']:\r\n # if tt not in r_data.part_roc:\r\n r_data.part_roc[tt], r_data.part_roc_xy[tt], r_data.part_roc_auc[tt] = \\\r\n self.calc_phase_roc_curves(data, calc_para, pW, t_type=tt, r_data=None)\r\n\r\n def calc_phase_roc_curves(self, data, calc_para, pW, t_type=None, r_data=None):\r\n '''\r\n\r\n :param calc_para:\r\n :param plot_para:\r\n :param data:\r\n :param pool:\r\n :return:\r\n '''\r\n\r\n # parameters and initialisations\r\n phase_str = ['CW/BL', 'CCW/BL', 'CCW/CW']\r\n if r_data is None:\r\n r_data = data.rotation\r\n\r\n # if the black phase is calculated already, then exit the function\r\n if (r_data.phase_roc is not None) and (t_type is None):\r\n return\r\n\r\n # retrieves the offset parameters\r\n t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)\r\n\r\n # sets up the black phase data filter and returns the time spikes\r\n r_filt = cf.init_rotation_filter_data(False)\r\n\r\n if t_type is None:\r\n r_data.r_obj_black = r_obj = RotationFilteredData(data, r_filt, 0, None, True, 'Whole Experiment', False,\r\n t_phase=t_phase, t_ofs=t_ofs)\r\n else:\r\n r_filt['t_type'] = [t_type]\r\n r_obj = RotationFilteredData(data, r_filt, 0, None, True, 'Whole Experiment', False,\r\n t_phase=t_phase, t_ofs=t_ofs)\r\n\r\n # retrieves the time spikes and sets the roc class fields for update\r\n t_spike = r_obj.t_spike[0]\r\n\r\n # memory allocation\r\n n_cell = np.size(t_spike, axis=0)\r\n roc = np.empty((n_cell, len(phase_str)), dtype=object)\r\n roc_xy = np.empty(n_cell, dtype=object)\r\n roc_auc = np.ones((n_cell, len(phase_str)))\r\n\r\n # calculates the roc curves/integrals for all cells over each phase\r\n for i_phs, p_str in enumerate(phase_str):\r\n # updates the progress bar string\r\n w_str = 'ROC Curve Calculations ({0})...'.format(p_str)\r\n self.work_progress.emit(w_str, pW * i_phs / len(phase_str))\r\n\r\n # calculates the bootstrapped confidence intervals for each cell\r\n ind = np.array([1 * (i_phs > 1), 1 + (i_phs > 0)])\r\n for i_cell in range(n_cell):\r\n # calculates the roc curve/auc integral\r\n roc[i_cell, i_phs] = cf.calc_roc_curves(t_spike[i_cell, :, :], ind=ind)\r\n roc_auc[i_cell, i_phs] = cf.get_roc_auc_value(roc[i_cell, i_phs])\r\n\r\n # if the CW/CCW phase interaction, then set the roc curve x/y coordinates\r\n if (i_phs + 1) == len(phase_str):\r\n roc_xy[i_cell] = cf.get_roc_xy_values(roc[i_cell, i_phs])\r\n\r\n # case is the rotation (black) condition\r\n if t_type is None:\r\n r_data.phase_roc, r_data.phase_roc_xy, r_data.phase_roc_auc = roc, roc_xy, roc_auc\r\n else:\r\n return roc, roc_xy, roc_auc\r\n\r\n def calc_ud_roc_curves(self, data, r_obj_vis, ind_type, pW, r_data=None):\r\n '''\r\n\r\n :param data:\r\n :param r_obj_vis:\r\n :param calc_para:\r\n :param pW:\r\n :return:\r\n '''\r\n\r\n # initialises the RotationData class object (if not provided)\r\n if r_data is None:\r\n r_data = data.rotation\r\n\r\n 
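# the roc curves below compare the CW/BL, CCW/BL and CCW/CW spike counts for the uniform-drifting (visual) stimuli, mirroring the rotation-based calc_phase_roc_curves above\r\n\r\n 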
# parameters and initialisations\r\n t_spike = r_obj_vis.t_spike\r\n phase_str, ind = ['CW/BL', 'CCW/BL', 'CCW/CW'], np.array([0, 1])\r\n\r\n # array indexing values\r\n n_filt = round(r_obj_vis.n_filt / 2)\r\n n_trial = min([np.shape(x)[1] for x in t_spike])\r\n n_cell_expt = [x['nC'] for x in np.array(data.cluster)[cf.det_valid_rotation_expt(data, is_ud=True)]]\r\n n_cell = sum(n_cell_expt)\r\n\r\n # sets up the global index arrays\r\n i_ofs = np.concatenate(([0], np.cumsum(n_cell_expt[:-1])))\r\n i_cell_g = [i0 + np.arange(nC) for i0, nC in zip(i_ofs, n_cell_expt) if nC > 0]\r\n\r\n # if the uniformdrifting phase is calculated already, then exit the function\r\n if r_data.phase_roc_ud is not None:\r\n return\r\n\r\n # memory allocation\r\n roc = np.empty((n_cell, len(phase_str)), dtype=object)\r\n roc_xy = np.empty(n_cell, dtype=object)\r\n roc_auc = np.ones((n_cell, len(phase_str)))\r\n\r\n for i_filt in range(n_filt):\r\n # sets the time spike array and global cell indices array\r\n ind_CC, ind_CCW = ind_type[0][i_filt], ind_type[1][i_filt]\r\n ig_cell = cf.flat_list([ig[ind] for ig, ind in zip(i_cell_g, r_obj_vis.clust_ind[i_filt])])\r\n\r\n # sets the number of cells to be analysed for the current filter\r\n n_cell_f = np.shape(t_spike[ind_CC])[0]\r\n\r\n # calculates the roc curves/integrals for all cells over each phase\r\n for i_phs, p_str in enumerate(phase_str):\r\n # updates the progress bar string\r\n w_str = 'ROC Curve Calculations ({0})...'.format(p_str)\r\n self.work_progress.emit(w_str, 100 * pW * ((i_filt / n_filt) + (i_phs / len(phase_str))))\r\n\r\n # loops through each of the cells calculating the roc curves (and associated values)\r\n for i_cell in range(n_cell_f):\r\n # sets the time spike arrays depending on the phase type\r\n if (i_phs + 1) == len(phase_str):\r\n t_spike_phs = np.vstack((t_spike[ind_CC][i_cell, :n_trial, 1],\r\n t_spike[ind_CCW][i_cell, :n_trial, 1])).T\r\n else:\r\n t_spike_phs = t_spike[ind_type[i_phs][i_filt]][i_cell, :, :]\r\n\r\n # calculates the roc curve/auc integral\r\n ig_nw = int(ig_cell[i_cell])\r\n roc[ig_nw, i_phs] = cf.calc_roc_curves(t_spike_phs, ind=np.array([0, 1]))\r\n roc_auc[ig_nw, i_phs] = cf.get_roc_auc_value(roc[ig_nw, i_phs])\r\n\r\n # if the CW/CCW phase interaction, then set the roc curve x/y coordinates\r\n if (i_phs + 1) == len(phase_str):\r\n roc_xy[ig_nw] = cf.get_roc_xy_values(roc[ig_nw, i_phs])\r\n\r\n # sets the final uniform-drifting roc arrays\r\n r_data.phase_roc_ud, r_data.phase_roc_xy_ud, r_data.phase_roc_auc_ud = roc, roc_xy, roc_auc\r\n\r\n def calc_cond_roc_curves(self, data, pool, calc_para, plot_para, g_para, calc_cell_grp, pW,\r\n force_black_calc=False, r_data=None):\r\n '''\r\n\r\n :param calc_para:\r\n :param plot_para:\r\n :param data:\r\n :param pool:\r\n :return:\r\n '''\r\n\r\n # initialises the RotationData class object (if not provided)\r\n if r_data is None:\r\n r_data = data.rotation\r\n\r\n # parameters and initialisations\r\n t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)\r\n r_obj_sig, plot_scope, c_lvl = None, 'Whole Experiment', float(g_para['roc_clvl'])\r\n phase_str = ['CW/BL', 'CCW/BL', 'CCW/CW']\r\n\r\n # initialises the rotational filter (if not initialised already)\r\n if plot_para['rot_filt'] is None:\r\n plot_para['rot_filt'] = cf.init_rotation_filter_data(False)\r\n\r\n # sets the condition types (ensures that the black phase is always included)\r\n t_type = dcopy(plot_para['rot_filt']['t_type'])\r\n if 'Black' not in t_type:\r\n t_type = ['Black'] + t_type\r\n\r\n if 'vis_expt_type' in 
calc_para:\r\n if calc_para['vis_expt_type'] == 'MotorDrifting':\r\n t_type += ['MotorDrifting']\r\n\r\n # retrieves the rotation phase offset time/duration\r\n if t_ofs is not None:\r\n # if the values are not none, and do not match previous values, then reset the stored roc array\r\n if (r_data.t_ofs_rot != t_ofs) or (r_data.t_phase_rot != t_phase):\r\n r_data.t_ofs_rot, r_data.t_phase_rot, r_data.cond_roc = t_ofs, t_phase, None\r\n elif 'use_full_rot' in calc_para:\r\n # if using the full rotation, and the previous calculations were made using non-full rotation phases,\r\n # then reset the stored roc array\r\n if (r_data.t_ofs_rot > 0):\r\n r_data.t_ofs_rot, r_data.t_phase_rot, r_data.cond_roc = -1, -1, None\r\n\r\n # sets up a base filter with only the required trial types\r\n r_filt_base = cf.init_rotation_filter_data(False)\r\n r_filt_base['t_type'] = [x for x in t_type if x != 'UniformDrifting']\r\n\r\n # sets up the black phase data filter and returns the time spikes\r\n r_obj = RotationFilteredData(data, r_filt_base, None, plot_para['plot_exp_name'], True, plot_scope, False,\r\n t_ofs=t_ofs, t_phase=t_phase)\r\n if not r_obj.is_ok:\r\n # if there was an error, then output an error to screen\r\n self.work_error.emit(r_obj.e_str, 'Incorrect Analysis Function Parameters')\r\n return False\r\n\r\n # memory allocation (if the conditions have not been set)\r\n if r_data.cond_roc is None:\r\n r_data.cond_roc, r_data.cond_roc_xy, r_data.cond_roc_auc = {}, {}, {}\r\n r_data.cond_gtype, r_data.cond_auc_sig, r_data.cond_i_expt, r_data.cond_cl_id = {}, {}, {}, {}\r\n r_data.cond_ci_lo, r_data.cond_ci_hi, r_data.r_obj_cond = {}, {}, {}\r\n r_data.phase_gtype, r_data.phase_auc_sig, r_data.phase_roc = None, None, None\r\n\r\n for i_rr, rr in enumerate(r_obj.rot_filt_tot):\r\n # sets the trial type\r\n tt = rr['t_type'][0]\r\n\r\n # updates the progress bar string\r\n w_str = 'ROC Curve Calculations ({0})...'.format(tt)\r\n self.work_progress.emit(w_str, pW * (i_rr / r_obj.n_filt))\r\n\r\n if tt not in r_data.cond_roc:\r\n # array dimensions\r\n t_spike = r_obj.t_spike[i_rr]\r\n n_cell = np.size(t_spike, axis=0)\r\n\r\n # memory allocation and initialisations\r\n r_data.cond_roc[tt] = np.empty((n_cell, 3), dtype=object)\r\n r_data.cond_roc_xy[tt] = np.empty(n_cell, dtype=object)\r\n r_data.cond_roc_auc[tt] = np.zeros((n_cell, 3))\r\n r_data.cond_gtype[tt] = -np.ones((n_cell, 3))\r\n r_data.cond_auc_sig[tt] = np.zeros((n_cell, 3), dtype=bool)\r\n r_data.cond_i_expt[tt] = r_obj.i_expt[i_rr]\r\n r_data.cond_cl_id[tt] = r_obj.cl_id[i_rr]\r\n r_data.cond_ci_lo[tt] = -np.ones((n_cell, 2))\r\n r_data.cond_ci_hi[tt] = -np.ones((n_cell, 2))\r\n r_data.r_obj_cond[tt] = dcopy(r_obj)\r\n\r\n # calculates the roc curves/integrals for all cells over each phase\r\n for i_phs, p_str in enumerate(phase_str):\r\n # updates the progress bar string\r\n self.work_progress.emit(w_str, pW * ((i_rr / r_obj.n_filt) + (i_phs / len(phase_str))))\r\n\r\n # calculates the roc curve values for each phase\r\n ind = np.array([1 * (i_phs > 1), 1 + (i_phs > 0)])\r\n for ic in range(n_cell):\r\n r_data.cond_roc[tt][ic, i_phs] = cf.calc_roc_curves(t_spike[ic, :, :], ind=ind)\r\n r_data.cond_roc_auc[tt][ic, i_phs] = cf.get_roc_auc_value(r_data.cond_roc[tt][ic, i_phs])\r\n\r\n if (i_phs + 1) == len(phase_str):\r\n r_data.cond_roc_xy[tt][ic] = cf.get_roc_xy_values(r_data.cond_roc[tt][ic, i_phs])\r\n\r\n # calculates the confidence intervals for the current (only if bootstrapping count has changed or the\r\n # confidence intervals have not already been 
calculated)\r\n if 'auc_stype' in calc_para:\r\n # updates the auc statistics calculation type\r\n r_data.cond_auc_stats_type = calc_para['auc_stype']\r\n\r\n # determine if the auc confidence intervals need calculation\r\n is_boot = int(calc_para['auc_stype'] == 'Bootstrapping')\r\n if is_boot:\r\n # if bootstrapping, then determine if the\r\n if r_data.n_boot_cond_ci != calc_para['n_boot']:\r\n # if the bootstrapping count has changed, flag that the confidence intervals needs updating\r\n r_data.n_boot_cond_ci, calc_ci = calc_para['n_boot'], True\r\n else:\r\n # otherwise, recalculate the confidence intervals if they have not been set\r\n calc_ci = np.any(r_data.cond_ci_lo[tt][:, 1] < 0)\r\n else:\r\n # otherwise, recalculate the confidence intervals if they have not been set\r\n calc_ci = np.any(r_data.cond_ci_lo[tt][:, 0] < 0)\r\n\r\n # calculates the confidence intervals (if required)\r\n if calc_ci:\r\n conf_int = self.calc_roc_conf_intervals(pool, r_data.cond_roc[tt][:, 2],\r\n calc_para['auc_stype'], calc_para['n_boot'], c_lvl)\r\n r_data.cond_ci_lo[tt][:, is_boot] = conf_int[:, 0]\r\n r_data.cond_ci_hi[tt][:, is_boot] = conf_int[:, 1]\r\n\r\n # if not calculating the cell group indices, or the condition type is Black (the phase statistics for\r\n # this condition are already calculated in \"calc_phase_roc_significance\"), then continue\r\n if (not calc_cell_grp) or ((tt == 'Black') and (not force_black_calc)):\r\n continue\r\n\r\n # sets the rotation object filter (if using wilcoxon paired test for the cell group stats type)\r\n if calc_para['grp_stype'] == 'Wilcoxon Paired Test':\r\n if np.all(r_data.cond_gtype[tt][:, 0] >= 0):\r\n # if all the values have been calculated, then exit the function\r\n continue\r\n\r\n # sets the rotation object for the current condition\r\n r_obj_sig = RotationFilteredData(data, r_obj.rot_filt_tot[i_rr], None, plot_para['plot_exp_name'],\r\n True, plot_scope, False, t_ofs=t_ofs, t_phase=t_phase)\r\n if not r_obj_sig.is_ok:\r\n # if there was an error, then output an error to screen\r\n self.work_error.emit(r_obj_sig.e_str, 'Incorrect Analysis Function Parameters')\r\n return False\r\n\r\n # calculates the condition cell group types\r\n self.calc_phase_roc_significance(calc_para, g_para, data, pool, None, c_type='cond',\r\n roc=r_data.cond_roc[tt], auc=r_data.cond_roc_auc[tt],\r\n g_type=r_data.cond_gtype[tt], auc_sig=r_data.cond_auc_sig[tt],\r\n r_obj=r_obj_sig)\r\n\r\n # returns a true value\r\n return True\r\n\r\n def calc_phase_roc_significance(self, calc_para, g_para, data, pool, pW, c_type='phase',\r\n roc=None, auc=None, g_type=None, auc_sig=None, r_obj=None, r_data=None):\r\n '''\r\n\r\n :param calc_data:\r\n :param data:\r\n :param pool:\r\n :return:\r\n '''\r\n\r\n # initialises the RotationData class object (if not provided)\r\n if r_data is None:\r\n r_data = data.rotation\r\n\r\n # sets the roc objects/integrals (if not provided)\r\n c_lvl = float(g_para['roc_clvl'])\r\n if c_type == 'phase':\r\n # case is the significance tests are being calculated for the phase\r\n r_data.phase_grp_stats_type = calc_para['grp_stype']\r\n roc, auc, r_obj = r_data.phase_roc, r_data.phase_roc_auc, r_data.r_obj_black\r\n else:\r\n # case is the significance tests are being calculated for the conditions\r\n r_data.cond_grp_stats_type = calc_para['grp_stype']\r\n\r\n # parameters and initialisations\r\n phase_str, i_col = ['CW/BL', 'CCW/BL', 'CCW/CW'], 0\r\n p_value, n_cell = 0.05, np.size(roc, axis=0)\r\n\r\n # allocates memory for the group-types (if not 
already calculated)\r\n if c_type == 'phase':\r\n # case is for the phase type\r\n n_boot = r_data.n_boot_phase_grp\r\n if r_data.phase_gtype is None:\r\n # group type has not been set, so initialise the array\r\n r_data.phase_gtype = g_type = -np.ones((n_cell, 3))\r\n r_data.phase_auc_sig = auc_sig = np.zeros((n_cell, 3), dtype=bool)\r\n else:\r\n # otherwise, retrieve the currently stored array\r\n g_type, auc_sig = r_data.phase_gtype, r_data.phase_auc_sig\r\n else:\r\n # case is for the condition type\r\n n_boot = r_data.n_boot_cond_grp\r\n\r\n #########################################\r\n #### WILCOXON STATISTICAL TEST ####\r\n #########################################\r\n\r\n if calc_para['grp_stype'] == 'Wilcoxon Paired Test':\r\n # if the statistics have already been calculated, then exit the function\r\n if np.all(g_type[:, 0] >= 0):\r\n return\r\n\r\n # updates the progress bar string\r\n if pW is not None:\r\n self.work_progress.emit('Calculating Wilcoxon Stats...', pW + 25.)\r\n\r\n # calculates the statistical significance between the phases\r\n sp_f0, sp_f = cf.calc_phase_spike_freq(r_obj)\r\n _, _, sf_stats, _ = cf.setup_spike_freq_plot_arrays(r_obj, sp_f0, sp_f, None)\r\n\r\n # determines which cells are motion/direction sensitive\r\n for i_phs in range(len(sf_stats)):\r\n auc_sig[:, i_phs] = sf_stats[i_phs] < p_value\r\n\r\n ##########################################\r\n #### ROC-BASED STATISTICAL TEST ####\r\n ##########################################\r\n\r\n else:\r\n # determines what kind of statistics are to be calculated\r\n is_boot = calc_para['grp_stype'] == 'Bootstrapping'\r\n i_col, phase_stype = 1 + is_boot, calc_para['grp_stype']\r\n\r\n # if the statistics have been calculated for the selected type, then exit the function\r\n if is_boot:\r\n if np.all(g_type[:, 2] >= 0) and (calc_para['n_boot'] == n_boot):\r\n # if bootstrapping is selected, but all values have been calculated and the bootstrapping values\r\n # has not changed, then exit the function\r\n return\r\n else:\r\n # otherwise, update the bootstrapping count\r\n if c_type == 'phase':\r\n r_data.n_boot_phase_grp = dcopy(calc_para['n_boot'])\r\n else:\r\n r_data.n_boot_cond_grp = dcopy(calc_para['n_boot'])\r\n\r\n elif np.all(g_type[:, 1] >= 0):\r\n # if delong significance is selected, and all values have been calculated, then exit the function\r\n return\r\n\r\n # calculates the significance for each phase\r\n for i_phs, p_str in enumerate(phase_str):\r\n # updates the progress bar string\r\n if pW is not None:\r\n w_str = 'ROC Curve Calculations ({0})...'.format(p_str)\r\n self.work_progress.emit(w_str, pW * (1. 
+ i_phs / len(phase_str)))\r\n\r\n # calculates the confidence intervals for the current\r\n conf_int = self.calc_roc_conf_intervals(pool, roc[:, i_phs], phase_stype, n_boot, c_lvl)\r\n\r\n # determines the significance for each cell in the phase\r\n auc_ci_lo = (auc[:, i_phs] + conf_int[:, 1]) < 0.5\r\n auc_ci_hi = (auc[:, i_phs] - conf_int[:, 0]) > 0.5\r\n auc_sig[:, i_phs] = np.logical_or(auc_ci_lo, auc_ci_hi)\r\n\r\n # calculates the cell group types\r\n g_type[:, i_col] = cf.calc_cell_group_types(auc_sig, calc_para['grp_stype'])\r\n\r\n def calc_dirsel_group_types(self, data, pool, calc_para, plot_para, g_para, r_data=None):\r\n '''\r\n\r\n :param data:\r\n :param plot_para:\r\n :return:\r\n '''\r\n\r\n def calc_combined_spiking_stats(r_data, r_obj, pool, calc_para, g_para, p_value, ind_type=None,\r\n t_type='Black'):\r\n '''\r\n\r\n :param r_obj:\r\n :param ind_type:\r\n :return:\r\n '''\r\n\r\n # calculates the individual trial/mean spiking rates and sets up the plot/stats arrays\r\n sp_f0, sp_f = cf.calc_phase_spike_freq(r_obj)\r\n s_plt, _, sf_stats, i_grp = cf.setup_spike_freq_plot_arrays(r_obj, sp_f0, sp_f, ind_type)\r\n\r\n # calculates the CW/CCW spiking frequency ratio\r\n r_CCW_CW = np.array(s_plt[2][1]) / np.array(s_plt[2][0])\r\n\r\n #########################################\r\n #### WILCOXON STATISTICAL TEST ####\r\n #########################################\r\n\r\n if calc_para['grp_stype'] == 'Wilcoxon Paired Test':\r\n # case is the wilcoxon paired test\r\n sf_scores = cf.calc_ms_scores(s_plt, sf_stats, p_value)\r\n\r\n ##########################################\r\n #### ROC-BASED STATISTICAL TEST ####\r\n ##########################################\r\n\r\n else:\r\n # determines what kind of statistics are to be calculated\r\n phase_stype = calc_para['grp_stype']\r\n is_boot, n_boot = calc_para['grp_stype'] == 'Bootstrapping', calc_para['n_boot']\r\n phase_str, c_lvl, pW = ['CW/BL', 'CCW/BL', 'CCW/CW'], float(g_para['roc_clvl']), 100.\r\n\r\n # retrieves the roc/auc fields (depending on the type)\r\n if t_type == 'Black':\r\n # case is the black (rotation) condition\r\n roc, auc = r_data.phase_roc, r_data.phase_roc_auc\r\n elif t_type == 'UniformDrifting':\r\n # case is the uniformdrifting (visual) condition\r\n roc, auc = r_data.phase_roc_ud, r_data.phase_roc_auc_ud\r\n else:\r\n # case is the motordrifting (visual) condition\r\n roc, auc = r_data.cond_roc['MotorDrifting'], r_data.cond_roc_auc['MotorDrifting']\r\n\r\n # REMOVE ME LATER?\r\n c_lvl = 0.95\r\n\r\n # if the statistics have been calculated for the selected type, then exit the function\r\n if is_boot:\r\n # otherwise, update the bootstrapping count\r\n r_data.n_boot_comb_grp = dcopy(calc_para['n_boot'])\r\n\r\n # calculates the significance for each phase\r\n auc_sig = np.zeros((np.size(roc, axis=0), 3), dtype=bool)\r\n for i_phs, p_str in enumerate(phase_str):\r\n # updates the progress bar string\r\n if pW is not None:\r\n w_str = 'ROC Curve Calculations ({0})...'.format(p_str)\r\n self.work_progress.emit(w_str, pW * (i_phs / len(phase_str)))\r\n\r\n # calculates the confidence intervals for the current\r\n conf_int = self.calc_roc_conf_intervals(pool, roc[:, i_phs], phase_stype, n_boot, c_lvl)\r\n\r\n # determines the significance for each cell in the phase\r\n auc_ci_lo = (auc[:, i_phs] + conf_int[:, 1]) < 0.5\r\n auc_ci_hi = (auc[:, i_phs] - conf_int[:, 0]) > 0.5\r\n auc_sig[:, i_phs] = np.logical_or(auc_ci_lo, auc_ci_hi)\r\n\r\n # case is the wilcoxon paired test\r\n sf_scores = np.zeros((np.size(roc, 
axis=0), 3), dtype=int)\r\n for ig in i_grp:\r\n sf_scores[ig, :] = cf.calc_ms_scores(auc[ig, :], auc_sig[ig, :], None)\r\n\r\n # returns the direction selectivity scores\r\n return sf_scores, i_grp, r_CCW_CW\r\n\r\n def det_dirsel_cells(sf_score, grp_stype):\r\n '''\r\n\r\n :param sf_score:\r\n :return:\r\n '''\r\n\r\n # calculates the minimum/sum scores\r\n if grp_stype == 'Wilcoxon Paired Test':\r\n score_min, score_sum = np.min(sf_score[:, :2], axis=1), np.sum(sf_score[:, :2], axis=1)\r\n\r\n # determines the direction selective cells, which must meet the following conditions:\r\n # 1) one direction only produces a significant result, OR\r\n # 2) both directions are significant AND the CW/CCW comparison is significant\r\n one_dir_sig = np.logical_and(score_min == 0, score_sum > 0) # cells where one direction is significant\r\n both_dir_sig = np.min(sf_score[:, :2], axis=1) > 0 # cells where both CW/CCW is significant\r\n comb_dir_sig = sf_score[:, -1] > 0 # cells where CW/CCW difference is significant\r\n\r\n # determines which cells are direction selective (removes non-motion sensitive cells)\r\n return np.logical_or(one_dir_sig, np.logical_and(both_dir_sig, comb_dir_sig)).astype(int)\r\n else:\r\n # case is the roc analysis statistics (only consider the CW/CCW comparison for ds)\r\n return sf_score[:, 2] > 0\r\n\r\n # initialises the RotationData class object (if not provided)\r\n if r_data is None:\r\n r_data = data.rotation\r\n\r\n # initialises the rotation filter (if not set)\r\n rot_filt = plot_para['rot_filt']\r\n if rot_filt is None:\r\n rot_filt = cf.init_rotation_filter_data(False)\r\n\r\n # sets the p-value\r\n if 'p_value' in calc_para:\r\n p_val = calc_para['p_value']\r\n else:\r\n p_val = 0.05\r\n\r\n # initialisations and memory allocation\r\n p_scope, n_grp, r_data, grp_stype = 'Whole Experiment', 4, r_data, calc_para['grp_stype']\r\n # r_filt_rot, r_filt_vis = dcopy(rot_filt), dcopy(rot_filt)\r\n plot_exp_name, plot_all_expt = plot_para['plot_exp_name'], plot_para['plot_all_expt']\r\n r_data.ds_p_value = dcopy(p_val)\r\n\r\n t_ofs_rot, t_phase_rot = cfcn.get_rot_phase_offsets(calc_para)\r\n t_ofs_vis, t_phase_vis = cfcn.get_rot_phase_offsets(calc_para, True)\r\n\r\n # determines what type of visual experiment is being used for comparison (if provided)\r\n if 'vis_expt_type' in calc_para:\r\n # case is a calculation parameter is set\r\n ud_rot_expt = calc_para['vis_expt_type'] == 'UniformDrifting'\r\n else:\r\n # case is no calculation parameter is set, so use uniform drifting\r\n ud_rot_expt = True\r\n\r\n # sets up the black-only rotation filter object\r\n r_filt_black = cf.init_rotation_filter_data(False)\r\n r_obj_black = RotationFilteredData(data, r_filt_black, None, plot_exp_name, plot_all_expt, p_scope, False,\r\n t_ofs=t_ofs_rot, t_phase=t_phase_rot)\r\n\r\n # retrieves the rotational filtered data (black conditions only)\r\n r_filt_rot = cf.init_rotation_filter_data(False)\r\n r_data.r_obj_rot_ds = RotationFilteredData(data, r_filt_rot, None, plot_exp_name, plot_all_expt,\r\n p_scope, False)\r\n\r\n # retrieves the visual filtered data\r\n r_filt_vis = cf.init_rotation_filter_data(True)\r\n if ud_rot_expt:\r\n # sets the visual phase/offset\r\n if t_phase_vis is None:\r\n # if the phase duration is not set\r\n t_phase_vis, t_ofs_vis = 2., 0.\r\n elif (t_phase_vis + t_ofs_vis) > 2:\r\n # output an error to screen\r\n e_str = 'The entered analysis duration and offset is greater than the experimental phase duration:\\n\\n' \\\r\n ' * Analysis Duration + Offset = 
{0} s.\\n * Experiment Phase Duration = {1} s.\\n\\n' \\\r\n 'Enter a correct analysis duration/offset combination before re-running ' \\\r\n 'the function.'.format(t_phase_vis + t_ofs_vis, 2.0)\r\n self.work_error.emit(e_str, 'Incorrect Analysis Function Parameters')\r\n\r\n # return a false value indicating the calculation is invalid\r\n return False\r\n\r\n # case is uniform-drifting experiments (split into CW/CCW phases)\r\n r_filt_vis['t_type'], r_filt_vis['is_ud'], r_filt_vis['t_cycle'] = ['UniformDrifting'], [True], ['15']\r\n r_data.r_obj_vis, ind_type = cf.split_unidrift_phases(data, r_filt_vis, None, plot_exp_name, plot_all_expt,\r\n p_scope, t_phase_vis, t_ofs_vis)\r\n\r\n if (r_data.phase_roc_ud is None) and ('Wilcoxon' not in calc_para['grp_stype']):\r\n self.calc_ud_roc_curves(data, r_data.r_obj_vis, ind_type, 66.)\r\n\r\n else:\r\n # case is motor-drifting experiments\r\n\r\n # retrieves the filtered data from the loaded datasets\r\n r_filt_vis['t_type'], r_filt_vis['is_ud'], ind_type = ['MotorDrifting'], [False], None\r\n t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para, is_vis=True)\r\n\r\n # runs the rotation filter\r\n r_data.r_obj_vis = RotationFilteredData(data, r_filt_vis, None, plot_exp_name, plot_all_expt,\r\n p_scope, False, t_ofs=t_ofs, t_phase=t_phase)\r\n if not r_data.r_obj_vis.is_ok:\r\n # if there was an error, then output an error to screen\r\n self.work_error.emit(r_data.r_obj_vis.e_str, 'Incorrect Analysis Function Parameters')\r\n return False\r\n\r\n # calculate the visual/rotation stats scores\r\n sf_score_rot, i_grp_rot, r_CCW_CW_rot = calc_combined_spiking_stats(r_data, r_data.r_obj_rot_ds, pool,\r\n calc_para, g_para, p_val)\r\n\r\n sf_score_vis, i_grp_vis, r_CCW_CW_vis = calc_combined_spiking_stats(r_data, r_data.r_obj_vis, pool,\r\n calc_para, g_para, p_val, ind_type,\r\n r_filt_vis['t_type'][0])\r\n\r\n # memory allocation\r\n ds_type_tmp, ms_type_tmp, pd_type_tmp = [], [], []\r\n r_data.ms_gtype_N, r_data.ds_gtype_N, r_data.pd_type_N = [], [], []\r\n A = np.empty(len(i_grp_rot), dtype=object)\r\n r_data.ds_gtype_ex, r_data.ms_gtype_ex, r_data.pd_type_ex = dcopy(A), dcopy(A), dcopy(A)\r\n r_data.ds_gtype_comb, r_data.ms_gtype_comb = dcopy(A), dcopy(A)\r\n\r\n # reduces the arrays to the matching cells\r\n for i in range(len(i_grp_rot)):\r\n if len(i_grp_rot[i]):\r\n # retrieves the matching rotation/visual indices\r\n ind_rot, ind_vis = cf.det_cell_match_indices(r_data.r_obj_rot_ds, i, r_data.r_obj_vis)\r\n\r\n # determines the motion sensitivity from the score phase types (append proportion/N-value arrays)\r\n # 0 = None\r\n # 1 = Rotation Only\r\n # 2 = Visual Only\r\n # 3 = Both\r\n _sf_score_rot = sf_score_rot[i_grp_rot[i][ind_rot]][:, :-1]\r\n _sf_score_vis = sf_score_vis[i_grp_vis[i][ind_vis]][:, :-1]\r\n ms_gtype_comb = (np.sum(_sf_score_rot, axis=1) > 0) + 2 * (np.sum(_sf_score_vis, axis=1) > 0)\r\n ms_type_tmp.append(cf.calc_rel_prop(ms_gtype_comb, 4))\r\n r_data.ms_gtype_N.append(len(ind_rot))\r\n\r\n # determines the direction selectivity type from the score phase types (append proportion/N-value arrays)\r\n # 0 = None\r\n # 1 = Rotation Only\r\n # 2 = Visual Only\r\n # 3 = Both\r\n is_ds_rot = det_dirsel_cells(sf_score_rot[i_grp_rot[i][ind_rot]], calc_para['grp_stype'])\r\n is_ds_vis = det_dirsel_cells(sf_score_vis[i_grp_vis[i][ind_vis]], calc_para['grp_stype'])\r\n ds_gtype_comb = is_ds_rot.astype(int) + 2 * is_ds_vis.astype(int)\r\n ds_type_tmp.append(cf.calc_rel_prop(ds_gtype_comb, 4))\r\n 
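# appends the matched cell count for the direction selectivity proportions\r\n 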
r_data.ds_gtype_N.append(len(ind_rot))\r\n\r\n # determines which cells have significance for both rotation/visual stimuli. from this determine the\r\n # preferred direction from the CW vs CCW spiking rates\r\n is_both_ds = ds_gtype_comb == 3\r\n r_CCW_CW_comb = np.vstack((r_CCW_CW_rot[i_grp_rot[i][ind_rot]][is_both_ds],\r\n r_CCW_CW_vis[i_grp_vis[i][ind_vis]][is_both_ds])).T\r\n\r\n # determines the preferred direction type (for clusters which have BOTH rotation and visual significance)\r\n # 0 = Congruent (preferred direction is the same)\r\n # 1 = Incongruent (preferred direction is different)\r\n pd_type = np.zeros(sum(is_both_ds), dtype=int)\r\n pd_type[np.sum(r_CCW_CW_comb > 1, axis=1) == 1] = 1\r\n\r\n # calculates the preferred direction type count/proportions\r\n r_data.pd_type_N.append(cf.calc_rel_count(pd_type, 2))\r\n pd_type_tmp.append(cf.calc_rel_prop(pd_type, 2))\r\n\r\n # sets the indices of the temporary group type into the total array\r\n ind_bl, ind_bl_rot = cf.det_cell_match_indices(r_obj_black, [0, i], r_data.r_obj_rot_ds)\r\n ind_comb = ind_bl[np.searchsorted(ind_bl_rot, ind_rot)]\r\n\r\n # sets the indices for each experiment\r\n i_expt0 = r_data.r_obj_vis.i_expt[i][ind_vis]\r\n i_expt, i_expt_cong = grp_expt_indices(i_expt0), grp_expt_indices(i_expt0[is_both_ds])\r\n\r\n # sets the final motion sensitivity, direction selectivity and congruency values\r\n r_data.ms_gtype_ex[i] = np.vstack([cf.calc_rel_prop(ms_gtype_comb[x], 4) for x in i_expt])\r\n r_data.ds_gtype_ex[i] = np.vstack([cf.calc_rel_prop(ds_gtype_comb[x], 4) for x in i_expt])\r\n\r\n if len(i_expt_cong):\r\n r_data.pd_type_ex[i] = np.vstack([cf.calc_rel_prop(pd_type[x], 2) for x in i_expt_cong])\r\n else:\r\n r_data.pd_type_ex[i] = np.nan * np.ones((1, 2))\r\n\r\n # sets the direction selective/motion sensitivity types for current experiment\r\n r_data.ds_gtype_comb[i] = [ds_gtype_comb[i_ex] for i_ex in i_expt]\r\n r_data.ms_gtype_comb[i] = [ms_gtype_comb[i_ex] for i_ex in i_expt]\r\n\r\n else:\r\n # appends the counts to the motion sensitive/direction selectivity arrays\r\n r_data.ms_gtype_N.append(0)\r\n r_data.ds_gtype_N.append(0)\r\n\r\n # appends NaN arrays to the temporary arrays\r\n ms_type_tmp.append(np.array([np.nan] * 4))\r\n ds_type_tmp.append(np.array([np.nan] * 4))\r\n pd_type_tmp.append(np.array([np.nan] * 2))\r\n\r\n # combines the relative proportion lists into a single array\r\n r_data.ms_gtype_pr = np.vstack(ms_type_tmp).T\r\n r_data.ds_gtype_pr = np.vstack(ds_type_tmp).T\r\n r_data.pd_type_pr = np.vstack(pd_type_tmp).T\r\n\r\n # return a true flag to indicate the analysis was valid\r\n return True\r\n\r\n def calc_kinematic_roc_curves(self, data, pool, calc_para, g_para, pW0, r_data=None):\r\n '''\r\n\r\n :param calc_para:\r\n :return:\r\n '''\r\n\r\n def resample_spike_freq(pool, sf, c_lvl, n_rs=100):\r\n '''\r\n\r\n :param pool:\r\n :param sf:\r\n :param c_lvl:\r\n :param n_rs:\r\n :return:\r\n '''\r\n\r\n # array dimensioning and other initialisations\r\n n_trial = len(sf)\r\n pz = norm.ppf(1 - (1 - c_lvl) / 2)\r\n n_trial_h = int(np.floor(n_trial / 2))\r\n\r\n # if the spiking frequency values are all identical, then return the fixed values\r\n if cfcn.arr_range(sf) == 0.:\r\n return sf[0] * np.ones(n_trial_h), sf[0] * np.ones(n_trial_h), 0.5, np.zeros(2)\r\n\r\n # initialisations and memory allocation\r\n p_data = [[] for _ in range(n_rs)]\r\n\r\n # returns the shuffled spike frequency arrays\r\n for i_rs in range(n_rs):\r\n ind0 = 
np.random.permutation(n_trial)\r\n p_data[i_rs].append(np.sort(sf[ind0[:n_trial_h]]))\r\n p_data[i_rs].append(np.sort(sf[ind0[n_trial_h:(2 * n_trial_h)]]))\r\n\r\n # calculates the roc curves and the x/y coordinates\r\n _roc = pool.map(cfcn.calc_roc_curves_pool, p_data)\r\n _roc_xy = cfcn.calc_avg_roc_curve([cf.get_roc_xy_values(x) for x in _roc])\r\n\r\n # calculate the roc auc values (ensures that they are > 0.5)\r\n _roc_auc = [cf.get_roc_auc_value(x) for x in _roc]\r\n _roc_auc = [(1. - x) if x < 0.5 else x for x in _roc_auc]\r\n\r\n # calculates the roc auc mean/confidence interval\r\n roc_auc_mn = np.mean(_roc_auc)\r\n roc_auc_ci = pz * np.ones(2) * (np.std(_roc_auc) / (n_rs ** 0.5))\r\n\r\n # returns the arrays and auc mean/confidence intervals\r\n return _roc_xy[:, 0], _roc_xy[:, 1], roc_auc_mn, roc_auc_ci\r\n\r\n # initialises the RotationData class object (if not provided)\r\n if r_data is None:\r\n r_data = data.rotation\r\n\r\n # initialisations\r\n is_boot = int(calc_para['auc_stype'] == 'Bootstrapping')\r\n pW1, c_lvl = 100 - pW0, float(g_para['roc_clvl'])\r\n\r\n # memory allocation (if the conditions have not been set)\r\n if r_data.vel_roc is None:\r\n r_data.vel_roc, r_data.vel_roc_xy, r_data.vel_roc_auc = {}, {}, {}\r\n r_data.spd_roc, r_data.spd_roc_xy, r_data.spd_roc_auc = {}, {}, {}\r\n r_data.vel_ci_lo, r_data.vel_ci_hi, r_data.spd_ci_lo, r_data.spd_ci_hi = {}, {}, {}, {}\r\n r_data.vel_roc_sig, r_data.spd_roc_sig = None, None\r\n\r\n for i_rr, rr in enumerate(r_data.r_obj_kine.rot_filt_tot):\r\n tt, _pW1 = rr['t_type'][0], pW1 * (i_rr / r_data.r_obj_kine.n_filt)\r\n init_data = tt not in r_data.vel_roc\r\n\r\n # array dimensions\r\n calc_ci = None\r\n if r_data.is_equal_time:\r\n vel_sf = dcopy(r_data.vel_sf_rs[tt])\r\n if not r_data.pn_comp:\r\n spd_sf = dcopy(r_data.spd_sf_rs[tt])\r\n else:\r\n vel_sf = dcopy(r_data.vel_sf[tt])\r\n if not r_data.pn_comp:\r\n spd_sf = dcopy(r_data.spd_sf[tt])\r\n\r\n # array indexing\r\n n_trial, n_bin_vel, n_cell = np.shape(vel_sf)\r\n if r_data.pn_comp:\r\n n_bin_vel = int(n_bin_vel / 2)\r\n\r\n if init_data:\r\n # velocity roc memory allocation and initialisations\r\n r_data.vel_roc[tt] = np.empty((n_cell, n_bin_vel), dtype=object)\r\n r_data.vel_roc_xy[tt] = np.empty((n_cell, n_bin_vel), dtype=object)\r\n r_data.vel_roc_auc[tt] = np.zeros((n_cell, n_bin_vel))\r\n r_data.vel_ci_lo[tt] = -np.ones((n_cell, n_bin_vel, 2))\r\n r_data.vel_ci_hi[tt] = -np.ones((n_cell, n_bin_vel, 2))\r\n\r\n # speed roc memory allocation and initialisations (non pos/neg comparison only)\r\n if not r_data.pn_comp:\r\n n_bin_spd = np.size(spd_sf, axis=1)\r\n r_data.spd_roc[tt] = np.empty((n_cell, n_bin_spd), dtype=object)\r\n r_data.spd_roc_xy[tt] = np.empty((n_cell, n_bin_spd), dtype=object)\r\n r_data.spd_roc_auc[tt] = np.zeros((n_cell, n_bin_spd))\r\n r_data.spd_ci_lo[tt] = -np.ones((n_cell, n_bin_spd, 2))\r\n r_data.spd_ci_hi[tt] = -np.ones((n_cell, n_bin_spd, 2))\r\n\r\n # calculates the roc curves/integrals for all cells over each phase\r\n w_str0 = 'ROC Calculations ({0} - '.format(tt)\r\n for ic in range(n_cell):\r\n # updates the progress bar string\r\n w_str = '{0}{1}/{2})'.format(w_str0, ic+1, n_cell)\r\n self.work_progress.emit(w_str, pW0 + _pW1 + (pW1 / r_data.r_obj_kine.n_filt) * (ic / n_cell))\r\n\r\n if init_data:\r\n # memory allocations\r\n vel_auc_ci, ii_v = [], ~np.isnan(vel_sf[:, 0, ic])\r\n\r\n # calculates the velocity roc curves values for each velocity bin\r\n for i_bin in range(n_bin_vel):\r\n if r_data.pn_comp:\r\n 
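# case is the positive/negative velocity comparison (bins mirrored about zero)\r\n 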
is_resampled = False\r\n vel_sf_x = vel_sf[ii_v, n_bin_vel + i_bin, ic]\r\n vel_sf_y = vel_sf[ii_v, n_bin_vel - (i_bin + 1), ic]\r\n else:\r\n # case is single bin comparison\r\n if (i_bin == r_data.i_bin_vel[0]) or (i_bin == r_data.i_bin_vel[1]):\r\n is_resampled = True\r\n vel_sf_x, vel_sf_y, vel_auc_roc, _auc_ci = \\\r\n resample_spike_freq(pool, vel_sf[ii_v, i_bin, ic], c_lvl)\r\n vel_auc_ci.append(_auc_ci)\r\n else:\r\n is_resampled = False\r\n vel_sf_x = vel_sf[ii_v, i_bin, ic]\r\n if r_data.vel_xi[i_bin, 0] < 0:\r\n vel_sf_y = vel_sf[ii_v, r_data.i_bin_vel[0], ic]\r\n else:\r\n vel_sf_y = vel_sf[ii_v, r_data.i_bin_vel[1], ic]\r\n\r\n # calculates the roc curves/coordinates from the spiking frequencies\r\n r_data.vel_roc[tt][ic, i_bin] = cf.calc_roc_curves(None, None,\r\n x_grp=vel_sf_x, y_grp=vel_sf_y)\r\n r_data.vel_roc_xy[tt][ic, i_bin] = cf.get_roc_xy_values(r_data.vel_roc[tt][ic, i_bin])\r\n\r\n # sets the roc auc values\r\n if is_resampled:\r\n # case is the resampled frequencies\r\n r_data.vel_roc_auc[tt][ic, i_bin] = vel_auc_roc\r\n else:\r\n # other cases\r\n r_data.vel_roc_auc[tt][ic, i_bin] = cf.get_roc_auc_value(r_data.vel_roc[tt][ic, i_bin])\r\n\r\n # calculates the speed roc curves values for each speed bin\r\n if not r_data.pn_comp:\r\n ii_s = ~np.isnan(spd_sf[:, 0, ic])\r\n for i_bin in range(n_bin_spd):\r\n calc_roc = True\r\n if i_bin == r_data.i_bin_spd:\r\n # spd_sf_x, spd_sf_y = resample_spike_freq(data, r_data, rr, [i_rr, i_bin, ic])\r\n is_resampled = True\r\n spd_sf_x, spd_sf_y, spd_auc_roc, spd_auc_ci = \\\r\n resample_spike_freq(pool, spd_sf[ii_s, i_bin, ic], c_lvl)\r\n else:\r\n is_resampled = False\r\n spd_sf_x, spd_sf_y = spd_sf[ii_s, r_data.i_bin_spd, ic], spd_sf[ii_s, i_bin, ic]\r\n\r\n # calculates the roc curves/coordinates from the spiking frequencies\r\n r_data.spd_roc[tt][ic, i_bin] = cf.calc_roc_curves(None, None, x_grp=spd_sf_x, y_grp=spd_sf_y)\r\n r_data.spd_roc_xy[tt][ic, i_bin] = cf.get_roc_xy_values(r_data.spd_roc[tt][ic, i_bin])\r\n\r\n # sets the roc auc values\r\n if is_resampled:\r\n # case is the resampled frequencies\r\n r_data.spd_roc_auc[tt][ic, i_bin] = spd_auc_roc\r\n else:\r\n # other cases\r\n r_data.spd_roc_auc[tt][ic, i_bin] = cf.get_roc_auc_value(r_data.spd_roc[tt][ic, i_bin])\r\n\r\n # calculates the confidence intervals for the current (only if bootstrapping count has changed or\r\n # the confidence intervals has not already been calculated)\r\n if calc_ci is None:\r\n if 'auc_stype' in calc_para:\r\n # updates the auc statistics calculation type\r\n r_data.kine_auc_stats_type = dcopy(calc_para['auc_stype'])\r\n\r\n # determine if the auc confidence intervals need calculation\r\n is_boot = int(calc_para['auc_stype'] == 'Bootstrapping')\r\n if is_boot:\r\n # if bootstrapping, then determine if the\r\n if r_data.n_boot_kine_ci != calc_para['n_boot']:\r\n # if the count has changed, flag the confidence intervals needs updating\r\n r_data.n_boot_kine_ci, calc_ci = dcopy(calc_para['n_boot']), True\r\n else:\r\n # otherwise, recalculate the confidence intervals if they have not been set\r\n calc_ci = np.any(r_data.vel_ci_lo[tt][ic, :, 1] < 0)\r\n else:\r\n # otherwise, recalculate the confidence intervals if they have not been set\r\n calc_ci = np.any(r_data.vel_ci_lo[tt][ic, :, 0] < 0)\r\n\r\n # calculates the confidence intervals (if required)\r\n if calc_ci:\r\n # calculates the velocity confidence intervals\r\n auc_type, n_boot = calc_para['auc_stype'], calc_para['n_boot']\r\n conf_int_vel = self.calc_roc_conf_intervals(pool, 
r_data.vel_roc[tt][ic, :],\r\n auc_type, n_boot, c_lvl)\r\n\r\n # resets the resampled confidence interval values\r\n if not r_data.pn_comp and init_data:\r\n conf_int_vel[r_data.i_bin_vel[0], :] = vel_auc_ci[0]\r\n conf_int_vel[r_data.i_bin_vel[1], :] = vel_auc_ci[1]\r\n\r\n # sets the upper and lower velocity confidence intervals\r\n r_data.vel_ci_lo[tt][ic, :, is_boot] = conf_int_vel[:, 0]\r\n r_data.vel_ci_hi[tt][ic, :, is_boot] = conf_int_vel[:, 1]\r\n\r\n # calculates the speed confidence intervals\r\n if not r_data.pn_comp:\r\n # calculates the speed confidence intervals\r\n conf_int_spd = self.calc_roc_conf_intervals(pool, r_data.spd_roc[tt][ic, :],\r\n auc_type, n_boot, c_lvl)\r\n\r\n # resets the resampled confidence interval values\r\n if init_data:\r\n conf_int_spd[r_data.i_bin_spd] = spd_auc_ci\r\n\r\n # sets the upper and lower speed confidence intervals\r\n r_data.spd_ci_lo[tt][ic, :, is_boot] = conf_int_spd[:, 0]\r\n r_data.spd_ci_hi[tt][ic, :, is_boot] = conf_int_spd[:, 1]\r\n\r\n def calc_roc_conf_intervals(self, pool, roc, phase_stype, n_boot, c_lvl):\r\n '''\r\n\r\n :param r_data:\r\n :return:\r\n '''\r\n\r\n # sets the parameters for the multi-processing pool\r\n p_data = []\r\n for i_cell in range(len(roc)):\r\n p_data.append([roc[i_cell], phase_stype, n_boot, c_lvl])\r\n\r\n # returns the rotation data class object\r\n return np.array(pool.map(cf.calc_roc_conf_intervals, p_data))\r\n\r\n def calc_kinematic_roc_significance(self, data, calc_para, g_para):\r\n '''\r\n\r\n :param data:\r\n :param calc_para:\r\n :param g_para:\r\n :return:\r\n '''\r\n\r\n # initialisations and other array indexing\r\n r_data = data.rotation\r\n is_boot, r_obj = int(calc_para['auc_stype'] == 'Bootstrapping'), r_data.r_obj_kine\r\n n_filt = r_obj.n_filt\r\n\r\n # sets the comparison bin for the velocity/speed arrays\r\n for use_vel in range(2):\r\n #\r\n if use_vel:\r\n i_bin = np.array([r_data.i_bin_vel])\r\n roc_auc, ci_lo, ci_hi = dcopy(r_data.vel_roc_auc), dcopy(r_data.vel_ci_lo), dcopy(r_data.vel_ci_hi)\r\n\r\n else:\r\n i_bin = np.array([r_data.i_bin_spd])\r\n roc_auc, ci_lo, ci_hi = dcopy(r_data.spd_roc_auc), dcopy(r_data.spd_ci_lo), dcopy(r_data.spd_ci_hi)\r\n\r\n # if the significance array is not set or the correct size, then reset the array dimensions\r\n is_sig = np.empty((n_filt,2), dtype=object)\r\n\r\n # determines the indices of the cell in the overall array\r\n t_type_base = list(r_data.spd_sf_rs.keys()) if r_data.is_equal_time else list(r_data.spd_sf.keys())\r\n for i_filt in range(n_filt):\r\n # determines the match condition with the currently calculated roc values\r\n tt = r_obj.rot_filt_tot[i_filt]['t_type'][0]\r\n i_match = t_type_base.index(tt)\r\n tt_nw = t_type_base[i_match]\r\n\r\n # determines which errorbars are significant\r\n ci_lo_tmp, ci_hi_tmp = ci_lo[tt][:, :, is_boot], ci_hi[tt][:, :, is_boot]\r\n is_sig[i_filt, is_boot] = np.logical_or((roc_auc[tt_nw] - ci_lo_tmp) > 0.5,\r\n (roc_auc[tt_nw] + ci_hi_tmp) < 0.5)\r\n is_sig[i_filt, is_boot][:, i_bin] = False\r\n\r\n # updates the significance arrays (based on whether calculating for speed or velocity)\r\n if use_vel:\r\n r_data.vel_roc_sig = is_sig\r\n else:\r\n r_data.spd_roc_sig = is_sig\r\n\r\n ###################################################\r\n #### MISCELLANEOUS FUNCTION CALCULATIONS ####\r\n ###################################################\r\n\r\n def setup_spiking_freq_dataframe(self, data, calc_para):\r\n '''\r\n\r\n :param data:\r\n :param calc_para:\r\n :return:\r\n '''\r\n\r\n def 
get_mlt(t_type, i_dir):\r\n '''\r\n\r\n :param t_type:\r\n :param i_dir:\r\n :return:\r\n '''\r\n\r\n if t_type == 'MotorDrifting':\r\n # return [-1, 1][i_dir]\r\n return [1, -1][i_dir]\r\n else:\r\n return [1, -1][i_dir]\r\n\r\n def is_valid_cell_type(ch_region):\r\n '''\r\n\r\n :param ch_region:\r\n :return:\r\n '''\r\n\r\n # the valid region types\r\n valid_type = ['RSPd', 'RSPg', 'V1', 'Hip', 'SUB']\r\n\r\n # returns the cells which have a valid region type\r\n return np.array([ch_reg in valid_type for ch_reg in ch_region])\r\n\r\n def setup_expt_dataframe(data, calc_para, i_expt_rot, i_ex, i_ex_c, t_phase):\r\n '''\r\n\r\n :param data:\r\n :param calc_para:\r\n :param i_expt_rot:\r\n :param i_ex:\r\n :param t_phase:\r\n :return:\r\n '''\r\n\r\n # dictionaries and lambda function declarations\r\n d_str = {-1: 'CW', 1: 'CCW'}\r\n stack_arr = lambda y_arr, n_trial: np.hstack([yy * np.ones(n_trial) for yy in y_arr]).reshape(-1, 1)\r\n ind_fcn = lambda i_dir, cond: (1 - i_dir) if cond == 'MotorDrifting' else i_dir\r\n\r\n # DETERMINE VALID CELLS HERE!\r\n j_ex = i_expt_rot[i_ex]\r\n w, c = np.pi / t_phase, data._cluster[j_ex]\r\n is_ok = is_valid_cell_type(c['chRegion'])\r\n\r\n # other initialisations\r\n mlt = [-1, 1]\r\n cond_key = {'Black': 'Vestibular', 'Uniform': 'Visual + Vestibular', 'MotorDrifting': 'Visual',\r\n 'Mismatch1': 'Mismatch Opposite', 'Mismatch2': 'Mismatch Same'}\r\n r_filt, exp_name = calc_para['rot_filt'], cf.extract_file_name(c['expFile'])\r\n t_ofs0, n_cond, n_cell = 0., len(r_filt['t_type']), c['nC']\r\n t_phs, dt_ofs = calc_para['bin_sz'] / 1000., (calc_para['bin_sz'] - calc_para['t_over']) / 1000.\r\n\r\n # memory allocation\r\n n_bin_tot = int(np.floor((t_phase - dt_ofs) / dt_ofs)) + 1\r\n A = np.zeros((n_bin_tot, 1))\r\n p_bin, v_bin = dcopy(A), dcopy(A)\r\n\r\n # calculates the spiking frequencies for all cells over the duration configuration\r\n for i_bin_tot in range(n_bin_tot):\r\n # # check to see if the current time offset will allow for a feasible number of future time bins (i.e.,\r\n # # the current time bin + the future time bins must fit into the phase duration). 
if not then exit loop\r\n # if (t_ofs0 + t_phs) > t_phase:\r\n # break\r\n\r\n # retrieves the filtered time spiking data for the current phase/duration configuration\r\n r_obj = RotationFilteredData(data, r_filt, None, exp_name, False, 'Whole Experiment', False,\r\n t_phase=t_phs, t_ofs=t_ofs0)\r\n\r\n # calculates the average spiking frequency data for the current experiment\r\n sp_f0, _ = cf.calc_phase_spike_freq(r_obj)\r\n\r\n # memory allocation (first iteration only)\r\n if i_bin_tot == 0:\r\n n_cell = np.shape(sp_f0[0])[0]\r\n wvm_para = r_obj.wvm_para\r\n\r\n y_dir = [x[0]['yDir'] for x in wvm_para]\r\n n_trial = [sum(~np.isnan(y)) for y in y_dir]\r\n\r\n B = [np.empty(nt * n_bin_tot, dtype=object) for nt in n_trial]\r\n sf, s_dir0 = dcopy(B), dcopy(B)\r\n\r\n # retrieves the CW/CCW phases (removes BL)\r\n sp_f_tmp = [sp_f[:, :, 1:] for sp_f in dcopy(sp_f0)]\r\n\r\n # if the first bin, calculate the average speed over the bin's duration\r\n w_vals0 = rot.calc_waveform_values(90, w, t_ofs0)\r\n w_vals1 = rot.calc_waveform_values(90, w, t_ofs0 + t_phs)\r\n p_bin[i_bin_tot] = 0.5 * (w_vals1[0] + w_vals0[0]) + 90\r\n v_bin[i_bin_tot] = 0.5 * (w_vals1[1] + w_vals0[1])\r\n\r\n # splits/stores the spiking frequency by the condition\r\n for i_cond in range(n_cond):\r\n i_trial = 0\r\n for i in range(len(y_dir[i_cond])):\r\n # if there was an error with the trial, then continue\r\n if np.isnan(y_dir[i_cond][i]):\r\n continue\r\n\r\n # sets the spiking frequency values\r\n ind_sf = i_bin_tot * n_trial[i_cond] + i_trial\r\n sf[i_cond][ind_sf] = sp_f_tmp[i_cond][:, i, :]\r\n\r\n # sets the direction string\r\n i_dir0 = y_dir[i_cond][i]\r\n s_dir0[i_cond][ind_sf] = d_str[i_dir0]\r\n\r\n # increments the trial counter\r\n i_trial += 1\r\n\r\n # increments the time offset by the time-overlap\r\n t_ofs0 += dt_ofs\r\n\r\n # initialisations\r\n df_tot, tt = [], r_filt['t_type']\r\n g_str = {'Nar': 'Narrow', 'Wid': 'Wide', 'N/A': 'N/A'}\r\n\r\n # sets the trial condition type column\r\n tt_col = np.hstack([cf.flat_list([[cond_key[_tt]] * (2 * _nt * n_bin_tot)])\r\n for _tt, _nt in zip(tt, n_trial)]).reshape(-1, 1)\r\n bin_col = np.vstack([repmat(np.vstack([(i + 1) * np.ones((_nt, 1), dtype=int)\r\n for i in range(n_bin_tot)]), 2, 1) for _nt in n_trial])\r\n trial_col = np.vstack([repmat(np.arange(_nt).reshape(-1, 1) + 1, 2 * n_bin_tot, 1) for _nt in n_trial])\r\n\r\n for i_cell in range(n_cell):\r\n # combines the information for the current cell\r\n sf_cell = np.vstack(\r\n [np.vstack(\r\n [np.hstack((stack_arr(p_bin, nt) if (mlt[i_dir] > 0) else (180 - stack_arr(p_bin, nt)),\r\n mlt[i_dir] * stack_arr(v_bin, nt), s_dir0[i_cond].reshape(-1, 1),\r\n np.array([_sf[i_cell, ind_fcn(i_dir, tt[i_cond])] for _sf in sf[i_cond]]).reshape(\r\n -1, 1)))\r\n for i_dir in range(2)])\r\n for i_cond, nt in enumerate(n_trial)]\r\n )\r\n\r\n # # combines the information for the current cell\r\n # sf_cell = np.vstack(\r\n # [np.vstack(\r\n # [np.hstack((stack_arr(p_bin, nt) if (get_mlt(tt[i_cond], i_dir) > 0) else (180 - stack_arr(p_bin, nt)),\r\n # get_mlt(tt[i_cond], i_dir) * stack_arr(v_bin, nt),\r\n # np.array([_sf[i_cell, i_dir] for _sf in sf[i_cond]]).reshape(-1, 1)))\r\n # for i_dir in range(2)])\r\n # for i_cond, nt in enumerate(n_trial)]\r\n # )\r\n\r\n # sets the other column details\r\n n_row = np.size(sf_cell, axis=0)\r\n reg_col = np.array([c['chRegion'][i_cell]] * n_row).reshape(-1, 1)\r\n layer_col = np.array([c['chLayer'][i_cell]] * n_row).reshape(-1, 1)\r\n\r\n # sets the cell indices\r\n ind_col = 
(i_cell + 1) * np.ones((n_row, 1), dtype=int)\r\n\r\n # appends all the data for the given cell\r\n if data.classify.class_set:\r\n # sets the cell classification type ('N/A' if 'SC'/'N/A', otherwise use the classification string)\r\n g_str_nw = g_str[data.classify.grp_str[i_expt_rot[i_ex]][i_cell]] if is_ok[i_cell] else 'N/A'\r\n\r\n # adds in the cell group type (if calculated)\r\n grp_col = np.array([g_str_nw] * n_row).reshape(-1, 1)\r\n df_tot.append(\r\n np.hstack((ind_col, bin_col, trial_col, sf_cell, tt_col, reg_col, layer_col, grp_col)))\r\n\r\n else:\r\n # otherwise, use the existing information only\r\n df_tot.append(np.hstack((ind_col, bin_col, trial_col, sf_cell, tt_col, reg_col, layer_col)))\r\n\r\n # combines all data from each cell (along with the experiment index) into a final np array\r\n exp_col = (i_ex_c + 1) * np.ones((n_row * n_cell, 1), dtype=int)\r\n return np.hstack((exp_col, np.vstack(df_tot)))\r\n\r\n # determines the valid rotation experiments\r\n i_expt_rot = np.where(cf.det_valid_rotation_expt(data))[0]\r\n\r\n # memory allocation and initialisations\r\n n_ex = len(i_expt_rot)\r\n sf_data = np.empty(n_ex, dtype=object)\r\n w_prog, d_data = self.work_progress, data.spikedf\r\n\r\n # retrieves the rotation filter\r\n r_filt = calc_para['rot_filt']\r\n if r_filt is None:\r\n # if not set, then initialise\r\n r_filt = cf.init_rotation_filter_data(False)\r\n\r\n # returns the overall rotation filter class object\r\n r_obj = RotationFilteredData(data, r_filt, None, None, True, 'Whole Experiment', False)\r\n t_phase, t_type, i_ex_c = r_obj.t_phase[0][0], calc_para['rot_filt']['t_type'], 0\r\n\r\n # creates the spiking frequency dataframe for the each experiment\r\n for i_ex in range(n_ex):\r\n # updates the progress bar\r\n w_str = 'Combining Spike Freq. Data (Expt #{0}/{1})'.format(i_ex + 1, n_ex)\r\n w_prog.emit(w_str, 100. 
* (i_ex / n_ex))\r\n\r\n # determines if all trial types exist within the current experiment\r\n tt_expt = list(data._cluster[i_expt_rot[i_ex]]['rotInfo']['trial_type'])\r\n if np.all([tt in tt_expt for tt in t_type]):\r\n # if so, then set the data for the current experiment\r\n sf_data[i_ex] = setup_expt_dataframe(data, calc_para, i_expt_rot, i_ex, i_ex_c, t_phase)\r\n if sf_data[i_ex] is not None:\r\n i_ex_c += 1\r\n\r\n ######################################\r\n #### HOUSEKEEPING EXERCISES ####\r\n ######################################\r\n\r\n # updates the progressbar\r\n w_prog.emit('Setting Final Dataframe...', 100.)\r\n\r\n # sets the calculation parameters\r\n d_data.rot_filt = dcopy(calc_para['rot_filt'])\r\n d_data.bin_sz = calc_para['bin_sz']\r\n d_data.t_over = calc_para['t_over']\r\n\r\n # creates the final dataframe\r\n c_str = ['Expt #', 'Cell #', 'Bin #', 'Trial #', 'Position (deg)', 'Speed (deg/s)', 'Initial Dir'] + \\\r\n ['Firing Rate', 'Trial Condition', 'Region', 'Layer'] + \\\r\n (['Cell Type'] if data.classify.class_set else [])\r\n sf_data_valid = np.vstack([x for x in sf_data if x is not None])\r\n d_data.sf_df = pd.DataFrame(sf_data_valid, columns=c_str)\r\n\r\n def calc_auto_ccgram_fft(self, data, calc_para):\r\n '''\r\n\r\n :param data:\r\n :param calc_para:\r\n :return:\r\n '''\r\n\r\n # parameters\r\n n_count = 0\r\n t_bin = calc_para['t_bin']\r\n n_bin = int(t_bin / calc_para['bin_sz']) # the number of time bins\r\n f_theta = [5, 11] # theta frequency range (from Yartsev 2011)\r\n freq_rng = [0, 50] # theta index comparison frequency range (from Yartsev 2011)\r\n ratio_tol = 5 # threshold ratio (from Yartsev 2011)\r\n n_pad = 2 ** 16\r\n\r\n # sets up the psd frequency\r\n df = (2 * t_bin) / n_pad\r\n f = np.arange(0, 2 * t_bin, df) / calc_para['bin_sz']\r\n i_theta_f0 = np.logical_and(f >= f_theta[0], f <= f_theta[1])\r\n i_theta_nf, i_theta_f = np.where(np.logical_and(~i_theta_f0, f <= freq_rng[1]))[0], np.where(i_theta_f0)[0]\r\n\r\n # calculates the number of bins for 1Hz within the freq. 
range\r\n dn = int(np.floor(1 / df))\r\n\r\n # # sets the array index ranges\r\n # i_theta = np.arange(f_theta[0], f_theta[1] + 1)\r\n # i_freq_rng = np.arange(freq_rng[0], freq_rng[1] + 1)\r\n\r\n # sets up the boolean array for the non-zero lag bins (used to set the zero-lag bin value below)\r\n is_ok = np.ones(2 * n_bin - 1, dtype=bool)\r\n is_ok[n_bin - 1] = False\r\n\r\n # memory allocation and other initialisations\r\n is_free = np.logical_not(cf.det_valid_rotation_expt(data))\r\n a = np.empty(np.sum(is_free), dtype=object)\r\n cc_gram, p_fft, th_index = dcopy(a), dcopy(a), dcopy(a)\r\n w_prog, th_data = self.work_progress, data.theta_index\r\n exp_name = [cf.extract_file_name(c['expFile']) for c in np.array(data._cluster)[is_free]]\r\n\r\n # retrieves the time spike arrays\r\n t_spike = [c['tSpike'] for c, i in zip(data._cluster, is_free) if i]\r\n n_cell_tot = np.sum([len(x) for x in t_spike])\r\n\r\n # for each free experiment, calculate the theta index for each cell\r\n n_expt = len(t_spike)\r\n for i_expt in range(n_expt):\r\n # memory allocation for the current expt\r\n n_cell = len(t_spike[i_expt])\r\n cc_gram[i_expt] = np.zeros((n_cell, 2 * n_bin - 1))\r\n p_fft[i_expt] = np.zeros((n_cell, int(n_pad / 2)))\r\n th_index[i_expt] = np.zeros((n_cell, 2))\r\n\r\n # calculates the theta index for each cell in the experiment\r\n for i_cell in range(n_cell):\r\n # updates the progress bar\r\n n_count += 1\r\n w_str = 'Theta Index (Expt={0}/{1}, Cell={2}/{3})'.format(i_expt + 1, n_expt, i_cell + 1, n_cell)\r\n w_prog.emit(w_str, 100. * (n_count / (n_cell_tot + 1)))\r\n\r\n # calculates the new autocorrelogram for the current cell\r\n t_sp = t_spike[i_expt][i_cell]\r\n cc_gram[i_expt][i_cell, :], _ = cfcn.calc_ccgram(t_sp, t_sp, t_bin, bin_size=calc_para['bin_sz'])\r\n\r\n # sets the zero-lag bin value to be the max non zero-lag cc-gram bin value\r\n cc_gram[i_expt][i_cell, n_bin - 1] = np.max(cc_gram[i_expt][i_cell, is_ok])\r\n\r\n # calculates the PSD estimate of the cc-gram\r\n cc_gram_calc = cc_gram[i_expt][i_cell, :]\r\n if calc_para['remove_bl']:\r\n cc_gram_calc -= np.mean(cc_gram[i_expt][i_cell, :])\r\n\r\n if calc_para['pow_type'] == 'FFT-Squared':\r\n # calculates using the square of the FFT\r\n if calc_para['win_type'] == 'none':\r\n # if no signal windowing, then scale the signal by its length\r\n y_sig = cc_gram_calc / len(cc_gram_calc)\r\n else:\r\n # otherwise, set the windowing function based on the specified type\r\n if calc_para['win_type'] == 'boxcar':\r\n y_win = boxcar(len(cc_gram_calc))\r\n else:\r\n y_win = hamming(len(cc_gram_calc))\r\n\r\n # applies the windowing function\r\n y_sig = np.multiply(cc_gram_calc / len(cc_gram_calc), y_win)\r\n\r\n # pads zero to the end of the function (increases resolution for the PSD)\r\n y_sig_pad = np.pad(y_sig, (0, n_pad - (2 * n_bin - 1)), 'constant')\r\n\r\n # calculates the fft of the signal and calculates the power spectrum\r\n y_fft = np.fft.fft(y_sig_pad)\r\n p_fft0 = np.abs(y_fft)\r\n\r\n # rectangular smoothing of the PSD (2Hz in length)\r\n p_fft_mn0 = pd.DataFrame(p_fft0).rolling(2 * dn, min_periods=1, center=True).mean()\r\n p_fft_mn = np.array(p_fft_mn0.ix[:, 0])\r\n\r\n # taking positive frequency range of PSD for visualisation\r\n p_fft[i_expt][i_cell, :] = p_fft_mn[:int(n_pad / 2)]\r\n\r\n else:\r\n # calculates using the periodgram method\r\n _, p_fft[i_expt][i_cell, :] = periodogram(cc_gram_calc, window=calc_para['win_type'])\r\n\r\n # calculates the location of the max peak within the theta range\r\n i_fft_mx 
= find_peaks(p_fft0[i_theta_f])[0]\r\n if len(i_fft_mx):\r\n i_mx = np.argmax(p_fft0[i_theta_f][i_fft_mx])\r\n if_mx = i_theta_f[i_fft_mx[i_mx]]\r\n\r\n # calculates the theta index numerator/denominator\r\n th_index_num = np.mean(p_fft0[(if_mx-dn):(if_mx+dn)]) # mean power for +/- 1Hz surrounding peak within theta range\r\n th_index_den = np.mean(p_fft0[i_theta_nf]) # mean power spectrum outside of theta range\r\n\r\n else:\r\n # if there are no peaks, then ensure the theta index value is zero\r\n th_index_num, th_index_den = 0, 1\r\n\r\n # calculates the theta index of the signal\r\n # this is calculate as the ratio of the mean of the points surrounding the max power spectrum value\r\n # between the 5-11Hz freq range divided by the mean power spectrum values btwn the 1-125Hz freq range\r\n th_index[i_expt][i_cell, 0] = th_index_num / th_index_den\r\n th_index[i_expt][i_cell, 1] = th_index[i_expt][i_cell, 0] > ratio_tol\r\n\r\n #######################################\r\n #### HOUSE-KEEPING EXERCISES ####\r\n #######################################\r\n\r\n # sets the final values into the class object\r\n th_data.cc_gram = cc_gram\r\n th_data.p_fft = p_fft\r\n th_data.th_index = th_index\r\n th_data.f = f\r\n\r\n # sets the other fields\r\n th_data.is_set = True\r\n th_data.exp_name = exp_name\r\n th_data.t_bin = calc_para['t_bin']\r\n th_data.bin_sz = calc_para['bin_sz']\r\n th_data.vel_bin = calc_para['vel_bin']\r\n th_data.win_type = calc_para['win_type']\r\n th_data.remove_bl = calc_para['remove_bl']\r\n\r\n ###########################################\r\n #### OTHER CALCULATION FUNCTIONS ####\r\n ###########################################\r\n\r\n def check_combined_conditions(self, calc_para, plot_para):\r\n '''\r\n\r\n :param calc_para:\r\n :param plot_para:\r\n :return:\r\n '''\r\n\r\n if plot_para['rot_filt'] is not None:\r\n if 'MotorDrifting' in plot_para['rot_filt']['t_type']:\r\n # if the mapping file is not correct, then output an error to screen\r\n e_str = 'MotorDrifting is not a valid filter option when running this function.\\n\\n' \\\r\n 'De-select this filter option before re-running this function.'\r\n self.work_error.emit(e_str, 'Invalid Filter Options')\r\n\r\n # returns a false value\r\n return False\r\n\r\n # if everything is correct, then return a true value\r\n return True\r\n\r\n def check_altered_para(self, data, calc_para, plot_para, g_para, chk_type, other_para=None):\r\n '''\r\n\r\n :param calc_para:\r\n :param g_para:\r\n :param chk_type:\r\n :return:\r\n '''\r\n\r\n def check_class_para_equal(d_data, attr, chk_value, def_val=False):\r\n '''\r\n\r\n :param d_data:\r\n :param attr:\r\n :param chk_value:\r\n :return:\r\n '''\r\n\r\n if hasattr(d_data, attr):\r\n return getattr(d_data, attr) == chk_value\r\n else:\r\n return def_val\r\n\r\n # initialisations\r\n r_data, ff_corr = data.rotation, data.comp.ff_corr if hasattr(data.comp, 'ff_corr') else None\r\n t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)\r\n\r\n # loops through each of the check types determining if any parameters changed\r\n for ct in chk_type:\r\n # initialises the change flag\r\n is_change = data.force_calc\r\n\r\n if ct == 'condition':\r\n # case is the roc condition parameters\r\n\r\n # retrieves the rotation phase offset time/duration\r\n if t_ofs is not None:\r\n # if the values are not none, and do not match previous values, then reset the stored roc array\r\n if (r_data.t_ofs_rot != t_ofs) or (r_data.t_phase_rot != t_phase):\r\n r_data.t_ofs_rot, r_data.t_phase_rot, is_change = 
t_ofs, t_phase, True\r\n elif 'use_full_rot' in calc_para:\r\n # if using the full rotation, and the previous calculations were made using non-full rotation\r\n # phases, the reset the stored roc array\r\n if (r_data.t_ofs_rot > 0):\r\n r_data.t_ofs_rot, r_data.t_phase_rot, is_change = -1, -1, True\r\n\r\n # if there was a change, then re-initialise the roc condition fields\r\n if is_change:\r\n # memory allocation (if the conditions have not been set)\r\n r_data.phase_roc, r_data.phase_roc_auc, r_data.phase_roc_xy = {}, {}, {}\r\n r_data.phase_ci_lo, self.phase_ci_hi, self.phase_gtype = None, None, None\r\n r_data.phase_auc_sig, r_data.phase_grp_stats_type = None, None\r\n\r\n r_data.cond_roc, r_data.cond_roc_xy, r_data.cond_roc_auc = {}, {}, {}\r\n r_data.cond_gtype, r_data.cond_auc_sig, r_data.cond_i_expt, r_data.cond_cl_id = {}, {}, {}, {}\r\n r_data.cond_ci_lo, r_data.cond_ci_hi, r_data.r_obj_cond = {}, {}, {}\r\n r_data.phase_gtype, r_data.phase_auc_sig, r_data.phase_roc = None, None, None\r\n\r\n r_data.part_roc, r_data.part_roc_xy, r_data.part_roc_auc = {}, {}, {}\r\n\r\n elif ct == 'clust':\r\n # case is the fixed/free cell clustering calculations\r\n i_expt = cf.det_comp_dataset_index(data.comp.data, calc_para['calc_comp'])\r\n c_data = data.comp.data[i_expt]\r\n\r\n # if the calculations have not been made, then exit the function\r\n if not c_data.is_set:\r\n continue\r\n\r\n # determines if the global parameters have changed\r\n is_equal = [\r\n check_class_para_equal(c_data, 'd_max', calc_para['d_max']),\r\n check_class_para_equal(c_data, 'r_max', calc_para['r_max']),\r\n check_class_para_equal(c_data, 'sig_corr_min', calc_para['sig_corr_min']),\r\n check_class_para_equal(c_data, 'isi_corr_min', calc_para['isi_corr_min']),\r\n check_class_para_equal(c_data, 'sig_diff_max', calc_para['sig_diff_max']),\r\n check_class_para_equal(c_data, 'sig_feat_min', calc_para['sig_feat_min']),\r\n check_class_para_equal(c_data, 'w_sig_feat', calc_para['w_sig_feat']),\r\n check_class_para_equal(c_data, 'w_sig_comp', calc_para['w_sig_comp']),\r\n check_class_para_equal(c_data, 'w_isi', calc_para['w_isi']),\r\n ]\r\n\r\n # determines if there was a change in parameters (and hence a recalculation required)\r\n c_data.is_set = np.all(is_equal)\r\n\r\n elif ct == 'ff_corr':\r\n\r\n # case is the fixed/freely moving spiking frequency correlation analysis\r\n is_equal = [\r\n not data.force_calc,\r\n check_class_para_equal(ff_corr, 'vel_bin', float(calc_para['vel_bin'])),\r\n check_class_para_equal(ff_corr, 'n_shuffle_corr', float(calc_para['n_shuffle'])),\r\n check_class_para_equal(ff_corr, 'split_vel', int(calc_para['split_vel'])),\r\n ]\r\n\r\n # determines if recalculation is required\r\n ff_corr.is_set = np.all(is_equal)\r\n if not ff_corr.is_set:\r\n data.force_calc = True\r\n\r\n elif ct == 'eye_track':\r\n\r\n # case is the eye tracking data\r\n et_data = data.externd.eye_track\r\n\r\n # if the calculations have not been made, then exit the function\r\n if not et_data.is_set:\r\n return\r\n\r\n # case is the fixed/freely moving spiking frequency correlation analysis\r\n is_equal = [\r\n check_class_para_equal(et_data, 'dp_max', float(calc_para['dp_max'])),\r\n check_class_para_equal(et_data, 'n_sd', float(calc_para['n_sd'])),\r\n check_class_para_equal(et_data, 'n_pre', int(calc_para['n_pre'])),\r\n check_class_para_equal(et_data, 'n_post', int(calc_para['n_post'])),\r\n ]\r\n\r\n # determines if recalculation is required\r\n et_data.is_set = np.all(is_equal)\r\n if not et_data.is_set:\r\n 
et_data.t_evnt, et_data.y_evnt, et_data.sp_evnt = [], [], []\r\n et_data.y_corr, et_data.t_sp_h = [], []\r\n\r\n elif ct == 'phase':\r\n # case is the phase ROC calculations\r\n pass\r\n\r\n elif ct == 'visual':\r\n # retrieves the visual phase time offset/duration\r\n t_ofs_vis, t_phase_vis = cfcn.get_rot_phase_offsets(calc_para, True)\r\n\r\n # if the values are not none, and do not match previous values, then reset the stored roc array\r\n if (r_data.t_ofs_vis != t_ofs_vis) or (r_data.t_phase_vis != t_phase_vis):\r\n r_data.t_ofs_vis, r_data.t_phase_vis, is_change = t_ofs_vis, t_phase_vis, True\r\n\r\n # if there was a change, then re-initialise the fields\r\n if is_change:\r\n r_data.phase_roc_ud, r_data.phase_roc_auc_ud, r_data.phase_roc_xy_ud = None, None, None\r\n\r\n elif ct == 'vel':\r\n # case is the kinematic calculations\r\n\r\n # initialisations\r\n roc_calc = other_para\r\n vel_bin = float(calc_para['vel_bin']) if ('vel_bin' in calc_para) else float(plot_para['vel_bin'])\r\n\r\n # checks to see if the dependent speed has changed\r\n if 'spd_x_rng' in calc_para:\r\n # case is a single speed bin range comparison\r\n\r\n # if the dependent speed range has changed then reset the roc curve calculations\r\n if r_data.comp_spd != calc_para['spd_x_rng']:\r\n is_change = True\r\n\r\n if r_data.pn_comp is True:\r\n r_data.pn_comp, is_change = False, True\r\n\r\n # updates the speed comparison flag\r\n r_data.comp_spd = dcopy(calc_para['spd_x_rng'])\r\n\r\n else:\r\n # case is the positive/negative speed comparison\r\n\r\n # if the positive/negative comparison flag is not set to true, then reset the roc curve calculations\r\n if r_data.pn_comp is False:\r\n r_data.pn_comp, is_change = True, True\r\n\r\n # if using equal time bins, then check to see if the sample size has changed (if so then recalculate)\r\n if calc_para['equal_time']:\r\n if r_data.n_rs != calc_para['n_sample']:\r\n r_data.vel_sf_rs, r_data.spd_sf_rs = None, None\r\n r_data.n_rs, is_change = dcopy(calc_para['n_sample']), True\r\n\r\n # if the velocity bin size has changed or isn't initialised, then reset velocity roc values\r\n if data.force_calc:\r\n r_data.vel_sf_rs, r_data.spd_sf_rs = None, None\r\n r_data.vel_sf, r_data.spd_sf = None, None\r\n\r\n if roc_calc:\r\n if (vel_bin != r_data.vel_bin) or (calc_para['freq_type'] != r_data.freq_type):\r\n r_data.vel_sf_rs, r_data.spd_sf_rs = None, None\r\n r_data.vel_sf, r_data.spd_sf = None, None\r\n r_data.vel_bin, is_change = vel_bin, True\r\n r_data.freq_type = dcopy(calc_para['freq_type'])\r\n\r\n if r_data.is_equal_time != calc_para['equal_time']:\r\n is_change = True\r\n\r\n # if there was a change, then re-initialise the roc phase fields\r\n if is_change:\r\n r_data.vel_roc = None\r\n\r\n else:\r\n if (vel_bin != r_data.vel_bin):\r\n r_data.vel_sf_rs, r_data.spd_sf_rs = None, None\r\n r_data.vel_sf, r_data.spd_sf = None, None\r\n\r\n elif ct == 'vel_sf_fix':\r\n # if the spiking frequency calculation field has not been set, then force an update\r\n if not hasattr(r_data, 'vel_shuffle_calc'):\r\n data.force_calc = True\r\n\r\n # case is the kinematic spiking frequency calculations\r\n is_equal = [\r\n check_class_para_equal(r_data, 'vel_sf_nsm', calc_para['n_smooth'] * calc_para['is_smooth']),\r\n check_class_para_equal(r_data, 'vel_bin_corr', float(calc_para['vel_bin'])),\r\n check_class_para_equal(r_data, 'n_shuffle_corr', calc_para['n_shuffle']),\r\n check_class_para_equal(r_data, 'split_vel', calc_para['split_vel']),\r\n check_class_para_equal(r_data, 
'vel_sf_eqlt', calc_para['equal_time'])\r\n ]\r\n\r\n # if there was a change in any of the parameters, then reset the spiking frequency fields\r\n if not np.all(is_equal) or data.force_calc:\r\n r_data.vel_shuffle_calc, r_data.vel_sf_corr = False, None\r\n r_data.vel_sf, r_data.vel_sf_rs = None, None\r\n\r\n # determines if all trial conditions have been calculated (for calculation if not)\r\n if r_data.vel_shuffle_calc:\r\n t_type = list(r_data.vel_sf_mean.keys())\r\n r_data.vel_shuffle_calc = np.all([tt in t_type for tt in plot_para['rot_filt']['t_type']])\r\n\r\n elif ct == 'vel_sf_free':\r\n # case is the kinematic spiking frequency calculations\r\n is_equal = [\r\n check_class_para_equal(r_data, 'vel_bin_corr', float(calc_para['vel_bin'])),\r\n ]\r\n\r\n # if there was a change in any of the parameters, then reset the spiking frequency fields\r\n if not np.all(is_equal) or data.force_calc:\r\n r_data.vel_shuffle_calc, r_data.vel_sf_corr = False, None\r\n r_data.vel_sf, r_data.vel_sf_rs = None, None\r\n\r\n elif ct == 'lda':\r\n # case is the LDA calculations\r\n\r\n # if initialising the LDA then continue (as nothing has been set)\r\n d_data, lda_para, lda_tt = other_para, calc_para['lda_para'], cfcn.get_glob_para('lda_trial_type')\r\n if d_data.lda is None:\r\n continue\r\n\r\n # otherwise, determine if there are any changes in the parameters\r\n is_equal = [\r\n check_class_para_equal(d_data, 'solver', lda_para['solver_type']),\r\n check_class_para_equal(d_data, 'shrinkage', lda_para['use_shrinkage']),\r\n check_class_para_equal(d_data, 'norm', lda_para['is_norm']),\r\n check_class_para_equal(d_data, 'cellmin', lda_para['n_cell_min']),\r\n check_class_para_equal(d_data, 'trialmin', lda_para['n_trial_min']),\r\n check_class_para_equal(d_data, 'yaccmx', lda_para['y_acc_max']),\r\n check_class_para_equal(d_data, 'yaccmn', lda_para['y_acc_min'], def_val=True),\r\n check_class_para_equal(d_data, 'yaucmx', lda_para['y_auc_max'], def_val=True),\r\n check_class_para_equal(d_data, 'yaucmn', lda_para['y_auc_min'], def_val=True),\r\n check_class_para_equal(d_data, 'lda_trial_type', lda_tt, def_val=True),\r\n check_class_para_equal(d_data, 'fctype', lda_para['free_ctype'], def_val='All'),\r\n set(d_data.ttype) == set(lda_para['comp_cond']),\r\n ]\r\n\r\n #\r\n if d_data.type in ['Direction', 'Individual', 'TrialShuffle', 'Partial', 'IndivFilt', 'LDAWeight']:\r\n if 'use_full_rot' in calc_para:\r\n if d_data.usefull:\r\n is_equal += [\r\n check_class_para_equal(d_data, 'usefull', calc_para['use_full_rot']),\r\n ]\r\n else:\r\n if 't_ofs_rot' in calc_para:\r\n is_equal += [\r\n check_class_para_equal(d_data, 'tofs', calc_para['t_ofs_rot']),\r\n check_class_para_equal(d_data, 'tphase', calc_para['t_phase_rot']),\r\n ]\r\n else:\r\n is_equal += [\r\n check_class_para_equal(d_data, 'tofs', calc_para['t_ofs']),\r\n check_class_para_equal(d_data, 'tphase', calc_para['t_phase']),\r\n ]\r\n\r\n if d_data.type in ['Direction']:\r\n is_equal += [\r\n hasattr(d_data, 'z_corr')\r\n ]\r\n\r\n elif d_data.type in ['TrialShuffle']:\r\n is_equal += [\r\n check_class_para_equal(d_data, 'nshuffle', calc_para['n_shuffle']),\r\n ]\r\n\r\n elif d_data.type in ['IndivFilt']:\r\n is_equal += [\r\n check_class_para_equal(d_data, 'yaccmn', calc_para['y_acc_min']),\r\n check_class_para_equal(d_data, 'yaccmx', calc_para['y_acc_max']),\r\n ]\r\n\r\n elif d_data.type in ['Partial']:\r\n is_equal[3] = True\r\n\r\n is_equal += [\r\n check_class_para_equal(d_data, 'nshuffle', calc_para['n_shuffle']),\r\n ]\r\n\r\n elif 
d_data.type in ['Temporal']:\r\n is_equal += [\r\n check_class_para_equal(d_data, 'dt_phs', calc_para['dt_phase']),\r\n check_class_para_equal(d_data, 'dt_ofs', calc_para['dt_ofs']),\r\n check_class_para_equal(d_data, 'phs_const', calc_para['t_phase_const']),\r\n ]\r\n\r\n elif d_data.type in ['SpdAcc', 'SpdComp', 'SpdCompPool', 'SpdCompDir']:\r\n is_equal += [\r\n check_class_para_equal(d_data, 'vel_bin', calc_para['vel_bin']),\r\n check_class_para_equal(d_data, 'n_sample', calc_para['n_sample']),\r\n check_class_para_equal(d_data, 'equal_time', calc_para['equal_time']),\r\n ]\r\n\r\n if d_data.type in ['SpdComp', 'SpdCompPool']:\r\n is_equal += [\r\n check_class_para_equal(d_data, 'spd_xrng', calc_para['spd_x_rng']),\r\n ]\r\n\r\n if d_data.type in ['SpdCompPool']:\r\n is_equal += [\r\n check_class_para_equal(d_data, 'nshuffle', calc_para['n_shuffle']),\r\n check_class_para_equal(d_data, 'poolexpt', calc_para['pool_expt']),\r\n ]\r\n\r\n # if there was a change in any of the parameters, then flag recalculation is needed\r\n if not np.all(is_equal) or data.force_calc:\r\n d_data.lda = None\r\n\r\n elif ct == 'spikedf':\r\n # initialisations\r\n d_data = other_para\r\n\r\n # if the spike frequency dataframe has not been setup, then exit the function\r\n if not d_data.is_set:\r\n return\r\n\r\n # case is the spiking frequency dataframe\r\n is_equal = [\r\n check_class_para_equal(d_data, 'rot_filt', calc_para['rot_filt']),\r\n check_class_para_equal(d_data, 'bin_sz', calc_para['bin_sz']),\r\n check_class_para_equal(d_data, 't_over', calc_para['t_over']),\r\n ]\r\n\r\n # if there was a change in any of the parameters, then flag recalculation is needed\r\n if not np.all(is_equal) or data.force_calc:\r\n d_data.is_set = False\r\n\r\n elif ct == 'theta':\r\n # initialisations\r\n th_data = other_para\r\n\r\n # if the data is not calculated, then exit the function\r\n if not th_data.is_set:\r\n return\r\n\r\n # determines the calculation parameter that have been altered\r\n is_equal = [\r\n check_class_para_equal(th_data, 'vel_bin', calc_para['vel_bin']),\r\n check_class_para_equal(th_data, 'bin_sz', calc_para['bin_sz']),\r\n check_class_para_equal(th_data, 'win_type', calc_para['win_type']),\r\n check_class_para_equal(th_data, 'remove_bl', calc_para['remove_bl']),\r\n ]\r\n\r\n # if there was a change in any of the parameters, then flag recalculation is needed\r\n if not np.all(is_equal) or data.force_calc:\r\n th_data.is_set = False"
]
| [
[
"scipy.stats.norm.ppf",
"numpy.polyfit",
"scipy.signal.find_peaks",
"numpy.sqrt",
"numpy.linspace",
"numpy.cumsum",
"pandas.DataFrame",
"numpy.all",
"numpy.max",
"numpy.concatenate",
"numpy.round",
"numpy.argmin",
"numpy.any",
"numpy.mean",
"numpy.nanmean",
"numpy.searchsorted",
"numpy.nanstd",
"numpy.histogram",
"numpy.where",
"numpy.divide",
"scipy.optimize.curve_fit",
"numpy.hstack",
"pandas.read_csv",
"numpy.pad",
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.unique",
"numpy.arange",
"numpy.subtract",
"numpy.lexsort",
"numpy.size",
"numpy.argmax",
"numpy.diff",
"numpy.std",
"scipy.signal.periodogram",
"numpy.matlib.repmat",
"numpy.zeros",
"numpy.min",
"numpy.isnan",
"numpy.logical_or",
"scipy.stats.linregress",
"numpy.floor",
"numpy.corrcoef",
"numpy.argsort",
"numpy.logical_and",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.fft.fft",
"numpy.empty",
"numpy.ones",
"numpy.dstack",
"numpy.sign",
"numpy.sort",
"numpy.random.permutation",
"scipy.interpolate.PchipInterpolator",
"numpy.shape",
"sklearn.linear_model.LinearRegression",
"numpy.vstack"
]
]
|
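A recurring motif in the record above is folding ROC AUC values to the above-chance side (`(1. - x) if x < 0.5 else x`) and, when `auc_stype` is `'Bootstrapping'`, wrapping them in bootstrap confidence intervals. Below is a minimal numpy-only sketch of that motif, under stated assumptions: the `cf.*`/`cfcn.*` helpers in the record are internal and not reproduced here, and the Poisson spike counts are invented stand-in data.

```python
import numpy as np

def roc_auc(x, y):
    """ROC AUC for separating two spike-rate samples (Mann-Whitney statistic)."""
    x, y = np.asarray(x, float), np.asarray(y, float)
    greater = (y[:, None] > x[None, :]).sum()
    ties = (y[:, None] == x[None, :]).sum()
    return (greater + 0.5 * ties) / (len(x) * len(y))

rng = np.random.default_rng(0)
x = rng.poisson(5, 40)  # spike counts, comparison bin (stand-in data)
y = rng.poisson(7, 40)  # spike counts, test bin (stand-in data)

auc = roc_auc(x, y)
auc = (1.0 - auc) if auc < 0.5 else auc  # fold below-chance values, as in the record

# bootstrap CI over resampled trials, mirroring the 'Bootstrapping' branch
n_boot, c_lvl = 1000, 0.95
boot = np.array([roc_auc(rng.choice(x, len(x)), rng.choice(y, len(y)))
                 for _ in range(n_boot)])
ci_lo, ci_hi = np.percentile(boot, [50 * (1 - c_lvl), 100 - 50 * (1 - c_lvl)])
print(f"AUC = {auc:.3f}, {c_lvl:.0%} CI = [{ci_lo:.3f}, {ci_hi:.3f}]")
```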
nilsmeyerkit/meshio | [
"51d30fbffadc91425d64417e2ee977ec04669e83"
]
| [
"src/meshio/ply/_ply.py"
]
| [
"\"\"\"\nI/O for the PLY format, cf.\n<https://en.wikipedia.org/wiki/PLY_(file_format)>.\n<https://web.archive.org/web/20161221115231/http://www.cs.virginia.edu/~gfx/Courses/2001/Advanced.spring.01/plylib/Ply.txt>.\n\"\"\"\nimport collections\nimport datetime\nimport re\nimport sys\nimport warnings\n\nimport numpy as np\n\nfrom ..__about__ import __version__\nfrom .._exceptions import ReadError, WriteError\nfrom .._files import open_file\nfrom .._helpers import register_format\nfrom .._mesh import CellBlock, Mesh\n\n# Reference dtypes\nply_to_numpy_dtype = {\n # [u]char is often used as [u]int, e.g., from Wikipedia:\n # > The word 'list' indicates that the data is a list of values, the first of which\n # > is the number of entries in the list (represented as a 'uchar' in this case).\n \"char\": np.int8,\n \"uchar\": np.uint8,\n \"short\": np.int16,\n \"ushort\": np.uint16,\n \"int\": np.int32,\n \"int8\": np.int8,\n \"int32\": np.int32,\n \"int64\": np.int64,\n \"uint\": np.uint32,\n \"uint8\": np.uint8,\n \"uint16\": np.uint16,\n \"uint32\": np.uint32,\n \"uint64\": np.uint64,\n \"float\": np.float32,\n \"float32\": np.float32,\n \"float64\": np.float64,\n \"double\": np.float64,\n}\nnumpy_to_ply_dtype = {np.dtype(v): k for k, v in ply_to_numpy_dtype.items()}\n\n\ndef cell_type_from_count(count):\n if count == 1:\n return \"vertex\"\n elif count == 2:\n return \"line\"\n elif count == 3:\n return \"triangle\"\n elif count == 4:\n return \"quad\"\n\n return \"polygon\"\n\n\ndef read(filename):\n with open_file(filename, \"rb\") as f:\n mesh = read_buffer(f)\n return mesh\n\n\ndef _next_line(f):\n # fast forward to the next significant line\n while True:\n line = f.readline().decode().strip()\n if line and line[:7] != \"comment\":\n break\n return line\n\n\ndef read_buffer(f):\n # assert that the first line reads `ply`\n line = f.readline().decode().strip()\n if line != \"ply\":\n raise ReadError(\"Expected ply\")\n\n line = _next_line(f)\n endianness = None\n if line == \"format ascii 1.0\":\n is_binary = False\n elif line == \"format binary_big_endian 1.0\":\n is_binary = True\n endianness = \">\"\n else:\n if line != \"format binary_little_endian 1.0\":\n raise ReadError()\n is_binary = True\n endianness = \"<\"\n\n # read header\n line = _next_line(f)\n num_verts = 0\n num_cells = 0\n point_data_formats = []\n point_data_names = []\n cell_data_names = []\n cell_data_dtypes = []\n while line != \"end_header\":\n m_vert = re.match(\"element vertex (\\\\d+)\", line)\n m_face = re.match(\"element face (\\\\d+)\", line)\n if line[:8] == \"obj_info\":\n line = _next_line(f)\n elif m_vert is not None:\n num_verts = int(m_vert.groups()[0])\n\n # read point data\n line = _next_line(f)\n while line[:8] == \"property\":\n m = re.match(\"property (.+) (.+)\", line)\n assert m is not None\n point_data_formats.append(m.groups()[0])\n point_data_names.append(m.groups()[1])\n line = _next_line(f)\n elif m_face is not None:\n num_cells = int(m_face.groups()[0])\n\n if num_cells < 0:\n raise ReadError(f\"Expected positive num_cells (got `{num_cells}`.\")\n\n # read property lists\n line = _next_line(f)\n # read cell data\n while line[:8] == \"property\":\n if line[:13] == \"property list\":\n m = re.match(\"property list (.+) (.+) (.+)\", line)\n assert m is not None\n cell_data_dtypes.append(tuple(m.groups()[:-1]))\n else:\n m = re.match(\"property (.+) (.+)\", line)\n assert m is not None\n cell_data_dtypes.append(m.groups()[0])\n cell_data_names.append(m.groups()[-1])\n line = _next_line(f)\n else:\n 
raise ReadError(\n \"Expected `element vertex` or `element face` or `obj_info`, \"\n f\"got `{line}`\"\n )\n\n if is_binary:\n mesh = _read_binary(\n f,\n endianness,\n point_data_names,\n point_data_formats,\n num_verts,\n num_cells,\n cell_data_names,\n cell_data_dtypes,\n )\n else:\n mesh = _read_ascii(\n f,\n point_data_names,\n point_data_formats,\n num_verts,\n num_cells,\n cell_data_names,\n cell_data_dtypes,\n )\n\n return mesh\n\n\ndef _read_ascii(\n f,\n point_data_names,\n point_data_formats,\n num_verts,\n num_cells,\n cell_data_names,\n cell_dtypes,\n):\n assert len(cell_data_names) == len(cell_dtypes)\n\n # assert that all formats are the same\n # Now read the data\n dtype = np.dtype(\n [\n (name, ply_to_numpy_dtype[fmt])\n for name, fmt in zip(point_data_names, point_data_formats)\n ]\n )\n pd = np.genfromtxt(f, max_rows=num_verts, dtype=dtype)\n\n # split off coordinate data and additional point data\n verts = []\n k = 0\n if point_data_names[0] == \"x\":\n verts.append(pd[\"x\"])\n k += 1\n if point_data_names[1] == \"y\":\n verts.append(pd[\"y\"])\n k += 1\n if point_data_names[2] == \"z\":\n verts.append(pd[\"z\"])\n k += 1\n verts = np.column_stack(verts)\n\n point_data = {\n point_data_names[i]: pd[point_data_names[i]]\n for i in range(k, len(point_data_names))\n }\n cell_data = {}\n\n cell_blocks = []\n\n for k in range(num_cells):\n line = f.readline().decode().strip()\n data = line.split()\n\n if k == 0:\n # initialize the cell data arrays\n i = 0\n cell_data = {}\n for name, dtype in zip(cell_data_names, cell_dtypes):\n if name == \"vertex_indices\":\n n = int(data[i])\n i += n + 1\n else:\n cell_data[name] = collections.defaultdict(list)\n i += 1\n\n # go over the line\n i = 0\n n = None\n for name, dtype in zip(cell_data_names, cell_dtypes):\n if name == \"vertex_indices\":\n idx_dtype, value_dtype = dtype\n n = ply_to_numpy_dtype[idx_dtype](data[i])\n dtype = ply_to_numpy_dtype[value_dtype]\n idx = dtype(data[i + 1 : i + n + 1])\n if len(cell_blocks) == 0 or len(cell_blocks[-1][1][-1]) != n:\n cell_blocks.append((cell_type_from_count(n), [idx]))\n else:\n cell_blocks[-1][1].append(idx)\n i += n + 1\n else:\n dtype = ply_to_numpy_dtype[dtype]\n # use n from vertex_indices\n assert n is not None\n cell_data[name][n] += [dtype(data[j]) for j in range(i, i + 1)]\n i += 1\n\n cell_data = {\n key: [np.array(v) for v in value.values()] for key, value in cell_data.items()\n }\n\n return Mesh(verts, cell_blocks, point_data=point_data, cell_data=cell_data)\n\n\ndef _read_binary(\n f,\n endianness,\n point_data_names,\n formats,\n num_verts,\n num_cells,\n cell_data_names,\n cell_data_dtypes,\n):\n ply_to_numpy_dtype_string = {\n \"uchar\": \"i1\",\n \"uint\": \"u4\",\n \"uint8\": \"u1\",\n \"uint16\": \"u2\",\n \"uint32\": \"u4\",\n \"uint64\": \"u8\",\n \"int\": \"i4\",\n \"int8\": \"i1\",\n \"int32\": \"i4\",\n \"int64\": \"i8\",\n \"float\": \"f4\",\n \"float32\": \"f4\",\n \"double\": \"f8\",\n }\n\n # read point data\n dtype = [\n (name, endianness + ply_to_numpy_dtype_string[fmt])\n for name, fmt in zip(point_data_names, formats)\n ]\n point_data = np.frombuffer(\n f.read(num_verts * np.dtype(dtype).itemsize), dtype=dtype\n )\n verts = np.column_stack([point_data[\"x\"], point_data[\"y\"], point_data[\"z\"]])\n point_data = {\n name: point_data[name]\n for name in point_data_names\n if name not in [\"x\", \"y\", \"z\"]\n }\n\n # Convert strings to proper numpy dtypes\n dts = [\n (\n endianness + ply_to_numpy_dtype_string[dtype[0]],\n endianness + 
ply_to_numpy_dtype_string[dtype[1]],\n )\n if isinstance(dtype, tuple)\n else endianness + ply_to_numpy_dtype_string[dtype]\n for dtype in cell_data_dtypes\n ]\n\n # memoryviews can be sliced and passed around without copying. However, the\n # `bytearray()` call here redundantly copies so that the final output arrays\n # are writeable.\n buffer = memoryview(bytearray(f.read()))\n buffer_position = 0\n\n cell_data = {}\n for (name, dt) in zip(cell_data_names, dts):\n if isinstance(dt, tuple):\n buffer_increment, cell_data[name] = _read_binary_list(\n buffer[buffer_position:], dt[0], dt[1], num_cells, endianness\n )\n else:\n buffer_increment = np.dtype(dt).itemsize\n cell_data[name] = np.frombuffer(\n buffer[buffer_position : buffer_position + buffer_increment], dtype=dt\n )[0]\n buffer_position += buffer_increment\n\n cells = cell_data.pop(\"vertex_indices\", [])\n\n return Mesh(verts, cells, point_data=point_data, cell_data=cell_data)\n\n\ndef _read_binary_list(buffer, count_dtype, data_dtype, num_cells, endianness):\n \"\"\"Parse a ply ragged list into a :class:`CellBlock` for each change in row\n length. The only way to know how many bytes the list takes up is to parse\n it. Hence this function also returns the number of bytes consumed.\n \"\"\"\n count_dtype, data_dtype = np.dtype(count_dtype), np.dtype(data_dtype)\n count_itemsize = count_dtype.itemsize\n data_itemsize = data_dtype.itemsize\n byteorder = \"little\" if endianness == \"<\" else \"big\"\n\n # Firstly, walk the buffer to extract all start and end ids (in bytes) of\n # each row into `byte_starts_ends`. Using `np.fromiter(generator)` is\n # 2-3x faster than list comprehension or manually populating an array with\n # a for loop. This is still very much the bottleneck - might be worth\n # ctype-ing in future?\n def parse_ragged(start, num_cells):\n at = start\n yield at\n for _ in range(num_cells):\n count = int.from_bytes(buffer[at : at + count_itemsize], byteorder)\n at += count * data_itemsize + count_itemsize\n yield at\n\n # Row `i` is given by `buffer[byte_starts_ends[i]: byte_starts_ends[i+1]]`.\n byte_starts_ends = np.fromiter(parse_ragged(0, num_cells), np.intp, num_cells + 1)\n\n # Next, find where the row length changes and list the (start, end) row ids\n # of each homogeneous block into `block_bounds`.\n row_lengths = np.diff(byte_starts_ends)\n count_changed_ids = np.nonzero(np.diff(row_lengths))[0] + 1\n\n block_bounds = []\n start = 0\n for end in count_changed_ids:\n block_bounds.append((start, end))\n start = end\n block_bounds.append((start, len(byte_starts_ends) - 1))\n\n # Finally, parse each homogeneous block. 
Constructing an appropriate\n # `block_dtype` to include the initial counts in each row avoids any\n # wasteful copy operations.\n blocks = []\n for (start, end) in block_bounds:\n if start == end:\n # This should only happen if the element was empty to begin with.\n continue\n block_buffer = buffer[byte_starts_ends[start] : byte_starts_ends[end]]\n cells_per_row = (row_lengths[start] - count_itemsize) // data_itemsize\n block_dtype = np.dtype(\n [(\"count\", count_dtype), (\"data\", data_dtype * cells_per_row)]\n )\n cells = np.frombuffer(block_buffer, dtype=block_dtype)[\"data\"]\n\n cell_type = cell_type_from_count(cells.shape[1])\n\n blocks.append(CellBlock(cell_type, cells))\n\n return byte_starts_ends[-1], blocks\n\n\ndef write(filename, mesh: Mesh, binary: bool = True): # noqa: C901\n\n with open_file(filename, \"wb\") as fh:\n fh.write(b\"ply\\n\")\n\n if binary:\n fh.write(f\"format binary_{sys.byteorder}_endian 1.0\\n\".encode())\n else:\n fh.write(b\"format ascii 1.0\\n\")\n\n now = datetime.datetime.now().isoformat()\n fh.write(f\"comment Created by meshio v{__version__}, {now}\\n\".encode())\n\n # counts\n fh.write(f\"element vertex {mesh.points.shape[0]:d}\\n\".encode())\n #\n dim_names = [\"x\", \"y\", \"z\"]\n # From <https://en.wikipedia.org/wiki/PLY_(file_format)>:\n #\n # > The type can be specified with one of char uchar short ushort int uint float\n # > double, or one of int8 uint8 int16 uint16 int32 uint32 float32 float64.\n #\n # We're adding [u]int64 here.\n type_name_table = {\n np.dtype(np.int8): \"int8\",\n np.dtype(np.int16): \"int16\",\n np.dtype(np.int32): \"int32\",\n np.dtype(np.int64): \"int64\",\n np.dtype(np.uint8): \"uint8\",\n np.dtype(np.uint16): \"uint16\",\n np.dtype(np.uint32): \"uint32\",\n np.dtype(np.uint64): \"uint64\",\n np.dtype(np.float32): \"float\",\n np.dtype(np.float64): \"double\",\n }\n for k in range(mesh.points.shape[1]):\n type_name = type_name_table[mesh.points.dtype]\n fh.write(f\"property {type_name} {dim_names[k]}\\n\".encode())\n\n pd = []\n for key, value in mesh.point_data.items():\n if len(value.shape) > 1:\n warnings.warn(\n \"PLY writer doesn't support multidimensional point data yet. \"\n f\"Skipping {key}.\"\n )\n continue\n type_name = type_name_table[value.dtype]\n fh.write(f\"property {type_name} {key}\\n\".encode())\n pd.append(value)\n\n num_cells = 0\n legal_cell_types = [\"vertex\", \"line\", \"triangle\", \"quad\", \"polygon\"]\n for cell_block in mesh.cells:\n if cell_block.type in legal_cell_types:\n num_cells += cell_block.data.shape[0]\n\n if num_cells > 0:\n fh.write(f\"element face {num_cells:d}\\n\".encode())\n\n # possibly cast down to int32\n # TODO don't alter the mesh data\n has_cast = False\n for k, cell_block in enumerate(mesh.cells):\n if cell_block.data.dtype == np.int64:\n has_cast = True\n mesh.cells[k] = CellBlock(\n cell_block.type, cell_block.data.astype(np.int32)\n )\n\n if has_cast:\n warnings.warn(\n \"PLY doesn't support 64-bit integers. 
Casting down to 32-bit.\"\n )\n\n # assert that all cell dtypes are equal\n cell_dtype = None\n for cell_block in mesh.cells:\n if cell_dtype is None:\n cell_dtype = cell_block.data.dtype\n if cell_block.data.dtype != cell_dtype:\n raise WriteError()\n\n if cell_dtype is not None:\n ply_type = numpy_to_ply_dtype[cell_dtype]\n fh.write(f\"property list uint8 {ply_type} vertex_indices\\n\".encode())\n\n # TODO other cell data\n fh.write(b\"end_header\\n\")\n\n if binary:\n # points and point_data\n out = np.rec.fromarrays([coord for coord in mesh.points.T] + pd)\n fh.write(out.tobytes())\n\n # cells\n for cell_block in mesh.cells:\n if cell_block.type not in legal_cell_types:\n warnings.warn(\n f'cell_type \"{cell_block.type}\" is not supported by PLY format '\n \"- skipping\"\n )\n continue\n # prepend with count\n d = cell_block.data\n out = np.rec.fromarrays(\n [np.broadcast_to(np.uint8(d.shape[1]), d.shape[0]), *d.T]\n )\n fh.write(out.tobytes())\n else:\n # vertices\n # np.savetxt(fh, mesh.points, \"%r\") # slower\n # out = np.column_stack([mesh.points] + list(mesh.point_data.values()))\n out = np.rec.fromarrays([coord for coord in mesh.points.T] + pd)\n fmt = \" \".join([\"{}\"] * len(out[0]))\n out = \"\\n\".join([fmt.format(*row) for row in out]) + \"\\n\"\n fh.write(out.encode())\n\n # cells\n for cell_block in mesh.cells:\n if cell_block.type not in legal_cell_types:\n warnings.warn(\n f'cell_type \"{cell_block.type}\" is not supported by PLY format '\n + \"- skipping\"\n )\n continue\n # if cell_type not in cell_type_to_count.keys():\n # continue\n d = cell_block.data\n out = np.column_stack(\n [np.full(d.shape[0], d.shape[1], dtype=d.dtype), d]\n )\n # savetxt is slower\n # np.savetxt(fh, out, \"%d %d %d %d\")\n fmt = \" \".join([\"{}\"] * out.shape[1])\n out = \"\\n\".join([fmt.format(*row) for row in out]) + \"\\n\"\n fh.write(out.encode())\n\n\nregister_format(\"ply\", [\".ply\"], read, {\"ply\": write})\n"
]
| [
[
"numpy.uint8",
"numpy.rec.fromarrays",
"numpy.dtype",
"numpy.genfromtxt",
"numpy.full",
"numpy.frombuffer",
"numpy.diff",
"numpy.column_stack",
"numpy.array"
]
]
|
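The `_ply.py` module above registers its `read`/`write` pair at import time (`register_format("ply", [".ply"], read, {"ply": write})`), so the usual entry point is meshio's top-level API. A short round-trip sketch; the file name is arbitrary, and `binary=False` exercises the `format ascii 1.0` branch of the reader:

```python
import numpy as np
import meshio

points = np.array([[0.0, 0.0, 0.0],
                   [1.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0],
                   [1.0, 1.0, 0.0]])
# int32 indices avoid the "PLY doesn't support 64-bit integers" downcast warning
cells = [("triangle", np.array([[0, 1, 2], [1, 3, 2]], dtype=np.int32))]

meshio.write("square.ply", meshio.Mesh(points, cells), binary=False)

mesh = meshio.read("square.ply")  # dispatches to read() above via the registry
print(mesh.points.shape)          # (4, 3)
print(mesh.cells[0].type)         # "triangle"
```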
betefaber/tracking-simulator | [
"a53bdabce55cac617e45417ca54f8b51a34c2547"
]
| [
"trackingsim/sim.py"
]
| [
"import threading\nimport logging\nimport time\nimport json\nimport numpy as np\nimport paho.mqtt.client as mqtt\nfrom geopy import Point\nfrom geopy import distance\n\n\nclass Simulator:\n MIN_SLEEP_TIME = 2 # 2 second\n MAX_SLEEP_TIME = 10 # 10 seconds\n MIN_DISPLACEMENT = 0 # 0 Km\n MAX_DISPLACEMENT = 0.01 # 0.01 Km\n MIN_BEARING = 0 # 0 degree\n MAX_BEARING = 360 # 360 degrees\n MAX_DISPLACEMENT_FROM_ORIGIN = 25 # 25 Km\n MEAN_TEMPERATURE = 90\n MEAN_RPM = 4000\n\n def __init__(self, host, port, tenant, device, latitude, longitude, movement):\n self.__logger = logging.getLogger('trackingsim.sim')\n self.__origin = Point(latitude, longitude)\n self.__current_position = Point(latitude, longitude)\n self.__mqttc = mqtt.Client(\"{}:{}\".format(tenant,device))\n self.__mqttc.username_pw_set(\"{}:{}\".format(tenant,device)) # not necessary with iotagent-mosca\n self.__mqttc.connect(host=host, port=port)\n self.__mqttc.loop_start()\n self.__topic = \"{0}:{1}/attrs\".format(tenant, device)\n # self.__topic = \"/{0}/{1}/attrs\".format(tenant, device) # iotagent-mosca topic\n self.__sleep = np.random.uniform(self.__class__.MIN_SLEEP_TIME, self.__class__.MAX_SLEEP_TIME)\n self.__logger.info(\"Starting simulation for device {0} with sleep time {1}\".format(device, self.__sleep))\n\n if movement == 'straight-line':\n self.__next_movement = self.__get_next_position_for_straight_line\n else:\n self.__next_movement = self.__get_next_random_position\n\n # bearing displacement used in the last movement. Start with random values\n self.__displacement = \\\n np.random.uniform(self.__class__.MIN_DISPLACEMENT, self.__class__.MAX_DISPLACEMENT)\n self.__bearing = \\\n np.random.uniform(self.__class__.MIN_BEARING, self.__class__.MAX_BEARING)\n\n def run(self):\n while True:\n data = dict()\n\n # gps\n data['gps'] = \"{0}, {1}\".format(str(self.__current_position.latitude),\n str(self.__current_position.longitude))\n\n # sinr\n data['sinr'] = self.__get_next_sinr()\n\n # temperature\n data['temperature'] = np.random.normal(self.MEAN_TEMPERATURE)\n\n # RPM\n data['rpm'] = np.random.normal(self.MEAN_RPM)\n\n # publish\n self.__logger.info(\"Publishing: {0}\".format(json.dumps(data)))\n self.__mqttc.publish(self.__topic, json.dumps(data))\n\n # sleep\n time.sleep(self.__sleep)\n\n # next position\n (self.__current_position, self.__displacement, self.__bearing) = \\\n self.__next_movement()\n\n def __get_next_random_position(self):\n # displacement\n displacement = np.random.uniform(self.__class__.MIN_DISPLACEMENT, self.__class__.MAX_DISPLACEMENT)\n\n # direction\n bearing = np.random.uniform(self.__class__.MIN_BEARING, self.__class__.MAX_BEARING)\n\n # next point\n next_position = distance.vincenty(kilometers=displacement).destination(self.__current_position, bearing)\n while (distance.vincenty(self.__origin, next_position).kilometers\n > self.__class__.MAX_DISPLACEMENT_FROM_ORIGIN):\n # displacement\n displacement = np.random.uniform(self.__class__.MIN_DISPLACEMENT, self.__class__.MAX_DISPLACEMENT)\n\n # direction\n bearing = np.random.uniform(self.__class__.MIN_BEARING, self.__class__.MAX_BEARING)\n\n # next point\n next_position = distance.vincenty(kilometers=displacement).destination(self.__current_position, bearing)\n\n return next_position, displacement, bearing\n\n def __get_next_position_for_straight_line(self):\n # displacement\n displacement = np.random.uniform(self.__class__.MIN_DISPLACEMENT, self.__class__.MAX_DISPLACEMENT)\n\n # direction\n bearing = self.__bearing\n\n # next point\n next_position = 
distance.vincenty(kilometers=displacement).destination(self.__current_position, bearing)\n if (distance.vincenty(self.__origin, next_position).kilometers\n > self.__class__.MAX_DISPLACEMENT_FROM_ORIGIN):\n\n # direction\n bearing = (self.__bearing + 180) % 360\n\n # next point\n next_position = distance.vincenty(kilometers=displacement).destination(self.__current_position, bearing)\n\n return next_position, displacement, bearing\n\n def __get_next_sinr(self):\n displacement_from_origin = distance.vincenty(self.__origin, self.__current_position).kilometers\n relative_displacement = displacement_from_origin / self.MAX_DISPLACEMENT_FROM_ORIGIN\n\n if relative_displacement < 0.15:\n sinr = np.random.uniform(20, 30)\n\n elif relative_displacement < 0.30:\n sinr = np.random.uniform(15, 20)\n\n elif relative_displacement < 0.45:\n sinr = np.random.uniform(10, 15)\n\n elif relative_displacement < 0.60:\n sinr = np.random.uniform(5, 10)\n\n elif relative_displacement < 0.75:\n sinr = np.random.uniform(2, 5)\n\n else:\n sinr = np.random.uniform(-1, 2)\n\n return sinr\n"
]
| [
[
"numpy.random.uniform",
"numpy.random.normal"
]
]
|
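Each simulator step in the record above is one geopy `destination` call: draw a displacement and bearing, move, and either resample (random walk) or reverse direction (straight line) if the new point drifts more than `MAX_DISPLACEMENT_FROM_ORIGIN` from the origin. A sketch of the random-walk variant; note that `distance.vincenty`, used throughout the record, was removed in geopy 2.x, so this sketch substitutes `geodesic`, and the start coordinates are arbitrary:

```python
import numpy as np
from geopy import Point
from geopy.distance import geodesic  # vincenty was removed in geopy 2.x

MAX_DISPLACEMENT = 0.01  # km per step, as in the record's MIN/MAX_DISPLACEMENT
MAX_FROM_ORIGIN = 25     # km, as in MAX_DISPLACEMENT_FROM_ORIGIN

rng = np.random.default_rng(0)
origin = pos = Point(-15.79, -47.88)  # arbitrary start position

for _ in range(5):
    step, bearing = rng.uniform(0, MAX_DISPLACEMENT), rng.uniform(0, 360)
    nxt = geodesic(kilometers=step).destination(pos, bearing)
    while geodesic(origin, nxt).km > MAX_FROM_ORIGIN:  # resample out-of-range steps
        step, bearing = rng.uniform(0, MAX_DISPLACEMENT), rng.uniform(0, 360)
        nxt = geodesic(kilometers=step).destination(pos, bearing)
    pos = nxt
    print(f"({pos.latitude:.6f}, {pos.longitude:.6f}) "
          f"{geodesic(origin, pos).km:.4f} km from origin")
```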
RikilG/Geometry-Algorithms | [
"7bdf25e425b93dc6955331a48980a4b4d8051a6d"
]
| [
"DCEL/plot.py"
]
| [
"#!/usr/bin/env python\n\nimport os\nimport sys\nimport random\nimport argparse\nimport traceback\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import Polygon\n\nf, ax = plt.subplots()\n\ndef plotPoly(file):\n lines = []\n lines = file.readlines()\n n = int(lines[0].strip().split()[0].strip());\n xy = []\n for i in range(1, n+1):\n coords = [ float(k) for k in lines[i].split() ]\n x1, y1, x2, y2 = coords\n # xy.append([x, y])\n plt.plot([x1, x2], [y1, y2], '--', linewidth=2, color='red')\n # polygon = Polygon(xy, edgecolor='red', facecolor=None, fill=None, linewidth='3')\n # ax.add_patch(polygon)\n\n\ndef main(file):\n xy = []\n for line in file:\n print(line, end='')\n if line == '\\n' or line[0] not in ['F', 'H']: continue\n if line[0] == 'F':\n if len(xy) == 0: continue\n polygon = Polygon(xy, color=[ random.uniform(0,1) for _ in range(3) ])\n ax.add_patch(polygon)\n xy = []\n continue\n line = line.replace('HEdge: ', '').replace(' -> ', ' ').replace(',', ' ').replace('(', ' ').replace(')', ' ').split()\n xy.append([line[0], line[1]])\n if len(xy) != 0:\n polygon = Polygon(xy, color=[ random.uniform(0,1) for _ in range(3) ])\n ax.add_patch(polygon)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='pipe points to stdin or provide file as argument to plot')\n parser.add_argument('outputFile', nargs='?', default=None, help='optional output to read from file. if provided, -o option is ignored')\n parser.add_argument('-i', '--input', default=None, help='optional input file which has the original polygon')\n parser.add_argument('-o', '--output', default=None, help='optional output file from the program, if not given, stdin is assumed')\n args = parser.parse_args()\n outFile = args.outputFile or args.output\n inFile = args.input\n if outFile == None:\n file = sys.stdin\n else:\n if not os.path.exists(outFile):\n print('provided output filepath is invalid')\n exit(1)\n file = open(outFile, 'r')\n if inFile != None and os.path.exists(inFile):\n inFile = open(inFile, 'r')\n try:\n main(file)\n if inFile != None: plotPoly(inFile)\n if inFile != None: print(f\"File name: {args.input}\")\n plt.plot()\n plt.show()\n except Exception as e:\n file.close()\n if inFile != None: inFile.close()\n print(e)\n print(traceback.format_exc())\n print('Encountered error, quitting...')\n"
]
| [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
]
|
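`plot.py` above parses the DCEL program's text output: it accumulates one vertex per `HEdge: (x, y) -> (x, y)` line and flushes a filled `Polygon` whenever a face line (starting with `F`) appears. A self-contained sketch of that parse-and-fill loop; the two faces below are made-up input in the same format:

```python
import random
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon

# hypothetical DCEL output: two triangular faces
lines = [
    "Face 1",
    "HEdge: (0, 0) -> (2, 0)",
    "HEdge: (2, 0) -> (1, 2)",
    "HEdge: (1, 2) -> (0, 0)",
    "Face 2",
    "HEdge: (2, 0) -> (4, 0)",
    "HEdge: (4, 0) -> (3, 2)",
    "HEdge: (3, 2) -> (2, 0)",
]

fig, ax = plt.subplots()
xy = []
for line in lines + ["F"]:  # trailing "F" flushes the last face
    if line.startswith("F"):
        if xy:
            ax.add_patch(Polygon(xy, color=[random.uniform(0, 1) for _ in range(3)]))
        xy = []
        continue
    tok = (line.replace("HEdge: ", "").replace(" -> ", " ")
               .replace(",", " ").replace("(", " ").replace(")", " ").split())
    xy.append([float(tok[0]), float(tok[1])])  # origin vertex of each half-edge
ax.autoscale_view()
plt.show()
```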
obliviateandsurrender/namd2helper | [
"392403f0fca3a2a69913a866240e7f27d991c324"
]
| [
"plotter.py"
]
| [
"import matplotlib.pyplot as plt\nf = open('da_us.psf',\"r\")\na = f.read().split('#')[1]\na = a.split('\\n')[1:-1]\nx = []\ny = []\nfor i in a:\n b = i.split('\\t')\n x.append(float(b[0]))\n y.append(float(b[1]))\nplt.plot(x,y)\nplt.xlabel(\"Distance (A)\")\nplt.ylabel(\"Free Energy (kcal/mol)\")\nplt.show()"
]
| [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
]
|
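A self-contained variant of the same parse-then-plot flow, with an in-memory stand-in for the `da_us.psf` free-energy file (the numbers are invented):

```python
import matplotlib.pyplot as plt

# Two tab-separated columns after a '#' marker line, as plotter.py expects.
text = "preamble\n# distance vs free energy\n1.0\t-2.3\n2.0\t-2.9\n3.0\t-3.1\n"
rows = text.split('#')[1].split('\n')[1:-1]
x, y = zip(*((float(a), float(b)) for a, b in (r.split('\t') for r in rows)))
plt.plot(x, y)
plt.xlabel("Distance (A)")
plt.ylabel("Free Energy (kcal/mol)")
plt.show()
```
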
MartaYang/SEGA | [
"ad843b51508e87c0b181a01a397f12b13d5ccbbb"
]
| [
"data/mini_imagenet.py"
]
| [
"# Dataloader of Gidaris & Komodakis, CVPR 2018\r\n# Adapted from:\r\n# https://github.com/gidariss/FewShotWithoutForgetting/blob/master/dataloader.py\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport os.path\r\nimport numpy as np\r\nimport random\r\nimport pickle\r\nimport json\r\nimport math\r\n\r\nimport torch\r\nimport torch.utils.data as data\r\nimport torchvision\r\nimport torchvision.datasets as datasets\r\nimport torchvision.transforms as transforms\r\nimport torchnet as tnt\r\n\r\nfrom PIL import Image\r\nfrom PIL import ImageEnhance\r\n\r\nfrom pdb import set_trace as breakpoint\r\n\r\n\r\n# TODO: Set the appropriate paths of the datasets here.\r\n_MINI_IMAGENET_DATASET_DIR = '/data/FSLDatasets/MiniImagenet'\r\n\r\ndef buildLabelIndex(labels):\r\n label2inds = {}\r\n for idx, label in enumerate(labels):\r\n if label not in label2inds:\r\n label2inds[label] = []\r\n label2inds[label].append(idx)\r\n\r\n return label2inds\r\n\r\n\r\ndef load_data(file):\r\n try:\r\n with open(file, 'rb') as fo:\r\n data = pickle.load(fo)\r\n return data\r\n except:\r\n with open(file, 'rb') as f:\r\n u = pickle._Unpickler(f)\r\n u.encoding = 'latin1'\r\n data = u.load()\r\n return data\r\n\r\nclass MiniImageNet(data.Dataset):\r\n def __init__(self, phase='train', do_not_use_random_transf=False):\r\n\r\n self.base_folder = 'miniImagenet'\r\n assert(phase=='train' or phase=='val' or phase=='test')\r\n self.phase = phase\r\n self.name = 'MiniImageNet_' + phase\r\n\r\n print('Loading mini ImageNet dataset - phase {0}'.format(phase))\r\n file_train_categories_train_phase = os.path.join(\r\n _MINI_IMAGENET_DATASET_DIR,\r\n 'miniImageNet_category_split_train_phase_train.pickle')\r\n file_train_categories_val_phase = os.path.join(\r\n _MINI_IMAGENET_DATASET_DIR,\r\n 'miniImageNet_category_split_train_phase_val.pickle')\r\n file_train_categories_test_phase = os.path.join(\r\n _MINI_IMAGENET_DATASET_DIR,\r\n 'miniImageNet_category_split_train_phase_test.pickle')\r\n file_val_categories_val_phase = os.path.join(\r\n _MINI_IMAGENET_DATASET_DIR,\r\n 'miniImageNet_category_split_val.pickle')\r\n file_test_categories_test_phase = os.path.join(\r\n _MINI_IMAGENET_DATASET_DIR,\r\n 'miniImageNet_category_split_test.pickle')\r\n\r\n if self.phase=='train':\r\n # During training phase we only load the training phase images\r\n # of the training categories (aka base categories).\r\n data_train = load_data(file_train_categories_train_phase)\r\n self.data = data_train['data']\r\n self.labels = data_train['labels']\r\n\r\n self.label2ind = buildLabelIndex(self.labels)\r\n self.labelIds = sorted(self.label2ind.keys())\r\n self.num_cats = len(self.labelIds)\r\n self.labelIds_base = self.labelIds\r\n self.num_cats_base = len(self.labelIds_base)\r\n\r\n elif self.phase=='val' or self.phase=='test':\r\n if self.phase=='test':\r\n # load data that will be used for evaluating the recognition\r\n # accuracy of the base categories.\r\n data_base = load_data(file_train_categories_test_phase)\r\n # load data that will be use for evaluating the few-shot recogniton\r\n # accuracy on the novel categories.\r\n data_novel = load_data(file_test_categories_test_phase)\r\n else: # phase=='val'\r\n # load data that will be used for evaluating the recognition\r\n # accuracy of the base categories.\r\n data_base = load_data(file_train_categories_val_phase)\r\n # load data that will be use for evaluating the few-shot recogniton\r\n # accuracy on the novel categories.\r\n data_novel = 
load_data(file_val_categories_val_phase)\r\n\r\n self.data = np.concatenate(\r\n [data_base['data'], data_novel['data']], axis=0)\r\n self.labels = data_base['labels'] + data_novel['labels']\r\n\r\n self.label2ind = buildLabelIndex(self.labels)\r\n self.labelIds = sorted(self.label2ind.keys())\r\n self.num_cats = len(self.labelIds)\r\n\r\n self.labelIds_base = buildLabelIndex(data_base['labels']).keys()\r\n self.labelIds_novel = buildLabelIndex(data_novel['labels']).keys()\r\n self.num_cats_base = len(self.labelIds_base)\r\n self.num_cats_novel = len(self.labelIds_novel)\r\n intersection = set(self.labelIds_base) & set(self.labelIds_novel)\r\n assert(len(intersection) == 0)\r\n else:\r\n raise ValueError('Not valid phase {0}'.format(self.phase))\r\n\r\n mean_pix = [x/255.0 for x in [120.39586422, 115.59361427, 104.54012653]]\r\n std_pix = [x/255.0 for x in [70.68188272, 68.27635443, 72.54505529]]\r\n normalize = transforms.Normalize(mean=mean_pix, std=std_pix)\r\n\r\n if (self.phase=='test' or self.phase=='val') or (do_not_use_random_transf==True):\r\n self.transform = transforms.Compose([\r\n # transforms.Resize(92),\r\n # transforms.CenterCrop(84),\r\n lambda x: np.asarray(x),\r\n transforms.ToTensor(),\r\n normalize\r\n ])\r\n else:\r\n self.transform = transforms.Compose([\r\n transforms.RandomResizedCrop(84),\r\n transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\r\n transforms.RandomHorizontalFlip(),\r\n lambda x: np.asarray(x),\r\n transforms.ToTensor(),\r\n normalize,\r\n transforms.RandomErasing(0.5)\r\n ])\r\n \r\n def __getitem__(self, index):\r\n img, label = self.data[index], self.labels[index]\r\n # doing this so that it is consistent with all other datasets\r\n # to return a PIL Image\r\n img = Image.fromarray(img)\r\n if self.transform is not None:\r\n img = self.transform(img)\r\n return img, label\r\n\r\n def __len__(self):\r\n return len(self.data)\r\n\r\n\r\nclass FewShotDataloader():\r\n def __init__(self,\r\n dataset,\r\n nKnovel=5, # number of novel categories.\r\n nKbase=-1, # number of base categories.\r\n nExemplars=1, # number of training examples per novel category.\r\n nTestNovel=15*5, # number of test examples for all the novel categories.\r\n nTestBase=15*5, # number of test examples for all the base categories.\r\n batch_size=1, # number of training episodes per batch.\r\n num_workers=4,\r\n epoch_size=2000, # number of batches per epoch.\r\n ):\r\n\r\n self.dataset = dataset\r\n self.phase = self.dataset.phase\r\n max_possible_nKnovel = (self.dataset.num_cats_base if self.phase=='train'\r\n else self.dataset.num_cats_novel)\r\n assert(nKnovel >= 0 and nKnovel < max_possible_nKnovel)\r\n self.nKnovel = nKnovel\r\n\r\n max_possible_nKbase = self.dataset.num_cats_base\r\n nKbase = nKbase if nKbase >= 0 else max_possible_nKbase\r\n if self.phase=='train' and nKbase > 0:\r\n nKbase -= self.nKnovel\r\n max_possible_nKbase -= self.nKnovel\r\n\r\n assert(nKbase >= 0 and nKbase <= max_possible_nKbase)\r\n self.nKbase = nKbase\r\n\r\n self.nExemplars = nExemplars\r\n self.nTestNovel = nTestNovel\r\n self.nTestBase = nTestBase\r\n self.batch_size = batch_size\r\n self.epoch_size = epoch_size\r\n self.num_workers = num_workers\r\n self.is_eval_mode = (self.phase=='test') or (self.phase=='val')\r\n\r\n def sampleImageIdsFrom(self, cat_id, sample_size=1):\r\n \"\"\"\r\n Samples `sample_size` number of unique image ids picked from the\r\n category `cat_id` (i.e., self.dataset.label2ind[cat_id]).\r\n\r\n Args:\r\n cat_id: a scalar with the id of the 
category from which images will\r\n be sampled.\r\n sample_size: number of images that will be sampled.\r\n\r\n Returns:\r\n image_ids: a list of length `sample_size` with unique image ids.\r\n \"\"\"\r\n assert(cat_id in self.dataset.label2ind)\r\n assert(len(self.dataset.label2ind[cat_id]) >= sample_size)\r\n # Note: random.sample samples elements without replacement.\r\n return random.sample(self.dataset.label2ind[cat_id], sample_size)\r\n\r\n def sampleCategories(self, cat_set, sample_size=1):\r\n \"\"\"\r\n Samples `sample_size` number of unique categories picked from the\r\n `cat_set` set of categories. `cat_set` can be either 'base' or 'novel'.\r\n\r\n Args:\r\n cat_set: string that specifies the set of categories from which\r\n categories will be sampled.\r\n sample_size: number of categories that will be sampled.\r\n\r\n Returns:\r\n cat_ids: a list of length `sample_size` with unique category ids.\r\n \"\"\"\r\n if cat_set=='base':\r\n labelIds = self.dataset.labelIds_base\r\n elif cat_set=='novel':\r\n labelIds = self.dataset.labelIds_novel\r\n else:\r\n raise ValueError('Not recognized category set {}'.format(cat_set))\r\n\r\n assert(len(labelIds) >= sample_size)\r\n # return sample_size unique categories chosen from labelIds set of\r\n # categories (that can be either self.labelIds_base or self.labelIds_novel)\r\n # Note: random.sample samples elements without replacement.\r\n return random.sample(labelIds, sample_size)\r\n\r\n def sample_base_and_novel_categories(self, nKbase, nKnovel):\r\n \"\"\"\r\n Samples `nKbase` number of base categories and `nKnovel` number of novel\r\n categories.\r\n\r\n Args:\r\n nKbase: number of base categories\r\n nKnovel: number of novel categories\r\n\r\n Returns:\r\n Kbase: a list of length 'nKbase' with the ids of the sampled base\r\n categories.\r\n Knovel: a list of lenght 'nKnovel' with the ids of the sampled novel\r\n categories.\r\n \"\"\"\r\n if self.is_eval_mode:\r\n assert(nKnovel <= self.dataset.num_cats_novel)\r\n # sample from the set of base categories 'nKbase' number of base\r\n # categories.\r\n Kbase = sorted(self.sampleCategories('base', nKbase))\r\n # sample from the set of novel categories 'nKnovel' number of novel\r\n # categories.\r\n Knovel = sorted(self.sampleCategories('novel', nKnovel))\r\n else:\r\n # sample from the set of base categories 'nKnovel' + 'nKbase' number\r\n # of categories.\r\n cats_ids = self.sampleCategories('base', nKnovel+nKbase)\r\n assert(len(cats_ids) == (nKnovel+nKbase))\r\n # Randomly pick 'nKnovel' number of fake novel categories and keep\r\n # the rest as base categories.\r\n random.shuffle(cats_ids)\r\n Knovel = sorted(cats_ids[:nKnovel])\r\n Kbase = sorted(cats_ids[nKnovel:])\r\n\r\n return Kbase, Knovel\r\n\r\n def sample_test_examples_for_base_categories(self, Kbase, nTestBase):\r\n \"\"\"\r\n Sample `nTestBase` number of images from the `Kbase` categories.\r\n\r\n Args:\r\n Kbase: a list of length `nKbase` with the ids of the categories from\r\n where the images will be sampled.\r\n nTestBase: the total number of images that will be sampled.\r\n\r\n Returns:\r\n Tbase: a list of length `nTestBase` with 2-element tuples. 
The 1st\r\n element of each tuple is the image id that was sampled and the\r\n 2nd elemend is its category label (which is in the range\r\n [0, len(Kbase)-1]).\r\n \"\"\"\r\n Tbase = []\r\n if len(Kbase) > 0:\r\n # Sample for each base category a number images such that the total\r\n # number sampled images of all categories to be equal to `nTestBase`.\r\n KbaseIndices = np.random.choice(\r\n np.arange(len(Kbase)), size=nTestBase, replace=True)\r\n KbaseIndices, NumImagesPerCategory = np.unique(\r\n KbaseIndices, return_counts=True)\r\n\r\n for Kbase_idx, NumImages in zip(KbaseIndices, NumImagesPerCategory):\r\n imd_ids = self.sampleImageIdsFrom(\r\n Kbase[Kbase_idx], sample_size=NumImages)\r\n Tbase += [(img_id, Kbase_idx) for img_id in imd_ids]\r\n\r\n assert(len(Tbase) == nTestBase)\r\n\r\n return Tbase\r\n\r\n def sample_train_and_test_examples_for_novel_categories(\r\n self, Knovel, nTestNovel, nExemplars, nKbase):\r\n \"\"\"Samples train and test examples of the novel categories.\r\n\r\n Args:\r\n \t Knovel: a list with the ids of the novel categories.\r\n nTestNovel: the total number of test images that will be sampled\r\n from all the novel categories.\r\n nExemplars: the number of training examples per novel category that\r\n will be sampled.\r\n nKbase: the number of base categories. It is used as offset of the\r\n category index of each sampled image.\r\n\r\n Returns:\r\n Tnovel: a list of length `nTestNovel` with 2-element tuples. The\r\n 1st element of each tuple is the image id that was sampled and\r\n the 2nd element is its category label (which is in the range\r\n [nKbase, nKbase + len(Knovel) - 1]).\r\n Exemplars: a list of length len(Knovel) * nExemplars of 2-element\r\n tuples. The 1st element of each tuple is the image id that was\r\n sampled and the 2nd element is its category label (which is in\r\n the ragne [nKbase, nKbase + len(Knovel) - 1]).\r\n \"\"\"\r\n\r\n if len(Knovel) == 0:\r\n return [], []\r\n\r\n nKnovel = len(Knovel)\r\n Tnovel = []\r\n Exemplars = []\r\n assert((nTestNovel % nKnovel) == 0)\r\n nEvalExamplesPerClass = int(nTestNovel / nKnovel)\r\n\r\n for Knovel_idx in range(len(Knovel)):\r\n imd_ids = self.sampleImageIdsFrom(\r\n Knovel[Knovel_idx],\r\n sample_size=(nEvalExamplesPerClass + nExemplars))\r\n\r\n imds_tnovel = imd_ids[:nEvalExamplesPerClass]\r\n imds_ememplars = imd_ids[nEvalExamplesPerClass:]\r\n\r\n Tnovel += [(img_id, nKbase+Knovel_idx) for img_id in imds_tnovel]\r\n Exemplars += [(img_id, nKbase+Knovel_idx) for img_id in imds_ememplars]\r\n assert(len(Tnovel) == nTestNovel)\r\n assert(len(Exemplars) == len(Knovel) * nExemplars)\r\n random.shuffle(Exemplars)\r\n\r\n return Tnovel, Exemplars\r\n\r\n def sample_episode(self):\r\n \"\"\"Samples a training episode.\"\"\"\r\n nKnovel = self.nKnovel\r\n nKbase = self.nKbase\r\n nTestNovel = self.nTestNovel\r\n nTestBase = self.nTestBase\r\n nExemplars = self.nExemplars\r\n\r\n Kbase, Knovel = self.sample_base_and_novel_categories(nKbase, nKnovel)\r\n Tbase = self.sample_test_examples_for_base_categories(Kbase, nTestBase)\r\n Tnovel, Exemplars = self.sample_train_and_test_examples_for_novel_categories(\r\n Knovel, nTestNovel, nExemplars, nKbase)\r\n\r\n # concatenate the base and novel category examples.\r\n Test = Tbase + Tnovel\r\n random.shuffle(Test)\r\n Kall = Kbase + Knovel\r\n\r\n return Exemplars, Test, Kall, nKbase\r\n\r\n def createExamplesTensorData(self, examples):\r\n \"\"\"\r\n Creates the examples image and label tensor data.\r\n\r\n Args:\r\n examples: a list of 2-element 
tuples, each representing a\r\n train or test example. The 1st element of each tuple\r\n is the image id of the example and 2nd element is the\r\n category label of the example, which is in the range\r\n [0, nK - 1], where nK is the total number of categories\r\n (both novel and base).\r\n\r\n Returns:\r\n images: a tensor of shape [nExamples, Height, Width, 3] with the\r\n example images, where nExamples is the number of examples\r\n (i.e., nExamples = len(examples)).\r\n labels: a tensor of shape [nExamples] with the category label\r\n of each example.\r\n \"\"\"\r\n images = torch.stack(\r\n [self.dataset[img_idx][0] for img_idx, _ in examples], dim=0)\r\n labels = torch.LongTensor([label for _, label in examples])\r\n return images, labels\r\n\r\n def get_iterator(self, epoch=0):\r\n rand_seed = epoch\r\n random.seed(rand_seed)\r\n np.random.seed(rand_seed)\r\n def load_function(iter_idx):\r\n Exemplars, Test, Kall, nKbase = self.sample_episode()\r\n Xt, Yt = self.createExamplesTensorData(Test)\r\n Kall = torch.LongTensor(Kall)\r\n if len(Exemplars) > 0:\r\n Xe, Ye = self.createExamplesTensorData(Exemplars)\r\n return Xe, Ye, Xt, Yt, Kall, nKbase\r\n else:\r\n return Xt, Yt, Kall, nKbase\r\n\r\n tnt_dataset = tnt.dataset.ListDataset(\r\n elem_list=range(self.epoch_size), load=load_function)\r\n data_loader = tnt_dataset.parallel(\r\n batch_size=self.batch_size,\r\n num_workers=(0 if self.is_eval_mode else self.num_workers),\r\n shuffle=(False if self.is_eval_mode else True))\r\n\r\n return data_loader\r\n\r\n def __call__(self, epoch=0):\r\n return self.get_iterator(epoch)\r\n\r\n def __len__(self):\r\n return int(self.epoch_size / self.batch_size)\r\n"
]
| [
[
"torch.LongTensor",
"numpy.random.seed",
"numpy.unique",
"numpy.asarray",
"numpy.concatenate",
"torch.stack"
]
]
|
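The episode sampling above ultimately reduces each episode to the tensor-packing step in `createExamplesTensorData`. A minimal sketch with fake 3x84x84 images in place of dataset entries:

```python
import torch

# Stack per-example image tensors into a batch and collect integer labels,
# mirroring createExamplesTensorData in the row above.
examples = [(torch.rand(3, 84, 84), 0), (torch.rand(3, 84, 84), 1)]
images = torch.stack([img for img, _ in examples], dim=0)
labels = torch.LongTensor([label for _, label in examples])
print(images.shape, labels)  # torch.Size([2, 3, 84, 84]) tensor([0, 1])
```
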
pyMatJ/pymeasure | [
"cf790b00e37bc574b356de4f7eaf9466ffa3710d"
]
| [
"pymeasure/instruments/keithley/keithley2600.py"
]
| [
"# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2020 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport logging\nimport time\nimport numpy as np\nfrom pymeasure.instruments import Instrument\n\n# Setup logging\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\nclass Keithley2600(Instrument):\n \"\"\"Represents the Keithley 2600 series (channel A and B) SourceMeter\"\"\"\n def __init__(self, adapter, **kwargs):\n super(Keithley2600, self).__init__(\n adapter,\n \"Keithley 2600 SourceMeter\",\n **kwargs\n )\n self.ChA = Channel(self, 'a')\n self.ChB = Channel(self, 'b')\n\n\nclass Channel(object):\n\n @property\n def source_output(self):\n if self.instrument.ask('print(smu%s.source.output)' % self.channel)==1:\n return 'output is ON'\n elif self.instrument.ask('print(smu%s.source.output)' % self.channel)==0:\n return 'output is OFF'\n @source_output.setter\n def source_output(self, state):\n if state in ['on', 'off']:\n statedict={\n 'off': 0,\n 'on': 1\n }\n self.instrument.write('smu%s.source.output=%d' % (self.channel,statedict[state]))\n else:\n raise ValueError('State has to be either on or off!')\n\n @property\n def source_mode(self):\n return self.instrument.ask('print(smu%s.source.func)' % self.channel)\n @source_mode.setter\n def source_mode(self, mode):\n if mode in ['current', 'voltage']:\n modedict={\n \"voltage\": 1,\n \"current\": 0\n }\n self.instrument.write('smu%s.source.func=%d' % (self.channel, modedict[mode]))\n else:\n raise ValueError('Mode has to be either current or voltage!')\n\n @property\n def measure_nplc(self):\n return self.instrument.ask('print(smu%s.measure.nplc)' % self.channel)\n @measure_nplc.setter\n def measure_nplc(self, nplc):\n if nplc in range(0.001,25):\n self.instrument.write('smu%s.measure.nplc=%f' % (self.channel, nplc))\n else:\n raise ValueError('NPLC has to be in the interval [0.001,25]!')\n ###############\n # Current (A) #\n ###############\n @property\n def current(self):\n return float(self.instrument.ask('print(smu%s.measure.i())' % self.channel))\n\n @property\n def source_current(self):\n return float(self.instrument.ask('print(smu%s.source.leveli)' % self.channel))\n @source_current.setter\n def source_current(self, current):\n self.instrument.write('smu%s.source.leveli=%f' % (self.channel, current))\n\n @property\n def compliance_current(self):\n return float(self.instrument.ask('print(smu%s.source.limiti)' % self.channel))\n 
@compliance_current.setter\n def compliance_current(self, complvalue):\n self.instrument.write('smu%s.source.limiti=%f' % (self.channel, complvalue))\n\n @property\n def source_current_range(self):\n return float(self.instrument.ask('print(smu%s.source.rangei' % self.channel))\n @source_current_range.setter\n def source_current_range(self, rng):\n if rng in range(-1.5,1.5):\n self.instrument.write('smu%s.source.rangei=%f' % (self.channel, range))\n else:\n raise ValueError('Source current range has to be in the interval [-1.5,1.5]!')\n\n @property\n def current_range(self):\n return float(self.instrument.ask('print(smu%s.measure.rangei' % self.channel))\n @current_range.setter\n def current_range(self, rng):\n if rng in range(-1.5,1.5):\n self.instrument.write('smu%s.measure.rangei=%f' % (self.channel, rng))\n else:\n raise ValueError('Source current range has to be in the interval [-1.5,1.5]!')\n ###############\n # Voltage (V) #\n ###############\n\n @property\n def voltage(self):\n return float(self.instrument.ask('print(smu%s.measure.v())' % self.channel))\n @property\n def source_voltage(self):\n return float(self.instrument.ask('print(smu%s.source.levelv)' % self.channel))\n @source_voltage.setter\n def source_voltage(self, voltage):\n self.instrument.write('smu%s.source.levelv=%f' % (self.channel,voltage))\n\n @property\n def compliance_voltage(self):\n return float(self.instrument.ask('print(smu%s.source.limitv)' % self.channel))\n @compliance_voltage.setter\n def compliance_voltage(self, complvalue):\n if complvalue in range(-200,200):\n self.instrument.write('smu%s.source.limitv=%f' % (self.channel, complvalue))\n else:\n raise ValueError('Compliance voltage must be in the range [-200,200]!')\n\n @property\n def source_voltage_range(self):\n return float(self.instrument.ask('print(smu%s.source.rangev' % self.channel))\n @source_voltage_range.setter\n def source_voltage_range(self, rng):\n if rng in range(-200,200):\n self.instrument.write('smu%s.source.rangev=%f' % (self.channel, rng))\n else:\n raise ValueError('Source voltage range has to be in the interval [-200,200]!')\n\n @property\n def voltage_range(self):\n return float(self.instrument.ask('print(smu%s.measure.rangev' % self.channel))\n @voltage_range.setter\n def voltage_range(self, rng):\n if rng in range(-200,200):\n self.instrument.write('smu%s.measure.rangev=%f' % (self.channel, rng))\n else:\n raise ValueError('Source current range has to be in the interval [-200,200]!')\n ####################\n # Resistance (Ohm) #\n ####################\n\n @property\n def resistance(self):\n return float(self.instrument.ask('print(smu%s.measure.r())' % self.channel))\n\n @property\n def wires_mode(self):\n return self.instrument.ask('print(smu%s.sense)' % self.channel)\n @wires_mode.setter\n def wires_mode(self, mode):\n if mode in ['4', '2']:\n modedict = {\n \"4\": 1,\n \"2\": 0\n }\n self.instrument.write('smu%s.sense=%d' % (self.channel, modedict[mode]))\n else:\n raise ValueError('Wire mode has to be either 4 (wires) or 2 (wires)!')\n ###########\n # Methods #\n ###########\n\n def __init__(self, instrument, channel):\n self.instrument = instrument\n self.channel = channel\n\n def ask(self, cmd):\n return self.instrument.ask(cmd)\n\n def write(self, cmd):\n self.instrument.write(cmd)\n\n def measure_voltage(self, nplc=1, voltage=21.0, auto_range=True):\n \"\"\" Configures the measurement of voltage.\n :param nplc: Number of power line cycles (NPLC) from 0.001 to 25\n :param voltage: Upper limit of voltage in Volts, from -200 
V to 200 V\n :param auto_range: Enables auto_range if True, else uses the set voltage\n \"\"\"\n log.info(\"%s is measuring voltage.\" % self.channel)\n self.write('smu%s.measure.v()' % self.channel)\n self.write('smu%s.measure.nplc=%f' % (self.channel, nplc))\n if auto_range:\n self.write('smu%s.measure.autorangev=1' % self.channel)\n else:\n self.voltage_range = voltage\n self.check_errors()\n\n def measure_current(self, nplc=1, current=1.05e-4, auto_range=True):\n \"\"\" Configures the measurement of current.\n :param nplc: Number of power line cycles (NPLC) from 0.001 to 25\n :param current: Upper limit of current in Amps, from -1.5 A to 1.5 A\n :param auto_range: Enables auto_range if True, else uses the set current\n \"\"\"\n log.info(\"%s is measuring current.\" % self.channel)\n self.write('smu%s.measure.i()' % self.channel)\n self.write('smu%s.measure.nplc=%f' % (self.channel, nplc))\n if auto_range:\n self.write('smu%s.measure.autorangei=1' % self.channel)\n else:\n self.current_range = current\n self.check_errors()\n\n def auto_range_source(self):\n \"\"\" Configures the source to use an automatic range.\n \"\"\"\n if self.source_mode == 'current':\n self.write('smu%s.source.autorangei=1' % self.channel)\n else:\n self.write('smu%s.source.autorangev=1' % self.channel)\n\n def apply_current(self, current_range=None, compliance_voltage=0.1):\n \"\"\" Configures the instrument to apply a source current, and\n uses an auto range unless a current range is specified.\n The compliance voltage is also set.\n :param compliance_voltage: A float in the correct range for a\n :attr:`~.Keithley2400.compliance_voltage`\n :param current_range: A :attr:`~.Keithley2400.current_range` value or None\n \"\"\"\n log.info(\"%s is sourcing current.\" % self.channel)\n self.source_mode = 'current'\n if current_range is None:\n self.auto_range_source()\n else:\n self.source_current_range = current_range\n self.compliance_voltage = compliance_voltage\n self.check_errors()\n\n def apply_voltage(self, voltage_range=None,\n compliance_current=0.1):\n \"\"\" Configures the instrument to apply a source voltage, and\n uses an auto range unless a voltage range is specified.\n The compliance current is also set.\n :param compliance_current: A float in the correct range for a\n :attr:`~.Keithley2400.compliance_current`\n :param voltage_range: A :attr:`~.Keithley2400.voltage_range` value or None\n \"\"\"\n log.info(\"%s is sourcing voltage.\" % self.name)\n self.source_mode = 'voltage'\n if voltage_range is None:\n self.auto_range_source()\n else:\n self.source_voltage_range = voltage_range\n self.compliance_current = compliance_current\n self.check_errors()\n\n @property\n def error(self):\n \"\"\" Returns a tuple of an error code and message from a\n single error. 
\"\"\"\n err = self.values('errorqueue.next()')\n if len(err) < 2:\n err = self.read() # Try reading again\n code = err[0]\n message = err[1].replace('\"', '')\n return (code, message)\n\n def check_errors(self):\n \"\"\" Logs any system errors reported by the instrument.\n \"\"\"\n code, message = self.error\n while code != 0:\n t = time.time()\n log.info(\"Keithley 2600 reported error: %d, %s\" % (code, message))\n code, message = self.error\n if (time.time() - t) > 10:\n log.warning(\"Timed out for Keithley 2600 error retrieval.\")\n\n def ramp_to_voltage(self, target_voltage, steps=30, pause=0.1):\n \"\"\" Ramps to a target voltage from the set voltage value over\n a certain number of linear steps, each separated by a pause duration.\n :param target_voltage: A voltage in Amps\n :param steps: An integer number of steps\n :param pause: A pause duration in seconds to wait between steps \"\"\"\n voltages = np.linspace(self.source_voltage, target_voltage, steps)\n for voltage in voltages:\n self.source_voltage = voltage\n time.sleep(pause)\n\n def ramp_to_current(self, target_current, steps=30, pause=0.1):\n \"\"\" Ramps to a target current from the set current value over\n a certain number of linear steps, each separated by a pause duration.\n :param target_current: A current in Amps\n :param steps: An integer number of steps\n :param pause: A pause duration in seconds to wait between steps \"\"\"\n currents = np.linspace(self.source_current, target_current, steps)\n for current in currents:\n self.source_current = current\n time.sleep(pause)\n\n def shutdown(self):\n \"\"\" Ensures that the current or voltage is turned to zero\n and disables the output. \"\"\"\n log.info(\"Shutting down channel %s.\" % self.channel)\n if self.source_mode == 'current':\n self.ramp_to_current(0.0)\n else:\n self.ramp_to_voltage(0.0)\n self.source_output='off'"
]
| [
[
"numpy.linspace"
]
]
|
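The only third-party call in the row's API list is `numpy.linspace`, which drives both ramp methods. The ramp pattern in isolation, with a print standing in for the instrument write (no hardware assumed):

```python
import time
import numpy as np

# ramp_to_voltage reduces to this: linspace generates the intermediate
# setpoints and the loop applies them with a fixed pause between steps.
def ramp(start, target, steps=30, pause=0.01):
    for level in np.linspace(start, target, steps):
        print("smua.source.levelv=%f" % level)  # stand-in for instrument.write
        time.sleep(pause)

ramp(0.0, 1.0, steps=5)
```
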
bla1089/pandas | [
"9ea49653a621002f222f93edcbcbf5239d1afb2d"
]
| [
"pandas/tests/reshape/test_tile.py"
]
| [
"import os\nimport pytest\n\nimport numpy as np\nfrom pandas.compat import zip\n\nimport pandas as pd\nfrom pandas import (DataFrame, Series, isna, to_datetime, DatetimeIndex, Index,\n Timestamp, Interval, IntervalIndex, Categorical,\n cut, qcut, date_range, timedelta_range, NaT,\n TimedeltaIndex)\nfrom pandas.tseries.offsets import Nano, Day\nimport pandas.util.testing as tm\nfrom pandas.api.types import CategoricalDtype as CDT\n\nfrom pandas.core.algorithms import quantile\nimport pandas.core.reshape.tile as tmod\n\nclass TestCut(object):\n\n def test_simple(self):\n data = np.ones(5, dtype='int64')\n result = cut(data, 4, labels=False)\n expected = np.array([1, 1, 1, 1, 1])\n tm.assert_numpy_array_equal(result, expected,\n check_dtype=False)\n\n def test_bins(self):\n data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1])\n result, bins = cut(data, 3, retbins=True)\n\n intervals = IntervalIndex.from_breaks(bins.round(3))\n intervals = intervals.take([0, 0, 0, 1, 2, 0])\n expected = Categorical(intervals, ordered=True)\n tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,\n 6.53333333, 9.7]))\n\n def test_str_bins(self):\n # GH 14627\n data = np.array([0.1, 0.1, 0.2, 0.5, 0.5, 0.9, 1.0])\n result, bins_cut = cut(data, bins=\"auto\",\n retbins=True)\n\n bins_np = np.histogram_bin_edges(data, \"auto\")\n adj = (bins_np[-1] - bins_np[0]) * 0.001\n bins_np[0] -= adj\n tm.assert_almost_equal(bins_cut, bins_np)\n tm.assert_almost_equal(np.round(bins_cut, 4),\n np.array([0.0991, 0.325, 0.55, 0.775, 1.0]))\n\n intervals = IntervalIndex.from_breaks(np.round(bins_np, 4),\n closed=\"right\")\n expected = Categorical(intervals, ordered=True)\n tm.assert_index_equal(result.categories,\n expected.categories)\n \n \n # Test that a `bin` string not present in `np.histogram_bin_edges`\n # throws a ValueError.\n with pytest.raises(ValueError,\n message=\"Verify acceptable bins in `np.histogram_bin_edges`.\"):\n cut(data, bins=\"bad bins\")\n\n def test_right(self):\n data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])\n result, bins = cut(data, 4, right=True, retbins=True)\n intervals = IntervalIndex.from_breaks(bins.round(3))\n expected = Categorical(intervals, ordered=True)\n expected = expected.take([0, 0, 0, 2, 3, 0, 0])\n tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.1905, 2.575, 4.95,\n 7.325, 9.7]))\n\n def test_noright(self):\n data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])\n result, bins = cut(data, 4, right=False, retbins=True)\n intervals = IntervalIndex.from_breaks(bins.round(3), closed='left')\n intervals = intervals.take([0, 0, 0, 2, 3, 0, 1])\n expected = Categorical(intervals, ordered=True)\n tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95,\n 7.325, 9.7095]))\n\n def test_arraylike(self):\n data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]\n result, bins = cut(data, 3, retbins=True)\n intervals = IntervalIndex.from_breaks(bins.round(3))\n intervals = intervals.take([0, 0, 0, 1, 2, 0])\n expected = Categorical(intervals, ordered=True)\n tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,\n 6.53333333, 9.7]))\n\n def test_bins_from_intervalindex(self):\n c = cut(range(5), 3)\n expected = c\n result = cut(range(5), bins=expected.categories)\n tm.assert_categorical_equal(result, expected)\n\n expected = Categorical.from_codes(np.append(c.codes, -1),\n categories=c.categories,\n ordered=True)\n result 
= cut(range(6), bins=expected.categories)\n tm.assert_categorical_equal(result, expected)\n\n # doc example\n # make sure we preserve the bins\n ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60])\n c = cut(ages, bins=[0, 18, 35, 70])\n expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)])\n tm.assert_index_equal(c.categories, expected)\n\n result = cut([25, 20, 50], bins=c.categories)\n tm.assert_index_equal(result.categories, expected)\n tm.assert_numpy_array_equal(result.codes,\n np.array([1, 1, 2], dtype='int8'))\n\n def test_bins_not_monotonic(self):\n data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]\n pytest.raises(ValueError, cut, data, [0.1, 1.5, 1, 10])\n\n def test_wrong_num_labels(self):\n data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]\n pytest.raises(ValueError, cut, data, [0, 1, 10],\n labels=['foo', 'bar', 'baz'])\n\n def test_cut_corner(self):\n # h3h\n pytest.raises(ValueError, cut, [], 2)\n\n pytest.raises(ValueError, cut, [1, 2, 3], 0.5)\n\n @pytest.mark.parametrize('arg', [2, np.eye(2), DataFrame(np.eye(2))])\n @pytest.mark.parametrize('cut_func', [cut, qcut])\n def test_cut_not_1d_arg(self, arg, cut_func):\n with pytest.raises(ValueError):\n cut_func(arg, 2)\n\n def test_cut_out_of_range_more(self):\n # #1511\n s = Series([0, -1, 0, 1, -3], name='x')\n ind = cut(s, [0, 1], labels=False)\n exp = Series([np.nan, np.nan, np.nan, 0, np.nan], name='x')\n tm.assert_series_equal(ind, exp)\n\n def test_labels(self):\n arr = np.tile(np.arange(0, 1.01, 0.1), 4)\n\n result, bins = cut(arr, 4, retbins=True)\n ex_levels = IntervalIndex.from_breaks([-1e-3, 0.25, 0.5, 0.75, 1])\n tm.assert_index_equal(result.categories, ex_levels)\n\n result, bins = cut(arr, 4, retbins=True, right=False)\n ex_levels = IntervalIndex.from_breaks([0, 0.25, 0.5, 0.75, 1 + 1e-3],\n closed='left')\n tm.assert_index_equal(result.categories, ex_levels)\n\n def test_cut_pass_series_name_to_factor(self):\n s = Series(np.random.randn(100), name='foo')\n\n factor = cut(s, 4)\n assert factor.name == 'foo'\n\n def test_label_precision(self):\n arr = np.arange(0, 0.73, 0.01)\n\n result = cut(arr, 4, precision=2)\n ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36,\n 0.54, 0.72])\n tm.assert_index_equal(result.categories, ex_levels)\n\n def test_na_handling(self):\n arr = np.arange(0, 0.75, 0.01)\n arr[::3] = np.nan\n\n result = cut(arr, 4)\n\n result_arr = np.asarray(result)\n\n ex_arr = np.where(isna(arr), np.nan, result_arr)\n\n tm.assert_almost_equal(result_arr, ex_arr)\n\n result = cut(arr, 4, labels=False)\n ex_result = np.where(isna(arr), np.nan, result)\n tm.assert_almost_equal(result, ex_result)\n\n def test_inf_handling(self):\n data = np.arange(6)\n data_ser = Series(data, dtype='int64')\n\n bins = [-np.inf, 2, 4, np.inf]\n result = cut(data, bins)\n result_ser = cut(data_ser, bins)\n\n ex_uniques = IntervalIndex.from_breaks(bins)\n tm.assert_index_equal(result.categories, ex_uniques)\n assert result[5] == Interval(4, np.inf)\n assert result[0] == Interval(-np.inf, 2)\n assert result_ser[5] == Interval(4, np.inf)\n assert result_ser[0] == Interval(-np.inf, 2)\n\n def test_qcut(self):\n arr = np.random.randn(1000)\n\n # We store the bins as Index that have been rounded\n # to comparisons are a bit tricky.\n labels, bins = qcut(arr, 4, retbins=True)\n ex_bins = quantile(arr, [0, .25, .5, .75, 1.])\n result = labels.categories.left.values\n assert np.allclose(result, ex_bins[:-1], atol=1e-2)\n result = labels.categories.right.values\n assert np.allclose(result, ex_bins[1:], atol=1e-2)\n\n ex_levels = 
cut(arr, ex_bins, include_lowest=True)\n tm.assert_categorical_equal(labels, ex_levels)\n\n def test_qcut_bounds(self):\n arr = np.random.randn(1000)\n\n factor = qcut(arr, 10, labels=False)\n assert len(np.unique(factor)) == 10\n\n def test_qcut_specify_quantiles(self):\n arr = np.random.randn(100)\n\n factor = qcut(arr, [0, .25, .5, .75, 1.])\n expected = qcut(arr, 4)\n tm.assert_categorical_equal(factor, expected)\n\n def test_qcut_all_bins_same(self):\n with pytest.raises(ValueError, match=\"edges.*unique\"):\n qcut([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)\n\n def test_cut_out_of_bounds(self):\n arr = np.random.randn(100)\n\n result = cut(arr, [-1, 0, 1])\n\n mask = isna(result)\n ex_mask = (arr < -1) | (arr > 1)\n tm.assert_numpy_array_equal(mask, ex_mask)\n\n def test_cut_pass_labels(self):\n arr = [50, 5, 10, 15, 20, 30, 70]\n bins = [0, 25, 50, 100]\n labels = ['Small', 'Medium', 'Large']\n\n result = cut(arr, bins, labels=labels)\n exp = Categorical(['Medium'] + 4 * ['Small'] + ['Medium', 'Large'],\n categories=labels,\n ordered=True)\n tm.assert_categorical_equal(result, exp)\n\n result = cut(arr, bins, labels=Categorical.from_codes([0, 1, 2],\n labels))\n exp = Categorical.from_codes([1] + 4 * [0] + [1, 2], labels)\n tm.assert_categorical_equal(result, exp)\n\n # issue 16459\n labels = ['Good', 'Medium', 'Bad']\n result = cut(arr, 3, labels=labels)\n exp = cut(arr, 3, labels=Categorical(labels, categories=labels,\n ordered=True))\n tm.assert_categorical_equal(result, exp)\n\n def test_qcut_include_lowest(self):\n values = np.arange(10)\n\n ii = qcut(values, 4)\n\n ex_levels = IntervalIndex(\n [Interval(-0.001, 2.25),\n Interval(2.25, 4.5),\n Interval(4.5, 6.75),\n Interval(6.75, 9)])\n tm.assert_index_equal(ii.categories, ex_levels)\n\n def test_qcut_nas(self):\n arr = np.random.randn(100)\n arr[:20] = np.nan\n\n result = qcut(arr, 4)\n assert isna(result[:20]).all()\n\n def test_qcut_index(self):\n result = qcut([0, 2], 2)\n intervals = [Interval(-0.001, 1), Interval(1, 2)]\n expected = Categorical(intervals, ordered=True)\n tm.assert_categorical_equal(result, expected)\n\n def test_round_frac(self):\n # it works\n result = cut(np.arange(11.), 2)\n\n result = cut(np.arange(11.) 
/ 1e10, 2)\n\n # #1979, negative numbers\n\n result = tmod._round_frac(-117.9998, precision=3)\n assert result == -118\n result = tmod._round_frac(117.9998, precision=3)\n assert result == 118\n\n result = tmod._round_frac(117.9998, precision=2)\n assert result == 118\n result = tmod._round_frac(0.000123456, precision=2)\n assert result == 0.00012\n\n def test_qcut_binning_issues(self, datapath):\n # #1978, 1979\n cut_file = datapath(os.path.join('reshape', 'data', 'cut_data.csv'))\n arr = np.loadtxt(cut_file)\n\n result = qcut(arr, 20)\n\n starts = []\n ends = []\n for lev in np.unique(result):\n s = lev.left\n e = lev.right\n assert s != e\n\n starts.append(float(s))\n ends.append(float(e))\n\n for (sp, sn), (ep, en) in zip(zip(starts[:-1], starts[1:]),\n zip(ends[:-1], ends[1:])):\n assert sp < sn\n assert ep < en\n assert ep <= sn\n\n def test_cut_return_intervals(self):\n s = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])\n res = cut(s, 3)\n exp_bins = np.linspace(0, 8, num=4).round(3)\n exp_bins[0] -= 0.008\n exp = Series(IntervalIndex.from_breaks(exp_bins, closed='right').take(\n [0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True))\n tm.assert_series_equal(res, exp)\n\n def test_qcut_return_intervals(self):\n s = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])\n res = qcut(s, [0, 0.333, 0.666, 1])\n exp_levels = np.array([Interval(-0.001, 2.664),\n Interval(2.664, 5.328), Interval(5.328, 8)])\n exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(\n CDT(ordered=True))\n tm.assert_series_equal(res, exp)\n\n def test_series_retbins(self):\n # GH 8589\n s = Series(np.arange(4))\n result, bins = cut(s, 2, retbins=True)\n expected = Series(IntervalIndex.from_breaks(\n [-0.003, 1.5, 3], closed='right').repeat(2)).astype(\n CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n result, bins = qcut(s, 2, retbins=True)\n expected = Series(IntervalIndex.from_breaks(\n [-0.001, 1.5, 3], closed='right').repeat(2)).astype(\n CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n def test_cut_duplicates_bin(self):\n # issue 20947\n values = Series(np.array([1, 3, 5, 7, 9]),\n index=[\"a\", \"b\", \"c\", \"d\", \"e\"])\n bins = [0, 2, 4, 6, 10, 10]\n result = cut(values, bins, duplicates='drop')\n expected = cut(values, pd.unique(bins))\n tm.assert_series_equal(result, expected)\n\n pytest.raises(ValueError, cut, values, bins)\n pytest.raises(ValueError, cut, values, bins, duplicates='raise')\n\n # invalid\n pytest.raises(ValueError, cut, values, bins, duplicates='foo')\n\n def test_qcut_duplicates_bin(self):\n # GH 7751\n values = [0, 0, 0, 0, 1, 2, 3]\n expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)])\n\n result = qcut(values, 3, duplicates='drop')\n tm.assert_index_equal(result.categories, expected)\n\n pytest.raises(ValueError, qcut, values, 3)\n pytest.raises(ValueError, qcut, values, 3, duplicates='raise')\n\n # invalid\n pytest.raises(ValueError, qcut, values, 3, duplicates='foo')\n\n def test_single_quantile(self):\n # issue 15431\n expected = Series([0, 0])\n\n s = Series([9., 9.])\n result = qcut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n result = qcut(s, 1)\n intervals = IntervalIndex([Interval(8.999, 9.0),\n Interval(8.999, 9.0)], closed='right')\n expected = Series(intervals).astype(CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n s = Series([-9., -9.])\n expected = Series([0, 0])\n result = qcut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n result = qcut(s, 1)\n intervals = 
IntervalIndex([Interval(-9.001, -9.0),\n Interval(-9.001, -9.0)], closed='right')\n expected = Series(intervals).astype(CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n s = Series([0., 0.])\n expected = Series([0, 0])\n result = qcut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n result = qcut(s, 1)\n intervals = IntervalIndex([Interval(-0.001, 0.0),\n Interval(-0.001, 0.0)], closed='right')\n expected = Series(intervals).astype(CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n s = Series([9])\n expected = Series([0])\n result = qcut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n result = qcut(s, 1)\n intervals = IntervalIndex([Interval(8.999, 9.0)], closed='right')\n expected = Series(intervals).astype(CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n s = Series([-9])\n expected = Series([0])\n result = qcut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n result = qcut(s, 1)\n intervals = IntervalIndex([Interval(-9.001, -9.0)], closed='right')\n expected = Series(intervals).astype(CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n s = Series([0])\n expected = Series([0])\n result = qcut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n result = qcut(s, 1)\n intervals = IntervalIndex([Interval(-0.001, 0.0)], closed='right')\n expected = Series(intervals).astype(CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n def test_single_bin(self):\n # issue 14652\n expected = Series([0, 0])\n\n s = Series([9., 9.])\n result = cut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n\n s = Series([-9., -9.])\n result = cut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n\n expected = Series([0])\n\n s = Series([9])\n result = cut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n\n s = Series([-9])\n result = cut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n\n # issue 15428\n expected = Series([0, 0])\n\n s = Series([0., 0.])\n result = cut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n\n expected = Series([0])\n\n s = Series([0])\n result = cut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"array_1_writeable, array_2_writeable\",\n [(True, True), (True, False), (False, False)])\n def test_cut_read_only(self, array_1_writeable, array_2_writeable):\n # issue 18773\n array_1 = np.arange(0, 100, 10)\n array_1.flags.writeable = array_1_writeable\n\n array_2 = np.arange(0, 100, 10)\n array_2.flags.writeable = array_2_writeable\n\n hundred_elements = np.arange(100)\n\n tm.assert_categorical_equal(cut(hundred_elements, array_1),\n cut(hundred_elements, array_2))\n\n\nclass TestDatelike(object):\n\n @pytest.mark.parametrize('s', [\n Series(DatetimeIndex(['20180101', NaT, '20180103'])),\n Series(TimedeltaIndex(['0 days', NaT, '2 days']))],\n ids=lambda x: str(x.dtype))\n def test_qcut_nat(self, s):\n # GH 19768\n intervals = IntervalIndex.from_tuples(\n [(s[0] - Nano(), s[2] - Day()), np.nan, (s[2] - Day(), s[2])])\n expected = Series(Categorical(intervals, ordered=True))\n result = qcut(s, 2)\n tm.assert_series_equal(result, expected)\n\n def test_datetime_cut(self):\n # GH 14714\n # testing for time data to be present as series\n data = to_datetime(Series(['2013-01-01', '2013-01-02', '2013-01-03']))\n\n result, bins = cut(data, 3, retbins=True)\n expected = (\n Series(IntervalIndex([\n Interval(Timestamp('2012-12-31 23:57:07.200000'),\n 
Timestamp('2013-01-01 16:00:00')),\n Interval(Timestamp('2013-01-01 16:00:00'),\n Timestamp('2013-01-02 08:00:00')),\n Interval(Timestamp('2013-01-02 08:00:00'),\n Timestamp('2013-01-03 00:00:00'))]))\n .astype(CDT(ordered=True)))\n\n tm.assert_series_equal(result, expected)\n\n # testing for time data to be present as list\n data = [np.datetime64('2013-01-01'), np.datetime64('2013-01-02'),\n np.datetime64('2013-01-03')]\n result, bins = cut(data, 3, retbins=True)\n tm.assert_series_equal(Series(result), expected)\n\n # testing for time data to be present as ndarray\n data = np.array([np.datetime64('2013-01-01'),\n np.datetime64('2013-01-02'),\n np.datetime64('2013-01-03')])\n result, bins = cut(data, 3, retbins=True)\n tm.assert_series_equal(Series(result), expected)\n\n # testing for time data to be present as datetime index\n data = DatetimeIndex(['2013-01-01', '2013-01-02', '2013-01-03'])\n result, bins = cut(data, 3, retbins=True)\n tm.assert_series_equal(Series(result), expected)\n\n @pytest.mark.parametrize('bins', [\n 3, [Timestamp('2013-01-01 04:57:07.200000'),\n Timestamp('2013-01-01 21:00:00'),\n Timestamp('2013-01-02 13:00:00'),\n Timestamp('2013-01-03 05:00:00')]])\n @pytest.mark.parametrize('box', [list, np.array, Index, Series])\n def test_datetimetz_cut(self, bins, box):\n # GH 19872\n tz = 'US/Eastern'\n s = Series(date_range('20130101', periods=3, tz=tz))\n if not isinstance(bins, int):\n bins = box(bins)\n result = cut(s, bins)\n expected = (\n Series(IntervalIndex([\n Interval(Timestamp('2012-12-31 23:57:07.200000', tz=tz),\n Timestamp('2013-01-01 16:00:00', tz=tz)),\n Interval(Timestamp('2013-01-01 16:00:00', tz=tz),\n Timestamp('2013-01-02 08:00:00', tz=tz)),\n Interval(Timestamp('2013-01-02 08:00:00', tz=tz),\n Timestamp('2013-01-03 00:00:00', tz=tz))]))\n .astype(CDT(ordered=True)))\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize('bins', [3, np.linspace(0, 1, 4)])\n def test_datetimetz_qcut(self, bins):\n # GH 19872\n tz = 'US/Eastern'\n s = Series(date_range('20130101', periods=3, tz=tz))\n result = qcut(s, bins)\n expected = (\n Series(IntervalIndex([\n Interval(Timestamp('2012-12-31 23:59:59.999999999', tz=tz),\n Timestamp('2013-01-01 16:00:00', tz=tz)),\n Interval(Timestamp('2013-01-01 16:00:00', tz=tz),\n Timestamp('2013-01-02 08:00:00', tz=tz)),\n Interval(Timestamp('2013-01-02 08:00:00', tz=tz),\n Timestamp('2013-01-03 00:00:00', tz=tz))]))\n .astype(CDT(ordered=True)))\n tm.assert_series_equal(result, expected)\n\n def test_datetime_bin(self):\n data = [np.datetime64('2012-12-13'), np.datetime64('2012-12-15')]\n bin_data = ['2012-12-12', '2012-12-14', '2012-12-16']\n expected = (\n Series(IntervalIndex([\n Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])),\n Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2]))]))\n .astype(CDT(ordered=True)))\n\n for conv in [Timestamp, Timestamp, np.datetime64]:\n bins = [conv(v) for v in bin_data]\n result = cut(data, bins=bins)\n tm.assert_series_equal(Series(result), expected)\n\n bin_pydatetime = [Timestamp(v).to_pydatetime() for v in bin_data]\n result = cut(data, bins=bin_pydatetime)\n tm.assert_series_equal(Series(result), expected)\n\n bins = to_datetime(bin_data)\n result = cut(data, bins=bin_pydatetime)\n tm.assert_series_equal(Series(result), expected)\n\n def test_datetime_nan(self):\n\n def f():\n cut(date_range('20130101', periods=3), bins=[0, 2, 4])\n pytest.raises(ValueError, f)\n\n result = cut(date_range('20130102', periods=5),\n bins=date_range('20130101', periods=2))\n 
mask = result.categories.isna()\n tm.assert_numpy_array_equal(mask, np.array([False]))\n mask = result.isna()\n tm.assert_numpy_array_equal(\n mask, np.array([False, True, True, True, True]))\n\n @pytest.mark.parametrize('tz', [None, 'UTC', 'US/Pacific'])\n def test_datetime_cut_roundtrip(self, tz):\n # GH 19891\n s = Series(date_range('20180101', periods=3, tz=tz))\n result, result_bins = cut(s, 2, retbins=True)\n expected = cut(s, result_bins)\n tm.assert_series_equal(result, expected)\n expected_bins = DatetimeIndex(['2017-12-31 23:57:07.200000',\n '2018-01-02 00:00:00',\n '2018-01-03 00:00:00'])\n expected_bins = expected_bins.tz_localize(tz)\n tm.assert_index_equal(result_bins, expected_bins)\n\n def test_timedelta_cut_roundtrip(self):\n # GH 19891\n s = Series(timedelta_range('1day', periods=3))\n result, result_bins = cut(s, 2, retbins=True)\n expected = cut(s, result_bins)\n tm.assert_series_equal(result, expected)\n expected_bins = TimedeltaIndex(['0 days 23:57:07.200000',\n '2 days 00:00:00',\n '3 days 00:00:00'])\n tm.assert_index_equal(result_bins, expected_bins)\n\n @pytest.mark.parametrize('arg, expected_bins', [\n [timedelta_range('1day', periods=3),\n TimedeltaIndex(['1 days', '2 days', '3 days'])],\n [date_range('20180101', periods=3),\n DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'])]])\n def test_datelike_qcut_bins(self, arg, expected_bins):\n # GH 19891\n s = Series(arg)\n result, result_bins = qcut(s, 2, retbins=True)\n tm.assert_index_equal(result_bins, expected_bins)\n"
]
| [
[
"pandas.tseries.offsets.Day",
"pandas.to_datetime",
"pandas.Series",
"numpy.linspace",
"numpy.asarray",
"numpy.round",
"pandas.util.testing.assert_index_equal",
"numpy.random.randn",
"pandas.isna",
"pandas.util.testing.assert_numpy_array_equal",
"numpy.allclose",
"pandas.util.testing.assert_categorical_equal",
"numpy.unique",
"numpy.arange",
"numpy.eye",
"pandas.util.testing.assert_series_equal",
"pandas.DatetimeIndex",
"pandas.cut",
"pandas.core.reshape.tile._round_frac",
"pandas.Categorical.from_codes",
"pandas.core.algorithms.quantile",
"pandas.Categorical",
"pandas.util.testing.assert_almost_equal",
"numpy.append",
"pandas.unique",
"pandas.Interval",
"pandas.date_range",
"numpy.histogram_bin_edges",
"numpy.array",
"pandas.timedelta_range",
"pandas.TimedeltaIndex",
"pandas.api.types.CategoricalDtype",
"numpy.ones",
"pandas.IntervalIndex.from_tuples",
"pandas.IntervalIndex.from_breaks",
"numpy.datetime64",
"pandas.tseries.offsets.Nano",
"pandas.compat.zip",
"pandas.Timestamp",
"pandas.qcut",
"numpy.loadtxt"
]
]
|
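The test module above exercises `pandas.cut` and `pandas.qcut` at length; the core contrast between the two fits in a few lines (sample values taken from the tests):

```python
import numpy as np
import pandas as pd

data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1])

# cut: equal-width bins over the value range; qcut: equal-count quantile bins.
by_value, bins = pd.cut(data, 3, retbins=True)
by_quantile = pd.qcut(data, 2)
print(bins.round(3))      # equal-width edges spanning [0.2, 9.7]
print(by_value.codes)     # which width-bin each point fell into
print(by_quantile.codes)  # half of the points land in each quantile bin
```
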
Zotkin/incremental_learning.pytorch | [
"6a0d7385d209abcd40a402dcad42293dd4e8b362"
]
| [
"inclearn/lib/vizualization.py"
]
| [
"import torch\n\n\ndef grad_cam(spatial_features, selected_logits):\n batch_size = spatial_features.shape[0]\n assert batch_size == len(selected_logits)\n\n formated_logits = [selected_logits[i] for i in range(batch_size)]\n\n import pdb\n pdb.set_trace()\n grads = torch.autograd.grad(\n formated_logits, spatial_features, retain_graph=True, create_graph=True\n )\n\n assert grads.shape == spatial_features.shape\n\n return grads\n"
]
| [
[
"torch.autograd.grad"
]
]
|
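`torch.autograd.grad` returns a tuple with one gradient per input, which is why the function above unpacks its result. A runnable miniature with a synthetic feature map and a stand-in head:

```python
import torch

features = torch.randn(2, 8, 4, 4, requires_grad=True)
logits = features.mean(dim=(1, 2, 3))  # stand-in for per-sample selected logits

# grad() takes (outputs, inputs) and returns a tuple of gradients, one per
# input tensor; the single element here has the same shape as `features`.
grads, = torch.autograd.grad(logits.sum(), features)
assert grads.shape == features.shape
```
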
CMU-IDS-2021/fp--zixuc-jiajunb | [
"f2b751cc6c46f4fc8d8da822898c32b129d75992"
]
| [
"app.py"
]
| [
"import os\nimport math\nimport numpy as np\nimport pandas as pd\n\nimport streamlit as st\nimport altair as alt\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torchvision\nfrom torchvision import datasets, transforms\nfrom torchvision.utils import save_image\nfrom matplotlib import image\nfrom torch.autograd import Variable\nimport util\nimport SessionState\nimport modeling\n\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'True' # Prevent warning in MacOS\n\n\ndef show_overview_page():\n st.title('Overview of Datasets')\n st.text('By Jiajun Bao and Zixu Chen')\n\n st.write(\n \"We use two datasets in the application, the MNIST and FashionMNIST datasets. Since they are all relatively small datasets, it's easier to train than larger datasets like human face datasets. Additionally, these datasets are easy to understand, so we can easily evaluate the end results.\"\n )\n\n dataloaders = {}\n\n st.header('MNIST Dataset')\n st.write(\n 'MNIST (Modified National Institute of Standards and Technology) dataset is a set of handwritten digits from 0-9 (a total of 10 classes). The dataset images are all 28×28 pixel grayscale images. There are a total of 60,000 training images and 10,000 testing images.'\n )\n st.markdown('[MNIST Source](http://yann.lecun.com/exdb/mnist/)')\n\n st.subheader('MNIST Labels')\n st.image('assets/mnist-labels.png')\n st.text('Fig1: MNIST labels [ref: https://m-alcu.github.io/blog/2018/01/13/nmist-dataset/]')\n\n st.subheader('MNIST Sample Images')\n dataloaders['MNIST'] = util.load_sample_data('MNIST', torchvision.transforms.ToTensor())\n data, labels = next(iter(dataloaders['MNIST']))\n fig = util.plot_grayscale_img(data, labels)\n fig\n\n st.header('FashionMNIST Dataset')\n st.write(\n 'Similar to MNIST, FashionMNIST is dataset of 10 classes of fashion items. The images are also 28×28 pixel grayscale images, and the dataset also consists of 60,000 training images and 10,000 testing images.'\n )\n st.markdown('[FashionMNIST Source](https://github.com/zalandoresearch/fashion-mnist)')\n\n st.subheader('FashionMNIST Labels')\n st.image('assets/fashionmnist-labels.png')\n st.text(\n 'Fig2: FashionMNIST labels [ref: https://www.researchgate.net/figure/Fashion-MNIST-Dataset-Images-with-Labels-and-Description-II-LITERATURE-REVIEW-In-image_fig1_340299295]'\n )\n\n st.subheader('FashionMNIST Sample Images')\n dataloaders['FashionMNIST'] = util.load_sample_data('FashionMNIST',\n torchvision.transforms.ToTensor())\n data, labels = next(iter(dataloaders['FashionMNIST']))\n fig = util.plot_grayscale_img(data, labels)\n fig\n\n\ndef show_gan_intro_page():\n st.title('Introduction of GAN')\n st.text('By Jiajun Bao and Zixu Chen')\n\n st.header('What Is GAN?')\n st.image('assets/gan.png')\n st.text('Fig1: GAN architecture [ref: https://sthalles.github.io/intro-to-gans/]')\n st.write(\n 'Generative adversarial network (GAN) is a deep neural network architecture that can output new data that resembles input data. At a high level, GAN consists of a generator and a discriminator. The generator learns from the training set to generate outputs with similar distribution as the original data, while the discriminator tries to distinguish generated outputs from the authentic data. The two have to compete with each other in a zero-sum game fashion to avoid suffering loss.'\n )\n\n st.header('How Can GAN Visualizer Help You Understand GAN?')\n st.write(\n '''You can use the \"Trained Model Logs\", \"Model Training\", and \"Model Inference\" page to play around with training GAN and using GAN. 
In those pages, you will be able to tune some of the most essential hyperparameters to see how they can affect the generated images. In this way, you don't need to be confused with the abstract visualization of the gradients while getting the flexibility to tune the model.'''\n )\n st.write(\n 'GAN Visualizer also gives you a way of training your own GAN models without having to know how to implement the complex GAN architecture in PyTorch or TensorFlow. We abstract the model details away and allow you to directly tune the training hyperparameters in a GUI. You may freely play around with it to explore GAN. Have fun 😉'\n )\n\n st.header('Explanation of Provided Hyperparameters')\n st.write(\n 'These are the parameters that you will be able to tune in the model training pages. Having a good understanding of what they are can help you get a sense of how to train a good model.'\n )\n st.markdown('**Dataset**: the dataset you will be training on')\n st.markdown(\n '**Dataset Classes**: how many classes to look at. Using 1 class can give a good result in a short time'\n )\n st.markdown('**Learning Rate**: step size in the gradient descent optimization')\n st.markdown('**Latent Variable Dimension**: size of the hidden weight')\n st.markdown(\n '**Batch Size**: number of examples to work though in each optimization iteration. It is the subset of the entire training dataset)'\n )\n st.markdown('**Epochs**: each epoch means going through the entire training dataset once')\n st.markdown('**Sample Interval**: how frequently we sample the loss and generated images')\n\n st.header('How Does GAN Differ From CNN?')\n st.image('assets/lenet5.png')\n st.text(\n 'Fig2: example CNN architecture [ref: https://www.researchgate.net/figure/The-architecture-of-LeNet-5-23-a-CNN-used-for-digits-recognition-for-the-MNIST-dataset_fig2_321665783]'\n )\n st.write(\n 'Both GAN and Convolutional Neural Network (CNN) are deep learning neural network architectures that mainly target the computer vision tasks. CNN is often used in discriminative tasks like image classification, while GAN is often used in generative tasks like generating new images. For example, with the MNIST dataset, CNN can recognize the digits while GAN can generate random new handwritten digit images.'\n )\n\n st.header(\"What's The Relationship Between GAN, CNN, And Data Augmentation?\")\n st.write(\n 'Data augmentation is a common technique used with CNN. In real-world scenario, we are often lack of training data, so data augmentation is a way to generate new training data. Additionally, with the added noise from data augmentation, the technique can typically make CNN models work more robustly.'\n )\n st.write(\n 'Regarding GAN, we mentioned that GAN can be used to generate new images, what can you think of its relationship to data augmentation? We can use GAN to generate hallucinating images used in data augmentation!'\n )\n\n st.header('Explore Data Augmentation Techniques')\n st.write(\n '''Here you can play around with a few common data augmentation techniques to get a \"visualized\" sense of what data augmentation (mainly for CNN) is. 


# Reference: https://discuss.streamlit.io/t/is-there-any-working-example-for-session-state-for-streamlit-version-0-63-1/4551/2
session_state = SessionState.get(
    start_pretrain=False,
    pretrain_dataset='MNIST',
    pretrain_lr=0.0001,
    pretrain_latent_dim=25,
    start_train=False,
    finish_train=False,
    dataset='MNIST',
    dataset_classes=1,
    lr=0.0001,
    latent_dim=25,
    epochs=20,
    sample_interval=10,
    batch_step=[],
    g_loss=[],
    d_loss=[],
    imgs=[],
    saved_model=None,
    saved_latent_dim=25,
)
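
# SessionState is the community helper from the thread referenced above; its
# internals are an assumption here. It is assumed to return an object whose
# attributes persist across Streamlit script reruns within one browser session:
#     state = SessionState.get(counter=0)  # keyword args are first-run defaults
#     state.counter += 1                   # the mutation survives the next rerun
# This app relies on that to keep training results alive while the user tweaks
# the plotting controls, because every widget interaction reruns the script.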


def show_pretrain_page():
    st.title('Trained Model Logs')
    st.text('By Jiajun Bao and Zixu Chen')

    st.write(
        'To avoid the long training wait time, we provide a set of pre-trained model logs with less flexibility in hyperparameter configurations.'
    )
    st.write(
        'We recorded the loss data and generated images while training these models, so you can replay a real training process without having to actually wait.'
    )
    st.markdown(
        '**Hint**: you may check the "GAN Introduction" page for an explanation of the provided hyperparameters.'
    )

    st.sidebar.subheader('Hyperparameter Configurations')
    dataset = st.sidebar.selectbox('Training Dataset:', ('MNIST', 'FashionMNIST'))
    lr = st.sidebar.selectbox('Learning Rate:', (0.0001, 0.0002, 0.0004, 0.0008, 0.001, 0.002), 2)
    latent_dim = st.sidebar.selectbox('Latent Variable Dimension:', (25, 50, 75, 100, 125), 3)
    batch_size = st.sidebar.selectbox('Batch Size:', [64])
    epochs = st.sidebar.selectbox('Training Epochs:', [20])

    st.sidebar.subheader("Ready? Let's get started!")
    start_train = st.sidebar.button('Start Training')
    if (dataset != session_state.pretrain_dataset or lr != session_state.pretrain_lr
            or latent_dim != session_state.pretrain_latent_dim):
        session_state.start_pretrain = False

    st.header('Training Loss Results')
    if start_train or session_state.start_pretrain:
        session_state.start_pretrain = True
        session_state.pretrain_dataset = dataset
        session_state.pretrain_lr = lr
        session_state.pretrain_latent_dim = latent_dim

        # Configuring upperbound
        st.markdown('**Configuring Upperbound**')
        st.write(
            'For some hyperparameter configurations, the loss may contain very high outliers, which distort the overall scale of the plots. The field below lets you set an upper bound on the loss to smooth the plots. The default of -1 means no upper bound is set.'
        )
        st.write(
            'This typically happens with a high learning rate. If we look at the generated images after such an outlier appears, they will be a mess.'
        )
        upperbound = int(st.text_input('Smoothing Upperbound (int):', '-1'))

        # Configuring rolling range
        st.markdown('**Configuring Range for Calculating Rolling Average**')
        st.write(
            'The raw loss figure zig-zags a lot, which makes the trend hard to see. We can therefore apply a rolling average over a range of data points to smooth the curves. Typically, the larger the range, the smoother the curve.'
        )
        roll_range = st.selectbox('Rolling Average Range:', (10, 25, 50, 100, 200), 2)

        # Plotting figures
        st.markdown('**Loss Figures**')
        batch_step, g_loss, d_loss = util.load_pretrain_stats(lr, latent_dim, dataset)

        scaled_loss_plot, loss_ravg_plot = util.plot_loss_figs(batch_step, g_loss, d_loss,
                                                               upperbound, roll_range)
        scaled_loss_plot & loss_ravg_plot  # Altair: '&' stacks the two charts vertically

        st.write(
            'You may slide on the rolling average loss figure to select a range of batch steps. This zooms the raw loss figure above in to the corresponding range so you can see the details.'
        )
    else:
        st.write("Waiting for training to start or finish...")

    st.header('Generated Images')
    if start_train or session_state.start_pretrain:
        st.write(
            'You may use the slider below to see how the images generated by the GAN change through the training process.'
        )

        step = st.slider(
            'After How Many Batch Steps',
            min_value=0,
            max_value=6250,
            value=3000,
            step=500,
        )

        imgs = util.load_img(f'data/{dataset.lower()}_generated/{lr}_{latent_dim}')
        fig = util.plot_pretrain_generated_img(imgs, step)
        fig
    else:
        st.write("Waiting for training to start or finish...")
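

# ---------------------------------------------------------------------------
# Illustrative sketch of the smoothing described above. util.plot_loss_figs is
# this project's helper, so its internals are an assumption; clipping the loss
# at an upper bound and taking a rolling mean could look like this.
def _example_smooth_loss(loss, upperbound=-1, roll_range=50):
    """Return (clipped, rolling-averaged) versions of a loss series."""
    s = pd.Series(loss, dtype=float)
    if upperbound > 0:
        s = s.clip(upper=upperbound)  # tame outlier spikes before plotting
    return s, s.rolling(roll_range, min_periods=1).mean()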


def show_training_page():
    st.title('Model Training')
    st.text('By Jiajun Bao and Zixu Chen')

    st.write(
        "You can reduce the number of training epochs to a small number to finish training faster. However, the generated images might look very bad if we don't train enough."
    )
    st.write(
        'By default you will use the small one-class dataset to train the model. This helps train the model faster but prevents you from generating images of other classes. You may switch to the full ten-class dataset to generate images of different items.'
    )
    st.markdown('**One Class Dataset Label**: digit 8 (MNIST), bag (FashionMNIST)')
    st.markdown(
        '**Hint**: you may check the "GAN Introduction" page for an explanation of the provided hyperparameters.'
    )

    st.sidebar.subheader('Hyperparameter Configurations')

    dataset = st.sidebar.selectbox('Training Dataset:', ('MNIST', 'FashionMNIST'))
    dataset_classes = st.sidebar.selectbox('Dataset Classes:', (1, 10), 0)
    lr = float(st.sidebar.text_input('Learning Rate (float):', '0.0004'))
    latent_dim = int(st.sidebar.text_input('Latent Variable Dimension (int):', '100'))
    batch_size = st.sidebar.selectbox('Batch Size:', [32])
    epochs = int(st.sidebar.text_input('Training Epochs (int):', '20'))
    sample_interval = st.sidebar.selectbox('Sample Interval:', (10, 20, 50, 100, 500, 1000), 0)
    show_progress = st.sidebar.checkbox('Show Intermediate Generated Images', value=True)

    st.sidebar.subheader("Ready? Let's get started!")
    start_train = st.sidebar.button('Start Training')
    if (dataset != session_state.dataset or dataset_classes != session_state.dataset_classes
            or lr != session_state.lr or latent_dim != session_state.latent_dim
            or epochs != session_state.epochs or sample_interval != session_state.sample_interval):
        session_state.start_train = False
        session_state.finish_train = False

    st.header('Training Progress')
    batch_step = []
    g_loss = []
    d_loss = []
    imgs = []

    if dataset_classes == 1:
        total_steps = 63 * epochs  # batches per epoch in the one-class subset
        dataset_path = 'data/small'
    elif dataset_classes == 10:
        total_steps = 625 * epochs  # batches per epoch in the ten-class dataset
        dataset_path = 'data/ten_classes'

    if start_train or session_state.start_train:
        session_state.start_train = True
        session_state.dataset = dataset
        session_state.dataset_classes = dataset_classes
        session_state.lr = lr
        session_state.latent_dim = latent_dim
        session_state.epochs = epochs
        session_state.sample_interval = sample_interval
        progress_bar = st.progress(0)

        if not session_state.finish_train:
            for ret in modeling.train(lr, latent_dim, epochs, sample_interval, dataset,
                                      dataset_path):
                batch_step.append(ret['batches_done'])
                g_loss.append(ret['g_loss'])
                d_loss.append(ret['d_loss'])
                imgs.append(ret['first_25_images'])
                g_model = ret['g_model']
                progress = int(batch_step[-1] / total_steps * 100)
                if show_progress:
                    fig = plt.figure()
                    fig.suptitle(f'Step: {batch_step[-1]} | Progress: {progress}%')
                    for i in range(25):
                        plt.subplot(5, 5, i + 1)
                        plt.imshow(imgs[-1][i][0], cmap='gray')
                        plt.xticks([])
                        plt.yticks([])
                    fig
                progress_bar.progress(progress)
            session_state.finish_train = True
            session_state.batch_step = batch_step
            session_state.g_loss = g_loss
            session_state.d_loss = d_loss
            session_state.imgs = imgs
            session_state.saved_model = g_model
            session_state.saved_latent_dim = latent_dim
        else:
            batch_step = session_state.batch_step
            g_loss = session_state.g_loss
            d_loss = session_state.d_loss
            imgs = session_state.imgs

        progress_bar.progress(100)
        st.write('Training finished! The model is saved automatically.')
    else:
        st.write("Waiting for training to start or finish...")

    st.header('Training Loss Results')
    if start_train or session_state.start_train:
        # Configuring upperbound
        st.markdown('**Configuring Upperbound**')
        st.write(
            'For some hyperparameter configurations, the loss may contain very high outliers, which distort the overall scale of the plots. The field below lets you set an upper bound on the loss to smooth the plots. The default of -1 means no upper bound is set.'
        )
        st.write(
            'This typically happens with a high learning rate. If we look at the generated images after such an outlier appears, they will be a mess.'
        )
        upperbound = int(st.text_input('Smoothing Upperbound (int):', '-1'))

        # Configuring rolling range
        st.markdown('**Configuring Range for Calculating Rolling Average**')
        st.write(
            'The raw loss figure zig-zags a lot, which makes the trend hard to see. We can therefore apply a rolling average over a range of data points to smooth the curves. Typically, the larger the range, the smoother the curve.'
        )
        roll_range = st.selectbox('Rolling Average Range:', (10, 25, 50, 100, 200), 2)

        # Plotting figures
        st.markdown('**Loss Figures**')

        scaled_loss_plot, loss_ravg_plot = util.plot_loss_figs(batch_step, g_loss, d_loss,
                                                               upperbound, roll_range)
        scaled_loss_plot & loss_ravg_plot  # Altair: '&' stacks the two charts vertically

        st.write(
            'You may slide on the rolling average loss figure to select a range of batch steps. This zooms the raw loss figure above in to the corresponding range so you can see the details.'
        )
    else:
        st.write("Waiting for training to start or finish...")

    st.header('Generated Images')
    if start_train or session_state.start_train:
        st.write(
            'You may use the slider below to see how the images generated by the GAN change through the training process.'
        )

        # Note: sometimes the step will exceed max_value?
        step = st.slider(
            'After How Many Batch Steps',
            min_value=0,
            max_value=total_steps,
            value=0,
            step=sample_interval,
        )

        fig = util.plot_generated_img(imgs, step, sample_interval)
        fig
    else:
        st.write("Waiting for training to start or finish...")
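

# ---------------------------------------------------------------------------
# Illustrative sketch of the producer side of the loop above. modeling.train is
# this project's module; the dict keys match how the app consumes them, but the
# body is otherwise an assumption. Yielding every `sample_interval` batches is
# what lets the page redraw losses and sample images while training runs.
def _example_train(generator, discriminator, dataloader, latent_dim, epochs,
                   sample_interval, lr=0.0002):
    opt_g = torch.optim.Adam(generator.parameters(), lr=lr)
    opt_d = torch.optim.Adam(discriminator.parameters(), lr=lr)
    batches_done = 0
    for _ in range(epochs):
        for real_imgs, _labels in dataloader:
            g_loss, d_loss = _example_gan_step(generator, discriminator,
                                               real_imgs, latent_dim, opt_g, opt_d)
            batches_done += 1
            if batches_done % sample_interval == 0:
                with torch.no_grad():
                    samples = generator(torch.randn(25, latent_dim))
                yield {
                    'batches_done': batches_done,
                    'g_loss': g_loss,
                    'd_loss': d_loss,
                    'first_25_images': samples.cpu().numpy(),
                    'g_model': generator.state_dict(),
                }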


def show_inference_page():
    st.title('Model Inference')
    st.text('By Jiajun Bao and Zixu Chen')

    st.write(
        'You may generate new images with the model you saved from the "Model Training" page or with a pre-trained model we provide.'
    )

    model = st.sidebar.selectbox('Inference Model:', ('My Model', 'Pre-trained Model'))

    latent_dim = 100
    generator = None

    if model == 'My Model':
        st.header('Inference With The Model You Just Trained')

        # Load saved model
        if session_state.saved_model is not None:
            latent_dim = session_state.saved_latent_dim
            generator = modeling.Generator(latent_dim)
            generator.load_state_dict(session_state.saved_model)
            generator.eval()
        else:
            st.write(
                'No saved trained model found... Please train one on the "Model Training" page')
    else:
        st.header('Inference With Pre-trained Model')

        model_name = st.sidebar.selectbox('Trained-on:', ('MNIST', 'FashionMNIST'))
        ckpt = torch.load(os.path.join('ckpts', f'{model_name}_generator.pth.tar'))
        latent_dim = ckpt['config']['latent_dim']
        args = ckpt["generater_args"]  # (sic) key name as saved in the checkpoint
        n_classes, latent_dim, img_shape = args['n_classes'], args['latent_dim'], args['img_shape']
        generator = modeling.Loaded_Generator(n_classes, latent_dim, img_shape)
        generator.load_state_dict(ckpt['generator'])
        generator.eval()

    if generator is not None:
        st.write(
            'You may use the slider below to adjust the random noise passed to the generator. If you tune it, you will see slight differences in the generated images.'
        )

        value = st.slider(
            'Set the initial input vector',
            min_value=0,
            max_value=10,
            value=3,
            step=1,
        )
        start = st.button('Start Inference')

        st.header('Generated Images')
        if start:
            n_row = 5
            # Variable is a legacy no-op wrapper in modern PyTorch; kept from the original
            z = Variable(
                torch.FloatTensor(value / 10 * np.random.normal(0, 1, (n_row**2, latent_dim))))

            # Get labels ranging from 0 to n_classes for n rows
            labels = np.array([num for _ in range(n_row) for num in range(n_row)])
            labels = Variable(torch.LongTensor(labels))
            if model == 'My Model':
                gen_imgs = generator(z)
            else:
                gen_imgs = generator(z, labels)
            save_image(gen_imgs.data, "tmp.png", nrow=n_row, normalize=True)
            fig = plt.figure()
            plt.imshow(image.imread("tmp.png"))
            plt.title('Generated Images')
            plt.xticks([])
            plt.yticks([])
            fig
        else:
            st.write('Waiting for inference to start...')
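

# ---------------------------------------------------------------------------
# Illustrative sketch of what the slider above does to the latent input: a
# slider value v in [0, 10] scales standard-normal noise by v / 10, i.e. it
# samples z ~ N(0, (v/10)^2). Small v pushes every z toward 0, so the outputs
# look more uniform; v = 10 matches the prior the generator saw in training.
def _example_scaled_latent(v, n_samples, latent_dim):
    """Latent batch whose standard deviation is v / 10."""
    return torch.FloatTensor(v / 10 * np.random.normal(0, 1, (n_samples, latent_dim)))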


st.sidebar.title('GAN Visualizer')
st.sidebar.write('Helping beginners learn GAN more easily')

st.sidebar.subheader('Page Navigation')
pages = [
    'Dataset Overview', 'GAN Introduction', 'Trained Model Logs', 'Model Training',
    'Model Inference'
]

page = st.sidebar.selectbox('Choose a stage to explore:', pages)

if page == pages[0]:
    show_overview_page()
elif page == pages[1]:
    show_gan_intro_page()
elif page == pages[2]:
    show_pretrain_page()
elif page == pages[3]:
    show_training_page()
elif page == pages[4]:
    show_inference_page()
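
# Usage note (the filename is an assumption; substitute the actual script name):
#     streamlit run gan_visualizer.py
# Streamlit reruns this whole script on each widget interaction; only the page
# selected in the sidebar is rendered, so the show_*_page() functions stay
# independent of one another.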
"
]
| [
[
"torch.LongTensor",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"matplotlib.image.imread",
"numpy.random.normal",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure"
]
]
|