Columns: repo_name (string, 6 to 130 characters), hexsha (list), file_path (list), code (list), apis (list)
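The rows below follow this schema, with hexsha, file_path, code, and apis as parallel per-file lists for each repository. A minimal sketch for iterating over such records is given here, assuming they are stored as JSON Lines; the file name code_api_records.jsonl is a hypothetical placeholder, not something named in this dump.

# Minimal sketch, assuming the records are stored one JSON object per line
# with the columns listed in the header above. The file name is hypothetical.
import json

def iter_records(path):
    """Yield one dict per repository record."""
    with open(path, "rt", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)

if __name__ == "__main__":
    for record in iter_records("code_api_records.jsonl"):
        # hexsha, file_path, code, and apis are parallel lists, one entry per file
        print(record["repo_name"], record["file_path"], record["apis"])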
alexpearce/probfit
[ "24e19f01075b3684ba63bbb5328186777f380764" ]
[ "doc/conf.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# probfit documentation build configuration file, created by\n# sphinx-quickstart on Sat Nov 10 11:16:37 2012.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\nimport matplotlib\nmatplotlib.use('Agg')\nimport sys\nfrom os.path import dirname, join\n\n# For local development we use the `iminuit` from the source folder.\n# On readthedocs we use the one from `site-packages`.\n# See https://github.com/iminuit/iminuit/issues/126#issuecomment-61472227\n# and http://read-the-docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs\nimport os\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif not on_rtd:\n sys.path.insert(0, join(dirname(__file__), '../'))\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['matplotlib.sphinxext.only_directives',\n 'matplotlib.sphinxext.plot_directive',\n 'IPython.sphinxext.ipython_directive',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.autosummary']\n\nautosummary_generate = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'probfit'\ncopyright = u'2012, Piti Ongmongkolkul'\nautoclass_content = 'both'\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nimport probfit.info\nversion = probfit.info.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = probfit.info.__version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build', '_themes']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. 
function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nif not on_rtd:\n try:\n # https://github.com/snide/sphinx_rtd_theme\n import sphinx_rtd_theme\n\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n except ImportError:\n # Fallback to default theme\n print('WARNING: To get nice docs locally that look like the online docs, please do:')\n print('WARNING: $ pip install sphinx_rtd_theme --user')\n print('WARNING: Using default theme.')\n html_theme = \"nature\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# if not on_rtd:\n# html_theme_path = ['_themes', ]\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n#html_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'probfitdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'probfit.tex', u'probfit Documentation',\n u'Piti Ongmongkolkul', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'probfit', u'probfit Documentation',\n [u'Piti Ongmongkolkul'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'probfit', u'probfit Documentation',\n u'Piti Ongmongkolkul', 'probfit', 'Fitting Stuff',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n" ]
[ [ "matplotlib.use" ] ]
williams-c/covid_graphs
[ "8c5df91b5da660d217af66465537a315acc98ff9" ]
[ "server/data_scripts/format_state.py" ]
[ "\nimport pandas as pd\n\ndef format_csv_by_state(df, start, end, states):\n df = df.drop(columns=['UID', 'code3', 'iso2', 'iso3', 'FIPS', 'Lat', 'Long_'])\n df = df.groupby('Province_State').agg('sum')\n # flips columns and rows\n df = df.T\n # turns date from string to pandas datetime\n df_time = pd.to_datetime(df.index)\n datetime_index = pd.DatetimeIndex(df_time.values)\n df = df.set_index(datetime_index)\n # select between start and end dates\n df = df[start:end]\n if states == 'all':\n return df\n # select specific states\n df = df[states]\n return df" ]
[ [ "pandas.to_datetime", "pandas.DatetimeIndex" ] ]
hrsu/win_api
[ "be55004cc6f4c357475b45e80c06dc480a67927f" ]
[ "text-cnn/eval.py" ]
[ "#! /usr/bin/env python\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport time\nimport datetime\nimport data_helpers\nfrom text_cnn import TextCNN\nfrom tensorflow.contrib import learn\nimport csv\n\n# Parameters\n# ==================================================\n\n# Data Parameters\ntf.flags.DEFINE_string(\"positive_data_file\", \"./data/pos.txt\", \"Data source for the positive data.\")\ntf.flags.DEFINE_string(\"negative_data_file\", \"./data/neg.txt\", \"Data source for the negative data.\")\n\n# Eval Parameters\ntf.flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\ntf.flags.DEFINE_string(\"checkpoint_dir\", \"\", \"Checkpoint directory from training run\")\ntf.flags.DEFINE_boolean(\"eval_train\", False, \"Evaluate on all training data\")\n\n# Misc Parameters\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n\n\nFLAGS = tf.flags.FLAGS\nFLAGS._parse_flags()\nprint(\"\\nParameters:\")\nfor attr, value in sorted(FLAGS.__flags.items()):\n print(\"{}={}\".format(attr.upper(), value))\nprint(\"\")\n\n# CHANGE THIS: Load data. Load your own data here\nif FLAGS.eval_train:\n x_raw, y_test = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)\n y_test = np.argmax(y_test, axis=1)\nelse:\n x_raw = [\"a masterpiece four years in the making\", \"everything is off.\"]\n y_test = [1, 0]\n\n# Map data into vocabulary\nvocab_path = os.path.join(FLAGS.checkpoint_dir, \"..\", \"vocab\")\nvocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)\nx_test = np.array(list(vocab_processor.transform(x_raw)))\n\nprint(\"\\nEvaluating...\\n\")\n\n# Evaluation\n# ==================================================\ncheckpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\ngraph = tf.Graph()\nwith graph.as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n # Load the saved meta graph and restore variables\n saver = tf.train.import_meta_graph(\"{}.meta\".format(checkpoint_file))\n saver.restore(sess, checkpoint_file)\n\n # Get the placeholders from the graph by name\n input_x = graph.get_operation_by_name(\"input_x\").outputs[0]\n # input_y = graph.get_operation_by_name(\"input_y\").outputs[0]\n dropout_keep_prob = graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\n\n # Tensors we want to evaluate\n predictions = graph.get_operation_by_name(\"output/predictions\").outputs[0]\n\n # Generate batches for one epoch\n batches = data_helpers.batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)\n\n # Collect the predictions here\n all_predictions = []\n\n for x_test_batch in batches:\n batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})\n all_predictions = np.concatenate([all_predictions, batch_predictions])\n\n# Print accuracy if y_test is defined\nif y_test is not None:\n correct_predictions = float(sum(all_predictions == y_test))\n print(\"Total number of test examples: {}\".format(len(y_test)))\n print(\"Accuracy: {:g}\".format(correct_predictions/float(len(y_test))))\n\n# Save the evaluation to a csv\npredictions_human_readable = np.column_stack((np.array(x_raw), all_predictions))\nout_path = os.path.join(FLAGS.checkpoint_dir, \"..\", 
\"prediction.csv\")\nprint(\"Saving evaluation to {0}\".format(out_path))\nwith open(out_path, 'w') as f:\n csv.writer(f).writerows(predictions_human_readable)\n" ]
[ [ "numpy.concatenate", "numpy.array", "tensorflow.flags.DEFINE_string", "tensorflow.train.latest_checkpoint", "tensorflow.flags.DEFINE_boolean", "tensorflow.Graph", "tensorflow.Session", "tensorflow.contrib.learn.preprocessing.VocabularyProcessor.restore", "tensorflow.ConfigProto", "numpy.argmax", "tensorflow.flags.DEFINE_integer" ] ]
futabato/mlops_demo
[ "c2563980327a516fea855c64311a340d34588e94" ]
[ "mlflow/example2.py" ]
[ "import mlflow\nfrom mlflow.utils.mlflow_tags import MLFLOW_RUN_NAME\n\nexperiment_name = 'MNIST-Classification'\nmlflow.set_experiment(experiment_name)\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.datasets import mnist\n\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nX_train, X_test =X_train.reshape((60000, 28, 28, 1)), X_test.reshape((10000, 28, 28, 1))\n\nX_train, X_valid = X_train[5000:], X_train[:5000]\ny_train, y_valid = y_train[5000:], y_train[:5000]\nX_train, X_valid = X_train /255, X_valid /255\n\nwith mlflow.start_run(run_name='2DCNN', nested=True):\n model = keras.models.Sequential()\n model.add(keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=[28, 28, 1]))\n model.add(keras.layers.MaxPooling2D(2, 2))\n model.add(keras.layers.Conv2D(32, (3, 3), activation='relu'))\n model.add(keras.layers.MaxPooling2D(2, 2))\n model.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(keras.layers.Flatten())\n model.add(keras.layers.Dense(64, activation='relu'))\n model.add(keras.layers.Dense(10, activation='softmax'))\n\n loss = 'sparse_categorical_crossentropy'\n optimizer = 'adam'\n metrics = ['accuracy']\n epoch = 10\n batch_size = 128\n\n model.compile(loss=loss, optimizer=optimizer, metrics=metrics)\n\n mlflow.keras.autolog()\n\n model.fit(X_train, y_train, epochs=epoch, batch_size=batch_size, validation_data=(X_valid, y_valid))\n\n test_loss, test_accuracy = model.evaluate(X_test, y_test)\n" ]
[ [ "tensorflow.keras.datasets.mnist.load_data", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.models.Sequential" ] ]
Shunichi09/reinforcement_learning_with_DNN
[ "612635235fd48fad3773f77a89cabbe97996a319" ]
[ "catcher/DDQN_torch.py" ]
[ "import gym\nfrom gym import wrappers\nimport gym_ple\nimport numpy as np\n\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.nn.utils import clip_grad_norm_\n\nfrom tensorboardX import SummaryWriter\n\nfrom PIL import Image\n\nimport os\n\nLOG_DIR = os.path.join(os.path.dirname(__file__), 'log') # これでtensorboard\nfile_names = os.listdir(LOG_DIR)\nfor file_name in file_names: # logファイルが残っていたら消去\n os.remove(LOG_DIR + '/' + file_name)\n\nwriter = SummaryWriter(log_dir = LOG_DIR)\n\nfrom collections import namedtuple, deque # .framesとかでアクセスできるようにしてる\nimport random\n\n# 保存する用の型を作成\nTransition = namedtuple('Transition', ('frames', 'action', 'next_frames', 'reward'))\n\nclass Net(nn.Module):\n \"\"\"CNN module\n num_actions : int\n number of actions\n \"\"\"\n def __init__(self, num_actions, input_channel_num=4):\n super(Net,self).__init__()\n # input channel is 4\n # input channel is 4\n # input size = 80 * 80\n self.conv1 = nn.Conv2d(input_channel_num, 32, kernel_size=8, stride=4, padding=122)\n torch.nn.init.normal_(self.conv1.weight, std=0.05)\n # input size = 80 * 80\n self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=41)\n torch.nn.init.normal_(self.conv2.weight, std=0.05)\n # input size = 80 * 80\n self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)\n torch.nn.init.normal_(self.conv3.weight, std=0.05)\n # input size = 64 * 80 * 80\n self.fc1 = nn.Linear(64 * 80 * 80, 256)\n torch.nn.init.normal_(self.fc1.weight, std=0.05)\n # input size = 256\n self.fc2 = nn.Linear(256, num_actions)\n torch.nn.init.normal_(self.fc2.weight, std=0.05)\n\n def forward(self, x):\n # input→conv1→activation(ReLU)\n x = F.relu(self.conv1(x))\n # input→conv2→activation(ReLU)\n x = F.relu(self.conv2(x))\n # input→conv3→activation(ReLU)\n x = F.relu(self.conv3(x))\n\n # to be flatten array , batch size * 80 * 80\n x = x.view(-1, 64 * 80 * 80)\n # input→fc1→activation(ReLU)\n x = F.relu(self.fc1(x))\n # input→fc2→output\n x = F.relu(self.fc2(x))\n\n return x\n\n# to add the model to tensorboard\n# tensor\ndummy_x = Variable(torch.rand(15, 4, 80, 80))\ntest_model = Net(3)\nwriter.add_graph(test_model, (dummy_x, ))\n\nclass ReplayMemory():\n \"\"\"\n Attributes\n -----------\n\n \"\"\"\n def __init__(self, capacity):\n \"\"\"\n \"\"\"\n self.capacity = capacity # how many do we have stock memory\n self.memory = [] # transition \n self.index = 0\n\n def push(self, frames, action, next_frames, reward):\n \"\"\"save the transition to memory\n Parameters\n ------------\n frames : torch.tensor, shape(1 * 4 * 80 * 80)\n the game frame\n action : int\n action number\n next_frames : torch.tensor, shape(1 * 4 * 80 * 80)\n the game frame\n reward : float\n \"\"\"\n\n if len(self.memory) < self.capacity: # should save\n self.memory.append(None)\n \n self.memory[self.index] = Transition(frames, action, next_frames, reward)\n\n self.index = (self.index + 1) % self.capacity # index_num start from 0 to self.capacity\n\n def sample(self, batch_size):\n \"\"\"\n take the data with random\n\n Parameters\n ----------\n batch_size : int\n batch size of input data\n \"\"\"\n return random.sample(self.memory, batch_size)\n\n def __len__(self):\n \"\"\"\n return length of memory\n \"\"\"\n return len(self.memory)\n\n\nclass DQNNet():\n \"\"\"\n \"\"\"\n def __init__(self, num_actions):\n \"\"\"\n \"\"\"\n self.num_actions = num_actions # アクションの数、これは環境で得れる\n\n # メモリを作っておく\n capacity = 50000\n self.memory = 
ReplayMemory(capacity)\n\n # バッチサイズ\n self.batch_size = 32\n self.init_memory_size = 200\n\n # 学習率\n self.gamma = 0.99\n\n self.model = Net(self.num_actions)\n # Fixed Q net\n self._teacher_model = Net(self.num_actions)\n\n print(self.model) # 確認する\n input()\n\n # tensorboard用\n self.count = 1\n \n # policy用\n self.ready_batch = False\n self.epsilon = 0.5\n\n # 最適化手法\n self.optimizer = optim.Adam(self.model.parameters(), lr=0.0001)\n\n def replay(self):\n \"\"\"\n \"\"\"\n if len(self.memory) < self.init_memory_size :# memory check\n return\n \n self.ready_batch = True\n \n transitions = self.memory.sample(self.batch_size) # make mini batch\n\n # We have\n # Transition * Batchsize\n batch = Transition(*zip(*transitions)) # *zipで、tuple方向の変更、*listで取り出し, turn into [torch.FloatTensor of size 80 * 80 * 4] * BATCH_SIZE, have the name\n\n # torch.FloatTensor of size BATCH_SIZEx4\n frames_batch = torch.cat(batch.frames) # 1 set is 4 frames\n action_batch = torch.cat(batch.action) # action \n reward_batch = torch.cat(batch.reward) \n non_final_next_frames = torch.cat([s for s in batch.next_frames if s is not None])\n \n # estimate mode, Q[s, a] <= Q[s, a] + alpha[R + gamma max_a Q(st+1, at+1) - Q(s, a)]\n \n # calc => Q(s, a)\n self.model.eval()\n self._teacher_model.eval()\n\n # first input, batchsize * (1)\n state_action_values = self.model(frames_batch).gather(1, action_batch) # gather check the note, pick up action_num's value \n\n # calc max Q having next frames => gamma max_a Q(st+1, at+1)\n # if not done, check next_state\n non_final_mask = torch.ByteTensor(tuple(map(lambda s: s is not None, batch.next_frames)))\n # first all 0\n next_state_values = torch.zeros(self.batch_size)\n a_m = torch.zeros(self.batch_size).type(torch.LongTensor)\n\n # max(1) => see note, return \n # detach => pick only tensor but have same storage\n # calc main network max a\n a_m[non_final_mask] = self.model(non_final_next_frames).detach().max(1)[1] # index\n\n a_m_non_final_next_states = a_m[non_final_mask].view(-1, 1) # get max action of main \n\n # [minibatch * 1] --> [minibatch]\n next_state_values[non_final_mask] = self._teacher_model(non_final_next_frames).gather(1, a_m_non_final_next_states).detach().squeeze()\n \n # calc expected Q(st, at)\n expected_state_action_values = reward_batch + self.gamma * next_state_values\n \n # training mode\n self.model.train()\n\n # calc loss\n # unsqueeze => [1, 2, 3] => [[1], [2], [3]]\n # loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))\n MSE_loss = nn.MSELoss()\n loss = MSE_loss(state_action_values, expected_state_action_values.unsqueeze(1))\n\n # save loss\n writer.add_scalar(tag='loss_data', scalar_value=loss.item(), global_step=self.count)\n self.count += 1\n\n self.optimizer.zero_grad() # reset grad\n loss.backward() # backpropagation\n clip_grad_norm_(self.model.parameters(), 1.0) # clip\n self.optimizer.step() # update\n\n def update_teacher_model(self):\n print(\"update teacher\")\n self._teacher_model.load_state_dict(self.model.state_dict())\n\n def save(self, episode):\n \"\"\"\n \"\"\"\n torch.save(self.model, \"./param/cnn_{}.pkl\".format(episode))\n\n def decide_action(self, frames, episode):\n \"\"\"\n only decide the action\n Parameters\n ------------\n frames : torch.tensor, shape(1, 4, 80, 80)\n episode : int\n episode num\n \"\"\"\n # ε-greedy\n # epsilon = 0.5 * (1 / (episode + 1))\n\n if np.random.random() < self.epsilon or not self.ready_batch:\n # random\n action = 
torch.LongTensor([[random.randrange(self.num_actions)]]) # random\n else:\n # arg max\n self.model.eval() # estimated mode\n with torch.no_grad():\n action = self.model(frames).max(1)[1].view(1, 1) # get the index\n\n return action\n\n def update_epsilon(self, MAX_EPISODE):\n \"\"\"\n Parameters\n -----------\n episode : int\n episode number\n \"\"\"\n # ε-greedy\n print(\"update parameters\")\n final_epsilon = 1e-3\n initial_epsilon = 0.5\n \n diff = (initial_epsilon - final_epsilon)\n decray = diff / float(MAX_EPISODE)\n self.epsilon = max(self.epsilon-decray, final_epsilon)\n print(\"epsilon = {}\".format(self.epsilon))\n\nclass Agent():\n def __init__(self, num_actions):\n \"\"\"\n Parameters\n -----------\n num_states : int\n num_actions : int\n \"\"\"\n self.brain = DQNNet(num_actions) # brain \n\n def update_q_function(self):\n \"\"\"\n updating Q function\n \"\"\"\n self.brain.replay()\n\n def get_action(self, frames, episode):\n \"\"\"\n Parameters\n ------------\n frames : torch.tensor, shape(1 * 4 * 80 * 80)\n the game frame\n episode : int\n episode number\n \"\"\"\n\n action = self.brain.decide_action(frames, episode)\n return action\n\n def memorize(self, frames, action, next_frames, reward):\n \"\"\"\n Parameters\n ------------\n frames : torch.tensor, shape(1 * 4 * 80 * 80)\n the game frame\n action : int\n action number\n next_frames : torch.tensor, shape(1 * 4 * 80 * 80)\n the game frame\n reward : float\n \"\"\"\n self.brain.memory.push(frames, action, next_frames, reward)\n\n def update_teacher(self):\n \"\"\"\n Parameters\n -----------\n\n \"\"\"\n self.brain.update_teacher_model()\n\n def update_parameters(self, MAX_EPISODE):\n \"\"\"\n\n \"\"\"\n self.brain.update_epsilon(MAX_EPISODE)\n \n def save(self, episode):\n \"\"\"\n \"\"\"\n self.brain.save(episode)\n\nclass Trainer():\n \"\"\"\n Attributes\n -----------\n env : gym.enviroment\n agent : Agent \n \"\"\"\n def __init__(self, observer):\n \"\"\"\n \"\"\"\n self.observer = observer\n\n # XXX: must change\n self.env = observer.env # game\n num_states = self.env.observation_space.shape[0] # states num but in this case dont need\n num_actions = self.env.action_space.n # num action in this case 3\n\n self.agent = Agent(num_actions)\n self.agent.update_teacher() # initialize\n self.agent.save(0)\n\n def run(self, MAX_EPISODE=1200, render=False, report_interval=50):\n \"\"\"\n Parameters\n ------------\n MAX_EPISODE : int, default is 5000\n render : bool, default is False\n report_interval : int, default is 50\n \"\"\"\n\n for episode in range(MAX_EPISODE):\n print(\"episode {}\".format(episode))\n\n frames = self.observer.init_reset()\n done = False\n total_reward = 0\n \n while not done: # this game does not have end\n if render: # \n # self.observer.render()\n self.agent.save(episode)\n \n action = self.agent.get_action(frames, episode) # get action\n\n # .item() => to numpy\n next_frames, reward, done = self.observer.step(action.item())\n\n if done: \n next_frames = None \n\n # add memory\n self.agent.memorize(frames, action, next_frames, reward)\n\n # update experience replay\n self.agent.update_q_function()\n\n # update frames\n frames = next_frames\n\n # update reward\n total_reward += reward.item()\n \n else:\n self.agent.update_parameters(MAX_EPISODE)\n if episode % 3 == 0:\n self.agent.update_teacher()\n\n # save loss\n writer.add_scalar(tag='reward', scalar_value=total_reward, global_step=episode)\n\n # report if yes, render and save the path\n if episode % report_interval == 0:\n render = True\n else : \n 
render = False\n\n\nclass Observer():\n \"\"\"\n \"\"\"\n\n def __init__(self, env, width, height, num_frame):\n \"\"\"\n Parameters\n -----------\n env : gym environment\n width : int\n height : int\n num_frame : int\n \"\"\"\n self.env = env\n self.width = width\n self.height = height\n self.num_frame = num_frame\n self._frames = None\n\n def init_reset(self):\n \"\"\"\n initial reset, when the episode starts\n Returns\n ----------\n torch_frames : torch.tensor\n \"\"\"\n self._frames = deque(maxlen=self.num_frame)\n frame = self.env.reset()\n torch_frames = self._transform(frame)\n\n return torch_frames\n\n def step(self, action):\n \"\"\"\n Parameters\n ------------\n action : int\n Returns\n ----------\n torch_frames : torch.tensor\n reward : torch.tensor\n done : bool\n \"\"\"\n next_frame, reward, done, _ = self.env.step(action)\n reward = torch.FloatTensor([reward]) # reward\n torch_frames = self._transform(next_frame)\n\n return torch_frames, reward, done\n \n def render(self):\n \"\"\"\n \"\"\"\n self.env.render()\n\n def _transform(self, frame):\n \"\"\"\n Parameters\n -------------\n frame : numpy.ndarray\n \"\"\"\n grayed = Image.fromarray(frame).convert(\"L\") # to gray\n\n resized = grayed.resize((self.width, self.height))\n resized = np.array(resized).astype(\"float\")\n normalized = resized / 255.0 # scale to 0~1\n\n if len(self._frames) == 0:\n for _ in range(self.num_frame):\n self._frames.append(normalized)\n else:\n self._frames.append(normalized)\n\n np_frames = np.array(self._frames)\n\n torch_frames = torch.from_numpy(np_frames).type(torch.FloatTensor) # numpy => torch.tensor\n # print(\"torch_size = {}\".format(torch_frames.size()))\n torch_frames = torch_frames.view(1, self.num_frame, self.width, self.height)\n # print(\"torch_size = {}\".format(torch_frames.size()))\n # input()\n\n return torch_frames\n \ndef main():\n \"\"\"\n \"\"\"\n env = gym.make('Catcher-v0')\n video_path = \"./DQN_video\"\n env = wrappers.Monitor(env, video_path, video_callable=(lambda ep: ep % 100 == 0), force=True)\n\n observer = Observer(env, 80, 80, 4)\n trainer = Trainer(observer)\n trainer.run()\n\n writer.close()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.nn.Linear", "torch.rand", "torch.cat", "torch.zeros", "numpy.array", "torch.nn.MSELoss", "torch.FloatTensor", "torch.no_grad", "torch.from_numpy", "torch.nn.Conv2d", "torch.nn.init.normal_", "numpy.random.random" ] ]
TharinduDR/OffensiveNN
[ "336b377c44a7067d2e23ca4a8d331ce7f99157cc" ]
[ "offensive_nn/callbacks.py" ]
[ "import tensorflow.keras.backend as K\nimport numpy as np\n\nfrom tensorflow.keras.callbacks import Callback\n\n\n# https://www.kaggle.com/hireme/fun-api-keras-f1-metric-cyclical-learning-rate/code\nclass CyclicLR(Callback):\n \"\"\"This callback implements a cyclical learning rate policy (CLR).\n The method cycles the learning rate between two boundaries with\n some constant frequency, as detailed in this paper (https://arxiv.org/abs/1506.01186).\n The amplitude of the cycle can be scaled on a per-iteration or\n per-cycle basis.\n This class has three built-in policies, as put forth in the paper.\n \"triangular\":\n A basic triangular cycle w/ no amplitude scaling.\n \"triangular2\":\n A basic triangular cycle that scales initial amplitude by half each cycle.\n \"exp_range\":\n A cycle that scales initial amplitude by gamma**(cycle iterations) at each\n cycle iteration.\n For more detail, please see paper.\n\n # Example\n ```python\n clr = CyclicLR(base_lr=0.001, max_lr=0.006,\n step_size=2000., mode='triangular')\n model.fit(X_train, Y_train, callbacks=[clr])\n ```\n\n Class also supports custom scaling functions:\n ```python\n clr_fn = lambda x: 0.5*(1+np.sin(x*np.pi/2.))\n clr = CyclicLR(base_lr=0.001, max_lr=0.006,\n step_size=2000., scale_fn=clr_fn,\n scale_mode='cycle')\n model.fit(X_train, Y_train, callbacks=[clr])\n ```\n # Arguments\n base_lr: initial learning rate which is the\n lower boundary in the cycle.\n max_lr: upper boundary in the cycle. Functionally,\n it defines the cycle amplitude (max_lr - base_lr).\n The lr at any cycle is the sum of base_lr\n and some scaling of the amplitude; therefore\n max_lr may not actually be reached depending on\n scaling function.\n step_size: number of training iterations per\n half cycle. Authors suggest setting step_size\n 2-8 x training iterations in epoch.\n mode: one of {triangular, triangular2, exp_range}.\n Default 'triangular'.\n Values correspond to policies detailed above.\n If scale_fn is not None, this argument is ignored.\n gamma: constant in 'exp_range' scaling function:\n gamma**(cycle iterations)\n scale_fn: Custom scaling policy defined by a single\n argument lambda function, where\n 0 <= scale_fn(x) <= 1 for all x >= 0.\n mode paramater is ignored\n scale_mode: {'cycle', 'iterations'}.\n Defines whether scale_fn is evaluated on\n cycle number or cycle iterations (training\n iterations since start of cycle). Default is 'cycle'.\n \"\"\"\n\n def __init__(self, base_lr=0.001, max_lr=0.006, step_size=2000., mode='triangular',\n gamma=1., scale_fn=None, scale_mode='cycle'):\n super(CyclicLR, self).__init__()\n\n self.base_lr = base_lr\n self.max_lr = max_lr\n self.step_size = step_size\n self.mode = mode\n self.gamma = gamma\n if scale_fn == None:\n if self.mode == 'triangular':\n self.scale_fn = lambda x: 1.\n self.scale_mode = 'cycle'\n elif self.mode == 'triangular2':\n self.scale_fn = lambda x: 1 / (2. 
** (x - 1))\n self.scale_mode = 'cycle'\n elif self.mode == 'exp_range':\n self.scale_fn = lambda x: gamma ** (x)\n self.scale_mode = 'iterations'\n else:\n self.scale_fn = scale_fn\n self.scale_mode = scale_mode\n self.clr_iterations = 0.\n self.trn_iterations = 0.\n self.history = {}\n\n self._reset()\n\n def _reset(self, new_base_lr=None, new_max_lr=None,\n new_step_size=None):\n \"\"\"Resets cycle iterations.\n Optional boundary/step size adjustment.\n \"\"\"\n if new_base_lr != None:\n self.base_lr = new_base_lr\n if new_max_lr != None:\n self.max_lr = new_max_lr\n if new_step_size != None:\n self.step_size = new_step_size\n self.clr_iterations = 0.\n\n def clr(self):\n cycle = np.floor(1 + self.clr_iterations / (2 * self.step_size))\n x = np.abs(self.clr_iterations / self.step_size - 2 * cycle + 1)\n if self.scale_mode == 'cycle':\n return self.base_lr + (self.max_lr - self.base_lr) * np.maximum(0, (1 - x)) * self.scale_fn(cycle)\n else:\n return self.base_lr + (self.max_lr - self.base_lr) * np.maximum(0, (1 - x)) * self.scale_fn(\n self.clr_iterations)\n\n def on_train_begin(self, logs={}):\n logs = logs or {}\n\n if self.clr_iterations == 0:\n K.set_value(self.model.optimizer.lr, self.base_lr)\n else:\n K.set_value(self.model.optimizer.lr, self.clr())\n\n def on_batch_end(self, epoch, logs=None):\n\n logs = logs or {}\n self.trn_iterations += 1\n self.clr_iterations += 1\n\n self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))\n self.history.setdefault('iterations', []).append(self.trn_iterations)\n\n for k, v in logs.items():\n self.history.setdefault(k, []).append(v)\n\n K.set_value(self.model.optimizer.lr, self.clr())\n" ]
[ [ "tensorflow.keras.backend.set_value", "tensorflow.keras.backend.get_value", "numpy.abs", "numpy.floor", "numpy.maximum" ] ]
oylb126/shakemap
[ "cbad8622bd520e1936447620edfb3a4feea1a8d9" ]
[ "shakelib/gmpe/nga_east.py" ]
[ "\"\"\"\nModule to simplify importing the OQ implementation of the\nNGA-East GMPE suite.\n\"\"\"\n\nimport os\nimport re\nimport sys\nimport logging\n\nimport pandas as pd\nimport numpy as np\n\nfrom openquake.hazardlib.gsim.base import GMPE\nfrom openquake.hazardlib import const\nfrom openquake.hazardlib import imt as IMT\nfrom openquake.hazardlib.gsim.usgs_ceus_2019 import NGAEastUSGSGMPE\n\n# Max distance for evaluating NGAEast. This *should* be 1500, but due to what\n# appears to be a floating point precision issue, we get a division by zero\n# error at a value of 1500 returning nans. So we have to cap the distance at a\n# value slightly smaller.\n\n# This is also used to zero out the results at greater distances.\n\nMAX_RRUP = 1499.9\n\n\nclass NGAEast(GMPE):\n \"\"\"\n Returns NGA East GMPE that combines all of the individual NGAEastUSGSGMPE\n GMPEs.\n \"\"\"\n DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.STABLE_CONTINENTAL\n DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.RotD50\n DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([\n IMT.PGA,\n IMT.PGV,\n IMT.SA\n ])\n\n DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([\n const.StdDev.TOTAL,\n const.StdDev.INTER_EVENT,\n const.StdDev.INTRA_EVENT\n ])\n REQUIRES_SITES_PARAMETERS = set(('vs30', ))\n REQUIRES_RUPTURE_PARAMETERS = set(('mag', ))\n REQUIRES_DISTANCES = set(('rrup', ))\n\n TABLE_PATHS = os.listdir(NGAEastUSGSGMPE.PATH)\n this_module = os.path.dirname(__file__)\n NGA_BASE_PATH = os.path.join(\n this_module, '..', '..', 'shakemap', 'data', 'nga_east_tables')\n\n NGA_EAST_USGS_WEIGHT = 0.667\n NGA_EAST_SEED_WEIGHT = 0.333\n\n NGA_EAST_USGS = pd.read_csv(os.path.join(\n NGA_BASE_PATH, 'nga-east-usgs-weights.dat'))\n NGA_EAST_SEEDS = pd.read_csv(os.path.join(\n NGA_BASE_PATH, 'nga-east-seed-weights.dat'))\n\n # Sigma models and their weights\n SIGMA_MODS = [\"EPRI\", \"PANEL\"]\n SIGMA_WEIGHTS = [0.8, 0.2]\n\n # -------------------------------------------------------------------------\n # To simplify, use the COLLAPSED branch, but cannot get inter and intra\n # event standard deviations in this case.\n # SIGMA_MODS = [\"COLLAPSED\"]\n # SIGMA_WEIGHTS = [1.0]\n\n # Parse the periods of the columns\n per_idx_start = 1\n per_idx_end = -2\n per_list_str = NGA_EAST_USGS.keys().tolist()[per_idx_start:per_idx_end]\n per_array = np.array(\n [float(p.replace('SA', '').replace('P', '.')) for p in per_list_str]\n )\n\n def __init__(self):\n gmpes = []\n sigma_wts = []\n all_table_paths = []\n for i, sigma_mod in enumerate(self.SIGMA_MODS):\n for table_path in self.TABLE_PATHS:\n gmpe = NGAEastUSGSGMPE(\n gmpe_table=table_path, sigma_model=sigma_mod)\n gmpes.append(gmpe)\n sigma_wts.append(self.SIGMA_WEIGHTS[i])\n all_table_paths.append(table_path)\n self.gmpes = gmpes\n self.sigma_weights = np.array(sigma_wts)\n self.ALL_TABLE_PATHS = all_table_paths\n\n def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):\n # List of GMPE weights, which is the product of the the branch weights\n # for the seed models vs the NGA East resampled models as well as the\n # weights for the indivudual GMPES as defined by Petersen et al. 
(2019)\n #\n # Note that the NGA East resampled models are a function of spectral\n # period.\n #\n # NGA East Seeds (1/3)\n # ├── B_bca10d (0.06633), wts = 0.333 * 0.06633 = 0.02208789\n # ├── B_ab95 (0.02211), wts = 0.333 * 0.02211 = 0.00736263\n # ...\n # NGA East Resampled or \"USGS\" (2/3)\n # ├── Model 1 (0.1009 for PGA), wts = 0.667 * 0.1009 = 0.0673003\n # ├── Model 2 (0.1606 for PGA), wts = 0.667 * 0.1606 = 0.1071202\n # ...\n #\n wts = [0] * len(self.gmpes)\n\n # Is IMT PGA or PGV?\n is_pga = imt == IMT.PGA()\n is_pgv = imt == IMT.PGV()\n\n for i, tp in enumerate(self.ALL_TABLE_PATHS):\n if 'usgs' in tp:\n # Get model number from i-th path using regex\n mod_num = int(re.search(r'\\d+', tp).group())\n coefs = np.array(\n self.NGA_EAST_USGS.iloc[mod_num - 1]\n )\n # Is the IMT PGA, PGA, or SA?\n if is_pga:\n iweight = coefs[-2]\n elif is_pgv:\n iweight = coefs[-1]\n else:\n # For SA, need to interpolate; we'll use log-period and\n # linear-weight interpolation.\n iweight = np.interp(\n np.log(imt.period),\n np.log(self.per_array),\n coefs[self.per_idx_start:self.per_idx_end]\n )\n wts[i] = self.NGA_EAST_USGS_WEIGHT * iweight\n else:\n # Strip off the cruft to get the string we need to match\n str_match = tp.replace('nga_east_', '').replace('.hdf5', '')\n matched = self.NGA_EAST_SEEDS[\n self.NGA_EAST_SEEDS['model'] == str_match]\n if len(matched):\n iweight = self.NGA_EAST_SEEDS[\n self.NGA_EAST_SEEDS['model'] == str_match].iloc[0, 1]\n wts[i] = self.NGA_EAST_SEED_WEIGHT * iweight\n\n total_gmpe_weights = self.sigma_weights * wts\n\n if not np.allclose(np.sum(total_gmpe_weights), 1.0):\n raise ValueError('Weights must sum to 1.0.')\n\n mean = np.full_like(sites.vs30, 0)\n stddevs = []\n for i in range(len(stddev_types)):\n stddevs.append(np.full_like(sites.vs30, 0))\n\n # Apply max distance to dists.rrup\n np.clip(dists.rrup, 0, MAX_RRUP)\n\n # Since we will be dropping the models that don't have PGV,\n # we now also need to track the total sum of weights for when\n # the imt is PGV so that we can re-distribute the weights.\n if is_pgv:\n twts = []\n\n # Loop over gmpes\n for i, gm in enumerate(self.gmpes):\n if is_pgv:\n # Is PGV and also not available for gm?\n try:\n gm._return_tables(rup.mag, imt, \"IMLs\")\n except KeyError:\n continue\n except:\n logging.error(\"Unexpected error:\", sys.exc_info()[0])\n tmean, tstddevs = gm.get_mean_and_stddevs(\n sites, rup, dists, imt, stddev_types)\n mean += tmean * total_gmpe_weights[i]\n for j, sd in enumerate(tstddevs):\n stddevs[j] += sd * total_gmpe_weights[i]\n if is_pgv:\n twts.append(total_gmpe_weights[i])\n\n if is_pgv:\n # Rescale the PGV wieghts so that they sum to 1 after dropping\n # the models that are not defined for PGV.\n mean = mean / np.sum(twts)\n for j, sd in enumerate(stddevs):\n stddevs[j] = stddevs[j] / np.sum(twts)\n\n # Zero out values at distances beyond the range for which NGA East\n # was defined.\n mean[dists.rrup > MAX_RRUP] = -999.0\n\n return mean, stddevs\n" ]
[ [ "numpy.array", "numpy.log", "numpy.sum", "numpy.clip", "numpy.full_like" ] ]
AkshayGuptaK/maze-solvers
[ "f324c1af83e6456dcf0da89ffadd85930adc62e5" ]
[ "GenerateSolver.py" ]
[ "'''Matrix based Maze Solvers'''\r\n#Solver Generator\r\n\r\nimport numpy as np\r\nimport random\r\n\r\ndef print_multi_nparray(multi_array): #output display function\r\n layers = multi_array.shape[-1]\r\n for layer in range(layers):\r\n print(multi_array[..., layer])\r\n\r\nclass MemorySolver():\r\n #class of solvers that always try the same path - they can be evolved to better solve a given maze\r\n def __init__(self, maze):\r\n self.genome = np.copy(maze)\r\n self.end = self.genome.shape[0] - 1\r\n \r\n def construct(self, random):\r\n #initializes this solver's path - i.e. its attempted solution\r\n i = 0\r\n j = 0\r\n end_reached = False\r\n self.path = []\r\n \r\n while j not in self.path:\r\n self.path.append(j)\r\n if j == self.end:\r\n end_reached = True\r\n j = random.choice(np.where(self.genome[i,:]==1)[0].tolist()) #choose a random path not previously chosen\r\n self.genome[i, j] = 2 #marks this as a path taken in the genome\r\n i = j\r\n if end_reached:\r\n break\r\n \r\n for i in [x for x in range(1, self.end+1) if x not in self.path]:\r\n j = random.choice(np.where(self.genome[i,:]==1)[0].tolist())\r\n self.genome[i, j] = 2\r\n #paths are added so all nodes are traversed at least once by solution\r\n \r\n def gen_path(self):\r\n #traces the path indicated by the genome\r\n #this is for post evolutionary use in determining the new solution\r\n i = 0\r\n j = 0\r\n end_reached = False\r\n self.path = []\r\n \r\n while j not in self.path:\r\n self.path.append(j)\r\n if j == self.end:\r\n end_reached = True\r\n (j, ) = np.where(self.genome[i,:]==2)[0]\r\n i = j\r\n if end_reached:\r\n break \r\n \r\ndef generate_memory_solver(random, args):\r\n solver = MemorySolver(args.get('maze'))\r\n solver.construct(random)\r\n return solver\r\n \r\ndef evaluate_memory_solver(candidates, args):\r\n fitness = []\r\n \r\n for solver in candidates:\r\n try:\r\n fitness.append(solver.path.index(solver.end)) #num of steps taken to solve\r\n except ValueError: #if solver cannot solve the maze\r\n fitness.append(solver.end + 1)\r\n \r\n return fitness \r\n\r\n#------------------------------------------------------------------------------ \r\n \r\ndef generate_smart_solver(random, args):\r\n #solvers that use a 3d matrix to generate a solution given a certain maze or set of mazes\r\n #can be evolved to produce a genetic 'algorithm' which can solve mazes in general\r\n nodes = args.get('nodes') #number of nodes in the maze\r\n complexity = args.get('complexity') #determines number of layers (third dimension size) of solver matrix\r\n\r\n solver = np.ones((nodes, nodes, complexity), np.int8)\r\n \r\n for layer in range(1, complexity):\r\n for row in range(nodes):\r\n solver[row, :, layer] = [random.randint(0,3) for i in range(nodes)]\r\n #initializes random values between 0 and 3 for all layers except bottom layer\r\n return solver\r\n\r\ndef identity_solver(nodes, complexity):\r\n #random but prefers not to revisit nodes - for performance comparison purposes\r\n solver = np.zeros((nodes, nodes, complexity), np.int8)\r\n\r\n for i in range(nodes):\r\n solver[i, i, -1] = 1\r\n \r\n solver[:, :, 0] = 1\r\n \r\n return solver\r\n \r\ndef dfs_solver(nodes, complexity):\r\n #a basic implementation of a depth first search solver - for performance comparison\r\n solver = np.ones((nodes, nodes, complexity), np.int8)\r\n \r\n solver[:, :, -1] = 2\r\n\r\n for i in range(nodes):\r\n solver[i, i, -1] = 1\r\n return solver\r\n \r\ndef optimal_solver(nodes):\r\n #believed to be optimal solving strategy - for 
performance comparison\r\n solver = np.ones((nodes, nodes, 3), np.int8)\r\n \r\n solver[:, :, -1] = 0\r\n \r\n for i in range(nodes):\r\n solver[i, i, -1] = 1\r\n\r\n solver[:, :, -2] = 2\r\n \r\n return solver\r\n \r\ndef visit(node, unvisited, tensor): #mark a node as visited\r\n unvisited[node] = 0\r\n tensor[node, :] = 1\r\n tensor[:, node] = 1\r\n \r\ndef operate(number, operator): #performs the operation specified by the solver matrix\r\n if operator == 0 or operator == 1: #a 0 or 1 operator returns itself as the result\r\n return operator\r\n elif operator == 2: #a 2 operator returns the number operated on without change\r\n return number\r\n else: #a 3 operator returns the inverse of the number operated on\r\n return 1 - number\r\n \r\ndef matrix_operate(matrix, operator): \r\n #performs row and column wise operations between number matrix and operator matrix\r\n result = np.zeros(matrix.shape, np.int8)\r\n for i in range(matrix.shape[0]):\r\n for j in range(matrix.shape[1]):\r\n result[i, j] = operate(matrix[i, j], operator[i, j])\r\n return result\r\n\r\ndef choose(random, row): #choose at random between possible options, if multiples\r\n if np.count_nonzero(row):\r\n return random.choices([n for n in range(row.shape[0])], weights=row)\r\n else:\r\n return (-1, )\r\n \r\ndef evaluate_smart_solver(candidates, args): #mazes is a numpy array of node x node x number_of_mazes\r\n random = args.get('random')\r\n mazes = args.get('mazes')\r\n\r\n fitness = [] #will store the success value for solving attempt on each maze\r\n nodes = mazes.shape[0] #number of nodes in the maze\r\n \r\n for solver in candidates:\r\n fit = 0\r\n for maze in range(mazes.shape[2]):\r\n unvisited = [1 for x in range(nodes)] #list of 1s in place of each unvisited node, 0s for each visited node\r\n tensor = np.zeros((nodes, nodes), np.int8)\r\n current = 0 #current node initialized to beginning node of maze\r\n steps = 0 #number of moves made so far\r\n visit(0, unvisited, tensor)\r\n \r\n while current != nodes - 1 and steps < nodes**2:\r\n choice = -1\r\n layer = -1\r\n known = np.multiply(tensor, mazes[:, :, maze]) #yields the information of maze adjacency as known by solver at current time\r\n \r\n while choice < 0:\r\n try:\r\n M = matrix_operate(known, solver[:, :, layer]) #performs solver matrix operation to determine which path to take\r\n except IndexError:\r\n print('Index Error')\r\n break\r\n except TypeError:\r\n print('Type Error')\r\n print(solver)\r\n break\r\n result = np.multiply(np.dot(unvisited, M), mazes[current, :, maze]) #multiplication by maze adj matrix eliminates impossible choices\r\n (choice,) = choose(random, result)\r\n layer -= 1\r\n \r\n current = choice\r\n steps += 1\r\n visit(choice, unvisited, tensor)\r\n \r\n fit += steps\r\n fitness.append(fit)\r\n \r\n return fitness\r\n" ]
[ [ "numpy.count_nonzero", "numpy.dot", "numpy.zeros", "numpy.copy", "numpy.ones", "numpy.multiply", "numpy.where" ] ]
amjankowska/SocSIM
[ "768e81d10f9b1015a336a2f4bdc9d860907d2353" ]
[ "SOC/models/tests/test_manna.py" ]
[ "from SOC.models import Manna\nimport numpy as np\nimport pytest\n\ndef test_boundary_shape():\n sim = Manna(L=10)\n assert sim.values.shape == (12, 12)\n assert sim.L_with_boundary == 12\n\ndef test_run_abel():\n sim = Manna(L=20)\n sim.run(5)\n\ndef test_run_nonabel():\n sim = Manna(L=20, abelian = False)\n sim.run(5)\n\ndef test_driving_does_not_pollute_boundary():\n sim = Manna(L=10)\n for i in range(1000):\n sim.drive()\n\ndef test_toppling_reduces_middle_to_max_one():\n sim = Manna(L=10)\n sim.values[1:-1, 1:-1] = 6\n sim.AvalancheLoop()\n assert (0 <= sim.values[1:-1, 1:-1]).all()\n assert (sim.values[1:-1, 1:-1] <= 1).all()\n\[email protected]\ndef test_whiteboard_case_1():\n sim = Manna(L=3)\n sim.values[2, 2] = 2\n results = sim.AvalancheLoop()\n assert int(results['AvalancheSize']) == 2\n assert int(results['number_of_iterations']) == 1\n\[email protected]\ndef test_whiteboard_case_2():\n sim = Manna(L=3)\n sim.values[2, 2] = 2\n results = sim.AvalancheLoop()\n assert int(results['AvalancheSize']) == 2\n assert int(results['number_of_iterations']) == 1\n\ndef test_resurrect():\n sim = Manna(L=10)\n filename = \"test_ressurrect.zarr\"\n sim.run(5, filename=filename)\n saved = sim.saved_snapshots[-1].copy()\n save_every_orig = sim.save_every\n\n sim2 = Manna.from_file(filename)\n np.testing.assert_allclose(sim2.values, saved)\n assert sim2.save_every == save_every_orig\n\ndef test_resurrect_default_name():\n sim = Manna(L=10)\n filename = sim.run(50, filename=False)\n saved = sim.saved_snapshots[-1].copy()\n save_every_orig = sim.save_every\n\n sim2 = Manna.from_file(filename)\n np.testing.assert_allclose(sim2.values, saved)\n assert sim2.save_every == save_every_orig\n" ]
[ [ "numpy.testing.assert_allclose" ] ]
btrspg/deTiN
[ "8076827fb915e6d675dca3940721b7a49f7198d3" ]
[ "deTiN/deTiN_utilities.py" ]
[ "import gzip\nimport pickle\nimport random\nimport sys\nfrom itertools import compress\n\nimport matplotlib\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import fisher_exact\n\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n\nrandom.seed(1)\n\ntry:\n xrange\nexcept NameError:\n xrange = range\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\ndef is_member(a, b):\n # based on the matlab is_member function\n # code from stackoverflow user Joe Kington\n bind = {}\n for i, elt in enumerate(b):\n if elt not in bind:\n bind[elt] = i\n return [bind.get(itm, np.nan) for itm in a]\n\n\ndef chr2num(chr):\n if chr[0][0:2] == 'ch':\n chr = [c[3:] for c in chr]\n # convert chromosome from strings to ints\n chr = ['23' if x == 'X' else x for x in chr]\n chr = ['24' if x == 'Y' else x for x in chr]\n chr = ['25' if x == 'M' else x for x in chr]\n chr = ['25' if x == 'MT' else x for x in chr]\n chromosomes = np.array(range(1, 26))\n return np.array(is_member(chr, chromosomes.astype(np.str)))\n\n\ndef remove_sites_near_centromere_and_telomeres(het_table):\n positions = het_table['genomic_coord_x']\n centromere_positions = [125000001, 1718373143, 1720573143, 1867507890,\n 1869607890, 1984214406, 1986714406, 2101066301,\n 2102666301, 2216036179, 2217536179, 2323085719, 2326285719, 2444417111,\n 2446417111, 2522371864, 2524171864, 2596767074, 2598567074, 2683844322,\n 2685944322, 339750621, 342550621, 2744173305, 2746073305, 2792498825, 2794798825,\n 2841928720,\n 2844428720, 580349994, 583449994, 738672424, 740872424, 927726700, 930026700, 1121241960,\n 1123541960, 1291657027, 1293557027, 1435895690, 1438395690, 1586459712, 1588159712]\n\n lengths = np.array([249250621, 243199373, 198022430, 191154276, 180915260, 171115067, 159138663,\n 146364022, 141213431, 135534747, 135006516, 133851895, 115169878, 107349540,\n 102531392, 90354753, 81195210, 78077248, 59128983, 63025520, 48129895, 51304566,\n 155270560, 59373566, 16569]) # chromosome lengths from genome-mysql.cse.ucsc.edu\n telomere_positions = np.append(1, np.cumsum(lengths))\n distance_centromere = np.zeros([len(positions), len(centromere_positions)])\n distance_telomere = np.zeros([len(positions), len(telomere_positions)])\n\n for i, centromere in enumerate(centromere_positions):\n distance_centromere[:, i] = np.abs(positions - centromere)\n distance_centromere = np.min(distance_centromere, axis=1)\n\n for i, telomere in enumerate(telomere_positions):\n distance_telomere[:, i] = np.abs(positions - telomere)\n distance_telomere = np.min(distance_telomere, axis=1)\n het_table = het_table[np.logical_and(distance_centromere > 5000000, distance_telomere > 5000000)]\n het_table.reset_index(inplace=True, drop=True)\n return het_table\n\n\ndef filter_hets_based_on_coverage(het_table, depth=10):\n het_table = het_table[np.logical_and(het_table['READ_DEPTH_N'] > depth, het_table['READ_DEPTH_T'] > depth)]\n het_table.reset_index(inplace=True, drop=True)\n return het_table\n\n\ndef filter_segments_based_on_size_f_and_tau(seg_table, aSCNA_thresh, n_probes=200):\n seg_table = seg_table[np.logical_and.reduce(np.array([np.array(seg_table['f']) < 0.5 - aSCNA_thresh,\n seg_table['n_probes'] > n_probes, seg_table['tau'] > 0]))]\n seg_table.reset_index(inplace=True, drop=True)\n return seg_table\n\n\ndef alternate_file_headers():\n headers = {'alternate_headers_position': ['Start', 'Start_bp', 'start', 'position', 'pos', 'POS', 'Start_position'],\n 'alternate_headers_start_position': ['Start', 
'Start_bp', 'start', 'position', 'pos', 'POS',\n 'Start_position', 'START'],\n 'alternate_headers_end_position': ['End', 'End_bp', 'end', 'END'],\n 'alternate_headers_chromosome': ['Contig', 'chrom', 'CONTIG', 'chr', 'Chrom', 'CHROMOSOME', 'Chromosome',\n 'contig'],\n 'alternate_headers_f': ['f_acs', 'MAF_Post_Mode', 'MINOR_ALLELE_FRACTION_POSTERIOR_50'],\n 'alternate_headers_tau': ['CN', 'Segment_Mean_Post_Mode', 'LOG2_COPY_RATIO_POSTERIOR_50'],\n 'alternate_headers_alt_count': ['t_alt_count', 'n_alt_count', 'alt_count', 'i_t_alt_count',\n 'i_n_alt_count'],\n 'alternate_headers_ref_count': ['t_ref_count', 'n_ref_count', 'ref_count', 'i_t_ref_count',\n 'i_n_ref_count'],\n 'alternate_headers_n_probes': ['n_probes', 'NUM_POINTS_COPY_RATIO', 'Num_Targets']}\n return headers\n\n\ndef read_file_header(text_file):\n headers = alternate_file_headers()\n with open(text_file, 'rt') as f:\n for header_lines, line in enumerate(f):\n line = line.strip()\n if not line[0] == '#':\n break\n file_head = line.split('\\t')\n try:\n headers['alternate_headers_chromosome'].index(file_head[0])\n except ValueError:\n sys.exit(\n 'The first column of all input files should be chromosome: could not find any of the chromosome headers in the first column of ' +\n text_file)\n return file_head\n\n\ndef identify_aSCNAs(seg_table, het_table, aSCNA_thresh=0.1, n_snps=20, var_thresh=0.025):\n # identify aSCNAs based on minor allele fraction of segments\n mu_af_n = np.mean(het_table['AF_N'])\n f_detin = np.zeros([len(seg_table), 1])\n f_variance = np.zeros([len(seg_table), 1])\n n_snps_above_mu = np.zeros([len(seg_table), 1])\n n_snps_below_mu = np.zeros([len(seg_table), 1])\n fishers_p_convergent_seg = np.ones([len(seg_table), 1])\n thresh_snps = np.round(np.true_divide(n_snps, 2))\n for seg_id, seg in seg_table.iterrows():\n seg_hets = het_table[het_table['seg_id'] == seg_id]\n f_detin[seg_id] = mu_af_n - np.mean(np.abs(seg_hets['AF_T'] - mu_af_n))\n f_variance[seg_id] = np.var(np.abs(seg_hets['AF_T'] - mu_af_n))\n n_snps_above_mu[seg_id] = np.sum(seg_hets['AF_T'] > mu_af_n)\n n_snps_below_mu[seg_id] = np.sum(seg_hets['AF_T'] <= mu_af_n)\n try:\n fe_tuple = fisher_exact([[np.sum(np.logical_and(seg_hets['AF_T'] > mu_af_n,\n seg_hets['AF_N'] > mu_af_n)),\n np.sum(np.logical_and(seg_hets['AF_T'] > mu_af_n,\n seg_hets['AF_N'] <= mu_af_n))],\n [np.sum(np.logical_and(seg_hets['AF_T'] <= mu_af_n,\n seg_hets['AF_N'] > mu_af_n)),\n np.sum(np.logical_and(seg_hets['AF_T'] <= mu_af_n,\n seg_hets['AF_N'] <= mu_af_n))]], 'less')\n fishers_p_convergent_seg[seg_id] = fe_tuple[1]\n except ValueError:\n fishers_p_convergent_seg[seg_id] = 1\n seg_table['f_detin'] = f_detin\n seg_table['f_variance'] = f_variance\n seg_table['n_snps_above_mu'] = n_snps_above_mu\n seg_table['n_snps_below_mu'] = n_snps_below_mu\n seg_table['fishers_p_convergent_seg'] = fishers_p_convergent_seg\n if any((seg_table['fishers_p_convergent_seg'] * len(seg_table)) < 0.05):\n segs = (seg_table['fishers_p_convergent_seg'] * len(seg_table)) < 0.05\n ix = list(compress(xrange(len(segs)), segs))\n print('identified convergent aSCNA in normal on chromosomes:' + str(np.unique(seg_table['Chromosome'][ix] + 1)))\n convergent_segs = seg_table[seg_table['fishers_p_convergent_seg'] * len(seg_table) <= 0.05]\n else:\n convergent_segs = None\n aSCNAs = seg_table[\n np.logical_and.reduce(np.array([np.array(seg_table['fishers_p_convergent_seg'] * len(seg_table)) > 0.05,\n seg_table['n_snps_above_mu'] > thresh_snps,\n seg_table['n_snps_below_mu'] > thresh_snps,\n 
seg_table['f_detin'] <= 0.5 - aSCNA_thresh,\n seg_table['f_variance'] < var_thresh]))]\n return aSCNAs, convergent_segs\n\n\ndef ensure_balanced_hets(seg_table, het_table):\n seg_table['aSCNA'] = np.zeros([len(seg_table), 1])\n aSCNA_hets = []\n for seg_id, seg in seg_table.iterrows():\n seg_hets = het_table[het_table['seg_id'] == seg_id]\n if np.sum(seg_hets['d'] == -1) > 10 and np.sum(seg_hets['d'] == 1) > 10:\n if sum(seg_hets['AF_T'] > 0.5) < sum(seg_hets['AF_T'] <= 0.5):\n sites = seg_hets['AF_T'] <= 0.5\n index = list(compress(xrange(len(sites)), sites))\n ixs = random.sample(index, (sum(seg_hets['AF_T'] <= 0.5) - sum(seg_hets['AF_T'] > 0.5)))\n seg_hets = seg_hets.drop(seg_hets.index[[ixs]])\n seg_hets.reset_index(inplace=True, drop=True)\n\n if sum(seg_hets['AF_T'] > 0.5) > sum(seg_hets['AF_T'] <= 0.5):\n sites = seg_hets['AF_T'] > 0.5\n index = list(compress(xrange(len(sites)), sites))\n ixs = random.sample(index, (sum(seg_hets['AF_T'] > 0.5) - sum(seg_hets['AF_T'] <= 0.5)))\n seg_hets = seg_hets.drop(seg_hets.index[[ixs]])\n seg_hets.reset_index(inplace=True, drop=True)\n if len(aSCNA_hets) == 0:\n aSCNA_hets = seg_hets\n else:\n aSCNA_hets = pd.concat([aSCNA_hets, seg_hets])\n aSCNA_hets.reset_index(inplace=True, drop=True)\n return aSCNA_hets\n\n\ndef plot_kmeans_info(ascna_based_model, output_path, sample_name):\n # method for plotting clustering results of aSCNA TiN estimates\n X = np.array(ascna_based_model.segs['TiN_MAP'])\n X_low = np.array(ascna_based_model.segs['TiN_ci_l'])\n X_high = np.array(ascna_based_model.segs['TiN_ci_h'])\n Y = np.array(ascna_based_model.segs['Chromosome'])\n kIdx = np.max(ascna_based_model.cluster_assignment)\n K = range(1, 4)\n\n # variance explained by incorporating additional clusters\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(K, ascna_based_model.sum_squared_distance, 'b.-')\n ax.plot(K[kIdx], ascna_based_model.sum_squared_distance[kIdx], marker='o', markersize=12,\n markeredgewidth=2, markeredgecolor='r', markerfacecolor='None')\n plt.grid(True)\n plt.xlabel('Number of clusters')\n plt.ylabel('Average within-cluster sum of squares')\n plt.title('KMeans residual')\n plt.xticks([1, 2, 3])\n fig.set_dpi(150)\n fig.savefig(output_path + '/' + sample_name + '_KmeansEval_plot.png', bbox_inches='tight')\n\n # scatter plot of TiN estimates per segment by chromosome location and cluster\n fig = plt.figure()\n ax = fig.add_subplot(111)\n clr = ['b', 'g', 'r']\n if len(X) > 1:\n for i in range(K[kIdx]):\n ind = (ascna_based_model.cluster_assignment == i)\n ax.errorbar(X[ind], Y[ind], xerr=[X[ind] - X_low[ind], X_high[ind] - X[ind]], c=clr[i],\n label='Cluster %d' % i, ls='None', marker='.')\n else:\n ax.errorbar(X, Y + 1, xerr=[X - X_low, X_high - X], c='b', label='Cluster 1', ls='None', marker='.')\n\n plt.xlabel('MAP tumor in normal estimate (%)')\n plt.ylabel('Chromosome')\n plt.title('Cluster by chromosome and TiN')\n plt.yticks(np.arange(min(Y), max(Y) + 2, 2.0))\n plt.xticks(np.arange(0, max(X) + 1, np.max([np.round(np.true_divide(np.max(X), 10)), 1])))\n ax.set_xlim([-2, np.max(X) + 2])\n\n fig.set_dpi(150)\n fig.savefig(output_path + '/' + sample_name + '_KmeansEval_scatter_plot.png', bbox_inches='tight')\n\n\ndef plot_aSCNA_het_data(do):\n fig, ax = plt.subplots(1, 1)\n ax.plot(do.input.het_table['genomic_coord_x'], do.input.het_table['AF_T'], c=[0.5, 0.5, 0.5], marker='.', ls='None',\n ms=1, alpha=0.5)\n tumor_af = ax.plot(do.ascna_based_model.hets['genomic_coord_x'], do.ascna_based_model.hets['AF_T'], c=[0, 0, 1],\n 
marker='.',\n ls='None', ms=5)\n normal_af = ax.plot(do.ascna_based_model.hets['genomic_coord_x'], do.ascna_based_model.hets['AF_N'], c=[1, 0, 0],\n marker='.',\n ls='None', ms=5)\n fig.set_dpi(300)\n chrs = hg19_to_linear_positions(np.linspace(0, 23, 24), np.ones([23]))\n for c in chrs:\n ax.plot([c, c], [0, 1], 'k--')\n plt.legend(handles=[tumor_af[0], normal_af[0]], labels=['Tumor', 'Normal'])\n ax.set_xticks((chrs[1:] + chrs[:-1]) / 2)\n ax.set_xticklabels((np.linspace(1, 24, 24, dtype=int)), size=5, rotation=90)\n ax.set_yticks(np.linspace(0, 1, 5))\n ax.set_yticklabels(np.linspace(0, 1, 5), size=5)\n ax.set_xlabel('Chromosomes')\n ax.set_ylabel('Allele fraction')\n fig.set_dpi(150)\n fig.savefig(do.input.output_path + '/' + do.input.output_name + '_TiN_hets_aSCNA_model.png', bbox_inches='tight')\n fig.savefig(do.input.output_path + '/' + do.input.output_name + '_TiN_hets_aSCNA_model.eps', bbox_inches='tight')\n\n\ndef plot_TiN_models(do):\n fig, ax = plt.subplots(1, 1)\n TiN_range = np.linspace(0, 1, num=do.input.resolution)\n if ~np.isnan(do.ascna_based_model.TiN):\n ascna = ax.plot(TiN_range,\n np.true_divide(np.exp(\n do.ascna_based_model.TiN_likelihood - np.nanmax(do.ascna_based_model.TiN_likelihood)),\n np.nansum(np.exp(do.ascna_based_model.TiN_likelihood - np.nanmax(\n do.ascna_based_model.TiN_likelihood))))\n , 'r--', lw=1)\n ssnv = ax.plot(TiN_range,\n np.true_divide(\n np.exp(do.ssnv_based_model.TiN_likelihood - np.nanmax(do.ssnv_based_model.TiN_likelihood)),\n np.nansum(\n np.exp(do.ssnv_based_model.TiN_likelihood - np.nanmax(do.ssnv_based_model.TiN_likelihood))))\n , 'b--', lw=1)\n\n joint = ax.plot(TiN_range, do.joint_posterior\n , 'k-', lw=2)\n plt.xlabel('Tumor in normal estimate')\n plt.ylabel('p(TiN=x)')\n plt.title('TiN estimate posterior')\n if ~np.isnan(do.ascna_based_model.TiN):\n plt.legend(handles=[ascna[0], ssnv[0], joint[0]], labels=['aSCNA', 'SSNV', 'Joint Est.'])\n else:\n plt.legend(handles=[ssnv[0], joint[0]], labels=['SSNV', 'Joint Est.'])\n fig.set_dpi(150)\n fig.savefig(do.input.output_path + '/' + do.input.output_name + '_TiN_models_plot.png', bbox_inches='tight')\n\n\ndef plot_SSNVs(do):\n fig, ax = plt.subplots(1, 1)\n TiN_fit = ax.plot(np.linspace(0, 1, do.input.resolution),\n np.multiply(do.TiN, np.linspace(0, 1, do.input.resolution)), '--', lw=1, alpha=1,\n color='#1D1D1D')\n background = ax.plot(do.ssnv_based_model.tumor_f, do.ssnv_based_model.normal_f\n , '.', lw=0.1, alpha=0.75, color=[0.75, 0.75, 0.75])\n nod_kept = np.logical_and(do.SSNVs['judgement'] == 'KEEP', do.SSNVs.isnull()['failure_reasons']).values\n cis = do.ssnv_based_model.rv_normal_af.interval(0.6825)\n\n kept_def = ax.plot(do.ssnv_based_model.tumor_f[nod_kept], do.ssnv_based_model.normal_f[nod_kept],\n 'b.', lw=0.1)\n d_kept = np.logical_and(do.SSNVs['judgement'] == 'KEEP', ~do.SSNVs.isnull()['failure_reasons']).values\n yerr_low = do.ssnv_based_model.normal_f[d_kept] - cis[0][d_kept]\n yerr_low[yerr_low < 0] = 0\n detin_kept = ax.errorbar(do.ssnv_based_model.tumor_f[d_kept], do.ssnv_based_model.normal_f[d_kept],\n yerr=[yerr_low,\n cis[1][d_kept] - do.ssnv_based_model.normal_f[d_kept]], fmt='r.', capsize=2)\n\n plt.xlabel('Tumor AF')\n plt.ylabel('Normal AF')\n plt.title('SSNVs considered and recovered')\n plt.legend(handles=[background[0], kept_def[0], detin_kept[0], TiN_fit[0]],\n labels=['Candidate Sites', 'Called w/o deTiN ', 'deTiN recovered', 'TiN_fit'])\n fig.set_dpi(300)\n fig.savefig(do.input.output_path + '/' + do.input.output_name + '_SSNVs_plot.png', 
bbox_inches='tight')\n fig.savefig(do.input.output_path + '/' + do.input.output_name + '_SSNVs_plot.eps', format='eps',\n bbox_inches='tight')\n\n\ndef select_candidate_mutations(call_stats_table, exac_db_file):\n # filter sites in call stats table to those only rejected for presence in the normal\n failure_reasons = np.array(call_stats_table['failure_reasons'])\n\n candidate_sites = call_stats_table[np.logical_or.reduce(np.array([np.array(call_stats_table['judgement']) == 'KEEP',\n failure_reasons == 'normal_lod,alt_allele_in_normal',\n failure_reasons == 'alt_allele_in_normal']))]\n candidate_sites['t_depth'] = candidate_sites['t_alt_count'] + candidate_sites['t_ref_count']\n candidate_sites['n_depth'] = candidate_sites['n_alt_count'] + candidate_sites['n_ref_count']\n candidate_sites.reset_index(inplace=True, drop=True)\n candidate_sites = remove_exac_sites_from_call_stats(candidate_sites, exac_db_file)\n\n candidate_sites.reset_index(inplace=True, drop=True)\n return candidate_sites\n\n\ndef hg19_to_linear_positions(chromosome, position, **keyword_parameters):\n # type: (nparray, nparray,string) -> nparray\n \"\"\"\n Change chromosome-position to continuous linear coordinates\n \"\"\"\n if ('build' in keyword_parameters):\n build = keyword_parameters['build']\n else:\n build = 'hg19'\n if build == 'hg19':\n L = np.array([249250621, 243199373, 198022430, 191154276, 180915260, 171115067, 159138663,\n 146364022, 141213431, 135534747, 135006516, 133851895, 115169878, 107349540,\n 102531392, 90354753, 81195210, 78077248, 59128983, 63025520, 48129895, 51304566,\n 155270560, 59373566, 16569]) # chromosome lengths from genome-mysql.cse.ucsc.edu\n if build == 'hg38':\n L = np.array([248956422, 242193529, 198295559, 190214555, 181538259, 170805979, 159345973,\n 145138636, 138394717, 133797422, 135086622, 133275309, 114364328, 107043718,\n 101991189, 90338345, 83257441, 80373285, 58617616, 64444167, 46709983, 50818468,\n 156040895, 57227415, 16569])\n\n C = np.append(1, np.cumsum(L))\n x = np.array([chromosome[int(i)] for i in np.arange(0, len(position))])\n return C[[x.astype(int)]] + position\n\n\ndef fix_het_file_header(het_file):\n # allowing flexibility in het file headers to accommodate changing versions of GATK4 and other CN tools\n # in order to add support for your het file headers please modify the alternate header lists above\n headers = alternate_file_headers()\n\n required_headers = ['CONTIG', 'POSITION', 'ALT_COUNT', 'REF_COUNT']\n\n if np.sum(np.isfinite((is_member(required_headers, het_file.columns)))) == 4:\n return het_file\n else:\n missing_idx = np.where(~np.isfinite((is_member(required_headers, het_file.columns))))\n for i in missing_idx[0]:\n if required_headers[i] == 'POSITION':\n if np.sum(\n np.isfinite(is_member(headers['alternate_headers_position'], het_file.columns))) == 0 or np.sum(\n np.isfinite(is_member(headers['alternate_headers_position'], het_file.columns))) > 1:\n sys.exit('missing required header POSITION and could not replace with POS,position, or pos!')\n else:\n idx_replace = np.where(\n np.isfinite(is_member(headers['alternate_headers_position'], het_file.columns)))\n het_file.rename(columns={headers['alternate_headers_position'][idx_replace[0][0]]: 'POSITION'},\n inplace=True)\n print('changing header of het file from ' + headers['alternate_headers_position'][\n idx_replace[0][0]] + ' to POSITION')\n\n if required_headers[i] == 'CONTIG':\n if np.sum(np.isfinite(\n is_member(headers['alternate_headers_chromosome'], het_file.columns))) == 0 or np.sum(\n 
np.isfinite(is_member(headers['alternate_headers_chromosome'], het_file.columns))) > 1:\n sys.exit(\n 'missing required header CONTIG and could not replace with any one of CHR, chrom, Chromosome, chr, Chrom!')\n else:\n idx_replace = np.where(\n np.isfinite(is_member(headers['alternate_headers_chromosome'], het_file.columns)))\n het_file.rename(columns={headers['alternate_headers_chromosome'][idx_replace[0][0]]: 'CONTIG'},\n inplace=True)\n print('changing header of het file from ' + headers['alternate_headers_chromosome'][\n idx_replace[0][0]] + ' to CONTIG')\n\n if required_headers[i] == 'ALT_COUNT':\n if np.sum(np.isfinite(\n is_member(headers['alternate_headers_alt_count'], het_file.columns))) == 0 or np.sum(\n np.isfinite(is_member(headers['alternate_headers_alt_count'], het_file.columns))) > 1:\n sys.exit(\n 'missing required header ALT_COUNT and could not replace with any one of t_alt_count, n_alt_count, alt_count')\n else:\n idx_replace = np.where(\n np.isfinite(is_member(headers['alternate_headers_alt_count'], het_file.columns)))\n het_file.rename(columns={headers['alternate_headers_alt_count'][idx_replace[0][0]]: 'ALT_COUNT'},\n inplace=True)\n print('changing header of het file from ' + headers['alternate_headers_alt_count'][\n idx_replace[0][0]] + ' to ALT_COUNT')\n\n if required_headers[i] == 'REF_COUNT':\n if np.sum(np.isfinite(\n is_member(headers['alternate_headers_ref_count'], het_file.columns))) == 0 or np.sum(\n np.isfinite(is_member(headers['alternate_headers_ref_count'], het_file.columns))) > 1:\n sys.exit(\n 'missing required header ALT_COUNT and could not replace with any one of t_ref_count, n_ref_count, ref_count')\n else:\n idx_replace = np.where(\n np.isfinite(is_member(headers['alternate_headers_ref_count'], het_file.columns)))\n het_file.rename(columns={headers['alternate_headers_ref_count'][idx_replace[0][0]]: 'REF_COUNT'},\n inplace=True)\n print('changing header of het file from ' + headers['alternate_headers_ref_count'][\n idx_replace[0][0]] + ' to REF_COUNT')\n\n return het_file\n\n\ndef fix_seg_file_header(seg_file):\n # allowing flexibility in seg file headers to accommodate changing versions of GATK4 and other CN tools\n # in order to add support for your seg file headers please modify the alternate header lists above\n\n headers = alternate_file_headers()\n\n required_headers = ['Chromosome', 'Start.bp', 'End.bp', 'f', 'tau', 'n_probes']\n\n if np.sum(np.isfinite((is_member(required_headers, seg_file.columns)))) == 5:\n return seg_file\n else:\n missing_idx = np.where(~np.isfinite((is_member(required_headers, seg_file.columns))))\n for i in missing_idx[0]:\n if required_headers[i] == 'Start.bp':\n if np.sum(np.isfinite(\n is_member(headers['alternate_headers_start_position'], seg_file.columns))) == 0 or np.sum(\n np.isfinite(is_member(headers['alternate_headers_start_position'], seg_file.columns))) > 1:\n sys.exit('missing required header Start.bp and could not replace with Start or Start_bp')\n else:\n idx_replace = np.where(\n np.isfinite(is_member(headers['alternate_headers_start_position'], seg_file.columns)))\n seg_file.rename(\n columns={headers['alternate_headers_start_position'][idx_replace[0][0]]: 'Start.bp'},\n inplace=True)\n print('changing header of seg file from ' + headers['alternate_headers_start_position'][\n idx_replace[0][0]] + ' to Start.bp')\n\n if required_headers[i] == 'End.bp':\n if np.sum(np.isfinite(\n is_member(headers['alternate_headers_end_position'], seg_file.columns))) == 0 or np.sum(\n 
np.isfinite(is_member(headers['alternate_headers_end_position'], seg_file.columns))) > 1:\n sys.exit('missing required header End.bp and could not replace with End or End_bp')\n else:\n idx_replace = np.where(\n np.isfinite(is_member(headers['alternate_headers_end_position'], seg_file.columns)))\n seg_file.rename(columns={headers['alternate_headers_end_position'][idx_replace[0][0]]: 'End.bp'},\n inplace=True)\n print('changing header of seg file from ' + headers['alternate_headers_end_position'][\n idx_replace[0][0]] + ' to End.bp')\n\n if required_headers[i] == 'Chromosome':\n if np.sum(np.isfinite(\n is_member(headers['alternate_headers_chromosome'], seg_file.columns))) == 0 or np.sum(\n np.isfinite(is_member(headers['alternate_headers_chromosome'], seg_file.columns))) > 1:\n sys.exit(\n 'missing required header Chromosome and could not replace with any other header')\n else:\n idx_replace = np.where(\n np.isfinite(is_member(headers['alternate_headers_chromosome'], seg_file.columns)))\n seg_file.rename(columns={headers['alternate_headers_chromosome'][idx_replace[0][0]]: 'Chromosome'},\n inplace=True)\n print('changing header of seg file from ' + headers['alternate_headers_chromosome'][\n idx_replace[0][0]] + ' to Chromosome')\n\n if required_headers[i] == 'f':\n if np.sum(np.isfinite(is_member(headers['alternate_headers_f'], seg_file.columns))) == 0 or np.sum(\n np.isfinite(is_member(headers['alternate_headers_f'], seg_file.columns))) > 1:\n sys.exit(\n 'missing required header f and could not replace with any one of f_acs')\n else:\n idx_replace = np.where(np.isfinite(is_member(headers['alternate_headers_f'], seg_file.columns)))\n seg_file.rename(columns={headers['alternate_headers_f'][idx_replace[0][0]]: 'f'}, inplace=True)\n print('changing header of seg file from ' + headers['alternate_headers_f'][\n idx_replace[0][0]] + ' to f')\n\n if required_headers[i] == 'tau':\n if np.sum(np.isfinite(is_member(headers['alternate_headers_tau'], seg_file.columns))) == 0 or np.sum(\n np.isfinite(is_member(headers['alternate_headers_tau'], seg_file.columns))) > 1:\n sys.exit(\n 'missing required header tau and could not replace with any one of CN')\n else:\n idx_replace = np.where(np.isfinite(is_member(headers['alternate_headers_tau'], seg_file.columns)))\n seg_file.rename(columns={headers['alternate_headers_tau'][idx_replace[0][0]]: 'tau'}, inplace=True)\n if headers['alternate_headers_tau'][idx_replace[0][0]] == 'LOG2_COPY_RATIO_POSTERIOR_50':\n print('transforming log2 data tau column to 2 centered: 2^(CNratio)+1')\n seg_file['tau'] = np.power(2, seg_file['tau']) + 1\n print('changing header of seg file from ' + headers['alternate_headers_tau'][\n idx_replace[0][0]] + ' to tau')\n if required_headers[i] == 'n_probes':\n if np.sum(\n np.isfinite(is_member(headers['alternate_headers_n_probes'], seg_file.columns))) == 0 or np.sum(\n np.isfinite(is_member(headers['alternate_headers_n_probes'], seg_file.columns))) > 1:\n sys.exit(\n 'missing required header n_probes and could not replace with any one of alternates')\n else:\n idx_replace = np.where(\n np.isfinite(is_member(headers['alternate_headers_n_probes'], seg_file.columns)))\n seg_file.rename(columns={headers['alternate_headers_n_probes'][idx_replace[0][0]]: 'n_probes'},\n inplace=True)\n print('changing header of seg file from ' + headers['alternate_headers_n_probes'][\n idx_replace[0][0]] + ' to n_probes')\n\n return seg_file\n\n\ndef read_indel_vcf(vcf, seg_table, indel_type):\n content = []\n if vcf[-2:] == 'gz':\n with gzip.open(vcf, 'r') 
as f:\n content = f.readlines()\n else:\n with open(vcf) as f:\n content = f.readlines()\n\n cols_type = {0: str}\n for line in content:\n if line[0] == '#' and line[1] != '#':\n headerline = line.split('\\t')\n break\n\n if indel_type.lower() == 'strelka':\n indel_table = pd.read_csv(vcf, sep='\\t', comment='#', header=None, low_memory=False, dtype=cols_type)\n indel_table.rename(\n columns={0: 'contig', 1: 'position', 2: 'ID', 3: 'REF', 4: 'ALT', 5: 'QUAL', 7: 'INFO', 8: 'format',\n 6: 'filter', 9: headerline[9].lower(), 10: headerline[10][0:-1].lower()},\n inplace=True)\n counts_format = indel_table['format'][0].split(':')\n depth_ix = counts_format.index('DP')\n alt_indel_ix = counts_format.index('TIR')\n ref_indel_ix = counts_format.index('TAR')\n indel_table = indel_table[np.isfinite(is_member(indel_table['filter'], ['PASS', 'QSI_ref']))]\n indel_table.reset_index(inplace=True, drop=True)\n\n elif indel_type.lower() == 'mutect2':\n indel_table = pd.read_csv(vcf, sep='\\t', comment='#', header=None, low_memory=False, dtype=cols_type)\n # CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tTUMOR\tNORMAL\n normal_sample = 'normal'\n tumor_sample = 'tumor'\n for line in content:\n if line[0:15] == '##normal_sample':\n normal_sample = line.split('=')[1][0:-1]\n if line[0:14] == '##tumor_sample':\n tumor_sample = line.split('=')[1][0:-1]\n if tumor_sample == 'tumor' and normal_sample == 'normal':\n indel_table.rename(\n columns={0: 'contig', 1: 'position', 2: 'ID', 3: 'REF', 4: 'ALT', 5: 'QUAL', 7: 'INFO', 8: 'format',\n 6: 'filter', 9: 'tumor', 10: 'normal'},\n inplace=True)\n else:\n if tumor_sample == headerline[9]:\n indel_table.rename(\n columns={0: 'contig', 1: 'position', 2: 'ID', 3: 'REF', 4: 'ALT', 5: 'QUAL', 7: 'INFO', 8: 'format',\n 6: 'filter', 9: 'tumor', 10: 'normal'},\n inplace=True)\n elif tumor_sample == headerline[10][0:-1]:\n indel_table.rename(\n columns={0: 'contig', 1: 'position', 2: 'ID', 3: 'REF', 4: 'ALT', 5: 'QUAL', 7: 'INFO', 8: 'format',\n 6: 'filter', 9: 'normal', 10: 'tumor'},\n inplace=True)\n else:\n print('failed to read MuTect 2 indels VCF')\n sys.exit()\n counts_format = indel_table['format'][0].split(':')\n depth_ix = counts_format.index('AD')\n indel_table = indel_table[\n np.isfinite(is_member(indel_table['filter'], ['PASS', 'alt_allele_in_normal', 'artifact_in_normal']))]\n indel_table.reset_index(inplace=True, drop=True)\n\n elif indel_type.lower() == 'sanger':\n indel_table = pd.read_csv(vcf, sep='\\t', comment='#', header=None, low_memory=False, dtype=cols_type)\n # CHROM POS ID REF ALT QUAL FILTER INFO FORMAT NORMAL TUMOUR\n indel_table.rename(\n columns={0: 'contig', 1: 'position', 2: 'ID', 3: 'REF', 4: 'ALT', 5: 'QUAL', 7: 'INFO', 8: 'format',\n 6: 'filter', 9: headerline[9].lower(), 10: headerline[10][0:-1].lower()},\n inplace=True)\n b1 = np.logical_or.reduce([indel_table['filter'] == 'F012', indel_table['filter'] == 'F012;F015'])\n b2 = np.logical_or.reduce([indel_table['filter'] == 'PASS', indel_table['filter'] == 'F015'])\n indel_table = indel_table[np.logical_or.reduce([b1, b2])]\n indel_table.reset_index(inplace=True, drop=True)\n format_string = indel_table['format'][0].split(':')\n total_depth_idx = [format_string.index('PR'), format_string.index('NR')]\n alt_count_idx = [format_string.index('PU'), format_string.index('NU')]\n\n # parsing format line and file to determine required alt and ref columns\n # we use \"tier 1\" read counts for varaints\n n_depth = np.zeros([len(indel_table), 1])\n n_alt_count = np.zeros([len(indel_table), 
1])\n n_ref_count = np.zeros([len(indel_table), 1])\n\n t_depth = np.zeros([len(indel_table), 1])\n t_alt_count = np.zeros([len(indel_table), 1])\n t_ref_count = np.zeros([len(indel_table), 1])\n\n for index, row in indel_table.iterrows():\n spl_n = row['normal'].split(':')\n spl_t = row['tumor'].split(':')\n if indel_type.lower() == 'strelka':\n n_depth[index] = int(spl_n[depth_ix])\n n_alt_count[index] = int(spl_n[alt_indel_ix].split(',')[0])\n n_ref_count[index] = int(spl_n[ref_indel_ix].split(',')[0])\n t_depth[index] = int(spl_t[depth_ix])\n t_alt_count[index] = int(spl_t[alt_indel_ix].split(',')[0])\n t_ref_count[index] = int(spl_t[ref_indel_ix].split(',')[0])\n if indel_type.lower() == 'mutect2':\n n_alt_count[index] = int(spl_n[depth_ix].split(',')[1])\n n_ref_count[index] = int(spl_n[depth_ix].split(',')[0])\n n_depth[index] = n_alt_count[index] + n_ref_count[index]\n t_alt_count[index] = int(spl_t[depth_ix].split(',')[1])\n t_ref_count[index] = int(spl_t[depth_ix].split(',')[0])\n t_depth[index] = t_alt_count[index] + t_ref_count[index]\n if indel_type.lower() == 'sanger':\n n_depth[index] = np.sum([int(spl_n[i]) for i in total_depth_idx])\n n_alt_count[index] = np.sum([int(spl_n[i]) for i in alt_count_idx])\n n_ref_count[index] = n_depth[index] - n_alt_count[index]\n t_depth[index] = np.sum([int(spl_t[i]) for i in total_depth_idx])\n t_alt_count[index] = np.sum([int(spl_t[i]) for i in alt_count_idx])\n t_ref_count[index] = t_depth[index] - t_alt_count[index]\n if len(indel_table) == 0:\n indel_table = pd.DataFrame(index=[0],\n columns=['contig', 'position', 'ID', 'REF', 'ALT', 'QUAL', 'INFO', 'format',\n 'filter', headerline[9].lower(), headerline[10][0:-1].lower(),\n 't_depth', 't_alt_count', 't_ref_count', 'n_alt_count', 'n_depth',\n 'n_ref_count', 'tau', 'f_acs', 'Chromosome', 'genomic_coord_x'])\n else:\n indel_table['t_depth'] = t_alt_count + t_ref_count\n indel_table['t_alt_count'] = t_alt_count\n indel_table['t_ref_count'] = t_ref_count\n\n indel_table['n_depth'] = n_alt_count + n_ref_count\n indel_table['n_alt_count'] = n_alt_count\n indel_table['n_ref_count'] = n_ref_count\n # only consider sites which were rejected as germline or were passed\n if type(indel_table['contig'][0]) == str:\n indel_table['Chromosome'] = chr2num(indel_table['contig'])\n else:\n indel_table['Chromosome'] = indel_table['contig'] - 1\n # add linear position field and consider only sites which are rejected as germline i.e. 
PASS or QSI_ref\n indel_table = indel_table[np.isfinite(indel_table['Chromosome'])]\n indel_table.reset_index(inplace=True, drop=True)\n indel_table['genomic_coord_x'] = hg19_to_linear_positions(indel_table['Chromosome'], indel_table['position'])\n # annotate with acs data\n f_acs = np.zeros([len(indel_table), 1]) + 0.5\n tau = np.zeros([len(indel_table), 1]) + 2\n for i, r in seg_table.iterrows():\n f_acs[np.logical_and(np.array(indel_table['genomic_coord_x']) >= r['genomic_coord_start'],\n np.array(indel_table['genomic_coord_x']) <= r['genomic_coord_end'])] = r.f\n tau[np.logical_and(np.array(indel_table['genomic_coord_x']) >= r['genomic_coord_start'],\n np.array(indel_table['genomic_coord_x']) <= r[\n 'genomic_coord_end'])] = r.tau + 0.001\n indel_table['tau'] = tau\n indel_table['f_acs'] = f_acs\n return indel_table\n\n\ndef build_exac_pickle(exac_file):\n # create ExAC site dictionary from VCF file\n exac_site_info = {}\n print('Filtering ExAC sites from candidate mutations')\n with gzip.open(exac_file, \"rb\") as vcf_file:\n for line_index, line in enumerate(vcf_file):\n if line_index % 10000 == 0:\n print('processed ' + str(line_index) + ' ExAC sites')\n spl = line.strip(\"\\n\").split(\"\\t\")\n\n # line is a comment\n if line[0] == '#':\n continue\n site = spl[0] + '_' + spl[1]\n info = spl[7]\n info_dict = {}\n info_dict['ref_allele'] = spl[3]\n info_dict['alt_allele'] = spl[4]\n for field in info.strip(\"\\n\").split(\";\"):\n if field.split(\"=\")[0] not in ['AC', 'AF']: continue\n try:\n info_dict[field.split(\"=\")[0]] = field.split(\"=\")[1]\n except:\n pass\n # print 'bad field:', field\n # select only sites where population allele fractions exceeds 0.01\n if np.sum(np.array(info_dict['AF'].split(','), dtype=float)) >= 0.01:\n exac_site_info[site] = info_dict\n with open('exac.pickle', 'wb') as handle:\n pickle.dump(exac_site_info, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef remove_exac_sites_from_call_stats(call_stats_table, exac_file):\n # use ExAC vcf to filter likely germline variants out of candidate sites\n with open(exac_file, 'rb') as handle:\n exac_dict = pickle.load(handle)\n keep = np.ones_like(call_stats_table['position'], dtype=bool)\n for index, row in call_stats_table.iterrows():\n key = str(row['contig']) + '_' + str(row['position'])\n try:\n exac_dict[key]\n # print 'removing site '+ key+ ' minor allele fraction = ' + str(exac_dict[key]['AF'])\n keep[index] = False\n except KeyError:\n pass\n return call_stats_table[keep]\n" ]
[ [ "numpy.ones_like", "numpy.true_divide", "numpy.min", "numpy.mean", "numpy.cumsum", "pandas.concat", "pandas.read_csv", "matplotlib.pyplot.xticks", "numpy.max", "matplotlib.pyplot.subplots", "numpy.logical_and", "numpy.isfinite", "numpy.nanmax", "matplotlib.use", "numpy.array", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "numpy.power", "numpy.isnan", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "numpy.sum", "matplotlib.pyplot.legend", "numpy.ones", "matplotlib.pyplot.ylabel", "numpy.abs", "numpy.logical_or.reduce", "numpy.linspace", "numpy.unique" ] ]
elifesciences-publications/temporal_prediction_model
[ "bbad285cf7d543f049f047d8941132d1b2a7223c" ]
[ "src/python_code/network_code/predictive_network_base.py" ]
[ "import os \nimport copy\nimport pickle as pkl\nfrom imp import reload\nimport numpy as np\nimport theano\nimport lasagne\nfrom visualisation.network_visualisation import plot_loss \nfrom network_code import optimizer_lasagne\nreload(optimizer_lasagne)\n\n#-------------------------------------------------------------------------------\n#----------------------------------CONSTANTS------------------------------------\n#-------------------------------------------------------------------------------\n\nFLOATX = 'float32' # needed to use the GPU\nGRAD_CLIP = 100\n# DEFAULT_N_HIDDEN = 100\n# DEFAULT_N_LAYERS = 1\n# DEFAULT_BATCH_PERC = 0.05\n\nNON_LINEARITIES = {'sigmoid':lasagne.nonlinearities.sigmoid,\n 'tanh':lasagne.nonlinearities.tanh,\n 'rectify':lasagne.nonlinearities.rectify,\n 'relu':lasagne.nonlinearities.rectify,\n 'leaky_rectify':lasagne.nonlinearities.leaky_rectify,\n 'softplus':lasagne.nonlinearities.softplus,\n 'linear':None}\n\nUPDATE_FUNCS = {'sgd': lasagne.updates.sgd,\n 'momentum': lasagne.updates.momentum,\n 'nesterov_momentum': lasagne.updates.nesterov_momentum,\n 'adagrad': lasagne.updates.adagrad,\n 'rmsprop': lasagne.updates.rmsprop,\n 'adadelta': lasagne.updates.adadelta,\n 'adam': lasagne.updates.adam}\nREGULARIZATION = {'l1':lasagne.regularization.l1,\n 'l2':lasagne.regularization.l2}\n\nclass PredictiveNetwork(object): \n\n \"\"\"\n Predictive coding network. \n Network and cost settings as well as other \n class attributes can be set by passing in **kwargs \n when instantiating.\n \"\"\"\n\n def __init__(self, **kwargs):\n\n self._init_default_attributes()\n \n for key, value in kwargs.items():\n if key in self.network_settings:\n self.network_settings[key] = value\n elif key in self.cost_settings:\n self.cost_settings[key] = value\n elif key in self.cost_history:\n self.cost_history[key] = value\n elif key in self.input_settings:\n self.input_settings[key] = value\n # print('recieved: ' + key + ' in input settings, overwriting default value with: ' + str(value))\n #TODO:check that this works!\n elif hasattr(self, key):\n # print(key)\n # print(value)\n setattr(self, key, value)\n \n # if self.network is None:\n # elif ((self.train_fn is None) or (self.val_fn is None)):\n # self._init_cost_funcs()\n\n#-------------------------------------------------------------------------------\n#---------------------------Initialization functions----------------------------\n#-------------------------------------------------------------------------------\n def _init_default_attributes(self):\n self.network_settings = {'nonlinearity': 'sigmoid',\n 'is_recurrent': False,\n 'model': None,#'fully_connected_nn',\n 'num_hidden_units': None,\n 'num_layers': None,\n 'dropout': 0}\n\n self.cost_settings = {'update_func': 'adam',\n 'regularization': 'l1',\n 'reg_factor': 0,\n 'learning_rate': 0.01,\n 'act_reg': None,\n 'act_reg_factor': None, \n 'elastic_alpha': None, \n 'output_distribution': None}\n self.cost_history = {'train_costs': [],\n 'val_costs': [],\n 'final_train_cost': 0,\n 'final_val_cost': 0}\n\n self.input_settings = {'RF_size': 20,\n 'filter_type': 2,\n 'noise_ratio': 0,\n 'input_noise_ratio': 0,\n 'data_path': '', \n 'post_dict': False, \n 'norm_type': 0,\n 't_past':7, \n 't_future':1}\n\n #Theano specific variables\n # self.theano_vars = {'network':None,\n # '_input_var':None,\n # '_target_var':None,\n # 'train_fn':None,\n # 'val_fn':None}\n self.network = None\n self._input_var = None\n self._target_var = None\n self.train_fn = None\n self.val_fn = None\n\n 
self.network_param_values = None\n self.initial_param_values = None\n self.save_path = '' #TODO perhaps this doesn't belong here\n \n \n # Because theano vars are dependant on CudNN or similar variables, it is not a \n # good idea to save them together with the rest of the model as they won't load \n # on computers with different backends. To get around this, define functions to \n # construct and delete theano dependant vars.\n \n\n def init_theano_vars(self):\n \"\"\"\n NB: requires that all necessary class atributes are already initialised\n correctly. Depending on the subclass, these will vary.\n\n Can call this either from __init__ when instantiaiting the object for\n the first time or when loading from pickle and need to recreate the theano vars \n \"\"\"\n self._init_network()\n self._init_cost_funcs()\n\n if self.network_param_values is not None:\n lasagne.layers.set_all_param_values(self.network, \n self.network_param_values)\n\n return \n\n def erase_theano_vars(self):\n \"\"\"\n Set all Theano dependant variables to None\n \"\"\"\n self.network = None\n self._input_var = None\n self._target_var = None\n self.train_fn = None\n self.val_fn = None\n self.test_prediction = None\n self.precision = None\n return \n\n\n def _init_network(self, *args, **kwargs):\n \"\"\"\n This is an abstract method that must be implemented in the child class\n \"\"\"\n raise NotImplementedError() \n\n def _init_cost_funcs(self):\n network = self.network\n reg_factor = self.cost_settings['reg_factor']\n regularization = self.cost_settings['regularization']\n update_func = self.cost_settings['update_func']\n\n prediction = lasagne.layers.get_output(network, deterministic=False)\n if 'output_distribution' in self.cost_settings:\n if self.cost_settings['output_distribution'] is not None: \n if self.cost_settings['output_distribution'] == 'independent_unimodal_gaussian':\n mu = prediction[:,:self.n_output_units,:]\n precision = prediction[:,self.n_output_units:,:]\n sq_error = lasagne.objectives.squared_error(mu, self._target_var)\n cost = (np.exp(precision)*sq_error) - precision\n cost = cost.mean() \n elif self.cost_settings['output_distribution'] == 'input_independent_unimodal_gaussian':\n mu = prediction\n self.precision = theano.shared(np.zeros(self.n_output_units).astype('float32'))\n sq_error = lasagne.objectives.squared_error(mu, self._target_var)\n cost = (np.exp(self.precision)*sq_error) - self.precision\n cost = cost.mean() \n # Retrieve all trainable parameters in network\n params = lasagne.layers.get_all_params(network, trainable=True)\n params.append(self.precision)\n\n else:\n cost = lasagne.objectives.squared_error(prediction, self._target_var)\n # cost = cost.sum(axis=2) #sum across space\n # cost = cost.sum(axis=1) #sum across time\n\n # cost = cost.mean(axis=2) #take the mean across space\n # cost = cost.mean(axis=1) #take the mean across time\n cost = cost.mean() #take the mean across training examples\n\n if regularization == 'l1' or regularization == 'l2':\n reg_penalty = reg_factor * lasagne.regularization.regularize_network_params(network, REGULARIZATION[regularization])\n else:\n print(\"Unrecognised regularisation type. 
Setting to None!\")\n\n cost = cost + reg_penalty\n \n\n act_reg_factor = self.cost_settings['act_reg_factor']\n act_reg = self.cost_settings['act_reg']\n if act_reg is not None:\n all_layers = lasagne.layers.get_all_layers(network)\n #get rid of inupt and output layers\n hidden_layers = all_layers[1:-1]\n hu_activities = lasagne.layers.get_output(hidden_layers)\n if act_reg =='l2' or act_reg=='l1': \n act_reg_penalty = act_reg_factor*lasagne.regularization.apply_penalty(hu_activities, REGULARIZATION[act_reg]) \n cost = cost + act_reg_penalty\n \n params = lasagne.layers.get_all_params(network, trainable=True)\n \n # params = lasagne.layers.get_all_params(network)\n\n\n u_func = UPDATE_FUNCS[update_func]\n if (update_func == 'sgd' or update_func == 'momentum' \n or update_func == 'nesterov_momentum'):\n updates = u_func(cost, params, self.network_settings['learning_rate'])\n else:\n updates = u_func(cost, params)\n\n test_prediction = lasagne.layers.get_output(network, deterministic=True)\n self.test_prediction = theano.function([self._input_var], test_prediction)\n\n test_cost = lasagne.objectives.squared_error(test_prediction, self._target_var)\n # test_cost = test_cost.sum(axis=1)\n \n test_cost = test_cost.mean()\n self.train_fn = theano.function([self._input_var, self._target_var], cost, \n updates=updates, allow_input_downcast=True)\n self.val_fn = theano.function([self._input_var, self._target_var], cost,\n allow_input_downcast=True)\n return self.train_fn, self.val_fn \n\n#-------------------------------------------------------------------------------\n#-------------------------------Public functions--------------------------------\n#-------------------------------------------------------------------------------\n\n def train_network(self, X_train, y_train, X_val= None, y_val = None, \n num_epochs = 100, show_graph = False, max_epochs = None):\n\n if max_epochs is not None:\n n_epochs_run = len(self.cost_history['train_costs'])\n n_to_run = max_epochs - n_epochs_run\n if n_to_run < num_epochs:\n print('Already run %i' %n_epochs_run + ' epochs out of %i' %max_epochs)\n print('Only running %i' %n_to_run + ' epochs instead of %i' %num_epochs)\n num_epochs = n_to_run\n \n if num_epochs>0:\n\n optimizer = optimizer_lasagne.Optimizer(self.network, \n self.train_fn, \n val_fn=self.val_fn,\n verbose=1, \n batch_size=None)\n\n print(\"Starting training ...\")\n\n network_param_values, cost_history = optimizer.optimize(X_train, y_train, \n X_val=X_val, y_val=y_val, \n num_epochs=num_epochs,\n )\n\n self.network_param_values = network_param_values\n self.cost_history['train_costs'].extend(cost_history['train_costs'])\n self.cost_history['val_costs'].extend(cost_history['val_costs'])\n if self.cost_history['val_costs'] !=[]:\n self.cost_history['final_val_cost'] = self.cost_history['val_costs'][-1]\n self.cost_history['final_train_cost'] = self.cost_history['train_costs'][-1]\n if show_graph:\n plot_loss(self.cost_history['train_costs'], \n val_losses=self.cost_history['val_costs'])\n return \n\n\n\n def get_network_results_and_settings(self, use_nans=False):\n '''Get parameters and settings of network'''\n d = {}\n # d['network'] = self.network\n if self.network is not None:\n d['network_param_values'] = lasagne.layers.get_all_param_values(self.network)\n else:\n d['network_param_values'] = self.network_param_values\n d['network_settings'] = self.network_settings\n d['cost_settings'] = self.cost_settings\n d['input_settings'] = self.input_settings\n d['cost_history'] = self.cost_history\n 
d['save_path'] = self.save_path\n\n #Would want to replace None with np.nan if saving to .matfile\n if use_nans:\n for kk,vv in d.items():\n if isinstance(vv, dict):\n for k,v in vv.items():\n if v is None:\n vv[k] = np.nan\n return d\n\n\n def to_pickle(self, save_path=None):\n '''Pickle the network object'''\n if save_path is not None:\n self.save_path = save_path\n save_path = os.path.expanduser(self.save_path)\n\n if not os.path.exists(os.path.dirname(os.path.abspath(save_path))):\n os.makedirs(os.path.dirname(os.path.abspath(save_path)))\n\n #ensure the network_param_values are up to date:\n self.network_param_values = lasagne.layers.get_all_param_values(self.network)\n #first eliminate any theano dependant vars\n #but if we are in mid-training and just want to save a copy at an intermediate\n #stage, then don't want to lose update parameters (such as momentum etc.)\n #to avoid this, make a copy, delete theano vars and save the copy\n obj_to_save = copy.copy(self)\n obj_to_save.erase_theano_vars()\n pkl.dump(obj_to_save, open(save_path, 'wb'))\n # #instead of saving the pn object which is dependant on theano variables, \n # # rather save the network settings etc\n # pkl.dump(self.get_network_results_and_settings(use_nans=False), open(save_path, 'wb'))\n return\n\n\n#function rather than class method\ndef from_pickle(load_path):\n load_path = os.path.expanduser(load_path)\n loaded_pn = pkl.load(open(load_path, 'rb'))\n loaded_pn.init_theano_vars()\n return loaded_pn\n\n" ]
[ [ "numpy.exp", "numpy.zeros" ] ]
mikiya1130/deep-learning-from-scratch
[ "d265bc25b2dc38f13a540e43fcd36195a71f2fc1", "d265bc25b2dc38f13a540e43fcd36195a71f2fc1" ]
[ "ch03/sig_step_compare.py", "ch07/train_convnet.py" ]
[ "#%%\n# coding: utf-8\nimport numpy as np\nimport matplotlib.pylab as plt\n\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef step_function(x):\n return np.array(x > 0, dtype=np.int)\n\nx = np.arange(-5.0, 5.0, 0.1)\ny1 = sigmoid(x)\ny2 = step_function(x)\n\nplt.plot(x, y1)\nplt.plot(x, y2, 'k--')\nplt.ylim(-0.1, 1.1) #図で描画するy軸の範囲を指定\nplt.show()\n", "# %%\n# coding: utf-8\nimport sys\nimport os\nsys.path.append(os.pardir) # nopep8 親ディレクトリのファイルをインポートするための設定\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom dataset.mnist import load_mnist\nfrom simple_convnet import SimpleConvNet\nfrom common.trainer import Trainer\n\n# データの読み込み\n(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)\n\n# 処理に時間のかかる場合はデータを削減\n#x_train, t_train = x_train[:5000], t_train[:5000]\n#x_test, t_test = x_test[:1000], t_test[:1000]\n\nmax_epochs = 20\n\nnetwork = SimpleConvNet(input_dim=(1, 28, 28),\n conv_param={'filter_num': 30,\n 'filter_size': 5, 'pad': 0, 'stride': 1},\n hidden_size=100, output_size=10, weight_init_std=0.01)\n\ntrainer = Trainer(network, x_train, t_train, x_test, t_test,\n epochs=max_epochs, mini_batch_size=100,\n optimizer='Adam', optimizer_param={'lr': 0.001},\n evaluate_sample_num_per_epoch=1000)\ntrainer.train()\n\n# パラメータの保存\nnetwork.save_params(\"params.pkl\")\nprint(\"Saved Network Parameters!\")\n\n# グラフの描画\nmarkers = {'train': 'o', 'test': 's'}\nx = np.arange(max_epochs)\nplt.plot(x, trainer.train_acc_list, marker='o', label='train', markevery=2)\nplt.plot(x, trainer.test_acc_list, marker='s', label='test', markevery=2)\nplt.xlabel(\"epochs\")\nplt.ylabel(\"accuracy\")\nplt.ylim(0, 1.0)\nplt.legend(loc='lower right')\nplt.show()\n" ]
[ [ "numpy.array", "numpy.exp", "matplotlib.pylab.show", "numpy.arange", "matplotlib.pylab.ylim", "matplotlib.pylab.plot" ], [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "numpy.arange", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
Meng-Lingjun-xjtu/ttt-plus-plus
[ "0f26c689fcaa666fa3524e7c5b6de0c1cb510e89" ]
[ "cifar/ttt++.py" ]
[ "from __future__ import print_function\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom utils.misc import *\nfrom utils.test_helpers import *\nfrom utils.prepare_dataset import *\n\n# ----------------------------------\n\nimport copy\nimport time\nimport pandas as pd\n\nimport random\nimport numpy as np\nimport torch.backends.cudnn as cudnn\n\nfrom discrepancy import *\nfrom offline import *\nfrom utils.trick_helpers import *\nfrom utils.contrastive import *\n\nfrom online import FeatureQueue\n\n# ----------------------------------\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', default='cifar10')\nparser.add_argument('--dataroot', default=None)\nparser.add_argument('--shared', default=None)\n########################################################################\nparser.add_argument('--depth', default=26, type=int)\nparser.add_argument('--width', default=1, type=int)\nparser.add_argument('--batch_size', default=128, type=int)\nparser.add_argument('--batch_size_align', default=512, type=int)\nparser.add_argument('--queue_size', default=256, type=int)\nparser.add_argument('--group_norm', default=0, type=int)\nparser.add_argument('--workers', default=0, type=int)\nparser.add_argument('--num_sample', default=1000000, type=int)\n########################################################################\nparser.add_argument('--lr', default=0.001, type=float)\nparser.add_argument('--nepoch', default=500, type=int, help='maximum number of epoch for ttt')\nparser.add_argument('--bnepoch', default=2, type=int, help='first few epochs to update bn stat')\nparser.add_argument('--delayepoch', default=0, type=int)\nparser.add_argument('--stopepoch', default=25, type=int)\n########################################################################\nparser.add_argument('--outf', default='.')\n########################################################################\nparser.add_argument('--level', default=5, type=int)\nparser.add_argument('--corruption', default='snow')\nparser.add_argument('--resume', default=None, help='directory of pretrained model')\nparser.add_argument('--ckpt', default=None, type=int)\nparser.add_argument('--fix_ssh', action='store_true')\n########################################################################\nparser.add_argument('--method', default='ssl', choices=['ssl', 'align', 'both'])\nparser.add_argument('--divergence', default='all', choices=['all', 'coral', 'mmd'])\nparser.add_argument('--scale_ext', default=0.5, type=float, help='scale of align loss on ext')\nparser.add_argument('--scale_ssh', default=0.2, type=float, help='scale of align loss on ssh')\n########################################################################\nparser.add_argument('--ssl', default='contrastive', help='self-supervised task')\nparser.add_argument('--temperature', default=0.5, type=float)\n########################################################################\nparser.add_argument('--align_ext', action='store_true')\nparser.add_argument('--align_ssh', action='store_true')\n########################################################################\nparser.add_argument('--model', default='resnet50', help='resnet50')\nparser.add_argument('--save_every', default=100, type=int)\n########################################################################\nparser.add_argument('--tsne', action='store_true')\n########################################################################\nparser.add_argument('--seed', default=0, type=int)\n\n\nargs = 
parser.parse_args()\n\nprint(args)\n\nmy_makedir(args.outf)\n\ntorch.manual_seed(args.seed)\nrandom.seed(args.seed)\nnp.random.seed(args.seed)\n\ncudnn.benchmark = True\n\n# -------------------------------\n\nnet, ext, head, ssh, classifier = build_resnet50(args)\n\n_, teloader = prepare_test_data(args)\n\n# -------------------------------\n\nargs.batch_size = min(args.batch_size, args.num_sample)\nargs.batch_size_align = min(args.batch_size_align, args.num_sample)\n\nargs_align = copy.deepcopy(args)\nargs_align.ssl = None\nargs_align.batch_size = args.batch_size_align\n\nif args.method == 'align':\n _, trloader = prepare_test_data(args_align, ttt=True, num_sample=args.num_sample)\nelse:\n _, trloader = prepare_train_data(args, args.num_sample)\n\nif args.method == 'both':\n _, trloader_extra = prepare_test_data(args_align, ttt=True, num_sample=args.num_sample)\n trloader_extra_iter = iter(trloader_extra)\n\n# -------------------------------\n\nprint('Resuming from %s...' %(args.resume))\n\nload_resnet50(net, head, ssh, classifier, args)\n\nif torch.cuda.device_count() > 1:\n ext = torch.nn.DataParallel(ext)\n\n# ----------- Offline Feature Summarization ------------\n\nif args.method in ['align', 'both']:\n\n if args.queue_size > args.batch_size_align:\n assert args.queue_size % args.batch_size_align == 0\n # reset batch size by queue size\n args_align.batch_size = args.queue_size\n\n _, offlineloader = prepare_train_data(args_align)\n\n MMD_SCALE_FACTOR = 0.5\n if args.align_ext:\n args_align.scale = args.scale_ext\n cov_src_ext, coral_src_ext, mu_src_ext, mmd_src_ext = offline(offlineloader, ext, args.scale_ext)\n scale_coral_ext = args.scale_ext / coral_src_ext\n scale_mmd_ext = args.scale_ext / mmd_src_ext * MMD_SCALE_FACTOR\n\n # construct queue\n if args.queue_size > args.batch_size_align:\n queue_ext = FeatureQueue(dim=mu_src_ext.shape[0], length=args.queue_size-args.batch_size_align)\n\n if args.align_ssh:\n args_align.scale = args.scale_ssh\n from models.SSHead import ExtractorHead\n cov_src_ssh, coral_src_ssh, mu_src_ssh, mmd_src_ssh = offline(offlineloader, ExtractorHead(ext, head).cuda(), args.scale_ssh)\n scale_align_ssh = args.scale_ssh / coral_src_ssh\n scale_mmd_ssh = args.scale_ssh / mmd_src_ssh * MMD_SCALE_FACTOR\n\n if args.queue_size > args.batch_size_align:\n queue_ssh = FeatureQueue(dim=mu_src_ssh.shape[0], length=args.queue_size-args.batch_size_align)\n\n# ----------- Test ------------\n\nif args.tsne:\n args_src = copy.deepcopy(args)\n args_src.corruption = 'original'\n _, srcloader = prepare_test_data(args_src)\n feat_src, label_src, tsne_src = visu_feat(ext, srcloader, os.path.join(args.outf, 'original.pdf'))\n feat_tar, label_tar, tsne_tar = visu_feat(ext, teloader, os.path.join(args.outf, args.corruption + '_test_class.pdf'))\n calculate_distance(feat_src, label_src, tsne_src, feat_tar, label_tar, tsne_tar)\n # comp_feat(feat_src, label_src, feat_tar, label_tar, os.path.join(args.outf, args.corruption + '_test_marginal.pdf'))\n\nall_err_cls = []\nall_err_ssh = []\n\nprint('Running...')\nprint('Error (%)\\t\\ttest')\n\nerr_cls = test(teloader, net)[0]\nprint(('Epoch %d/%d:' %(0, args.nepoch)).ljust(24) +\n '%.2f\\t\\t' %(err_cls*100))\n\n# -------------------------------\n\nif args.fix_ssh:\n optimizer = optim.SGD(ext.parameters(), lr=args.lr, momentum=0.9)\nelse:\n optimizer = optim.SGD(ssh.parameters(), lr=args.lr, momentum=0.9)\n\nscheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n 'min', factor=0.5, patience=10, cooldown=10,\n threshold=0.0001, 
threshold_mode='rel', min_lr=0.0001, verbose=True)\n\ncriterion = SupConLoss(temperature=args.temperature).cuda()\n\n# ----------- Improved Test-time Training ------------\n\nis_both_activated = False\n\nfor epoch in range(1, args.nepoch+1):\n\n tic = time.time()\n\n if args.fix_ssh:\n classifier.eval()\n head.eval()\n else:\n classifier.train()\n head.train()\n ext.train()\n\n for batch_idx, (inputs, labels) in enumerate(trloader):\n\n optimizer.zero_grad()\n\n if args.method in ['ssl', 'both']:\n images = torch.cat([inputs[0], inputs[1]], dim=0)\n images = images.cuda(non_blocking=True)\n labels = labels.cuda(non_blocking=True)\n bsz = labels.shape[0]\n features = ssh(images)\n f1, f2 = torch.split(features, [bsz, bsz], dim=0)\n features = torch.cat([f1.unsqueeze(1), f2.unsqueeze(1)], dim=1)\n loss = criterion(features)\n loss.backward()\n del loss\n\n if args.method == 'align':\n if args.align_ext:\n\n loss = 0\n feat_ext = ext(inputs.cuda())\n\n # queue\n if args.queue_size > args.batch_size_align:\n feat_queue = queue_ext.get()\n queue_ext.update(feat_ext)\n if feat_queue is not None:\n feat_ext = torch.cat([feat_ext, feat_queue.cuda()])\n\n # coral\n if args.divergence in ['coral', 'all']:\n cov_ext = covariance(feat_ext)\n loss += coral(cov_src_ext, cov_ext) * scale_coral_ext\n\n # mmd\n if args.divergence in ['mmd', 'all']:\n mu_ext = feat_ext.mean(dim=0)\n loss += linear_mmd(mu_src_ext, mu_ext) * scale_mmd_ext\n\n loss.backward()\n del loss\n\n if args.align_ssh:\n\n loss = 0\n feat_ssh = head(ext(inputs.cuda()))\n\n # queue\n if args.queue_size > args.batch_size_align:\n feat_queue = queue_ssh.get()\n queue_ssh.update(feat_ssh)\n if feat_queue is not None:\n feat_ssh = torch.cat([feat_ssh, feat_queue.cuda()])\n\n if args.divergence in ['coral', 'all']:\n cov_ssh = covariance(feat_ssh)\n loss += coral(cov_src_ssh, cov_ssh) * scale_align_ssh\n\n if args.divergence in ['mmd', 'all']:\n mu_ssh = feat_ssh.mean(dim=0)\n loss += linear_mmd(mu_src_ssh, mu_ssh) * scale_mmd_ssh\n\n loss.backward()\n del loss\n\n if args.method == 'both' and is_both_activated:\n\n try:\n inputs, _ = next(trloader_extra_iter)\n except StopIteration:\n del trloader_extra_iter\n trloader_extra_iter = iter(trloader_extra)\n inputs, _ = next(trloader_extra_iter)\n\n if args.align_ext:\n\n loss = 0\n feat_ext = ext(inputs.cuda())\n\n # queue\n if args.queue_size > args.batch_size_align:\n feat_queue = queue_ext.get()\n queue_ext.update(feat_ext)\n if feat_queue is not None:\n feat_ext = torch.cat([feat_ext, feat_queue.cuda()])\n\n if args.divergence in ['coral', 'all']:\n cov_ext = covariance(feat_ext)\n loss += coral(cov_src_ext, cov_ext) * scale_coral_ext\n if args.divergence in ['mmd', 'all']:\n mu_ext = feat_ext.mean(dim=0)\n loss += linear_mmd(mu_src_ext, mu_ext) * scale_mmd_ext\n\n loss.backward()\n del loss\n\n if args.align_ssh: \n\n loss = 0\n\n feat_ssh = head(ext(inputs.cuda()))\n\n # queue\n if args.queue_size > args.batch_size_align:\n feat_queue = queue_ssh.get()\n queue_ssh.update(feat_ssh)\n if feat_queue is not None:\n feat_ssh = torch.cat([feat_ssh, feat_queue.cuda()])\n\n if args.divergence in ['coral', 'all']:\n cov_ssh = covariance(feat_ssh)\n loss += coral(cov_src_ssh, cov_ssh) * scale_align_ssh\n if args.divergence in ['mmd', 'all']:\n mu_ssh = feat_ssh.mean(dim=0)\n loss += linear_mmd(mu_src_ssh, mu_ssh) * scale_mmd_ssh\n\n loss.backward()\n del loss\n\n if epoch > args.bnepoch:\n optimizer.step()\n\n err_cls = test(teloader, net)[0]\n all_err_cls.append(err_cls)\n\n toc = time.time()\n 
print(('Epoch %d/%d (%.0fs):' %(epoch, args.nepoch, toc-tic)).ljust(24) +\n '%.2f\\t\\t' %(err_cls*100))\n\n # both components\n if args.method == 'both' and not is_both_activated and epoch > args.bnepoch + args.delayepoch:\n is_both_activated = True\n\n # termination\n if epoch > (args.stopepoch + 1) and all_err_cls[-args.stopepoch] < min(all_err_cls[-args.stopepoch+1:]):\n print(\"Termination: {:.2f}\".format(all_err_cls[-args.stopepoch]*100))\n break\n\n # save\n if epoch > args.bnepoch and epoch % args.save_every == 0 and all_err_cls[-1] < min(all_err_cls[:-2]):\n state = {'net': net.state_dict(), 'head': head.state_dict()}\n save_file = os.path.join(args.outf, args.corruption + '_' + args.method + '.pth')\n torch.save(state, save_file)\n print('Save model to', save_file)\n\n if args.tsne and epoch > args.bnepoch and err_cls < min(all_err_cls[:-1]):\n ext_best = copy.deepcopy(ext.state_dict())\n\n # lr decay\n scheduler.step(err_cls)\n\n# -------------------------------\n\nif args.method == 'ssl':\n prefix = os.path.join(args.outf, args.corruption + '_ssl')\nelif args.method == 'align':\n prefix = os.path.join(args.outf, args.corruption + '_align')\nelif args.method == 'both':\n prefix = os.path.join(args.outf, args.corruption + '_tttpp')\nelse:\n raise NotImplementedError\n\nif args.tsne:\n ext.load_state_dict(ext_best, strict=True)\n feat_tar, label_tar, tsne_tar = visu_feat(ext, teloader, prefix+'_class.pdf')\n calculate_distance(feat_src, label_src, tsne_src, feat_tar, label_tar, tsne_tar)\n # comp_feat(feat_src, label_src, feat_tar, label_tar, prefix+'_marginal.pdf')\n\n# -------------------------------\n\n# df = pd.DataFrame([all_err_cls, all_err_ssh]).T\n# df.to_csv(prefix, index=False, float_format='%.4f', header=False)\n" ]
[ [ "torch.cat", "numpy.random.seed", "torch.save", "torch.split", "torch.cuda.device_count", "torch.manual_seed", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.nn.DataParallel" ] ]
J08nY/minerva
[ "8182d0006c434257ecb3689b2b4506b02e70303a" ]
[ "experiments/random/plot_new.py" ]
[ "#!/usr/bin/env python\nimport argparse\nimport itertools as it\nfrom copy import copy\nfrom math import sqrt, ceil\nimport re\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import cm, gridspec\nfrom matplotlib.colors import Normalize\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter)\n\nfrom join import load_transformed, AttackRun\n\nd_list = list(range(50, 142, 2))\nn_list = list(it.chain(range(500, 7100, 100), range(8000, 11000, 1000)))\n\n\ndef get_gridspec(datas, grid):\n if grid is None:\n w = ceil(sqrt(len(datas)))\n if len(datas) == 2:\n gs = gridspec.GridSpec(2, 1)\n else:\n gs = gridspec.GridSpec(w, w)\n else:\n gs = gridspec.GridSpec(*grid)\n return gs\n\n\ndef reshape_grid(x, y, z, n_list, d_list):\n X, Y = np.meshgrid(n_list, d_list)\n zs = np.array(z)\n Z = zs.reshape(X.shape, order=\"F\")\n return X, Y, Z\n\n\ndef sync_viewing(axes, fig):\n def on_move(event):\n for ax in axes:\n if event.inaxes == ax:\n break\n else:\n return\n\n for axx in axes:\n if axx != ax:\n if ax.button_pressed in ax._rotate_btn:\n axx.view_init(elev=ax.elev, azim=ax.azim)\n elif ax.button_pressed in ax._zoom_btn:\n axx.set_xlim3d(ax.get_xlim3d())\n axx.set_ylim3d(ax.get_ylim3d())\n axx.set_zlim3d(ax.get_zlim3d())\n fig.canvas.draw_idle()\n\n fig.canvas.mpl_connect(\"motion_notify_event\", on_move)\n\n\ndef remap(vals, default, ns=n_list, ds=d_list):\n N = []\n D = []\n V = []\n for n in ns:\n for d in ds:\n N.append(n)\n D.append(d)\n key = (n, d)\n if key in vals:\n V.append(vals[key])\n else:\n V.append(default)\n return N, D, V\n\n\ndef map2success(data):\n vals = {}\n min_n = None\n for run in data:\n key = (run.N, run.d)\n vals.setdefault(key, 0)\n if run.success:\n vals[key] += 1\n if min_n is None or min_n > run.N:\n min_n = run.N\n N, D, S = remap(vals, 0)\n return N, D, S, min_n\n\n\ndef map2normdist(data):\n vals = {}\n cnts = {}\n for run in data:\n key = (run.N, run.d)\n vals.setdefault(key, 0)\n cnts.setdefault(key, 0)\n if run.success and run.normdist is not None:\n vals[key] += run.normdist\n cnts[key] += 1\n for key in vals:\n if cnts[key] != 0:\n vals[key] /= cnts[key]\n N, D, S = remap(vals, 0)\n return N, D, S, None\n\n\ndef map2row(data):\n vals = {}\n cnts = {}\n for run in data:\n key = (run.N, run.d)\n vals.setdefault(key, 0)\n cnts.setdefault(key, 0)\n if run.success and run.row is not None:\n vals[key] += run.row\n cnts[key] += 1\n for key in vals:\n if cnts[key] != 0:\n vals[key] /= cnts[key]\n N, D, S = remap(vals, 0)\n return N, D, S, None\n\ndef map2runs(data):\n vals = {}\n for run in data:\n key = (run.N, run.d)\n vals.setdefault(key, 0)\n vals[key] += 1\n N, D, R = remap(vals, 0)\n return N, D, R, None\n\ndef _map2single(data, attr):\n vals = {}\n cnts = {}\n for run in data:\n key = (run.N, run.d)\n vals.setdefault(key, 0)\n cnts.setdefault(key, 0)\n vals[key] += getattr(run, attr)\n cnts[key] += 1\n for key in vals:\n if cnts[key] != 0:\n vals[key] /= cnts[key]\n N, D, V = remap(vals, 0)\n return N, D, V, None\n\n\ndef map2liars(data):\n return _map2single(data, \"liars\")\n\ndef map2info(data):\n return _map2single(data, \"info\")\n\ndef map2realinfo(data):\n return _map2single(data, \"real_info\")\n\ndef map2goodinfo(data):\n return _map2single(data, \"good_info\")\n\ndef map2badinfo(data):\n return _map2single(data, \"bad_info\")\n\ndef map2success_avg(data):\n res = {n: 0 for n in n_list}\n cnts = {n: 0 for n in n_list}\n for run in data:\n cnts[run.N] += 1\n if run.success:\n res[run.N] += 1\n for n in n_list:\n if 
cnts[n] != 0:\n res[n] /= cnts[n]\n return [res[n] for n in n_list]\n\n\ndef map2liarpos(data, dim):\n liar_indices = [0 for _ in range(dim)]\n count = 0\n for run in data:\n if run.d == dim:\n count += 1\n liar_positions = run.liar_positions\n if liar_positions:\n liars = liar_positions.split(\";\")\n for liar in liars:\n i = int(liar.split(\"@\")[0])\n liar_indices[i] += 1\n if count != 0:\n liar_indices = [i/count for i in liar_indices]\n return liar_indices\n\n\ndef map2liarpos_heat(data):\n vals = {}\n cnts = {}\n for run in data:\n cnts.setdefault(run.d, 0)\n cnts[run.d] += 1\n if run.liar_positions:\n liars = run.liar_positions.split(\";\")\n for liar in liars:\n i = int(liar.split(\"@\")[0])\n key = (run.d, i)\n vals.setdefault(key, 0)\n vals[key] += 1\n for key in vals:\n if cnts[key[0]] != 0:\n vals[key] /= cnts[key[0]]\n N, D, V = remap(vals, 0, d_list, list(range(0, 50, 2)) + d_list)\n return N, D, V, None\n\n\ndef map2liardepth(data, dim):\n liar_indices = [0 for _ in range(dim)]\n liar_counts = [0 for _ in range(dim)]\n for run in data:\n if run.d == dim:\n liar_positions = run.liar_positions\n if liar_positions:\n liars = liar_positions.split(\";\")\n for liar in liars:\n i, j = liar.split(\"@\")\n i = int(i)\n j = int(j)\n liar_indices[i] += j\n liar_counts[i] += 1\n for i in range(dim):\n if liar_counts[i] != 0:\n liar_indices[i] /= liar_counts[i]\n return liar_indices\n\ndef map2liardepth_heat(data):\n vals = {}\n cnts = {}\n for run in data:\n if run.liar_positions:\n liars = run.liar_positions.split(\";\")\n for liar in liars:\n i, j = liar.split(\"@\")\n i = int(i)\n j = int(j)\n key = (run.d, i)\n vals.setdefault(key, 0)\n cnts.setdefault(key, 0)\n vals[key] += j\n cnts[key] += 1\n for key in vals:\n if cnts[key] != 0:\n vals[key] /= cnts[key]\n N, D, V = remap(vals, 0, d_list, list(range(0, 50, 2)) + d_list)\n return N, D, V, None\n\ndef map2liarinfo(data, dim):\n liar_indices = [0 for _ in range(dim)]\n count = 0\n for run in data:\n if run.d == dim:\n count += 1\n liar_positions = run.liar_positions\n if liar_positions:\n liars = liar_positions.split(\";\")\n for liar in liars:\n i, j = liar.split(\"@\")\n i = int(i)\n j = int(j)\n liar_indices[i] += j\n if count != 0:\n liar_indices = [i/count for i in liar_indices]\n return liar_indices\n\ndef map2liarinfo_heat(data):\n vals = {}\n cnts = {}\n for run in data:\n cnts.setdefault(run.d, 0)\n cnts[run.d] += 1\n if run.liar_positions:\n liars = run.liar_positions.split(\";\")\n for liar in liars:\n i, j = liar.split(\"@\")\n i = int(i)\n j = int(j)\n key = (run.d, i)\n vals.setdefault(key, 0)\n vals[key] += j\n for key in vals:\n if cnts[key[0]] != 0:\n vals[key] /= cnts[key[0]]\n N, D, V = remap(vals, 0, d_list, list(range(0, 50, 2)) + d_list)\n return N, D, V, None\n\ndef map2blocks(data):\n vals = {}\n cnts = {}\n for run in data:\n key = (run.N, run.d)\n vals.setdefault(key, 0)\n cnts.setdefault(key, 0)\n if run.success:\n vals[key] += run.block_size\n cnts[key] += 1\n for key in vals:\n if cnts[key] != 0:\n vals[key] /= cnts[key]\n N, D, B = remap(vals, 0)\n return N, D, B, None\n\n\ndef map2runtime(data):\n vals = {}\n cnts = {}\n for run in data:\n key = (run.N, run.d)\n vals.setdefault(key, 0)\n cnts.setdefault(key, 0)\n if run.success:\n vals[key] += run.time\n cnts[key] += 1\n for key in vals:\n if cnts[key] != 0:\n vals[key] /= cnts[key]\n N, D, R = remap(vals, 0)\n return N, D, R, None\n\n\ndef plot_todim(datas, fig, map_func, ylabel):\n ax = fig.add_subplot(111)\n ax.set_ylabel(ylabel)\n 
ax.set_xlabel(\"Dimension of matrix (D)\")\n for name, data in sorted(datas.items()):\n value = map_func(data)\n ax.plot(d_list, value, label=name)\n ax.legend()\n\n\ndef plot_toN(datas, fig, map_func, ylabel):\n ax = fig.add_subplot(111)\n ax.set_ylabel(ylabel)\n ax.xaxis.set_major_locator(MultipleLocator(1000))\n ax.xaxis.set_major_formatter(FormatStrFormatter(\"%d\"))\n ax.xaxis.set_minor_locator(MultipleLocator(100))\n ax.set_xlabel(\"Number of signatures (N)\")\n for name, data in sorted(datas.items()):\n value = map_func(data)\n ax.plot(n_list, value, label=name)\n ax.legend()\n\n\ndef plot_dim(datas, fig, map_func, dim, ylabel):\n ax = fig.add_subplot(111)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(\"Index\")\n for name, data in sorted(datas.items()):\n value = map_func(data, dim)\n ax.plot(range(dim), value, label=name)\n ax.legend()\n\n\ndef plot_heatmap(datas, fig, map_func, zlabel, flat=True, grid=None, ns=n_list, ds=d_list, xlabel=\"Number of signatures (N)\", ylabel=\"Dimension of matrix (D)\"):\n gs = get_gridspec(datas, grid)\n i = 0\n axes = []\n for name, data in sorted(datas.items()):\n if flat:\n ax = fig.add_subplot(gs[i])\n else:\n ax = fig.add_subplot(gs[i], projection=\"3d\")\n axes.append(ax)\n x, y, z, min_n = map_func(data)\n X, Y, Z = reshape_grid(x, y, z, ns, ds)\n cmap = copy(cm.get_cmap(\"viridis\"))\n cmap.set_under(\"black\")\n #norm = Normalize(vmin=0.0001)\n norm = None\n if flat:\n im = ax.pcolormesh(X, Y, Z, cmap=cmap, norm=norm)\n if min_n is not None:\n ax.axvline(x=min_n, label=\"{}\".format(min_n), color=\"red\")\n fig.colorbar(im)\n else:\n ax.plot_surface(X, Y, Z, cmap=cmap, norm=norm)\n ax.set_zlabel(zlabel)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_title(name)\n i += 1\n if not flat:\n sync_viewing(axes, fig)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--figsize\", type=str, default=\"7x4\")\n parser.add_argument(\"--grid\", type=str)\n parser.add_argument(\"--data\", type=str, required=True)\n parser.add_argument(\"--bounds\", type=str, required=True)\n parser.add_argument(\"--methods\", type=str, required=True)\n parser.add_argument(\"--recenter\", type=str, required=True)\n parser.add_argument(\"--c\", type=str, required=True)\n parser.add_argument(\"--flat\", action=\"store_true\")\n parser.add_argument(\"figure\", type=str)\n\n args = parser.parse_args()\n figsize = tuple(map(float, args.figsize.split(\"x\")))\n grid = tuple(map(int, args.grid.split(\"x\"))) if args.grid else None\n data_types = args.data.split(\",\")\n bound_types = args.bounds.split(\",\")\n method_types = args.methods.split(\",\")\n recenter_types = args.recenter.split(\",\")\n c_types = args.c.split(\",\")\n figure = args.figure\n\n runs = load_transformed(\"results/runs.pickle\")\n fig = plt.figure(figsize=figsize)\n datas = {}\n for run in runs:\n if run.dataset in data_types and run.bounds in bound_types and run.method in method_types and run.recenter in recenter_types and run.c in c_types:\n s = datas.setdefault(\"_\".join((run.dataset, run.bounds, run.method, run.recenter, run.c)), set())\n s.add(run)\n if figure == \"success\":\n plot_heatmap(datas, fig, map2success, \"Successes (out of 5)\", flat=args.flat, grid=grid)\n elif figure == \"normdist\":\n plot_heatmap(datas, fig, map2normdist, \"Normdist\", flat=args.flat, grid=grid)\n elif figure == \"row\":\n plot_heatmap(datas, fig, map2row, \"Row\", flat=args.flat, grid=grid)\n elif figure == \"blocks\":\n plot_heatmap(datas, fig, map2blocks, \"Block 
size\", flat=args.flat, grid=grid)\n elif figure == \"runtime\":\n plot_heatmap(datas, fig, map2runtime, \"Runtime (s)\", flat=args.flat, grid=grid)\n elif figure == \"runs\":\n plot_heatmap(datas, fig, map2runs, \"Total runs\", flat=args.flat, grid=grid)\n elif figure == \"liars\":\n plot_heatmap(datas, fig, map2liars, \"Liars\", flat=args.flat, grid=grid)\n elif figure == \"info\":\n plot_heatmap(datas, fig, map2info, \"info\", flat=args.flat, grid=grid)\n elif figure == \"goodinfo\":\n plot_heatmap(datas, fig, map2goodinfo, \"good info\", flat=args.flat, grid=grid)\n elif figure == \"badinfo\":\n plot_heatmap(datas, fig, map2badinfo, \"bad info\", flat=args.flat, grid=grid)\n elif figure == \"realinfo\":\n plot_heatmap(datas, fig, map2realinfo, \"real info\", flat=args.flat, grid=grid)\n elif figure == \"success_avg\":\n plot_toN(datas, fig, map2success_avg, \"Successes\")\n elif figure == \"liarpos\":\n plot_heatmap(datas, fig, map2liarpos_heat, \"liar amount\", flat=args.flat, grid=grid, ns=d_list, ds=list(range(0, 50, 2)) + d_list, xlabel=\"run.D\", ylabel=\"D\")\n elif figure == \"liardepth\":\n plot_heatmap(datas, fig, map2liardepth_heat, \"liar depth(average)\", flat=args.flat, grid=grid, ns=d_list, ds=list(range(0, 50, 2)) + d_list, xlabel=\"run.D\", ylabel=\"D\")\n elif figure == \"liarinfo\":\n plot_heatmap(datas, fig, map2liarinfo_heat, \"liar info\", flat=args.flat, grid=grid, ns=d_list, ds=list(range(0, 50, 2)) + d_list, xlabel=\"run.D\", ylabel=\"D\")\n elif dim_match := re.match(\"liarpos\\(([0-9]+)\\)\", figure):\n plot_dim(datas, fig, map2liarpos, int(dim_match.group(1)), \"liar amount\")\n elif dim_match := re.match(\"liardepth\\(([0-9]+)\\)\", figure):\n plot_dim(datas, fig, map2liardepth, int(dim_match.group(1)), \"liar depth (average)\")\n elif dim_match := re.match(\"liarinfo\\(([0-9]+)\\)\", figure):\n plot_dim(datas, fig, map2liarinfo, int(dim_match.group(1)), \"liar info\")\n else:\n print(\"Unknown figure.\")\n exit(1)\n fig.tight_layout()\n plt.show()\n" ]
[ [ "numpy.array", "matplotlib.ticker.MultipleLocator", "matplotlib.cm.get_cmap", "matplotlib.ticker.FormatStrFormatter", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "numpy.meshgrid", "matplotlib.gridspec.GridSpec" ] ]
itohamy/dpmmpython
[ "7a84da36e07dd22a581de2138f368bdded32ac4e" ]
[ "trials_py2.py" ]
[ "\n\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom julia.api import Julia\njl = Julia(compiled_modules=False)\nfrom sys import path\nfrom dpmmpython.priors import niw\nfrom dpmmpython.dpmmwrapper import DPMMPython\nfrom dpmmpython.dpmmwrapper import DPMModel\nimport random\n\n#path[4] = '/Users/irita/Documents/Master/Research/PhD_research/BNP/Code - Or/DPMM_python_my_wrapper/dpmmpython' \n#import dpmmpython\n#print(dpmmpython)\n#1/0\n\n\nrandom.seed(10)\n\n# --- Toy data #1:\n# N = 20000 #Number of points\n# D = 2 # Dimension\n# modes = 20 # Number of Clusters\n# var_scale = 100.0 # The variance of the MV-Normal distribution where the clusters means are sampled from.\n\n# --- Toy data #2:\nN = 100 # 10000 # number of points\nD = 2 # data dimension\nmodes = 3 # number of modes\nvar_scale = 80.0\n\n# --- Extract the data in the shape (D,N).\ndata, labels = DPMMPython.generate_gaussian_data(N, D, modes, var_scale)\nprint(labels)\n# Changing the lables to be incorrect (to see how the splits work)\n#labels[labels==3] = 2\n#labels[labels==4] = 3\n#labels[labels==5] = 4\n#labels[labels==6] = 130\n\n# --- hyper params #1:\n# hyper_prior = niw(1,np.zeros(D),5,np.eye(D)*0.5)\n# alpha = 10.\n# iters = 500\n\n# --- hyper params #2:\ninit_clusters = np.unique(labels).size\nm = np.zeros(D)\nk = init_clusters #1.0\nnu = 130. # should be > D\npsi = np.cov(data)*0.01 # shape (D,D)\nhyper_prior = niw(k, m, nu, psi)\nalpha = 1.\niters = 50 #200\n\n# --- Print original label counts: (need to fix, see how it's done in trials.jl)\n# label_counts = np.zeros(init_clusters)\n# for i in range(len(labels)):\n# l = int(labels[i]-1)\n# label_counts[l] = label_counts[l] + 1\n\n# for i in range(len(label_counts)):\n# print(\"label \", str(i+1), \": \", str(label_counts[i]))\n\n\n# --- Run DP:\n# This call runs the fit function of DPMM and also provides the \"predict\" function for later:\ndp = DPMModel(data, alpha, prior = hyper_prior, iterations=iters, outlier_params=labels, verbose=True)\n\n# print('results:')\n# print(\"---\")\n# print(dp._labels)\n# print(\"---\")\n# print(dp._k)\n# print(\"---\")\n# print(dp._mu)\n# print(\"---\")\n# print(dp._weights)\n# print(\"---\")\n# print(dp._label_mapping)\n# print(\"---\")\n\n#print(\"data 2:\", data[:,2], \" data 10:\", data[:,10], \"data 98:\", data[:,98])\n\n#x_new = np.ones((1,2))*(-1)\n#print(dp.predict(x_new))\n#print(\"---\")\nprint(dp.predict(data.T))\n\n# _k = len(results[1])\n# _labels = results[0] - 1\n\n# from julia import Main as jl\n# jl.dpmm = results\n# _d = jl.eval(\"dpmm[2][1].μ\").shape[0] # infer d\n# #_weights = jl.eval(\"dpmm[4]\")\n# _sublabels = jl.eval(\"dpmm[3]\")\n\n# _mu = np.empty((_k, _d))\n# _sigma = np.empty((_k, _d, _d))\n# _logdetsigma = np.empty(_k)\n# _invsigma = np.empty((_k, _d, _d))\n# _invchol = np.empty((_k, _d, _d))\n\n# for i in range(1, _k+1):\n# _mu[i-1] = jl.eval(f\"dpmm[2][{i}].μ\")\n# _sigma[i-1] = jl.eval(f\"dpmm[2][{i}].Σ\")\n# _logdetsigma[i-1] = jl.eval(f\"dpmm[2][{i}].logdetΣ\")\n# _invsigma[i-1] = jl.eval(f\"dpmm[2][{i}].invΣ\")\n# _invchol[i-1] = jl.eval(f\"dpmm[2][{i}].invChol\")\n# print(_invchol[i-1])\n\n# _det_sigma_inv_sqrt = 1/np.sqrt(np.exp(_logdetsigma))\n" ]
[ [ "numpy.cov", "numpy.zeros", "numpy.unique" ] ]
joaochenriques/MCTE_2022
[ "b999d60b6c4153be5a314da262a18e467cb41d7e" ]
[ "libs/mpl_utils.py" ]
[ "import matplotlib.pyplot as mpl\nimport numpy as np\nfrom cycler import cycler\nimport os\n\ndef config_plots( font_sans_serif=False ):\n \n if font_sans_serif:\n mpl.rcParams['mathtext.fontset'] = 'dejavusans'\n mpl.rcParams['font.family'] = 'DejaVu Sans'\n mpl.rcParams['font.sans-serif'] = 'dejavusans'\n else:\n mpl.style.use('classic')\n mpl.rcParams['figure.facecolor'] = '1.0'\n mpl.rcParams['font.family'] = 'STIXGeneral'\n mpl.rcParams['font.sans-serif'] = 'stix'\n mpl.rcParams['mathtext.fontset'] = 'stix'\n\n VFont=16\n mpl.rcParams['axes.titlesize'] = VFont\n mpl.rcParams['axes.labelsize'] = VFont\n mpl.rcParams['xtick.labelsize'] = VFont*0.9\n mpl.rcParams['ytick.labelsize'] = VFont*0.9\n mpl.rcParams['legend.fontsize'] = VFont*0.9 \n\n mpl.rcParams['axes.formatter.useoffset'] = False\n mpl.rcParams['savefig.directory'] = \"\"\n mpl.rcParams['savefig.format'] = 'pdf'\n mpl.rcParams[\"figure.dpi\"] = 280\n mpl.rcParams[\"figure.figsize\"] = ( 6, 4.5 )\n mpl.rcParams[\"lines.markersize\"] = 5\n \n dc = ( cycler( linestyle = [ \\\n (0,(6,0)), (0,(6,2)), (0,(6,1.5,1.5,1.5)), \\\n (0,(8,1.5,1.5,1.5,1.5,1.5)), (0,(11,3)), (0,(2,2)), \\\n (0,(11,3,3,3,)), (0,(11,2,5,2)), (0,(17,2)), \\\n (0,(17,2,2,2)) ] ) + \\\n cycler('color', [ \\\n '#1f77b4', '#ff7f0e', '#2ca02c', \\\n '#d62728', '#9467bd', '#8c564b', \\\n '#e377c2', '#7f7f7f', '#bcbd22', \\\n '#17becf'] ) \\\n ) \n mpl.rcParams[\"axes.prop_cycle\"] = dc\n \n mpl.rcParams['lines.linewidth'] = 1.5\n\n mpl.rcParams['figure.subplot.left' ] = 0.14\n mpl.rcParams['figure.subplot.right' ] = 0.96\n mpl.rcParams['figure.subplot.bottom'] = 0.14\n mpl.rcParams['figure.subplot.top' ] = 0.95\n mpl.rcParams['figure.subplot.wspace'] = 0.20\n mpl.rcParams['figure.subplot.hspace'] = 0.20\n\ndef useTeX():\n cmds = ( ( 'sudo apt-get install texlive-latex-recommended' ),\n ( 'sudo apt-get install dvipng texlive-fonts-recommended' ),\n ( 'wget http://mirrors.ctan.org/macros/latex/contrib/type1cm.zip' ),\n ( 'unzip type1cm.zip -d /tmp/type1cm' ),\n ( 'cd /tmp/type1cm/type1cm/ && sudo latex type1cm.ins' ),\n ( 'sudo mkdir /usr/share/texmf/tex/latex/type1cm' ),\n ( 'sudo cp /tmp/type1cm/type1cm/type1cm.sty /usr/share/texmf/tex/latex/type1cm' ),\n ( 'sudo texhash' ) )\n\n for cmd in cmds:\n print( cmd )\n os.system( cmd )\n \n mpl.rcParams['text.usetex'] = True\n mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})\n\ndef inline_label( lbl, x, y, i, fontsize = 8, color = 'k' ):\n \n # get screen coordinates (physical display)\n screen_dx, screen_dy = mpl.gca().transData.transform( (x[i+1], y[i+1]) ) \\\n - mpl.gca().transData.transform( (x[i-1], y[i-1]) )\n \n angle = ( np.degrees( np.arctan2( screen_dy, screen_dx ) ) + 90 ) % 180 - 90\n \n mpl.gca().text( x[i], y[i], lbl,\n rotation=angle,\n fontsize=fontsize, color=color, \n horizontalalignment='center',\n verticalalignment='center', \n bbox=dict( boxstyle='round, pad=-0.2', \n edgecolor='k', facecolor='w', \n linewidth=0,\n alpha=0.90) \n )\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.arctan2", "matplotlib.pyplot.style.use", "matplotlib.pyplot.rc" ] ]
mencattini/bmdt
[ "681b31c409fa1ea8d218b8b004aa4a0ce75bca11" ]
[ "bmdt/core.py" ]
[ "from collections import Counter\nimport numpy as np\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.base import BaseEstimator, ClassifierMixin\n\n\nclass Bmdt(BaseEstimator, ClassifierMixin):\n \"\"\"Binary Multiclass Decision Tree classification algorithm. It can be\n choice when there is a big majority class. There is fit and score\n methods like in Scikit.\"\"\"\n\n def __init__(self, class_weight=\"balanced\"):\n self.binary = DecisionTreeClassifier()\n\n self.multi = DecisionTreeClassifier()\n\n self.class_weight = class_weight\n self.binary.class_weight = self.class_weight\n self.majority_class = None\n self.classes = None\n\n def fit(self, X, y):\n \"\"\"Training function. It takes a training vector features and a\n training class vector.\"\"\"\n X = np.array(X)\n y = np.array(y)\n copy_y = y.copy()\n self.classes = np.unique(y)\n # we find the majority class\n self.majority_class = Counter(y).most_common()[0][0]\n # create a mask for the binary classification\n mask = copy_y == self.majority_class\n # apply the mask\n copy_y[mask] = self.majority_class\n copy_y[~mask] = 0\n # fit the binary classifier if the mask is enough\n if np.any(mask):\n self.binary.fit(X, copy_y)\n # get the predictions\n y_pred = self.binary.predict(X)\n # filter the non majority class\n mask = y_pred != self.majority_class\n if np.any(mask):\n # fit on it\n self.multi.fit(X[mask], y[mask])\n else:\n self.multi.fit(X, y)\n else:\n self.multi.fit(X, y)\n\n def predict(self, X):\n \"\"\"Predict function. It predict the class, based on given features\n vector.\"\"\"\n X = np.array(X)\n y_pred = self.binary.predict(X)\n mask = y_pred != self.majority_class\n # to avoid the case of empty array\n if np.any(mask):\n y_pred[mask] = self.multi.predict(X[mask])\n return y_pred\n\n def score(self, X, y, sample_weight=None):\n \"\"\"Score function. It computes the accuracy based on given features\n vector and class vector\"\"\"\n X = np.array(X)\n y = np.array(y)\n y_pred = self.predict(X)\n return np.sum(y_pred == y) / y.shape[0]\n" ]
[ [ "numpy.array", "numpy.sum", "numpy.any", "sklearn.tree.DecisionTreeClassifier", "numpy.unique" ] ]
LongerVision/cudf
[ "b3dc9d6adf16635c94a1735ad9ca73c12494884e" ]
[ "python/cudf/cudf/core/column/column.py" ]
[ "# Copyright (c) 2018-2022, NVIDIA CORPORATION.\n\nfrom __future__ import annotations\n\nimport builtins\nimport pickle\nimport warnings\nfrom functools import cached_property\nfrom types import SimpleNamespace\nfrom typing import (\n Any,\n Dict,\n List,\n MutableSequence,\n Optional,\n Sequence,\n Tuple,\n TypeVar,\n Union,\n cast,\n)\n\nimport cupy\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nfrom numba import cuda\n\nimport cudf\nfrom cudf import _lib as libcudf\nfrom cudf._lib.column import Column\nfrom cudf._lib.null_mask import (\n MaskState,\n bitmask_allocation_size_bytes,\n create_null_mask,\n)\nfrom cudf._lib.scalar import as_device_scalar\nfrom cudf._lib.stream_compaction import (\n apply_boolean_mask,\n distinct_count as cpp_distinct_count,\n drop_duplicates,\n drop_nulls,\n)\nfrom cudf._lib.transform import bools_to_mask\nfrom cudf._typing import BinaryOperand, ColumnLike, Dtype, ScalarLike\nfrom cudf.api.types import (\n _is_non_decimal_numeric_dtype,\n _is_scalar_or_zero_d_array,\n infer_dtype,\n is_bool_dtype,\n is_categorical_dtype,\n is_decimal32_dtype,\n is_decimal64_dtype,\n is_decimal128_dtype,\n is_decimal_dtype,\n is_dtype_equal,\n is_integer_dtype,\n is_interval_dtype,\n is_list_dtype,\n is_scalar,\n is_string_dtype,\n is_struct_dtype,\n)\nfrom cudf.core.abc import Serializable\nfrom cudf.core.buffer import Buffer\nfrom cudf.core.dtypes import (\n CategoricalDtype,\n IntervalDtype,\n ListDtype,\n StructDtype,\n)\nfrom cudf.core.mixins import Reducible\nfrom cudf.utils import utils\nfrom cudf.utils.dtypes import (\n cudf_dtype_from_pa_type,\n get_time_unit,\n min_unsigned_type,\n np_to_pa_dtype,\n pandas_dtypes_alias_to_cudf_alias,\n pandas_dtypes_to_np_dtypes,\n)\nfrom cudf.utils.utils import NotIterable, mask_dtype\n\nT = TypeVar(\"T\", bound=\"ColumnBase\")\n# TODO: This workaround allows type hints for `slice`, since `slice` is a\n# method in ColumnBase.\nSlice = TypeVar(\"Slice\", bound=slice)\n\n\nclass ColumnBase(Column, Serializable, Reducible, NotIterable):\n _VALID_REDUCTIONS = {\n \"any\",\n \"all\",\n \"max\",\n \"min\",\n }\n\n def as_frame(self) -> \"cudf.core.frame.Frame\":\n \"\"\"\n Converts a Column to Frame\n \"\"\"\n return cudf.core.single_column_frame.SingleColumnFrame(\n {None: self.copy(deep=False)}\n )\n\n @property\n def data_array_view(self) -> \"cuda.devicearray.DeviceNDArray\":\n \"\"\"\n View the data as a device array object\n \"\"\"\n return cuda.as_cuda_array(self.data).view(self.dtype)\n\n @property\n def mask_array_view(self) -> \"cuda.devicearray.DeviceNDArray\":\n \"\"\"\n View the mask as a device array\n \"\"\"\n return cuda.as_cuda_array(self.mask).view(mask_dtype)\n\n def __len__(self) -> int:\n return self.size\n\n def __repr__(self):\n return (\n f\"{object.__repr__(self)}\\n\"\n f\"{self.to_arrow().to_string()}\\n\"\n f\"dtype: {self.dtype}\"\n )\n\n def to_pandas(self, index: pd.Index = None, **kwargs) -> \"pd.Series\":\n \"\"\"Convert object to pandas type.\n\n The default implementation falls back to PyArrow for the conversion.\n \"\"\"\n # This default implementation does not handle nulls in any meaningful\n # way, but must consume the parameter to avoid passing it to PyArrow\n # (which does not recognize it).\n kwargs.pop(\"nullable\", None)\n pd_series = self.to_arrow().to_pandas(**kwargs)\n\n if index is not None:\n pd_series.index = index\n return pd_series\n\n @property\n def values_host(self) -> \"np.ndarray\":\n \"\"\"\n Return a numpy representation of the Column.\n \"\"\"\n if len(self) == 
0:\n return np.array([], dtype=self.dtype)\n\n if self.has_nulls():\n raise ValueError(\"Column must have no nulls.\")\n\n return self.data_array_view.copy_to_host()\n\n @property\n def values(self) -> \"cupy.ndarray\":\n \"\"\"\n Return a CuPy representation of the Column.\n \"\"\"\n if len(self) == 0:\n return cupy.array([], dtype=self.dtype)\n\n if self.has_nulls():\n raise ValueError(\"Column must have no nulls.\")\n\n return cupy.asarray(self.data_array_view)\n\n def find_and_replace(\n self: T,\n to_replace: ColumnLike,\n replacement: ColumnLike,\n all_nan: bool = False,\n ) -> T:\n raise NotImplementedError\n\n def clip(self, lo: ScalarLike, hi: ScalarLike) -> ColumnBase:\n return libcudf.replace.clip(self, lo, hi)\n\n def equals(self, other: ColumnBase, check_dtypes: bool = False) -> bool:\n if self is other:\n return True\n if other is None or len(self) != len(other):\n return False\n if check_dtypes and (self.dtype != other.dtype):\n return False\n return self.binary_operator(\"NULL_EQUALS\", other).all()\n\n def all(self, skipna: bool = True) -> bool:\n # The skipna argument is only used for numerical columns.\n # If all entries are null the result is True, including when the column\n # is empty.\n\n if self.null_count == self.size:\n return True\n\n return libcudf.reduce.reduce(\"all\", self, dtype=np.bool_)\n\n def any(self, skipna: bool = True) -> bool:\n # Early exit for fast cases.\n\n if not skipna and self.has_nulls():\n return True\n elif skipna and self.null_count == self.size:\n return False\n\n return libcudf.reduce.reduce(\"any\", self, dtype=np.bool_)\n\n def dropna(self, drop_nan: bool = False) -> ColumnBase:\n # The drop_nan argument is only used for numerical columns.\n return drop_nulls([self])[0]\n\n def to_arrow(self) -> pa.Array:\n \"\"\"Convert to PyArrow Array\n\n Examples\n --------\n >>> import cudf\n >>> col = cudf.core.column.as_column([1, 2, 3, 4])\n >>> col.to_arrow()\n <pyarrow.lib.Int64Array object at 0x7f886547f830>\n [\n 1,\n 2,\n 3,\n 4\n ]\n \"\"\"\n return libcudf.interop.to_arrow(\n cudf.core.frame.Frame(\n cudf.core.column_accessor.ColumnAccessor({\"None\": self})\n ),\n [[\"None\"]],\n keep_index=False,\n )[\"None\"].chunk(0)\n\n @classmethod\n def from_arrow(cls, array: pa.Array) -> ColumnBase:\n \"\"\"\n Convert PyArrow Array/ChunkedArray to column\n\n Parameters\n ----------\n array : PyArrow Array/ChunkedArray\n\n Returns\n -------\n column\n\n Examples\n --------\n >>> import pyarrow as pa\n >>> import cudf\n >>> cudf.core.column.ColumnBase.from_arrow(pa.array([1, 2, 3, 4]))\n <cudf.core.column.numerical.NumericalColumn object at 0x7f8865497ef0>\n \"\"\"\n if not isinstance(array, (pa.Array, pa.ChunkedArray)):\n raise TypeError(\"array should be PyArrow array or chunked array\")\n\n data = pa.table([array], [None])\n\n if isinstance(array.type, pa.DictionaryType):\n indices_table = pa.table(\n {\n \"None\": pa.chunked_array(\n [chunk.indices for chunk in data[\"None\"].chunks],\n type=array.type.index_type,\n )\n }\n )\n dictionaries_table = pa.table(\n {\n \"None\": pa.chunked_array(\n [chunk.dictionary for chunk in data[\"None\"].chunks],\n type=array.type.value_type,\n )\n }\n )\n\n codes = libcudf.interop.from_arrow(\n indices_table, indices_table.column_names\n )[0][\"None\"]\n categories = libcudf.interop.from_arrow(\n dictionaries_table, dictionaries_table.column_names\n )[0][\"None\"]\n\n return build_categorical_column(\n categories=categories,\n codes=codes,\n mask=codes.base_mask,\n size=codes.size,\n 
ordered=array.type.ordered,\n )\n elif isinstance(array.type, pa.StructType):\n return cudf.core.column.StructColumn.from_arrow(array)\n elif isinstance(\n array.type, pd.core.arrays._arrow_utils.ArrowIntervalType\n ):\n return cudf.core.column.IntervalColumn.from_arrow(array)\n\n result = libcudf.interop.from_arrow(data, data.column_names)[0][\"None\"]\n\n return result._with_type_metadata(cudf_dtype_from_pa_type(array.type))\n\n def _get_mask_as_column(self) -> ColumnBase:\n return libcudf.transform.mask_to_bools(\n self.base_mask, self.offset, self.offset + len(self)\n )\n\n @cached_property\n def memory_usage(self) -> int:\n n = 0\n if self.data is not None:\n n += self.data.size\n if self.nullable:\n n += bitmask_allocation_size_bytes(self.size)\n return n\n\n def _fill(\n self,\n fill_value: ScalarLike,\n begin: int,\n end: int,\n inplace: bool = False,\n ) -> Optional[ColumnBase]:\n if end <= begin or begin >= self.size:\n return self if inplace else self.copy()\n\n # Constructing a cuDF scalar can cut unnecessary DtoH copy if\n # the scalar is None when calling `is_valid`.\n slr = cudf.Scalar(fill_value, dtype=self.dtype)\n\n if not inplace:\n return libcudf.filling.fill(self, begin, end, slr.device_value)\n\n if is_string_dtype(self.dtype):\n return self._mimic_inplace(\n libcudf.filling.fill(self, begin, end, slr.device_value),\n inplace=True,\n )\n\n if not slr.is_valid() and not self.nullable:\n mask = create_null_mask(self.size, state=MaskState.ALL_VALID)\n self.set_base_mask(mask)\n\n libcudf.filling.fill_in_place(self, begin, end, slr.device_value)\n\n return self\n\n def shift(self, offset: int, fill_value: ScalarLike) -> ColumnBase:\n # libcudf currently doesn't handle case when offset > len(df)\n # ticket to fix the bug in link below:\n # https://github.com/rapidsai/cudf/issues/10314\n if abs(offset) > len(self):\n if fill_value is None:\n return column_empty_like(self, masked=True)\n else:\n return full(len(self), fill_value, dtype=self.dtype)\n return libcudf.copying.shift(self, offset, fill_value)\n\n @property\n def valid_count(self) -> int:\n \"\"\"Number of non-null values\"\"\"\n return len(self) - self.null_count\n\n @property\n def nullmask(self) -> Buffer:\n \"\"\"The gpu buffer for the null-mask\"\"\"\n if not self.nullable:\n raise ValueError(\"Column has no null mask\")\n return self.mask_array_view\n\n def copy(self: T, deep: bool = True) -> T:\n \"\"\"Columns are immutable, so a deep copy produces a copy of the\n underlying data and mask and a shallow copy creates a new column and\n copies the references of the data and mask.\n \"\"\"\n if deep:\n result = libcudf.copying.copy_column(self)\n return cast(T, result._with_type_metadata(self.dtype))\n else:\n return cast(\n T,\n build_column(\n self.base_data,\n self.dtype,\n mask=self.base_mask,\n size=self.size,\n offset=self.offset,\n children=self.base_children,\n ),\n )\n\n def view(self, dtype: Dtype) -> ColumnBase:\n \"\"\"\n View the data underlying a column as different dtype.\n The source column must divide evenly into the size of\n the desired data type. 
Columns with nulls may only be\n viewed as dtypes with size equal to source dtype size\n\n Parameters\n ----------\n dtype : NumPy dtype, string\n The dtype to view the data as\n\n \"\"\"\n\n dtype = cudf.dtype(dtype)\n\n if dtype.kind in (\"o\", \"u\", \"s\"):\n raise TypeError(\n \"Bytes viewed as str without metadata is ambiguous\"\n )\n\n if self.dtype.itemsize == dtype.itemsize:\n return build_column(\n self.base_data,\n dtype=dtype,\n mask=self.base_mask,\n size=self.size,\n offset=self.offset,\n )\n\n else:\n if self.null_count > 0:\n raise ValueError(\n \"Can not produce a view of a column with nulls\"\n )\n\n if (self.size * self.dtype.itemsize) % dtype.itemsize:\n raise ValueError(\n f\"Can not divide {self.size * self.dtype.itemsize}\"\n + f\" total bytes into {dtype} with size {dtype.itemsize}\"\n )\n\n # This assertion prevents mypy errors below.\n assert self.base_data is not None\n new_buf_ptr = (\n self.base_data.ptr + self.offset * self.dtype.itemsize\n )\n new_buf_size = self.size * self.dtype.itemsize\n view_buf = Buffer(\n data=new_buf_ptr,\n size=new_buf_size,\n owner=self.base_data._owner,\n )\n return build_column(view_buf, dtype=dtype)\n\n def element_indexing(self, index: int):\n \"\"\"Default implementation for indexing to an element\n\n Raises\n ------\n ``IndexError`` if out-of-bound\n \"\"\"\n idx = np.int32(index)\n if idx < 0:\n idx = len(self) + idx\n if idx > len(self) - 1 or idx < 0:\n raise IndexError(\"single positional indexer is out-of-bounds\")\n\n return libcudf.copying.get_element(self, idx).value\n\n def slice(self, start: int, stop: int, stride: int = None) -> ColumnBase:\n stride = 1 if stride is None else stride\n if start < 0:\n start = start + len(self)\n if stop < 0 and not (stride < 0 and stop == -1):\n stop = stop + len(self)\n if (stride > 0 and start >= stop) or (stride < 0 and start <= stop):\n return column_empty(0, self.dtype, masked=True)\n # compute mask slice\n if stride == 1:\n return libcudf.copying.column_slice(self, [start, stop])[\n 0\n ]._with_type_metadata(self.dtype)\n else:\n # Need to create a gather map for given slice with stride\n gather_map = arange(\n start=start,\n stop=stop,\n step=stride,\n dtype=cudf.dtype(np.int32),\n )\n return self.take(gather_map)\n\n def __getitem__(self, arg) -> Union[ScalarLike, ColumnBase]:\n if _is_scalar_or_zero_d_array(arg):\n return self.element_indexing(int(arg))\n elif isinstance(arg, slice):\n start, stop, stride = arg.indices(len(self))\n return self.slice(start, stop, stride)\n else:\n arg = as_column(arg)\n if len(arg) == 0:\n arg = as_column([], dtype=\"int32\")\n if is_integer_dtype(arg.dtype):\n return self.take(arg)\n if is_bool_dtype(arg.dtype):\n return self.apply_boolean_mask(arg)\n raise NotImplementedError(type(arg))\n\n def __setitem__(self, key: Any, value: Any):\n \"\"\"\n Set the value of ``self[key]`` to ``value``.\n\n If ``value`` and ``self`` are of different types, ``value`` is coerced\n to ``self.dtype``. 
Assumes ``self`` and ``value`` are index-aligned.\n \"\"\"\n\n # Normalize value to scalar/column\n value_normalized = (\n cudf.Scalar(value, dtype=self.dtype)\n if is_scalar(value)\n else as_column(value, dtype=self.dtype)\n )\n\n out: Optional[ColumnBase] # If None, no need to perform mimic inplace.\n if isinstance(key, slice):\n out = self._scatter_by_slice(key, value_normalized)\n else:\n key = as_column(key)\n if not isinstance(key, cudf.core.column.NumericalColumn):\n raise ValueError(f\"Invalid scatter map type {key.dtype}.\")\n out = self._scatter_by_column(key, value_normalized)\n\n if out:\n self._mimic_inplace(out, inplace=True)\n\n def _scatter_by_slice(\n self, key: Slice, value: Union[cudf.core.scalar.Scalar, ColumnBase]\n ) -> Optional[ColumnBase]:\n \"\"\"If this function returns None, it's either a no-op (slice is empty),\n or the inplace replacement is already performed (fill-in-place).\n \"\"\"\n start, stop, step = key.indices(len(self))\n if start >= stop:\n return None\n num_keys = (stop - start) // step\n\n self._check_scatter_key_length(num_keys, value)\n\n if step == 1:\n if isinstance(value, cudf.core.scalar.Scalar):\n return self._fill(value, start, stop, inplace=True)\n else:\n return libcudf.copying.copy_range(\n value, self, 0, num_keys, start, stop, False\n )\n\n # step != 1, create a scatter map with arange\n scatter_map = arange(\n start=start, stop=stop, step=step, dtype=cudf.dtype(np.int32),\n )\n\n return self._scatter_by_column(scatter_map, value)\n\n def _scatter_by_column(\n self,\n key: cudf.core.column.NumericalColumn,\n value: Union[cudf.core.scalar.Scalar, ColumnBase],\n ) -> ColumnBase:\n if is_bool_dtype(key.dtype):\n # `key` is boolean mask\n if len(key) != len(self):\n raise ValueError(\n \"Boolean mask must be of same length as column\"\n )\n if isinstance(value, ColumnBase) and len(self) == len(value):\n # Both value and key are aligned to self. Thus, the values\n # corresponding to the false values in key should be\n # ignored.\n value = value.apply_boolean_mask(key)\n # After applying boolean mask, the length of value equals\n # the number of elements to scatter, we can skip computing\n # the sum of ``key`` below.\n num_keys = len(value)\n else:\n # Compute the number of element to scatter by summing all\n # `True`s in the boolean mask.\n num_keys = key.sum()\n else:\n # `key` is integer scatter map\n num_keys = len(key)\n\n self._check_scatter_key_length(num_keys, value)\n\n try:\n if is_bool_dtype(key.dtype):\n return libcudf.copying.boolean_mask_scatter(\n [value], [self], key\n )[0]._with_type_metadata(self.dtype)\n else:\n return libcudf.copying.scatter([value], key, [self])[\n 0\n ]._with_type_metadata(self.dtype)\n except RuntimeError as e:\n if \"out of bounds\" in str(e):\n raise IndexError(\n f\"index out of bounds for column of size {len(self)}\"\n ) from e\n raise\n\n def _check_scatter_key_length(\n self, num_keys: int, value: Union[cudf.core.scalar.Scalar, ColumnBase]\n ):\n \"\"\"`num_keys` is the number of keys to scatter. 
Should equal to the\n number of rows in ``value`` if ``value`` is a column.\n \"\"\"\n if isinstance(value, ColumnBase):\n if len(value) != num_keys:\n msg = (\n f\"Size mismatch: cannot set value \"\n f\"of size {len(value)} to indexing result of size \"\n f\"{num_keys}\"\n )\n raise ValueError(msg)\n\n def fillna(\n self: T,\n value: Any = None,\n method: builtins.str = None,\n dtype: Dtype = None,\n ) -> T:\n \"\"\"Fill null values with ``value``.\n\n Returns a copy with null filled.\n \"\"\"\n return libcudf.replace.replace_nulls(\n input_col=self, replacement=value, method=method, dtype=dtype\n )\n\n def isnull(self) -> ColumnBase:\n \"\"\"Identify missing values in a Column.\"\"\"\n result = libcudf.unary.is_null(self)\n\n if self.dtype.kind == \"f\":\n # Need to consider `np.nan` values incase\n # of a float column\n result = result | libcudf.unary.is_nan(self)\n\n return result\n\n def notnull(self) -> ColumnBase:\n \"\"\"Identify non-missing values in a Column.\"\"\"\n result = libcudf.unary.is_valid(self)\n\n if self.dtype.kind == \"f\":\n # Need to consider `np.nan` values incase\n # of a float column\n result = result & libcudf.unary.is_non_nan(self)\n\n return result\n\n def find_first_value(\n self, value: ScalarLike, closest: bool = False\n ) -> int:\n \"\"\"\n Returns offset of first value that matches\n \"\"\"\n # FIXME: Inefficient, may be need a libcudf api\n index = cudf.core.index.RangeIndex(0, stop=len(self))\n indices = index.take(self == value)\n if not len(indices):\n raise ValueError(\"value not found\")\n return indices[0]\n\n def find_last_value(self, value: ScalarLike, closest: bool = False) -> int:\n \"\"\"\n Returns offset of last value that matches\n \"\"\"\n # FIXME: Inefficient, may be need a libcudf api\n index = cudf.core.index.RangeIndex(0, stop=len(self))\n indices = index.take(self == value)\n if not len(indices):\n raise ValueError(\"value not found\")\n return indices[-1]\n\n def append(self, other: ColumnBase) -> ColumnBase:\n return concat_columns([self, as_column(other)])\n\n def quantile(\n self, q: Union[float, Sequence[float]], interpolation: str, exact: bool\n ) -> ColumnBase:\n raise TypeError(f\"cannot perform quantile with type {self.dtype}\")\n\n def take(\n self: T, indices: ColumnBase, nullify: bool = False, check_bounds=True\n ) -> T:\n \"\"\"Return Column by taking values from the corresponding *indices*.\n\n Skip bounds checking if check_bounds is False.\n Set rows to null for all out of bound indices if nullify is `True`.\n \"\"\"\n # Handle zero size\n if indices.size == 0:\n return cast(T, column_empty_like(self, newsize=0))\n\n # TODO: For performance, the check and conversion of gather map should\n # be done by the caller. This check will be removed in future release.\n if not is_integer_dtype(indices.dtype):\n indices = indices.astype(\"int32\")\n if not libcudf.copying._gather_map_is_valid(\n indices, len(self), check_bounds, nullify\n ):\n raise IndexError(\"Gather map index is out of bounds.\")\n\n return libcudf.copying.gather([self], indices, nullify=nullify)[\n 0\n ]._with_type_metadata(self.dtype)\n\n def isin(self, values: Sequence) -> ColumnBase:\n \"\"\"Check whether values are contained in the Column.\n\n Parameters\n ----------\n values : set or list-like\n The sequence of values to test. Passing in a single string will\n raise a TypeError. 
Instead, turn a single string into a list\n of one element.\n\n Returns\n -------\n result: Column\n Column of booleans indicating if each element is in values.\n \"\"\"\n try:\n lhs, rhs = self._process_values_for_isin(values)\n res = lhs._isin_earlystop(rhs)\n if res is not None:\n return res\n except ValueError:\n # pandas functionally returns all False when cleansing via\n # typecasting fails\n return full(len(self), False, dtype=\"bool\")\n\n return lhs._obtain_isin_result(rhs)\n\n def _process_values_for_isin(\n self, values: Sequence\n ) -> Tuple[ColumnBase, ColumnBase]:\n \"\"\"\n Helper function for `isin` which pre-process `values` based on `self`.\n \"\"\"\n lhs = self\n rhs = as_column(values, nan_as_null=False)\n if lhs.null_count == len(lhs):\n lhs = lhs.astype(rhs.dtype)\n elif rhs.null_count == len(rhs):\n rhs = rhs.astype(lhs.dtype)\n return lhs, rhs\n\n def _isin_earlystop(self, rhs: ColumnBase) -> Union[ColumnBase, None]:\n \"\"\"\n Helper function for `isin` which determines possibility of\n early-stopping or not.\n \"\"\"\n if self.dtype != rhs.dtype:\n if self.null_count and rhs.null_count:\n return self.isnull()\n else:\n return cudf.core.column.full(len(self), False, dtype=\"bool\")\n elif self.null_count == 0 and (rhs.null_count == len(rhs)):\n return cudf.core.column.full(len(self), False, dtype=\"bool\")\n else:\n return None\n\n def _obtain_isin_result(self, rhs: ColumnBase) -> ColumnBase:\n \"\"\"\n Helper function for `isin` which merges `self` & `rhs`\n to determine what values of `rhs` exist in `self`.\n \"\"\"\n ldf = cudf.DataFrame({\"x\": self, \"orig_order\": arange(len(self))})\n rdf = cudf.DataFrame(\n {\"x\": rhs, \"bool\": full(len(rhs), True, dtype=\"bool\")}\n )\n res = ldf.merge(rdf, on=\"x\", how=\"left\").sort_values(by=\"orig_order\")\n res = res.drop_duplicates(subset=\"orig_order\", ignore_index=True)\n return res._data[\"bool\"].fillna(False)\n\n def as_mask(self) -> Buffer:\n \"\"\"Convert booleans to bitmask\n\n Returns\n -------\n Buffer\n \"\"\"\n\n if self.has_nulls():\n raise ValueError(\"Column must have no nulls.\")\n\n return bools_to_mask(self)\n\n @property\n def is_unique(self) -> bool:\n return self.distinct_count() == len(self)\n\n @property\n def is_monotonic_increasing(self) -> bool:\n return not self.has_nulls() and self.as_frame()._is_sorted(\n ascending=None, null_position=None\n )\n\n @property\n def is_monotonic_decreasing(self) -> bool:\n return not self.has_nulls() and self.as_frame()._is_sorted(\n ascending=[False], null_position=None\n )\n\n def get_slice_bound(\n self, label: ScalarLike, side: builtins.str, kind: builtins.str\n ) -> int:\n \"\"\"\n Calculate slice bound that corresponds to given label.\n Returns leftmost (one-past-the-rightmost if ``side=='right'``) position\n of given label.\n Parameters\n ----------\n label : Scalar\n side : {'left', 'right'}\n kind : {'ix', 'loc', 'getitem'}\n \"\"\"\n if kind not in {\"ix\", \"loc\", \"getitem\", None}:\n raise ValueError(\n f\"Invalid value for ``kind`` parameter,\"\n f\" must be either one of the following: \"\n f\"{'ix', 'loc', 'getitem', None}, but found: {kind}\"\n )\n if side not in {\"left\", \"right\"}:\n raise ValueError(\n \"Invalid value for side kwarg,\"\n \" must be either 'left' or 'right': %s\" % (side,)\n )\n\n # TODO: Handle errors/missing keys correctly\n # Not currently using `kind` argument.\n if side == \"left\":\n return self.find_first_value(label, closest=True)\n elif side == \"right\":\n return self.find_last_value(label, closest=True) + 
1\n else:\n raise ValueError(f\"Invalid value for side: {side}\")\n\n def sort_by_values(\n self: ColumnBase,\n ascending: bool = True,\n na_position: builtins.str = \"last\",\n ) -> Tuple[ColumnBase, \"cudf.core.column.NumericalColumn\"]:\n col_inds = self.as_frame()._get_sorted_inds(\n ascending=ascending, na_position=na_position\n )\n col_keys = self.take(col_inds)\n return col_keys, col_inds\n\n def distinct_count(\n self, method: builtins.str = \"sort\", dropna: bool = True\n ) -> int:\n if method != \"sort\":\n msg = \"non sort based distinct_count() not implemented yet\"\n raise NotImplementedError(msg)\n try:\n return self._distinct_count[dropna]\n except KeyError:\n self._distinct_count[dropna] = cpp_distinct_count(\n self, ignore_nulls=dropna\n )\n return self._distinct_count[dropna]\n\n def can_cast_safely(self, to_dtype: Dtype) -> bool:\n raise NotImplementedError()\n\n def astype(self, dtype: Dtype, **kwargs) -> ColumnBase:\n if is_categorical_dtype(dtype):\n return self.as_categorical_column(dtype, **kwargs)\n\n dtype = (\n pandas_dtypes_alias_to_cudf_alias.get(dtype, dtype)\n if isinstance(dtype, str)\n else pandas_dtypes_to_np_dtypes.get(dtype, dtype)\n )\n if _is_non_decimal_numeric_dtype(dtype):\n return self.as_numerical_column(dtype, **kwargs)\n elif is_categorical_dtype(dtype):\n return self.as_categorical_column(dtype, **kwargs)\n elif cudf.dtype(dtype).type in {\n np.str_,\n np.object_,\n str,\n }:\n return self.as_string_column(dtype, **kwargs)\n elif is_list_dtype(dtype):\n if not self.dtype == dtype:\n raise NotImplementedError(\n \"Casting list columns not currently supported\"\n )\n return self\n elif is_struct_dtype(dtype):\n if not self.dtype == dtype:\n raise NotImplementedError(\n \"Casting struct columns not currently supported\"\n )\n return self\n elif is_interval_dtype(self.dtype):\n return self.as_interval_column(dtype, **kwargs)\n elif is_decimal_dtype(dtype):\n return self.as_decimal_column(dtype, **kwargs)\n elif np.issubdtype(cast(Any, dtype), np.datetime64):\n return self.as_datetime_column(dtype, **kwargs)\n elif np.issubdtype(cast(Any, dtype), np.timedelta64):\n return self.as_timedelta_column(dtype, **kwargs)\n else:\n return self.as_numerical_column(dtype, **kwargs)\n\n def as_categorical_column(self, dtype, **kwargs) -> ColumnBase:\n if \"ordered\" in kwargs:\n ordered = kwargs[\"ordered\"]\n else:\n ordered = False\n\n sr = cudf.Series(self)\n\n # Re-label self w.r.t. 
the provided categories\n if isinstance(dtype, (cudf.CategoricalDtype, pd.CategoricalDtype)):\n labels = sr._label_encoding(cats=dtype.categories)\n if \"ordered\" in kwargs:\n warnings.warn(\n \"Ignoring the `ordered` parameter passed in `**kwargs`, \"\n \"will be using `ordered` parameter of CategoricalDtype\"\n )\n\n return build_categorical_column(\n categories=dtype.categories,\n codes=labels._column,\n mask=self.mask,\n ordered=dtype.ordered,\n )\n\n cats = sr.unique().astype(sr.dtype)\n label_dtype = min_unsigned_type(len(cats))\n labels = sr._label_encoding(\n cats=cats, dtype=label_dtype, na_sentinel=1\n )\n\n # columns include null index in factorization; remove:\n if self.has_nulls():\n cats = cats._column.dropna(drop_nan=False)\n min_type = min_unsigned_type(len(cats), 8)\n labels = labels - 1\n if cudf.dtype(min_type).itemsize < labels.dtype.itemsize:\n labels = labels.astype(min_type)\n\n return build_categorical_column(\n categories=cats,\n codes=labels._column,\n mask=self.mask,\n ordered=ordered,\n )\n\n def as_numerical_column(\n self, dtype: Dtype, **kwargs\n ) -> \"cudf.core.column.NumericalColumn\":\n raise NotImplementedError\n\n def as_datetime_column(\n self, dtype: Dtype, **kwargs\n ) -> \"cudf.core.column.DatetimeColumn\":\n raise NotImplementedError\n\n def as_interval_column(\n self, dtype: Dtype, **kwargs\n ) -> \"cudf.core.column.IntervalColumn\":\n raise NotImplementedError\n\n def as_timedelta_column(\n self, dtype: Dtype, **kwargs\n ) -> \"cudf.core.column.TimeDeltaColumn\":\n raise NotImplementedError\n\n def as_string_column(\n self, dtype: Dtype, format=None, **kwargs\n ) -> \"cudf.core.column.StringColumn\":\n raise NotImplementedError\n\n def as_decimal_column(\n self, dtype: Dtype, **kwargs\n ) -> Union[\"cudf.core.column.decimal.DecimalBaseColumn\"]:\n raise NotImplementedError\n\n def as_decimal128_column(\n self, dtype: Dtype, **kwargs\n ) -> \"cudf.core.column.Decimal128Column\":\n raise NotImplementedError\n\n def as_decimal64_column(\n self, dtype: Dtype, **kwargs\n ) -> \"cudf.core.column.Decimal64Column\":\n raise NotImplementedError\n\n def as_decimal32_column(\n self, dtype: Dtype, **kwargs\n ) -> \"cudf.core.column.Decimal32Column\":\n raise NotImplementedError\n\n def apply_boolean_mask(self, mask) -> ColumnBase:\n mask = as_column(mask)\n if not is_bool_dtype(mask.dtype):\n raise ValueError(\"boolean_mask is not boolean type.\")\n\n return apply_boolean_mask([self], mask)[0]._with_type_metadata(\n self.dtype\n )\n\n def argsort(\n self, ascending: bool = True, na_position: builtins.str = \"last\"\n ) -> ColumnBase:\n\n return self.as_frame()._get_sorted_inds(\n ascending=ascending, na_position=na_position\n )\n\n def __arrow_array__(self, type=None):\n raise TypeError(\n \"Implicit conversion to a host PyArrow Array via __arrow_array__ \"\n \"is not allowed, To explicitly construct a PyArrow Array, \"\n \"consider using .to_arrow()\"\n )\n\n def __array__(self, dtype=None):\n raise TypeError(\n \"Implicit conversion to a host NumPy array via __array__ is not \"\n \"allowed. 
To explicitly construct a host array, consider using \"\n \".to_numpy()\"\n )\n\n @property\n def __cuda_array_interface__(self):\n raise NotImplementedError(\n f\"dtype {self.dtype} is not yet supported via \"\n \"`__cuda_array_interface__`\"\n )\n\n def __add__(self, other):\n return self.binary_operator(\"add\", other)\n\n def __sub__(self, other):\n return self.binary_operator(\"sub\", other)\n\n def __mul__(self, other):\n return self.binary_operator(\"mul\", other)\n\n def __eq__(self, other):\n return self.binary_operator(\"eq\", other)\n\n def __ne__(self, other):\n return self.binary_operator(\"ne\", other)\n\n def __or__(self, other):\n return self.binary_operator(\"or\", other)\n\n def __and__(self, other):\n return self.binary_operator(\"and\", other)\n\n def __floordiv__(self, other):\n return self.binary_operator(\"floordiv\", other)\n\n def __truediv__(self, other):\n return self.binary_operator(\"truediv\", other)\n\n def __mod__(self, other):\n return self.binary_operator(\"mod\", other)\n\n def __pow__(self, other):\n return self.binary_operator(\"pow\", other)\n\n def __lt__(self, other):\n return self.binary_operator(\"lt\", other)\n\n def __gt__(self, other):\n return self.binary_operator(\"gt\", other)\n\n def __le__(self, other):\n return self.binary_operator(\"le\", other)\n\n def __ge__(self, other):\n return self.binary_operator(\"ge\", other)\n\n def searchsorted(\n self,\n value,\n side: builtins.str = \"left\",\n ascending: bool = True,\n na_position: builtins.str = \"last\",\n ):\n values = as_column(value).as_frame()\n return self.as_frame().searchsorted(\n values, side, ascending=ascending, na_position=na_position\n )\n\n def unique(self) -> ColumnBase:\n \"\"\"\n Get unique values in the data\n \"\"\"\n # TODO: We could avoid performing `drop_duplicates` for\n # columns with values that already are unique.\n # Few things to note before we can do this optimization is\n # the following issue resolved:\n # https://github.com/rapidsai/cudf/issues/5286\n\n return drop_duplicates([self], keep=\"first\")[0]\n\n def serialize(self) -> Tuple[dict, list]:\n header: Dict[Any, Any] = {}\n frames = []\n header[\"type-serialized\"] = pickle.dumps(type(self))\n header[\"dtype\"] = self.dtype.str\n\n if self.data is not None:\n data_header, data_frames = self.data.serialize()\n header[\"data\"] = data_header\n frames.extend(data_frames)\n\n if self.mask is not None:\n mask_header, mask_frames = self.mask.serialize()\n header[\"mask\"] = mask_header\n frames.extend(mask_frames)\n\n header[\"frame_count\"] = len(frames)\n return header, frames\n\n @classmethod\n def deserialize(cls, header: dict, frames: list) -> ColumnBase:\n dtype = header[\"dtype\"]\n data = Buffer.deserialize(header[\"data\"], [frames[0]])\n mask = None\n if \"mask\" in header:\n mask = Buffer.deserialize(header[\"mask\"], [frames[1]])\n return build_column(\n data=data, dtype=dtype, mask=mask, size=header.get(\"size\", None)\n )\n\n def unary_operator(self, unaryop: builtins.str):\n raise TypeError(\n f\"Operation {unaryop} not supported for dtype {self.dtype}.\"\n )\n\n def binary_operator(\n self, op: builtins.str, other: BinaryOperand, reflect: bool = False\n ) -> ColumnBase:\n raise TypeError(\n f\"Operation {op} not supported between dtypes {self.dtype} and \"\n f\"{other.dtype}.\"\n )\n\n def normalize_binop_value(\n self, other: ScalarLike\n ) -> Union[ColumnBase, ScalarLike]:\n raise NotImplementedError\n\n def _minmax(self, skipna: bool = None):\n result_col = 
self._process_for_reduction(skipna=skipna)\n if isinstance(result_col, ColumnBase):\n return libcudf.reduce.minmax(result_col)\n return result_col\n\n def _reduce(\n self, op: str, skipna: bool = None, min_count: int = 0, *args, **kwargs\n ) -> ScalarLike:\n \"\"\"Compute {op} of column values.\n\n skipna : bool\n Whether or not na values must be skipped.\n min_count : int, default 0\n The minimum number of entries for the reduction, otherwise the\n reduction returns NaN.\n \"\"\"\n preprocessed = self._process_for_reduction(\n skipna=skipna, min_count=min_count\n )\n if isinstance(preprocessed, ColumnBase):\n return libcudf.reduce.reduce(op, preprocessed, **kwargs)\n return preprocessed\n\n @property\n def contains_na_entries(self) -> bool:\n return self.null_count != 0\n\n def _process_for_reduction(\n self, skipna: bool = None, min_count: int = 0\n ) -> Union[ColumnBase, ScalarLike]:\n skipna = True if skipna is None else skipna\n\n if skipna:\n if self.has_nulls():\n result_col = self.dropna()\n else:\n if self.has_nulls():\n return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)\n\n result_col = self\n\n if min_count > 0:\n valid_count = len(result_col) - result_col.null_count\n if valid_count < min_count:\n return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)\n elif min_count < 0:\n warnings.warn(\n f\"min_count value cannot be negative({min_count}), will \"\n f\"default to 0.\"\n )\n return result_col\n\n def _reduction_result_dtype(self, reduction_op: str) -> Dtype:\n \"\"\"\n Determine the correct dtype to pass to libcudf based on\n the input dtype, data dtype, and specific reduction op\n \"\"\"\n return self.dtype\n\n def _with_type_metadata(self: ColumnBase, dtype: Dtype) -> ColumnBase:\n \"\"\"\n Copies type metadata from self onto other, returning a new column.\n\n When ``self`` is a nested column, recursively apply this function on\n the children of ``self``.\n \"\"\"\n return self\n\n\ndef column_empty_like(\n column: ColumnBase,\n dtype: Dtype = None,\n masked: bool = False,\n newsize: int = None,\n) -> ColumnBase:\n \"\"\"Allocate a new column like the given *column*\"\"\"\n if dtype is None:\n dtype = column.dtype\n row_count = len(column) if newsize is None else newsize\n\n if (\n hasattr(column, \"dtype\")\n and is_categorical_dtype(column.dtype)\n and dtype == column.dtype\n ):\n column = cast(\"cudf.core.column.CategoricalColumn\", column)\n codes = column_empty_like(column.codes, masked=masked, newsize=newsize)\n return build_column(\n data=None,\n dtype=dtype,\n mask=codes.base_mask,\n children=(as_column(codes.base_data, dtype=codes.dtype),),\n size=codes.size,\n )\n\n return column_empty(row_count, dtype, masked)\n\n\ndef column_empty_like_same_mask(\n column: ColumnBase, dtype: Dtype\n) -> ColumnBase:\n \"\"\"Create a new empty Column with the same length and the same mask.\n\n Parameters\n ----------\n dtype : np.dtype like\n The dtype of the data buffer.\n \"\"\"\n result = column_empty_like(column, dtype)\n if column.nullable:\n result = result.set_mask(column.mask)\n return result\n\n\ndef column_empty(\n row_count: int, dtype: Dtype = \"object\", masked: bool = False\n) -> ColumnBase:\n \"\"\"Allocate a new column like the given row_count and dtype.\"\"\"\n dtype = cudf.dtype(dtype)\n children = () # type: Tuple[ColumnBase, ...]\n\n if is_struct_dtype(dtype):\n data = None\n children = tuple(\n column_empty(row_count, field_dtype)\n for field_dtype in dtype.fields.values()\n )\n elif is_list_dtype(dtype):\n data = None\n children = (\n full(row_count + 
1, 0, dtype=\"int32\"),\n column_empty(row_count, dtype=dtype.element_type),\n )\n elif is_categorical_dtype(dtype):\n data = None\n children = (\n build_column(\n data=Buffer.empty(row_count * cudf.dtype(\"int32\").itemsize),\n dtype=\"int32\",\n ),\n )\n elif dtype.kind in \"OU\" and not is_decimal_dtype(dtype):\n data = None\n children = (\n full(row_count + 1, 0, dtype=\"int32\"),\n build_column(\n data=Buffer.empty(row_count * cudf.dtype(\"int8\").itemsize),\n dtype=\"int8\",\n ),\n )\n else:\n data = Buffer.empty(row_count * dtype.itemsize)\n\n if masked:\n mask = create_null_mask(row_count, state=MaskState.ALL_NULL)\n else:\n mask = None\n\n return build_column(\n data, dtype, mask=mask, size=row_count, children=children\n )\n\n\ndef build_column(\n data: Union[Buffer, None],\n dtype: Dtype,\n *,\n size: int = None,\n mask: Buffer = None,\n offset: int = 0,\n null_count: int = None,\n children: Tuple[ColumnBase, ...] = (),\n) -> ColumnBase:\n \"\"\"\n Build a Column of the appropriate type from the given parameters\n\n Parameters\n ----------\n data : Buffer\n The data buffer (can be None if constructing certain Column\n types like StringColumn, ListColumn, or CategoricalColumn)\n dtype\n The dtype associated with the Column to construct\n mask : Buffer, optional\n The mask buffer\n size : int, optional\n offset : int, optional\n children : tuple, optional\n \"\"\"\n dtype = cudf.dtype(dtype)\n\n if _is_non_decimal_numeric_dtype(dtype):\n assert data is not None\n return cudf.core.column.NumericalColumn(\n data=data,\n dtype=dtype,\n mask=mask,\n size=size,\n offset=offset,\n null_count=null_count,\n )\n if is_categorical_dtype(dtype):\n if not len(children) == 1:\n raise ValueError(\n \"Must specify exactly one child column for CategoricalColumn\"\n )\n if not isinstance(children[0], ColumnBase):\n raise TypeError(\"children must be a tuple of Columns\")\n return cudf.core.column.CategoricalColumn(\n dtype=dtype,\n mask=mask,\n size=size,\n offset=offset,\n null_count=null_count,\n children=children,\n )\n elif dtype.type is np.datetime64:\n if data is None:\n raise TypeError(\"Must specify data buffer\")\n return cudf.core.column.DatetimeColumn(\n data=data,\n dtype=dtype,\n mask=mask,\n size=size,\n offset=offset,\n null_count=null_count,\n )\n elif dtype.type is np.timedelta64:\n if data is None:\n raise TypeError(\"Must specify data buffer\")\n return cudf.core.column.TimeDeltaColumn(\n data=data,\n dtype=dtype,\n mask=mask,\n size=size,\n offset=offset,\n null_count=null_count,\n )\n elif dtype.type in (np.object_, np.str_):\n return cudf.core.column.StringColumn(\n mask=mask,\n size=size,\n offset=offset,\n children=children,\n null_count=null_count,\n )\n elif is_list_dtype(dtype):\n return cudf.core.column.ListColumn(\n size=size,\n dtype=dtype,\n mask=mask,\n offset=offset,\n null_count=null_count,\n children=children,\n )\n elif is_interval_dtype(dtype):\n return cudf.core.column.IntervalColumn(\n dtype=dtype,\n mask=mask,\n size=size,\n offset=offset,\n children=children,\n null_count=null_count,\n )\n elif is_struct_dtype(dtype):\n if size is None:\n raise TypeError(\"Must specify size\")\n return cudf.core.column.StructColumn(\n data=data,\n dtype=dtype,\n size=size,\n offset=offset,\n mask=mask,\n null_count=null_count,\n children=children,\n )\n elif is_decimal64_dtype(dtype):\n if size is None:\n raise TypeError(\"Must specify size\")\n return cudf.core.column.Decimal64Column(\n data=data,\n size=size,\n offset=offset,\n dtype=dtype,\n mask=mask,\n 
null_count=null_count,\n children=children,\n )\n elif is_decimal32_dtype(dtype):\n if size is None:\n raise TypeError(\"Must specify size\")\n return cudf.core.column.Decimal32Column(\n data=data,\n size=size,\n offset=offset,\n dtype=dtype,\n mask=mask,\n null_count=null_count,\n children=children,\n )\n elif is_decimal128_dtype(dtype):\n if size is None:\n raise TypeError(\"Must specify size\")\n return cudf.core.column.Decimal128Column(\n data=data,\n size=size,\n offset=offset,\n dtype=dtype,\n mask=mask,\n null_count=null_count,\n children=children,\n )\n elif is_interval_dtype(dtype):\n return cudf.core.column.IntervalColumn(\n dtype=dtype,\n mask=mask,\n size=size,\n offset=offset,\n null_count=null_count,\n children=children,\n )\n else:\n raise TypeError(f\"Unrecognized dtype: {dtype}\")\n\n\ndef build_categorical_column(\n categories: ColumnBase,\n codes: ColumnBase,\n mask: Buffer = None,\n size: int = None,\n offset: int = 0,\n null_count: int = None,\n ordered: bool = None,\n) -> \"cudf.core.column.CategoricalColumn\":\n \"\"\"\n Build a CategoricalColumn\n\n Parameters\n ----------\n categories : Column\n Column of categories\n codes : Column\n Column of codes, the size of the resulting Column will be\n the size of `codes`\n mask : Buffer\n Null mask\n size : int, optional\n offset : int, optional\n ordered : bool\n Indicates whether the categories are ordered\n \"\"\"\n codes_dtype = min_unsigned_type(len(categories))\n codes = as_column(codes)\n if codes.dtype != codes_dtype:\n codes = codes.astype(codes_dtype)\n\n dtype = CategoricalDtype(categories=categories, ordered=ordered)\n\n result = build_column(\n data=None,\n dtype=dtype,\n mask=mask,\n size=size,\n offset=offset,\n null_count=null_count,\n children=(codes,),\n )\n return cast(\"cudf.core.column.CategoricalColumn\", result)\n\n\ndef build_interval_column(\n left_col,\n right_col,\n mask=None,\n size=None,\n offset=0,\n null_count=None,\n closed=\"right\",\n):\n \"\"\"\n Build an IntervalColumn\n\n Parameters\n ----------\n left_col : Column\n Column of values representing the left of the interval\n right_col : Column\n Column of representing the right of the interval\n mask : Buffer\n Null mask\n size : int, optional\n offset : int, optional\n closed : {\"left\", \"right\", \"both\", \"neither\"}, default \"right\"\n Whether the intervals are closed on the left-side, right-side,\n both or neither.\n \"\"\"\n left = as_column(left_col)\n right = as_column(right_col)\n if closed not in {\"left\", \"right\", \"both\", \"neither\"}:\n closed = \"right\"\n if type(left_col) is not list:\n dtype = IntervalDtype(left_col.dtype, closed)\n else:\n dtype = IntervalDtype(\"int64\", closed)\n size = len(left)\n return build_column(\n data=None,\n dtype=dtype,\n mask=mask,\n size=size,\n offset=offset,\n null_count=null_count,\n children=(left, right),\n )\n\n\ndef build_list_column(\n indices: ColumnBase,\n elements: ColumnBase,\n mask: Buffer = None,\n size: int = None,\n offset: int = 0,\n null_count: int = None,\n) -> \"cudf.core.column.ListColumn\":\n \"\"\"\n Build a ListColumn\n\n Parameters\n ----------\n indices : ColumnBase\n Column of list indices\n elements : ColumnBase\n Column of list elements\n mask: Buffer\n Null mask\n size: int, optional\n offset: int, optional\n \"\"\"\n dtype = ListDtype(element_type=elements.dtype)\n\n result = build_column(\n data=None,\n dtype=dtype,\n mask=mask,\n size=size,\n offset=offset,\n null_count=null_count,\n children=(indices, elements),\n )\n\n return 
cast(\"cudf.core.column.ListColumn\", result)\n\n\ndef build_struct_column(\n names: Sequence[str],\n children: Tuple[ColumnBase, ...],\n dtype: Optional[Dtype] = None,\n mask: Buffer = None,\n size: int = None,\n offset: int = 0,\n null_count: int = None,\n) -> \"cudf.core.column.StructColumn\":\n \"\"\"\n Build a StructColumn\n\n Parameters\n ----------\n names : sequence of strings\n Field names to map to children dtypes, must be strings.\n children : tuple\n\n mask: Buffer\n Null mask\n size: int, optional\n offset: int, optional\n \"\"\"\n if dtype is None:\n dtype = StructDtype(\n fields={name: col.dtype for name, col in zip(names, children)}\n )\n\n result = build_column(\n data=None,\n dtype=dtype,\n mask=mask,\n size=size,\n offset=offset,\n null_count=null_count,\n children=children,\n )\n\n return cast(\"cudf.core.column.StructColumn\", result)\n\n\ndef _make_copy_replacing_NaT_with_null(column):\n \"\"\"Return a copy with NaT values replaced with nulls.\"\"\"\n if np.issubdtype(column.dtype, np.timedelta64):\n na_value = np.timedelta64(\"NaT\", column.time_unit)\n elif np.issubdtype(column.dtype, np.datetime64):\n na_value = np.datetime64(\"NaT\", column.time_unit)\n else:\n raise ValueError(\"This type does not support replacing NaT with null.\")\n\n null = column_empty_like(column, masked=True, newsize=1)\n out_col = cudf._lib.replace.replace(\n column,\n build_column(\n Buffer(np.array([na_value], dtype=column.dtype).view(\"|u1\")),\n dtype=column.dtype,\n ),\n null,\n )\n return out_col\n\n\ndef as_column(\n arbitrary: Any,\n nan_as_null: bool = None,\n dtype: Dtype = None,\n length: int = None,\n):\n \"\"\"Create a Column from an arbitrary object\n\n Parameters\n ----------\n arbitrary : object\n Object to construct the Column from. See *Notes*.\n nan_as_null : bool, optional, default None\n If None (default), treats NaN values in arbitrary as null if there is\n no mask passed along with it. If True, combines the mask and NaNs to\n form a new validity mask. 
If False, leaves NaN values as is.\n dtype : optional\n Optionally typecast the constructed Column to the given\n dtype.\n length : int, optional\n If `arbitrary` is a scalar, broadcast into a Column of\n the given length.\n\n Returns\n -------\n A Column of the appropriate type and size.\n\n Notes\n -----\n Currently support inputs are:\n\n * ``Column``\n * ``Series``\n * ``Index``\n * Scalars (can be broadcasted to a specified `length`)\n * Objects exposing ``__cuda_array_interface__`` (e.g., numba device arrays)\n * Objects exposing ``__array_interface__``(e.g., numpy arrays)\n * pyarrow array\n * pandas.Categorical objects\n \"\"\"\n if isinstance(arbitrary, ColumnBase):\n if dtype is not None:\n return arbitrary.astype(dtype)\n else:\n return arbitrary\n\n elif isinstance(arbitrary, cudf.Series):\n data = arbitrary._column\n if dtype is not None:\n data = data.astype(dtype)\n elif isinstance(arbitrary, cudf.BaseIndex):\n data = arbitrary._values\n if dtype is not None:\n data = data.astype(dtype)\n\n elif hasattr(arbitrary, \"__cuda_array_interface__\"):\n desc = arbitrary.__cuda_array_interface__\n current_dtype = np.dtype(desc[\"typestr\"])\n\n arb_dtype = (\n np.dtype(\"float32\")\n if current_dtype == \"float16\"\n else cudf.dtype(current_dtype)\n )\n\n if desc.get(\"mask\", None) is not None:\n # Extract and remove the mask from arbitrary before\n # passing to cupy.asarray\n mask = _mask_from_cuda_array_interface_desc(arbitrary)\n arbitrary = SimpleNamespace(__cuda_array_interface__=desc.copy())\n arbitrary.__cuda_array_interface__[\"mask\"] = None\n desc = arbitrary.__cuda_array_interface__\n else:\n mask = None\n\n arbitrary = cupy.asarray(arbitrary)\n\n if arb_dtype != current_dtype:\n arbitrary = arbitrary.astype(arb_dtype)\n current_dtype = arb_dtype\n\n if (\n desc[\"strides\"] is not None\n and not (arbitrary.itemsize,) == arbitrary.strides\n ):\n arbitrary = cupy.ascontiguousarray(arbitrary)\n\n data = _data_from_cuda_array_interface_desc(arbitrary)\n col = build_column(data, dtype=current_dtype, mask=mask)\n\n if dtype is not None:\n col = col.astype(dtype)\n\n if isinstance(col, cudf.core.column.CategoricalColumn):\n return col\n elif np.issubdtype(col.dtype, np.floating):\n if nan_as_null or (mask is None and nan_as_null is None):\n mask = libcudf.transform.nans_to_nulls(col.fillna(np.nan))\n col = col.set_mask(mask)\n elif np.issubdtype(col.dtype, np.datetime64):\n if nan_as_null or (mask is None and nan_as_null is None):\n col = _make_copy_replacing_NaT_with_null(col)\n return col\n\n elif isinstance(arbitrary, (pa.Array, pa.ChunkedArray)):\n if isinstance(arbitrary, pa.lib.HalfFloatArray):\n raise NotImplementedError(\n \"Type casting from `float16` to `float32` is not \"\n \"yet supported in pyarrow, see: \"\n \"https://issues.apache.org/jira/browse/ARROW-3802\"\n )\n col = ColumnBase.from_arrow(arbitrary)\n\n if isinstance(arbitrary, pa.NullArray):\n new_dtype = cudf.dtype(arbitrary.type.to_pandas_dtype())\n if dtype is not None:\n # Cast the column to the `dtype` if specified.\n col = col.astype(dtype)\n elif len(arbitrary) == 0:\n # If the column is empty, it has to be\n # a `float64` dtype.\n col = col.astype(\"float64\")\n else:\n # If the null column is not empty, it has to\n # be of `object` dtype.\n col = col.astype(new_dtype)\n\n return col\n\n elif isinstance(arbitrary, (pd.Series, pd.Categorical)):\n if isinstance(arbitrary, pd.Series) and isinstance(\n arbitrary.array, pd.core.arrays.masked.BaseMaskedArray\n ):\n return as_column(arbitrary.array)\n if 
is_categorical_dtype(arbitrary):\n data = as_column(pa.array(arbitrary, from_pandas=True))\n elif is_interval_dtype(arbitrary.dtype):\n data = as_column(pa.array(arbitrary, from_pandas=True))\n elif arbitrary.dtype == np.bool_:\n data = as_column(cupy.asarray(arbitrary), dtype=arbitrary.dtype)\n elif arbitrary.dtype.kind in (\"f\"):\n arb_dtype = np.dtype(arbitrary.dtype)\n data = as_column(\n cupy.asarray(arbitrary, dtype=arb_dtype),\n nan_as_null=nan_as_null,\n dtype=dtype,\n )\n elif arbitrary.dtype.kind in (\"u\", \"i\"):\n data = as_column(\n cupy.asarray(arbitrary), nan_as_null=nan_as_null, dtype=dtype\n )\n else:\n pyarrow_array = pa.array(arbitrary, from_pandas=nan_as_null)\n if isinstance(pyarrow_array.type, pa.Decimal128Type):\n pyarrow_type = cudf.Decimal128Dtype.from_arrow(\n pyarrow_array.type\n )\n else:\n pyarrow_type = arbitrary.dtype\n data = as_column(pyarrow_array, dtype=pyarrow_type)\n if dtype is not None:\n data = data.astype(dtype)\n\n elif isinstance(arbitrary, (pd.Timestamp, pd.Timedelta)):\n # This will always treat NaTs as nulls since it's not technically a\n # discrete value like NaN\n data = as_column(pa.array(pd.Series([arbitrary]), from_pandas=True))\n if dtype is not None:\n data = data.astype(dtype)\n\n elif np.isscalar(arbitrary) and not isinstance(arbitrary, memoryview):\n length = length or 1\n if (\n (nan_as_null is True)\n and isinstance(arbitrary, (np.floating, float))\n and np.isnan(arbitrary)\n ):\n arbitrary = None\n if dtype is None:\n dtype = cudf.dtype(\"float64\")\n\n data = as_column(\n utils.scalar_broadcast_to(arbitrary, length, dtype=dtype)\n )\n if not nan_as_null and not is_decimal_dtype(data.dtype):\n if np.issubdtype(data.dtype, np.floating):\n data = data.fillna(np.nan)\n elif np.issubdtype(data.dtype, np.datetime64):\n data = data.fillna(np.datetime64(\"NaT\"))\n\n elif hasattr(arbitrary, \"__array_interface__\"):\n # CUDF assumes values are always contiguous\n desc = arbitrary.__array_interface__\n shape = desc[\"shape\"]\n arb_dtype = np.dtype(desc[\"typestr\"])\n # CUDF assumes values are always contiguous\n if len(shape) > 1:\n raise ValueError(\"Data must be 1-dimensional\")\n\n arbitrary = np.asarray(arbitrary)\n\n # Handle case that `arbitrary` elements are cupy arrays\n if (\n shape\n and shape[0]\n and hasattr(arbitrary[0], \"__cuda_array_interface__\")\n ):\n return as_column(\n cupy.asarray(arbitrary, dtype=arbitrary[0].dtype),\n nan_as_null=nan_as_null,\n dtype=dtype,\n length=length,\n )\n\n if not arbitrary.flags[\"C_CONTIGUOUS\"]:\n arbitrary = np.ascontiguousarray(arbitrary)\n\n if dtype is not None:\n arbitrary = arbitrary.astype(np.dtype(dtype))\n\n if arb_dtype.kind == \"M\":\n\n time_unit = get_time_unit(arbitrary)\n cast_dtype = time_unit in (\"D\", \"W\", \"M\", \"Y\")\n\n if cast_dtype:\n arbitrary = arbitrary.astype(cudf.dtype(\"datetime64[s]\"))\n\n buffer = Buffer(arbitrary.view(\"|u1\"))\n mask = None\n if nan_as_null is None or nan_as_null is True:\n data = build_column(buffer, dtype=arbitrary.dtype)\n data = _make_copy_replacing_NaT_with_null(data)\n mask = data.mask\n\n data = cudf.core.column.datetime.DatetimeColumn(\n data=buffer, mask=mask, dtype=arbitrary.dtype\n )\n elif arb_dtype.kind == \"m\":\n\n time_unit = get_time_unit(arbitrary)\n cast_dtype = time_unit in (\"D\", \"W\", \"M\", \"Y\")\n\n if cast_dtype:\n arbitrary = arbitrary.astype(cudf.dtype(\"timedelta64[s]\"))\n\n buffer = Buffer(arbitrary.view(\"|u1\"))\n mask = None\n if nan_as_null is None or nan_as_null is True:\n data = 
build_column(buffer, dtype=arbitrary.dtype)\n data = _make_copy_replacing_NaT_with_null(data)\n mask = data.mask\n\n data = cudf.core.column.timedelta.TimeDeltaColumn(\n data=buffer,\n size=len(arbitrary),\n mask=mask,\n dtype=arbitrary.dtype,\n )\n elif (\n arbitrary.size != 0\n and arb_dtype.kind in (\"O\")\n and isinstance(arbitrary[0], pd._libs.interval.Interval)\n ):\n # changing from pd array to series,possible arrow bug\n interval_series = pd.Series(arbitrary)\n data = as_column(\n pa.Array.from_pandas(interval_series), dtype=arbitrary.dtype,\n )\n if dtype is not None:\n data = data.astype(dtype)\n elif arb_dtype.kind in (\"O\", \"U\"):\n data = as_column(\n pa.Array.from_pandas(arbitrary), dtype=arbitrary.dtype\n )\n # There is no cast operation available for pa.Array from int to\n # str, Hence instead of handling in pa.Array block, we\n # will have to type-cast here.\n if dtype is not None:\n data = data.astype(dtype)\n elif arb_dtype.kind in (\"f\"):\n if arb_dtype == np.dtype(\"float16\"):\n arb_dtype = np.dtype(\"float32\")\n arb_dtype = cudf.dtype(arb_dtype if dtype is None else dtype)\n data = as_column(\n cupy.asarray(arbitrary, dtype=arb_dtype),\n nan_as_null=nan_as_null,\n )\n else:\n data = as_column(cupy.asarray(arbitrary), nan_as_null=nan_as_null)\n\n elif isinstance(arbitrary, pd.core.arrays.numpy_.PandasArray):\n if is_categorical_dtype(arbitrary.dtype):\n arb_dtype = arbitrary.dtype\n else:\n if arbitrary.dtype == pd.StringDtype():\n arb_dtype = cudf.dtype(\"O\")\n else:\n arb_dtype = (\n cudf.dtype(\"float32\")\n if arbitrary.dtype == \"float16\"\n else cudf.dtype(arbitrary.dtype)\n )\n if arb_dtype != arbitrary.dtype.numpy_dtype:\n arbitrary = arbitrary.astype(arb_dtype)\n if (\n arbitrary.size != 0\n and isinstance(arbitrary[0], pd._libs.interval.Interval)\n and arb_dtype.kind in (\"O\")\n ):\n # changing from pd array to series,possible arrow bug\n interval_series = pd.Series(arbitrary)\n data = as_column(\n pa.Array.from_pandas(interval_series), dtype=arb_dtype\n )\n elif arb_dtype.kind in (\"O\", \"U\"):\n data = as_column(pa.Array.from_pandas(arbitrary), dtype=arb_dtype)\n else:\n data = as_column(\n pa.array(\n arbitrary,\n from_pandas=True if nan_as_null is None else nan_as_null,\n ),\n nan_as_null=nan_as_null,\n )\n if dtype is not None:\n data = data.astype(dtype)\n elif isinstance(arbitrary, memoryview):\n data = as_column(\n np.asarray(arbitrary), dtype=dtype, nan_as_null=nan_as_null\n )\n elif isinstance(arbitrary, cudf.Scalar):\n data = ColumnBase.from_scalar(arbitrary, length if length else 1)\n elif isinstance(arbitrary, pd.core.arrays.masked.BaseMaskedArray):\n cudf_dtype = arbitrary._data.dtype\n\n data = Buffer(arbitrary._data.view(\"|u1\"))\n data = build_column(data, dtype=cudf_dtype)\n\n mask = arbitrary._mask\n mask = bools_to_mask(as_column(mask).unary_operator(\"not\"))\n\n data = data.set_mask(mask)\n else:\n try:\n data = as_column(\n memoryview(arbitrary), dtype=dtype, nan_as_null=nan_as_null\n )\n except TypeError:\n if dtype is not None:\n # Arrow throws a type error if the input is of\n # mixed-precision and cannot fit into the provided\n # decimal type properly, see:\n # https://github.com/apache/arrow/pull/9948\n # Hence we should let the exception propagate to\n # the user.\n if isinstance(dtype, cudf.core.dtypes.Decimal128Dtype):\n data = pa.array(\n arbitrary,\n type=pa.decimal128(\n precision=dtype.precision, scale=dtype.scale\n ),\n )\n return cudf.core.column.Decimal128Column.from_arrow(data)\n elif isinstance(dtype, 
cudf.core.dtypes.Decimal64Dtype):\n data = pa.array(\n arbitrary,\n type=pa.decimal128(\n precision=dtype.precision, scale=dtype.scale\n ),\n )\n return cudf.core.column.Decimal64Column.from_arrow(data)\n elif isinstance(dtype, cudf.core.dtypes.Decimal32Dtype):\n data = pa.array(\n arbitrary,\n type=pa.decimal128(\n precision=dtype.precision, scale=dtype.scale\n ),\n )\n return cudf.core.column.Decimal32Column.from_arrow(data)\n\n pa_type = None\n np_type = None\n try:\n if dtype is not None:\n if is_categorical_dtype(dtype) or is_interval_dtype(dtype):\n raise TypeError\n if is_list_dtype(dtype):\n data = pa.array(arbitrary)\n if type(data) not in (pa.ListArray, pa.NullArray):\n raise ValueError(\n \"Cannot create list column from given data\"\n )\n return as_column(data, nan_as_null=nan_as_null)\n elif isinstance(\n dtype, cudf.StructDtype\n ) and not isinstance(dtype, cudf.IntervalDtype):\n data = pa.array(arbitrary, type=dtype.to_arrow())\n return as_column(data, nan_as_null=nan_as_null)\n elif isinstance(dtype, cudf.core.dtypes.Decimal128Dtype):\n data = pa.array(\n arbitrary,\n type=pa.decimal128(\n precision=dtype.precision, scale=dtype.scale\n ),\n )\n return cudf.core.column.Decimal128Column.from_arrow(\n data\n )\n elif isinstance(dtype, cudf.core.dtypes.Decimal64Dtype):\n data = pa.array(\n arbitrary,\n type=pa.decimal128(\n precision=dtype.precision, scale=dtype.scale\n ),\n )\n return cudf.core.column.Decimal64Column.from_arrow(\n data\n )\n elif isinstance(dtype, cudf.core.dtypes.Decimal32Dtype):\n data = pa.array(\n arbitrary,\n type=pa.decimal128(\n precision=dtype.precision, scale=dtype.scale\n ),\n )\n return cudf.core.column.Decimal32Column.from_arrow(\n data\n )\n\n if is_bool_dtype(dtype):\n # Need this special case handling for bool dtypes,\n # since 'boolean' & 'pd.BooleanDtype' are not\n # understood by np.dtype below.\n dtype = \"bool\"\n np_type = np.dtype(dtype).type\n pa_type = np_to_pa_dtype(np.dtype(dtype))\n data = as_column(\n pa.array(\n arbitrary,\n type=pa_type,\n from_pandas=True\n if nan_as_null is None\n else nan_as_null,\n ),\n dtype=dtype,\n nan_as_null=nan_as_null,\n )\n except (pa.ArrowInvalid, pa.ArrowTypeError, TypeError):\n if is_categorical_dtype(dtype):\n sr = pd.Series(arbitrary, dtype=\"category\")\n data = as_column(sr, nan_as_null=nan_as_null, dtype=dtype)\n elif np_type == np.str_:\n sr = pd.Series(arbitrary, dtype=\"str\")\n data = as_column(sr, nan_as_null=nan_as_null)\n elif is_interval_dtype(dtype):\n sr = pd.Series(arbitrary, dtype=\"interval\")\n data = as_column(sr, nan_as_null=nan_as_null, dtype=dtype)\n elif (\n isinstance(arbitrary, Sequence)\n and len(arbitrary) > 0\n and any(\n cudf.utils.dtypes.is_column_like(arb)\n for arb in arbitrary\n )\n ):\n return cudf.core.column.ListColumn.from_sequences(\n arbitrary\n )\n else:\n data = as_column(\n _construct_array(arbitrary, dtype),\n dtype=dtype,\n nan_as_null=nan_as_null,\n )\n return data\n\n\ndef _construct_array(\n arbitrary: Any, dtype: Optional[Dtype]\n) -> Union[np.ndarray, cupy.ndarray]:\n \"\"\"\n Construct a CuPy or NumPy array from `arbitrary`\n \"\"\"\n try:\n dtype = dtype if dtype is None else cudf.dtype(dtype)\n arbitrary = cupy.asarray(arbitrary, dtype=dtype)\n except (TypeError, ValueError):\n native_dtype = dtype\n if (\n dtype is None\n and not cudf._lib.scalar._is_null_host_scalar(arbitrary)\n and infer_dtype(arbitrary) in (\"mixed\", \"mixed-integer\",)\n ):\n native_dtype = \"object\"\n arbitrary = np.asarray(\n arbitrary,\n dtype=native_dtype\n if 
native_dtype is None\n else np.dtype(native_dtype),\n )\n return arbitrary\n\n\ndef _data_from_cuda_array_interface_desc(obj) -> Buffer:\n desc = obj.__cuda_array_interface__\n ptr = desc[\"data\"][0]\n nelem = desc[\"shape\"][0] if len(desc[\"shape\"]) > 0 else 1\n dtype = cudf.dtype(desc[\"typestr\"])\n\n data = Buffer(data=ptr, size=nelem * dtype.itemsize, owner=obj)\n return data\n\n\ndef _mask_from_cuda_array_interface_desc(obj) -> Union[Buffer, None]:\n desc = obj.__cuda_array_interface__\n mask = desc.get(\"mask\", None)\n\n if mask is not None:\n desc = mask.__cuda_array_interface__\n ptr = desc[\"data\"][0]\n nelem = desc[\"shape\"][0]\n typestr = desc[\"typestr\"]\n typecode = typestr[1]\n if typecode == \"t\":\n mask_size = bitmask_allocation_size_bytes(nelem)\n mask = Buffer(data=ptr, size=mask_size, owner=obj)\n elif typecode == \"b\":\n col = as_column(mask)\n mask = bools_to_mask(col)\n else:\n raise NotImplementedError(\n f\"Cannot infer mask from typestr {typestr}\"\n )\n return mask\n\n\ndef serialize_columns(columns) -> Tuple[List[dict], List]:\n \"\"\"\n Return the headers and frames resulting\n from serializing a list of Column\n Parameters\n ----------\n columns : list\n list of Columns to serialize\n Returns\n -------\n headers : list\n list of header metadata for each Column\n frames : list\n list of frames\n \"\"\"\n headers: List[Dict[Any, Any]] = []\n frames = []\n\n if len(columns) > 0:\n header_columns = [c.serialize() for c in columns]\n headers, column_frames = zip(*header_columns)\n for f in column_frames:\n frames.extend(f)\n\n return headers, frames\n\n\ndef deserialize_columns(headers: List[dict], frames: List) -> List[ColumnBase]:\n \"\"\"\n Construct a list of Columns from a list of headers\n and frames.\n \"\"\"\n columns = []\n\n for meta in headers:\n col_frame_count = meta[\"frame_count\"]\n col_typ = pickle.loads(meta[\"type-serialized\"])\n colobj = col_typ.deserialize(meta, frames[:col_frame_count])\n columns.append(colobj)\n # Advance frames\n frames = frames[col_frame_count:]\n\n return columns\n\n\ndef arange(\n start: Union[int, float],\n stop: Union[int, float] = None,\n step: Union[int, float] = 1,\n dtype=None,\n) -> cudf.core.column.NumericalColumn:\n \"\"\"\n Returns a column with evenly spaced values within a given interval.\n\n Values are generated within the half-open interval [start, stop).\n The first three arguments are mapped like the range built-in function,\n i.e. start and step are optional.\n\n Parameters\n ----------\n start : int/float\n Start of the interval.\n stop : int/float, default is None\n Stop of the interval.\n step : int/float, default 1\n Step width between each pair of consecutive values.\n dtype : default None\n Data type specifier. 
It is inferred from other arguments by default.\n\n Returns\n -------\n cudf.core.column.NumericalColumn\n\n Examples\n --------\n >>> import cudf\n >>> col = cudf.core.column.arange(2, 7, 1, dtype='int16')\n >>> col\n <cudf.core.column.numerical.NumericalColumn object at 0x7ff7998f8b90>\n >>> cudf.Series(col)\n 0 2\n 1 3\n 2 4\n 3 5\n 4 6\n dtype: int16\n \"\"\"\n if stop is None:\n stop = start\n start = 0\n\n if step is None:\n step = 1\n\n size = len(range(int(start), int(stop), int(step)))\n if size == 0:\n return as_column([], dtype=dtype)\n\n return libcudf.filling.sequence(\n size,\n as_device_scalar(start, dtype=dtype),\n as_device_scalar(step, dtype=dtype),\n )\n\n\ndef full(size: int, fill_value: ScalarLike, dtype: Dtype = None) -> ColumnBase:\n \"\"\"\n Returns a column of given size and dtype, filled with a given value.\n\n Parameters\n ----------\n size : int\n size of the expected column.\n fill_value : scalar\n A scalar value to fill a new array.\n dtype : default None\n Data type specifier. It is inferred from other arguments by default.\n\n Returns\n -------\n Column\n\n Examples\n --------\n >>> import cudf\n >>> col = cudf.core.column.full(size=5, fill_value=7, dtype='int8')\n >>> col\n <cudf.core.column.numerical.NumericalColumn object at 0x7fa0912e8b90>\n >>> cudf.Series(col)\n 0 7\n 1 7\n 2 7\n 3 7\n 4 7\n dtype: int8\n \"\"\"\n return ColumnBase.from_scalar(cudf.Scalar(fill_value, dtype), size)\n\n\ndef concat_columns(objs: \"MutableSequence[ColumnBase]\") -> ColumnBase:\n \"\"\"Concatenate a sequence of columns.\"\"\"\n if len(objs) == 0:\n dtype = cudf.dtype(None)\n return column_empty(0, dtype=dtype, masked=True)\n\n # If all columns are `NumericalColumn` with different dtypes,\n # we cast them to a common dtype.\n # Notice, we can always cast pure null columns\n not_null_col_dtypes = [o.dtype for o in objs if o.valid_count]\n if len(not_null_col_dtypes) and all(\n _is_non_decimal_numeric_dtype(dtyp)\n and np.issubdtype(dtyp, np.datetime64)\n for dtyp in not_null_col_dtypes\n ):\n # Use NumPy to find a common dtype\n common_dtype = np.find_common_type(not_null_col_dtypes, [])\n # Cast all columns to the common dtype\n objs = [obj.astype(common_dtype) for obj in objs]\n\n # Find the first non-null column:\n head = next((obj for obj in objs if obj.valid_count), objs[0])\n\n for i, obj in enumerate(objs):\n # Check that all columns are the same type:\n if not is_dtype_equal(obj.dtype, head.dtype):\n # if all null, cast to appropriate dtype\n if obj.valid_count == 0:\n objs[i] = column_empty_like(\n head, dtype=head.dtype, masked=True, newsize=len(obj)\n )\n else:\n raise ValueError(\"All columns must be the same type\")\n\n # TODO: This logic should be generalized to a dispatch to\n # ColumnBase._concat so that all subclasses can override necessary\n # behavior. 
However, at the moment it's not clear what that API should look\n # like, so CategoricalColumn simply implements a minimal working API.\n if all(is_categorical_dtype(o.dtype) for o in objs):\n return cudf.core.column.categorical.CategoricalColumn._concat(\n cast(\n MutableSequence[\n cudf.core.column.categorical.CategoricalColumn\n ],\n objs,\n )\n )\n\n newsize = sum(map(len, objs))\n if newsize > libcudf.MAX_COLUMN_SIZE:\n raise MemoryError(\n f\"Result of concat cannot have \"\n f\"size > {libcudf.MAX_COLUMN_SIZE_STR}\"\n )\n elif newsize == 0:\n col = column_empty(0, head.dtype, masked=True)\n else:\n # Filter out inputs that have 0 length, then concatenate.\n objs = [o for o in objs if len(o)]\n try:\n col = libcudf.concat.concat_columns(objs)\n except RuntimeError as e:\n if \"exceeds size_type range\" in str(e):\n raise OverflowError(\n \"total size of output is too large for a cudf column\"\n ) from e\n raise\n return col\n" ]
[ [ "numpy.array", "numpy.isnan", "numpy.find_common_type", "numpy.asarray", "pandas.StringDtype", "numpy.ascontiguousarray", "numpy.isscalar", "numpy.timedelta64", "pandas.Series", "numpy.int32", "numpy.issubdtype", "numpy.dtype", "numpy.datetime64" ] ]
roycek7/machine_learning
[ "4589a590b9e87d37976cd0bcf9626d085735132d" ]
[ "K Nearest Neighbour/knn.py" ]
[ "\"\"\"\n@author: roycek\n\"\"\"\n\nimport math\n\nimport numpy as np\nimport pylab\nfrom sklearn.model_selection import train_test_split\n\nlabel = 1\nk = 100\nfeatures = 11\n\n\ndef normalize_data(data):\n return (data - np.min(data)) / (np.max(data) - np.min(data))\n\n\ndef euclidean_distance(test_data_point, training_data_point):\n return math.sqrt(sum([(a - b) ** 2 for a, b in zip(test_data_point, training_data_point)]))\n\n\ndef accuracy_metric(actual, predicted, correct=0):\n for i in range(len(actual)):\n if actual[i] == predicted[i]:\n correct += 1\n return correct / len(actual) * 100.0\n\n\ndef sort_tuple(tup, y):\n tup.sort(key=lambda x: x[y])\n return tup\n\n\ndef predict_test_data(k_th):\n predicted_labels = []\n for i in range(len(x_test)):\n distance = []\n for j in range(len(x_train)):\n eu_distance = euclidean_distance(x_test[i], x_train[j])\n distance.append((eu_distance, y_train[j]))\n minimum_distant_neighbours = sort_tuple(distance, 0)[:k_th]\n nearest_neighbours = []\n for j in minimum_distant_neighbours:\n nearest_neighbours.append(j[label])\n predicted_labels.append(max(nearest_neighbours, key=nearest_neighbours.count))\n return predicted_labels\n\n\nnumpy_data = np.loadtxt(\"glass.data\", delimiter=\",\", usecols=range(1, features))\ny_label = [i[-label] for i in numpy_data]\nscaled_data = normalize_data(numpy_data)\ntraining_data, test_data, y_train, y_test = train_test_split(scaled_data, y_label, train_size=0.8, test_size=0.2,\n shuffle=True)\nx_train = training_data[:, :-label]\nx_test = test_data[:, :-label]\n\npylab.plot(range(k - label), [accuracy_metric(y_test, predict_test_data(i)) for i in range(1, k)])\npylab.xlabel('k')\npylab.ylabel('Accuracy')\npylab.show()\n" ]
[ [ "sklearn.model_selection.train_test_split", "numpy.max", "numpy.min" ] ]
xwang233/code-snippe
[ "2c053c85c6d73a32cc4b8e92e55ef8d0d780d578" ]
[ "pooling-bench-68682/c.py" ]
[ "import torch.utils.benchmark as benchmark\nimport glob\nimport pickle\n\nres = []\n\nfor pkl in sorted(glob.glob('./*.pkl')):\n with open(pkl, 'rb') as f:\n res += pickle.load(f)\n\ncompare = benchmark.Compare(res)\ncompare.print()\n" ]
[ [ "torch.utils.benchmark.Compare" ] ]
MobleyLab/quanformer
[ "d2e00309101a962d18f36c7966f20a78d62f349d" ]
[ "quanformer/tests/test_basic_plot.py" ]
[ "\"\"\"\ntest_basic_plot.py\n\"\"\"\nimport sys\nimport os\nimport pytest\nimport shutil\n\n# define location of input files for testing\nmydir = os.path.dirname(os.path.abspath(__file__))\n\n# import functions to aid testing\nsys.path.append(os.path.join(os.path.dirname(__file__), 'helpers'))\nfrom helper import *\n\nfrom quanformer.basic_plot import *\n\nimport matplotlib as mpl\nmpl.use(\"Agg\") # for Mac OS X error of NSInvalidArgumentException on Travis CI\n\n# -----------------------\n\n\ndef test_basic_plot():\n basic_plot(os.path.join(mydir, 'data_tests', 'survey_confs', 'divrefine-220.sdf'),\n tag='QM Psi4 Final Opt. Energy (Har) b3lyp-d3mbj/def2-tzvp',\n style='line',\n take_relative=True,\n har_to_kcal=True)\n os.remove('output_0.png')\n os.remove('output_1.png')\n\ndef test_combine_files_plot():\n os.chdir(mydir)\n combine_files_plot(os.path.join(mydir, 'data_tests', 'survey_confs', 'stitch_ene.in'),\n molname='Div_6',\n verbose=True,\n take_relative=True,\n har_to_kcal=True)\n os.remove('combined.dat')\n os.remove('combined.png')\n\n\n# test manually without pytest\nif 0:\n sys.path.insert(0, '/home/limvt/Documents/quanformer/quanformer')\n from basic_plot import *\n test_basic_plot()\n" ]
[ [ "matplotlib.use" ] ]
zhakguder/puzzlesolver
[ "6516a446065f84031607b8a97a566784d25fa21f" ]
[ "tests/test_imageprocess.py" ]
[ "\"\"\"Tests for image processing\"\"\"\n\nimport unittest\nfrom os import path\n\nimport cv2\nimport numpy as np\n\nfrom puzzlesolver.imageprocess import chaincode, fileops\nfrom puzzlesolver.utils import get_project_root\n\n\nclass TestImageProcess(unittest.TestCase):\n \"\"\"Tests for imageprocess\"\"\"\n\n def setUp(self):\n self.project_root = get_project_root()\n self.img_path = path.join(self.project_root, \"assets\", \"Castle.png\")\n self.threshold = 254\n self.dog_image = path.join(self.project_root, \"data/train/dog.1526.jpg\")\n\n def test_contours_to_chaincodes_returns_chaincodes(self):\n \"\"\"Test that a chaincode is returned for every contour\"\"\"\n test_contours = chaincode._contour(self.img_path, threshold=self.threshold)\n test_chaincodes = chaincode._contours_to_chaincodes(self.img_path)\n self.assertEqual(len(test_contours), len(test_chaincodes))\n\n def test_contour_to_chaincode_returns_chaincode(self):\n \"\"\"Test that a chaincode is returned for a single contour\"\"\"\n test_contour = chaincode._contour(self.img_path, threshold=self.threshold)\n test_chaincode = chaincode._contour_to_chaincode(test_contour[0])\n self.assertLess(max(test_chaincode), 9)\n self.assertGreater(min(test_chaincode), 0)\n\n def test_contour_returns_something(self):\n test_contour = chaincode._contour(self.img_path, threshold=self.threshold)\n self.assertIsNotNone(test_contour)\n\n @unittest.expectedFailure\n def test_can_cut_image(self):\n upper, lower = fileops.cut_image(self.dog_image)\n self.assertIsNotNone(upper)\n self.assertIsNotNone(lower)\n\n def test_can_set_mask(self):\n matrix = np.ones((10, 10))\n n_all_entries = np.sum(matrix)\n row_range = range(7, 10)\n col_range = range(3, 6)\n matrix = fileops._set_zeros(matrix, row_range, col_range)\n n_zeros = len(row_range) * len(col_range)\n self.assertEqual(np.sum(matrix), n_all_entries - n_zeros)\n\n def test_can_cut_image(self):\n upper, lower = fileops.cut_image(self.dog_image)\n cv2.imwrite(\"upper.png\", upper)\n cv2.imwrite(\"lower.png\", lower)\n" ]
[ [ "numpy.sum", "numpy.ones" ] ]
zoumo4913/DRAMsim3
[ "79a6aa3038e1a6da749fe1fb041a4564555e5a0a" ]
[ "scripts/plot_stats.py" ]
[ "#!/usr/bin/env python3\n\"\"\"\nGenerate time series graphs of power/bandwidth/energy...\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef extract_epoch_data(json_data, label, merge_channel=True):\n \"\"\"\n TODO enable merge_channel=False option later\n \"\"\"\n if merge_channel:\n merged_data = {}\n for line in json_data:\n epoch_num = line[\"epoch_num\"]\n if epoch_num in merged_data:\n merged_data[epoch_num] += line[label]\n else:\n merged_data[epoch_num] = line[label]\n return [v for (k, v) in sorted(merged_data.items(),\n key=lambda t: t[0])]\n\n\ndef plot_epochs(json_data, label, unit=\"\", output=None):\n \"\"\"\n plot the time series of a specified stat serie (e.g. bw, power, etc)\n \"\"\"\n print('ploting {}'.format(label))\n cycles_per_epoch = json_data[0]['num_cycles']\n y_data = extract_epoch_data(json_data, label)\n x_ticks = [i * cycles_per_epoch for i in range(len(y_data))]\n\n plt.plot(x_ticks, y_data)\n\n plt.title(label)\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))\n plt.xlabel('Cycles')\n plt.ylabel('{} ({})'.format(label, unit))\n plt.ylim(bottom=0, top=1.1*max(y_data))\n if output:\n plt.savefig(output+'_epochs_{}.pdf'.format(label))\n plt.clf()\n else:\n plt.show()\n return\n\n\ndef extract_histo_data(data, label):\n array = []\n for chan, channel_data in data.items():\n for key, count in channel_data[label].items():\n val = int(key)\n array.extend([val for _ in range(count)])\n return array\n\n\ndef plot_histogram(json_data, label, unit='', output=None):\n histo_data = extract_histo_data(json_data, label)\n histo_data = sorted(histo_data)\n total_cnt = len(histo_data)\n existing_nums = set()\n unique_vals = 0\n for i in range(int(0.90 * total_cnt)):\n if histo_data[i] in existing_nums:\n continue\n else:\n existing_nums.add(histo_data[i])\n unique_vals += 1\n print('90-Percentile unique {} values: {}'.format(label, unique_vals))\n x_min = min(histo_data)\n x_max = max(histo_data)\n x_99 = int(0.99 * len(histo_data))\n mark_99 = histo_data[x_99]\n avg = np.average(histo_data)\n histo_data = histo_data[0:x_99]\n \n # doane seems to provide better esitmates for bins\n plt.hist(histo_data, bins='doane', density=True)\n\n line_avg = plt.axvline(x=avg, linestyle='--', c='g',\n label='Average:{0:.1f}'.format(avg))\n line_99 = plt.axvline(x=mark_99, linestyle='-.', c='r',\n label='99 Percentile:{0:.1f}'.format(mark_99))\n plt.title(label)\n plt.xlabel(label + ' [max: ' + str(x_max) + '](' + unit + ')')\n plt.ylabel('Density')\n plt.legend(handles=[line_avg, line_99])\n if output:\n plt.savefig(output+'_histo_{}.pdf'.format(label))\n plt.clf()\n else:\n plt.show()\n return\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Plot time serie graphs from '\n 'stats outputs, type -h for more options')\n parser.add_argument('json', help='stats json file')\n parser.add_argument('-d', '--dir', help='output dir', default='.')\n parser.add_argument('-o', '--output',\n help='output name (withouth extension name)',\n default='dramsim')\n parser.add_argument('-k', '--key',\n help='plot a specific key name in epoch stats, '\n 'use the name in JSON')\n args = parser.parse_args()\n\n with open(args.json, 'r') as j_file:\n is_epoch = False\n try:\n j_data = json.load(j_file)\n except:\n print('cannot load file ' + args.json)\n exit(1)\n if isinstance(j_data, list):\n is_epoch = True\n else:\n is_epoch = False\n\n prefix = os.path.join(args.dir, args.output)\n if 
is_epoch:\n data_units = {'average_bandwidth': 'GB/s',\n 'average_power': 'mW',\n 'average_read_latency': 'cycles'}\n if args.key:\n data_units[args.key] = ''\n for label, unit in data_units.items():\n plot_epochs(j_data, label, unit, prefix)\n else:\n data_units = {'read_latency': 'cycles',\n 'write_latency': 'cycles',\n 'interarrival_latency': 'cycles'}\n for label, unit in data_units.items():\n plot_histogram(j_data, label, unit, prefix)\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.hist", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.average", "matplotlib.pyplot.clf", "matplotlib.pyplot.ticklabel_format" ] ]
zhaoxin94/OVANet
[ "16c660cc8e6b384528f9e28d79ebc1ee7a2255ea" ]
[ "train.py" ]
[ "from __future__ import print_function\nimport yaml\nimport easydict\nimport os\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom apex import amp, optimizers\nfrom utils.utils import log_set, save_model\nfrom utils.loss import ova_loss, open_entropy\nfrom utils.lr_schedule import inv_lr_scheduler\nfrom utils.defaults import get_dataloaders, get_models\nfrom eval import test\nimport argparse\nimport random\nimport numpy as np\n\nparser = argparse.ArgumentParser(\n description='Pytorch OVANet',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--config',\n type=str,\n default='config.yaml',\n help='/path/to/config/file')\n\nparser.add_argument('--source_data',\n type=str,\n default='./utils/source_list.txt',\n help='path to source list')\nparser.add_argument('--target_data',\n type=str,\n default='./utils/target_list.txt',\n help='path to target list')\nparser.add_argument('--log-interval',\n type=int,\n default=100,\n help='how many batches before logging training status')\nparser.add_argument('--exp_name',\n type=str,\n default='office',\n help='/path/to/config/file')\nparser.add_argument('--network',\n type=str,\n default='resnet50',\n help='network name')\nparser.add_argument(\"--gpu_devices\",\n type=int,\n nargs='+',\n default=None,\n help=\"\")\nparser.add_argument(\"--no_adapt\", default=False, action='store_true')\nparser.add_argument(\"--save_model\", default=False, action='store_true')\nparser.add_argument(\"--save_path\",\n type=str,\n default=\"record/ova_model\",\n help='/path/to/save/model')\nparser.add_argument('--multi',\n type=float,\n default=0.1,\n help='weight factor for adaptation')\nparser.add_argument(\"--seed\",\n type=int,\n default=-1,\n help=\"only positive value enables a fixed seed\")\nparser.add_argument(\"--output-dir\",\n type=str,\n default=\"\",\n help=\"output directory\")\nargs = parser.parse_args()\n\n\ndef set_random_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n\n# set seed\nif args.seed >= 0:\n print(\"Setting fixed seed: {}\".format(args.seed))\n set_random_seed(args.seed)\n\ntorch.backends.cudnn.benchmark = False\ntorch.backends.cudnn.detrministic = True\n\nconfig_file = args.config\nconf = yaml.safe_load(open(config_file))\nsave_config = yaml.safe_load(open(config_file))\nconf = easydict.EasyDict(conf)\ngpu_devices = ','.join([str(id) for id in args.gpu_devices])\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu_devices\nargs.cuda = torch.cuda.is_available()\n\nsource_data = args.source_data\ntarget_data = args.target_data\nevaluation_data = args.target_data\nnetwork = args.network\nuse_gpu = torch.cuda.is_available()\nn_share = conf.data.dataset.n_share\nn_source_private = conf.data.dataset.n_source_private\nn_total = conf.data.dataset.n_total\nopen = n_total - n_share - n_source_private > 0\nnum_class = n_share + n_source_private\nscript_name = os.path.basename(__file__)\n\ninputs = vars(args)\ninputs[\"evaluation_data\"] = evaluation_data\ninputs[\"conf\"] = conf\ninputs[\"script_name\"] = script_name\ninputs[\"num_class\"] = num_class\ninputs[\"config_file\"] = config_file\n\nsource_loader, target_loader, \\\ntest_loader, target_folder = get_dataloaders(inputs)\n\nlogname = log_set(inputs)\n\nG, C1, C2, opt_g, opt_c, \\\nparam_lr_g, param_lr_c = get_models(inputs)\nndata = target_folder.__len__()\n\n\ndef train():\n criterion = nn.CrossEntropyLoss().cuda()\n print('train start!')\n 
data_iter_s = iter(source_loader)\n data_iter_t = iter(target_loader)\n len_train_source = len(source_loader)\n len_train_target = len(target_loader)\n for step in range(conf.train.min_step + 1):\n G.train()\n C1.train()\n C2.train()\n if step % len_train_target == 0:\n data_iter_t = iter(target_loader)\n if step % len_train_source == 0:\n data_iter_s = iter(source_loader)\n data_t = next(data_iter_t)\n data_s = next(data_iter_s)\n inv_lr_scheduler(param_lr_g,\n opt_g,\n step,\n init_lr=conf.train.lr,\n max_iter=conf.train.min_step)\n inv_lr_scheduler(param_lr_c,\n opt_c,\n step,\n init_lr=conf.train.lr,\n max_iter=conf.train.min_step)\n img_s = data_s[0]\n label_s = data_s[1]\n img_t = data_t[0]\n img_s, label_s = Variable(img_s.cuda()), \\\n Variable(label_s.cuda())\n img_t = Variable(img_t.cuda())\n opt_g.zero_grad()\n opt_c.zero_grad()\n C2.module.weight_norm()\n\n ## Source loss calculation\n feat = G(img_s)\n out_s = C1(feat)\n out_open = C2(feat)\n ## source classification loss\n loss_s = criterion(out_s, label_s)\n ## open set loss for source\n out_open = out_open.view(out_s.size(0), 2, -1)\n open_loss_pos, open_loss_neg = ova_loss(out_open, label_s)\n ## b x 2 x C\n loss_open = 0.5 * (open_loss_pos + open_loss_neg)\n ## open set loss for target\n all = loss_s + loss_open\n log_string = 'Train {}/{} \\t ' \\\n 'Loss Source: {:.4f} ' \\\n 'Loss Open: {:.4f} ' \\\n 'Loss Open Source Positive: {:.4f} ' \\\n 'Loss Open Source Negative: {:.4f} '\n log_values = [\n step, conf.train.min_step,\n loss_s.item(),\n loss_open.item(),\n open_loss_pos.item(),\n open_loss_neg.item()\n ]\n if not args.no_adapt:\n feat_t = G(img_t)\n out_open_t = C2(feat_t)\n out_open_t = out_open_t.view(img_t.size(0), 2, -1)\n ent_open = open_entropy(out_open_t)\n all += args.multi * ent_open\n log_values.append(ent_open.item())\n log_string += \"Loss Open Target: {:.6f} \"\n\n # zhaoxin add\n lr = opt_c.param_groups[0][\"lr\"]\n log_string += \"learning rate: {:.4f}\"\n log_values.append(lr)\n with amp.scale_loss(all, [opt_g, opt_c]) as scaled_loss:\n scaled_loss.backward()\n opt_g.step()\n opt_c.step()\n opt_g.zero_grad()\n opt_c.zero_grad()\n if step % conf.train.log_interval == 0:\n print(log_string.format(*log_values))\n if step > 0 and step % conf.test.test_interval == 0:\n acc_o, h_score = test(step,\n test_loader,\n logname,\n n_share,\n G, [C1, C2],\n open=open)\n print(\"acc all %s h_score %s \" % (acc_o, h_score))\n G.train()\n C1.train()\n if args.save_model:\n save_path = \"%s_%s.pth\" % (args.save_path, step)\n save_model(G, C1, C2, save_path)\n\n\ntrain()\n" ]
[ [ "torch.cuda.manual_seed_all", "numpy.random.seed", "torch.manual_seed", "torch.cuda.is_available", "torch.nn.CrossEntropyLoss" ] ]
zjstillman/RAISRFT
[ "fbef07fb7d42fbd1ffc3c2efb678f7570347a59c" ]
[ "FFT_Crop_Abs_Ver/hashkey.py" ]
[ "import numpy as np\nfrom math import atan2, floor, pi\n# from numba import jit\n\n# @jit\ndef hashkey(block, Qangle, W):\n # Calculate gradient\n gy, gx = np.gradient(block)\n\n # Transform 2D matrix into 1D array\n gx = gx.ravel()\n gy = gy.ravel()\n\n # SVD calculation\n G = np.vstack((gx,gy)).T\n GTWG = G.T.dot(W).dot(G)\n w, v = np.linalg.eig(GTWG);\n\n # Make sure V and D contain only real numbers\n nonzerow = np.count_nonzero(np.isreal(w))\n nonzerov = np.count_nonzero(np.isreal(v))\n if nonzerow != 0:\n w = np.real(w)\n if nonzerov != 0:\n v = np.real(v)\n\n # Sort w and v according to the descending order of w\n idx = w.argsort()[::-1]\n w = w[idx]\n v = v[:,idx]\n\n # Calculate theta\n theta = atan2(v[1,0], v[0,0])\n if theta < 0:\n theta = theta + pi\n # if theta > pi/2:\n # print(theta)\n # Calculate lamda\n lamda = w[0]\n\n # Calculate u\n sqrtlamda1 = np.sqrt(w[0])\n sqrtlamda2 = np.sqrt(w[1])\n if sqrtlamda1 + sqrtlamda2 == 0:\n u = 0\n else:\n u = (sqrtlamda1 - sqrtlamda2)/(sqrtlamda1 + sqrtlamda2)\n\n # Quantize\n angle = floor(theta/pi*Qangle)\n if lamda < 0.0001:\n strength = 0\n elif lamda > 0.001:\n strength = 2\n else:\n strength = 1\n # strength = 0\n if u < 0.25:\n coherence = 0\n elif u > 0.5:\n coherence = 2\n else:\n coherence = 1\n\n # Bound the output to the desired ranges\n if angle > 23:\n angle = 23\n elif angle < 0:\n angle = 0\n\n return angle, strength, coherence\n" ]
[ [ "numpy.real", "numpy.isreal", "numpy.linalg.eig", "numpy.sqrt", "numpy.gradient", "numpy.vstack" ] ]
Jueun-Park/donkey_gym
[ "f1a19a9983f06af167d9e8f647a39195dc505a0c" ]
[ "default_controller/plot.py" ]
[ "import matplotlib.pyplot as plt\nimport pandas as pd\nimport os\n\nfilename = os.path.dirname(os.path.abspath(__file__)) + \"/data/\" + \"data.csv\"\ndf = pd.read_csv(filename)\ndf.plot(x='t')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show", "pandas.read_csv" ] ]
ZombaSY/util-collection
[ "49b65eeff130edfa34fcecd7bba5b2e717eebdb7" ]
[ "machine learning/PSNR.py" ]
[ "import torch\nimport os\nimport pandas as pd\n\nfrom PIL import Image\nfrom torchvision import transforms\n\n\nclass PSNR:\n \"\"\"Peak Signal to Noise Ratio\n img1 and img2 have range [0, 255]\"\"\"\n\n def __init__(self):\n self.name = \"PSNR\"\n\n @staticmethod\n def __call__(img1, img2):\n mse = torch.mean((img1 - img2) ** 2)\n return 20 * torch.log10(1 / torch.sqrt(mse))\n\n\ntarget_path = 'A:/Users/SSY/Desktop/dataset/cud_calibration/210308_paper_dataset/dataset/test/A'\nimg_path = 'A:/Users/SSY/Desktop/Other works/paper/졸업논문/experiments/CUD-Net/model14_iden'\n\n\ndef main():\n img1_list = os.listdir(target_path)\n img2_list = os.listdir(img_path)\n\n assert len(img1_list) == len(img2_list), 'the number of images should be same'\n\n img_zip = zip(img1_list, img2_list)\n psnr = PSNR()\n\n psnr_list = []\n\n for idx, items in enumerate(img_zip):\n img1_full_path = os.path.join(target_path, items[0])\n img2_full_path = os.path.join(img_path, items[1])\n\n img1 = Image.open(img1_full_path).convert('RGB')\n img2 = Image.open(img2_full_path).convert('RGB')\n\n transform = transforms.ToTensor()\n\n img1 = transform(img1).unsqueeze(0)\n img2 = transform(img2).unsqueeze(0)\n\n psnr_value = psnr(img1, img2)\n\n psnr_list.append(psnr_value.cpu().detach().item())\n print(psnr_value)\n\n df = {'file_name': img1_list,\n 'A_percentage': psnr_list}\n\n pd.DataFrame(df).to_csv('psnr_result.csv', index=False)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.DataFrame", "torch.sqrt", "torch.mean" ] ]
AlexanderAzhar/RGBDStudy
[ "2ba4404eff5231982ad49ce8c2d0121af37cd95b" ]
[ "main.py" ]
[ "import numpy as np\nimport argparse\nimport glob\nimport os\nfrom functools import partial\nimport vispy\nimport scipy.misc as misc\nfrom tqdm import tqdm\nimport yaml\nimport time\nimport sys\nfrom mesh import write_ply, read_ply, output_3d_photo\nfrom utils import get_MiDaS_samples, read_MiDaS_depth\nimport torch\nimport cv2\nfrom skimage.transform import resize\nimport imageio\nimport copy\nfrom networks import Inpaint_Color_Net, Inpaint_Depth_Net, Inpaint_Edge_Net\nfrom MiDaS.run import run_depth\nfrom MiDaS.monodepth_net import MonoDepthNet\nimport MiDaS.MiDaS_utils as MiDaS_utils\nfrom bilateral_filtering import sparse_bilateral_filtering\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--config', type=str, default='argument.yml', help='Configure of post processing')\nargs = parser.parse_args()\nconfig = yaml.load(open(args.config, 'r'))\nif config['offscreen_rendering'] is True:\n vispy.use(app='egl')\nos.makedirs(config['mesh_folder'], exist_ok=True)\nos.makedirs(config['video_folder'], exist_ok=True)\nos.makedirs(config['depth_folder'], exist_ok=True)\nsample_list = get_MiDaS_samples(config['src_folder'], config['depth_folder'], config, config['specific'])\nnormal_canvas, all_canvas = None, None\n\nif isinstance(config[\"gpu_ids\"], int) and (config[\"gpu_ids\"] >= 0):\n device = config[\"gpu_ids\"]\nelse:\n device = \"cpu\"\n\nprint(f\"running on device {device}\")\n\nfor idx in tqdm(range(len(sample_list))):\n depth = None\n sample = sample_list[idx]\n print(\"Current Source ==> \", sample['src_pair_name'])\n mesh_fi = os.path.join(config['mesh_folder'], sample['src_pair_name'] + '.ply')\n image = imageio.imread(sample['ref_img_fi'])\n\n print(f\"Running depth extraction at {time.time()}\")\n if config['require_midas'] is True:\n run_depth([sample['ref_img_fi']], config['src_folder'], config['depth_folder'],\n config['MiDaS_model_ckpt'], MonoDepthNet, MiDaS_utils, target_w=640)\n if 'npy' in config['depth_format']:\n config['output_h'], config['output_w'] = np.load(sample['depth_fi']).shape[:2]\n else:\n config['output_h'], config['output_w'] = imageio.imread(sample['depth_fi']).shape[:2]\n frac = config['longer_side_len'] / max(config['output_h'], config['output_w'])\n config['output_h'], config['output_w'] = int(config['output_h'] * frac), int(config['output_w'] * frac)\n config['original_h'], config['original_w'] = config['output_h'], config['output_w']\n if image.ndim == 2:\n image = image[..., None].repeat(3, -1)\n if np.sum(np.abs(image[..., 0] - image[..., 1])) == 0 and np.sum(np.abs(image[..., 1] - image[..., 2])) == 0:\n config['gray_image'] = True\n else:\n config['gray_image'] = False\n image = cv2.resize(image, (config['output_w'], config['output_h']), interpolation=cv2.INTER_AREA)\n depth = read_MiDaS_depth(sample['depth_fi'], 3.0, config['output_h'], config['output_w'])\n mean_loc_depth = depth[depth.shape[0] // 2, depth.shape[1] // 2]\n if not (config['load_ply'] is True and os.path.exists(mesh_fi)):\n vis_photos, vis_depths = sparse_bilateral_filtering(depth.copy(), image.copy(), config,\n num_iter=config['sparse_iter'], spdb=False)\n depth = vis_depths[-1]\n model = None\n torch.cuda.empty_cache()\n print(\"Start Running 3D_Photo ...\")\n print(f\"Loading edge model at {time.time()}\")\n depth_edge_model = Inpaint_Edge_Net(init_weights=True)\n # depth_edge_weight = torch.load(config['depth_edge_model_ckpt'], map_location=torch.device(device))\n depth_edge_weight = torch.load(config['depth_edge_model_ckpt'], map_location='cpu')\n 
depth_edge_model.load_state_dict(depth_edge_weight)\n depth_edge_model = depth_edge_model.to('cpu')\n depth_edge_model.eval()\n\n print(f\"Loading depth model at {time.time()}\")\n depth_feat_model = Inpaint_Depth_Net()\n # depth_feat_weight = torch.load(config['depth_feat_model_ckpt'], map_location=torch.device(device))\n depth_feat_weight = torch.load(config['depth_feat_model_ckpt'], map_location='cpu')\n depth_feat_model.load_state_dict(depth_feat_weight, strict=True)\n # depth_feat_model = depth_feat_model.to(device)\n depth_feat_model = depth_feat_model.to('cpu')\n depth_feat_model.eval()\n # depth_feat_model = depth_feat_model.to(device)\n depth_feat_model = depth_feat_model.to('cpu')\n print(f\"Loading rgb model at {time.time()}\")\n rgb_model = Inpaint_Color_Net()\n # rgb_feat_weight = torch.load(config['rgb_feat_model_ckpt'], map_location=torch.device(device))\n rgb_feat_weight = torch.load(config['rgb_feat_model_ckpt'], map_location='cpu')\n rgb_model.load_state_dict(rgb_feat_weight)\n rgb_model.eval()\n # rgb_model = rgb_model.to(device)\n rgb_model = rgb_model.to('cpu')\n graph = None\n\n print(f\"Writing depth ply (and basically doing everything) at {time.time()}\")\n rt_info = write_ply(image,\n depth,\n sample['int_mtx'],\n mesh_fi,\n config,\n rgb_model,\n depth_edge_model,\n depth_edge_model,\n depth_feat_model)\n\n if rt_info is False:\n continue\n rgb_model = None\n color_feat_model = None\n depth_edge_model = None\n depth_feat_model = None\n torch.cuda.empty_cache()\n if config['save_ply'] is True or config['load_ply'] is True:\n verts, colors, faces, Height, Width, hFov, vFov = read_ply(mesh_fi)\n else:\n verts, colors, faces, Height, Width, hFov, vFov = rt_info\n\n print(f\"Making video at {time.time()}\")\n videos_poses, video_basename = copy.deepcopy(sample['tgts_poses']), sample['tgt_name']\n top = (config.get('original_h') // 2 - sample['int_mtx'][1, 2] * config['output_h'])\n left = (config.get('original_w') // 2 - sample['int_mtx'][0, 2] * config['output_w'])\n down, right = top + config['output_h'], left + config['output_w']\n border = [int(xx) for xx in [top, down, left, right]]\n normal_canvas, all_canvas = output_3d_photo(verts.copy(), colors.copy(), faces.copy(), copy.deepcopy(Height),\n copy.deepcopy(Width), copy.deepcopy(hFov), copy.deepcopy(vFov),\n copy.deepcopy(sample['tgt_pose']), sample['video_postfix'],\n copy.deepcopy(sample['ref_pose']),\n copy.deepcopy(config['video_folder']),\n image.copy(), copy.deepcopy(sample['int_mtx']), config, image,\n videos_poses, video_basename, config.get('original_h'),\n config.get('original_w'), border=border, depth=depth,\n normal_canvas=normal_canvas, all_canvas=all_canvas,\n mean_loc_depth=mean_loc_depth)\n" ]
[ [ "torch.cuda.empty_cache", "numpy.abs", "numpy.load", "torch.load" ] ]
zha-hengfeng/EACNet
[ "e542d5ec02b4669b31a083f05c1b873697185489" ]
[ "evaluate_iou.py" ]
[ "import os\r\nimport time\r\nimport torch\r\nimport numpy as np\r\nimport torch.backends.cudnn as cudnn\r\nfrom torch.autograd import Variable\r\nfrom argparse import ArgumentParser\r\n# user\r\nfrom builders.model_builder import build_model\r\nfrom builders.dataset_builder import build_dataset_test\r\nfrom utils.utils import save_predict\r\nfrom utils.iouEval import iouEval\r\n\r\n\r\ndef eval(args, test_loader, model):\r\n \"\"\"\r\n args:\r\n test_loader: loaded for test dataset\r\n model: model\r\n return: class IoU and mean IoU\r\n \"\"\"\r\n # evaluation or test mode\r\n model.eval()\r\n total_batches = len(test_loader)\r\n iouEvalVal = iouEval(args.classes+1) # cityscapes\r\n # iouEvalVal = iouEval(args.classes+1, ignoreIndex=11) #camvid\r\n data_list = []\r\n for i, (input, label, size, name) in enumerate(test_loader):\r\n with torch.no_grad():\r\n if args.cuda:\r\n input_var = Variable(input).cuda()\r\n else:\r\n input_var = Variable(input)\r\n #label = torch.from_numpy(np.array(label)).long().unsqueeze(0).cuda()\r\n #label = label.long().unsqueeze(1).cuda()\r\n start_time = time.time()\r\n\r\n output = model(input_var)\r\n\r\n torch.cuda.synchronize()\r\n time_taken = time.time() - start_time\r\n print('[%d/%d] time: %.2f' % (i + 1, total_batches, time_taken))\r\n # print(output.max(1)[1].unsqueeze(1).dtype)\r\n # print(label.dtype)\r\n iouEvalVal.addBatch(output.max(1)[1].unsqueeze(1).data, label.unsqueeze(1))\r\n output = output.cpu().data[0].numpy()\r\n gt = np.asarray(label[0].numpy(), dtype=np.uint8)\r\n output = output.transpose(1, 2, 0)\r\n output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)\r\n # data_list.append([gt.flatten(), output.flatten()])\r\n\r\n # save the predicted image\r\n if args.save:\r\n save_predict(output, gt, name[0], args.dataset, args.save_seg_dir,\r\n output_grey=False, output_color=True, gt_color=False)\r\n\r\n #iouVal, iou_classes = iouEvalVal.getIoU()\r\n #meanIoU, per_class_iu = get_iou(data_list, args.classes)\r\n meanIoU, per_class_iu = iouEvalVal.getIoU()\r\n return meanIoU, per_class_iu\r\n\r\n\r\ndef eval_model(args):\r\n \"\"\"\r\n main function for testing\r\n param args: global arguments\r\n return: None\r\n \"\"\"\r\n print(args)\r\n\r\n logFileLoc = 'log_test_' + args.model + '.txt'\r\n logFileLoc = os.path.join(os.path.dirname(args.checkpoint), logFileLoc)\r\n if os.path.isfile(logFileLoc):\r\n logger = open(logFileLoc, 'a')\r\n else:\r\n logger = open(logFileLoc, 'w')\r\n\r\n if args.cuda:\r\n print(\"=====> use gpu id: '{}'\".format(args.gpus))\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpus\r\n if not torch.cuda.is_available():\r\n raise Exception(\"no GPU found or wrong gpu id, please run without --cuda\")\r\n\r\n # build the model\r\n model = build_model(args.model, num_classes=args.classes)\r\n #print(model)\r\n\r\n if args.cuda:\r\n model = model.cuda() # using GPU for inference\r\n cudnn.benchmark = True\r\n\r\n if args.save:\r\n if not os.path.exists(args.save_seg_dir):\r\n os.makedirs(args.save_seg_dir)\r\n\r\n # load the test set\r\n datas, testLoader = build_dataset_test(args.dataset, args.num_workers)\r\n\r\n if not args.best:\r\n if args.checkpoint:\r\n if os.path.isfile(args.checkpoint):\r\n print(\"=====> loading checkpoint '{}'\".format(args.checkpoint))\r\n checkpoint = torch.load(args.checkpoint)\r\n model.load_state_dict(checkpoint['model'])\r\n # model.load_state_dict(convert_state_dict(checkpoint['model']))\r\n # model.load_state_dict(convert_state_dict(checkpoint))\r\n else:\r\n print(\"=====> no 
checkpoint found at '{}'\".format(args.checkpoint))\r\n raise FileNotFoundError(\"no checkpoint found at '{}'\".format(args.checkpoint))\r\n\r\n print(\"=====> beginning validation\")\r\n print(\"validation set length: \", len(testLoader))\r\n mIOU_val, per_class_iu = eval(args, testLoader, model)\r\n print(mIOU_val)\r\n print(per_class_iu)\r\n\r\n # Get the best test result among the last 10 model records.\r\n else:\r\n if args.checkpoint:\r\n if os.path.isfile(args.checkpoint):\r\n dirname, basename = os.path.split(args.checkpoint)\r\n epoch = int(os.path.splitext(basename)[0].split('_')[1])\r\n mIOU_val = []\r\n per_class_iu = []\r\n min = epoch - args.eval_num + 1\r\n max = epoch + 1\r\n for i in range(min, max):\r\n basename = 'model_' + str(i) + '.pth'\r\n resume = os.path.join(dirname, basename)\r\n checkpoint = torch.load(resume)\r\n model.load_state_dict(checkpoint['model'])\r\n print(\"=====> beginning test the \" + basename)\r\n print(\"validation set length: \", len(testLoader))\r\n mIOU_val_0, per_class_iu_0 = eval(args, testLoader, model)\r\n mIOU_val.append(mIOU_val_0)\r\n per_class_iu.append(per_class_iu_0)\r\n logger.write(\"%d\\t%.4f\\n\" % (i, mIOU_val_0))\r\n logger.flush()\r\n\r\n index = list(range(min, max))[np.argmax(mIOU_val)]\r\n print(\"The best mIoU among the last 10 models is\", index)\r\n print(mIOU_val)\r\n per_class_iu = per_class_iu[np.argmax(mIOU_val)]\r\n mIOU_val = np.max(mIOU_val)\r\n print(mIOU_val)\r\n print(per_class_iu)\r\n\r\n else:\r\n print(\"=====> no checkpoint found at '{}'\".format(args.checkpoint))\r\n raise FileNotFoundError(\"no checkpoint found at '{}'\".format(args.checkpoint))\r\n\r\n # Save the result\r\n if not args.best:\r\n model_path = os.path.splitext(os.path.basename(args.checkpoint))\r\n args.logFile = 'log_test_' + model_path[0] + '.txt'\r\n logFileLoc = os.path.join(os.path.dirname(args.checkpoint), args.logFile)\r\n else:\r\n args.logFile = 'log_test_' + 'best' + str(index) + '.txt'\r\n logFileLoc = os.path.join(os.path.dirname(args.checkpoint), args.logFile)\r\n\r\n # Save the result\r\n if os.path.isfile(logFileLoc):\r\n logger = open(logFileLoc, 'a')\r\n else:\r\n logger = open(logFileLoc, 'w')\r\n logger.write(\"Mean IoU: %.4f\" % mIOU_val)\r\n logger.write(\"\\nPer class IoU: \")\r\n for i in range(len(per_class_iu)):\r\n logger.write(\"%.4f\\t\" % per_class_iu[i])\r\n logger.flush()\r\n logger.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = ArgumentParser()\r\n parser.add_argument('--model', default=\"EACNet_ResNet-18-ENC\", help=\"model name: Context Guided Network (CGNet)\")\r\n\r\n parser.add_argument('--dataset', default=\"cityscapes\", help=\"dataset: cityscapes or camvid\")\r\n parser.add_argument('--num_workers', type=int, default=1, help=\"the number of parallel threads\")\r\n parser.add_argument('--batch_size', type=int, default=1,\r\n help=\" the batch_size is set to 1 when evaluating or testing\")\r\n parser.add_argument('--checkpoint', default=\"checkpoint/cityscapes/EACNet_ResNet-18-ENC/bs8_gpu1_train_adam_ohem/model_400.pth\")\r\n parser.add_argument('--eval_num', type=int, default=50)\r\n # parser.add_argument('--checkpoint', type=str,\r\n # default=\"./checkpoint/cityscapes/DABNet_cityscapes.pth\",\r\n # help=\"use the file to load the checkpoint for evaluating or testing \")\r\n parser.add_argument('--save_seg_dir', type=str, default=\"./result/\",\r\n help=\"saving path of prediction result\")\r\n parser.add_argument('--best', action='store_true', default=True, help=\"Get the best result 
among last few checkpoints\")\r\n parser.add_argument('--save', action='store_true', default=False, help=\"Save the predicted image\")\r\n parser.add_argument('--cuda', default=True, help=\"run on CPU or GPU\")\r\n parser.add_argument(\"--gpus\", default=\"0\", type=str, help=\"gpu ids (default: 0)\")\r\n args = parser.parse_args()\r\n\r\n args.save_seg_dir = os.path.join(args.save_seg_dir, args.dataset, args.model)\r\n\r\n if args.dataset == 'cityscapes':\r\n args.classes = 19\r\n elif args.dataset == 'camvid':\r\n args.classes = 11\r\n else:\r\n raise NotImplementedError(\r\n \"This repository now supports two datasets: cityscapes and camvid, %s is not included\" % args.dataset)\r\n\r\n eval_model(args)\r\n" ]
[ [ "numpy.max", "torch.cuda.synchronize", "torch.autograd.Variable", "torch.no_grad", "torch.cuda.is_available", "numpy.argmax", "torch.load" ] ]
tugot17/Swarm-Algorithms
[ "93c50485a7e3fab9d2dd8bdb8b9b47b46e012085" ]
[ "swarm_algorithms/aaa.py" ]
[ "from swarm_algorithms.abstract_swarm_algorithm import AbstractSwarmAlgorithm\nimport numpy as np\n\nfrom swarm_algorithms.particle import Particle\n\n\nclass AAA(AbstractSwarmAlgorithm):\n # Hyperparameters\n w_min, w_max = np.pi / 2, 3 * np.pi / 2\n\n a_min, a_max = 0., 2 * np.pi\n p_lo, p_hi = -15., 15.\n\n survival_coeff = 0.7\n s_sigma = 0.01\n\n\n def __init__(self, optimised_function, number_of_agents):\n super().__init__(optimised_function, number_of_agents)\n\n # Initialize particles\n self.particles = [Particle(position=np.random.uniform(self.p_lo, self.p_hi, 2))\n for _ in range(self.number_of_agents)]\n\n # Update the swarm best known position\n self.best_swarm_score = np.inf\n for particle in self.particles:\n particle.best_score = self.optimized_function(particle.position)\n if particle.best_score < self.best_swarm_score:\n self.best_swarm_score = particle.best_score\n self.best_solution = particle.best_known_position\n\n def get_best_global_solution(self):\n return self.best_solution\n\n def step(self):\n r_max = np.mean([np.sqrt((p1.position - p2.position) ** 2) for p1 in self.particles for p2 in self.particles])/2.\n\n w = np.random.uniform(self.w_min, self.w_max)\n for particle in self.particles:\n a = np.random.uniform(self.a_min, self.a_max)\n r = np.random.uniform(0., r_max)\n\n v1 = np.array([np.cos(a), np.sin(a)]) * r\n v2 = np.array([np.cos(w + a - np.pi), np.sin(w + a - np.pi)]) * r\n\n particle.position = particle.position + v1 + v2\n\n #starvation\n energy = [(particle, -self.optimized_function(particle.position)) for particle in self.particles]\n energy = list(sorted(energy, key=lambda x: -x[1]))\n survived = [p for p, _ in energy[:int(self.survival_coeff * len(energy))]]\n\n self.particles = survived + [Particle(survived[i].position + np.random.normal(0., self.s_sigma, survived[i].position.shape))\n for i in range(len(self.particles) - len(survived))]\n\n for particle in self.particles:\n particle_new_score = self.optimized_function(particle.position)\n if particle_new_score <= particle.best_score:\n particle.best_score = particle_new_score\n particle.best_known_position = np.copy(particle.position)\n if particle.best_score <= self.best_swarm_score:\n self.best_swarm_score = particle.best_score\n self.best_solution = particle.best_known_position" ]
[ [ "numpy.random.normal", "numpy.sin", "numpy.copy", "numpy.random.uniform", "numpy.sqrt", "numpy.cos" ] ]
gbup-group/EAN-efficient-attention-network
[ "ac9c049158873836e1c239fc35f65d4b79274b12" ]
[ "search_imagent/EAN_search.py" ]
[ "from __future__ import print_function\n\nimport os\nimport shutil\nimport time\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport imagenet_network as models\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom utils import Logger, AverageMeter, accuracy, mkdir_p, savefig\nimport numpy as np\nfrom train_imagenet_ensemble_subset import train, test\n\nimport argparse\nimport tools\nimport math\nimport copy\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='NAS')\n# Datasets\nparser.add_argument('-data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet18)')\nparser.add_argument('-j', '--workers', default=8, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=160, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('--validate_size', type=int, default=0, help='')\nparser.add_argument('--manualSeed', type=int, help='manual seed')\nparser.add_argument('--checkpoint', default='', type=str, help='checkpoint')\n\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n\nargs = parser.parse_args()\nstate = {k: v for k, v in args._get_kwargs()}\nprint(state)\n\n\nuse_cuda = torch.cuda.is_available()\n# Random seed\nif args.manualSeed is None:\n args.manualSeed = random.randint(1, 10000)\n\nnp.random.seed(args.manualSeed)\nrandom.seed(args.manualSeed)\ntorch.manual_seed(args.manualSeed)\n\nif use_cuda:\n torch.cuda.manual_seed_all(args.manualSeed)\n\nhyperparams = {}\nhyperparams['controller_max_step'] = 300\nhyperparams['ema_baseline_decay'] = 0.95\nhyperparams['controller_lr'] = 5e-2\nhyperparams['controller_grad_clip'] = 0\nhyperparams['checkpoint'] = args.checkpoint\n\nif args.arch == 'forward_config_share_sge_resnet50' or 'forward_dia_fbresnet50':\n config_limit = (3, 4, 6, 3)\n total_num_blocks = sum(config_limit)\nelse:\n num_blocks = None\n total_num_blocks = None\n\nprint(hyperparams)\n\nnp.random.seed(args.manualSeed)\nrandom.seed(args.manualSeed)\ntorch.manual_seed(args.manualSeed)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\ntorch.cuda.manual_seed_all(args.manualSeed)\n\n## build the ckpt directory\nif not 
os.path.isdir(args.checkpoint):\n mkdir_p(args.checkpoint)\n\n## save the hyperparams\nwith open(args.checkpoint + \"/Config.txt\", 'w+') as f:\n for (k, v) in args._get_kwargs():\n f.write(k + ' : ' + str(v) + '\\n')\n for v in hyperparams:\n f.write(v + ' : ' + str(hyperparams[v]) + '\\n')\n\n#replaybuffer for ppo\nclass ReplayBuffer(object):\n def __init__(self, max_size):\n self.max_size = max_size\n self.memory_actions = []\n self.memory_rewards = []\n self.memory_probs = []\n self.memory_steps = []\n\n def num_buffers(self):\n return len(self.memory_actions)\n\n def add_new(self, action, reward, prob, step):\n self.memory_actions.append(action)\n self.memory_rewards.append(reward)\n self.memory_probs.append(prob)\n self.memory_steps.append(step)\n if len(self.memory_rewards)>self.max_size:\n self.memory_actions.pop(0)\n self.memory_rewards.pop(0)\n self.memory_probs.pop(0)\n self.memory_steps.pop(0)\n\n def sample(self, num):\n rnd_choice = np.random.choice(np.arange(len(self.memory_steps)), size=num, replace=False) \n sampled_actions = [self.memory_actions[i] for i in rnd_choice.tolist()]\n sampled_rewards = [self.memory_rewards[i] for i in rnd_choice.tolist()]\n sampled_probs = [self.memory_probs[i] for i in rnd_choice.tolist()]\n sampled_steps = [self.memory_steps[i] for i in rnd_choice.tolist()]\n return sampled_actions, sampled_rewards, sampled_probs, sampled_steps\n\n#rnd\nclass RND_fix(torch.nn.Module):\n def __init__(self):\n torch.nn.Module.__init__(self)\n\n self.num_blocks = total_num_blocks\n self.input_size = self.num_blocks\n self.hidden_size = 32\n self.output_scale = 4\n self.output_size = self.output_scale * self.num_blocks\n\n self.rnd_fix = nn.Sequential(\n nn.Linear(self.input_size,self.hidden_size),\n nn.ReLU(inplace=True),\n nn.Linear(self.hidden_size,self.hidden_size),\n nn.ReLU(inplace=True),\n nn.Linear(self.hidden_size,self.output_size)\n )\n\n def forward(self,x):\n logits = self.rnd_fix(x)\n return logits\n\nclass RND_learn(torch.nn.Module):\n def __init__(self):\n torch.nn.Module.__init__(self)\n\n self.num_blocks = total_num_blocks\n self.input_size = self.num_blocks\n self.hidden_size = 32\n self.output_scale = 4\n self.output_size = self.output_scale * self.num_blocks\n\n self.rnd_fix = nn.Sequential(\n nn.Linear(self.input_size,self.hidden_size),\n nn.ReLU(inplace=True),\n nn.Linear(self.hidden_size,self.output_size)\n )\n\n def forward(self,x):\n logits = self.rnd_fix(x)\n return logits\n\nclass Controller(torch.nn.Module):\n \"\"\"Based on\n https://github.com/pytorch/examples/blob/master/word_language_model/model.py\n Base the controller RNN on the GRU from:\n https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py\n \"\"\"\n def __init__(self):\n torch.nn.Module.__init__(self)\n\n self.num_blocks = total_num_blocks\n self.softmax_temperature = 5.0\n self.tanh_c = 2.5\n self.mode = True\n\n\n self.input_size = 20\n self.hidden_size = 50\n self.output_size = self.num_blocks * 2\n\n self._fc_controller = nn.Sequential(\n nn.Linear(self.input_size,self.hidden_size),\n nn.ReLU(inplace=True),\n nn.Linear(self.hidden_size,self.output_size)\n )\n\n\n def forward(self,x):\n logits = self._fc_controller(x)\n\n logits /= self.softmax_temperature\n\n # exploration # ??\n if self.mode == 'train':\n logits = (self.tanh_c*F.tanh(logits))\n\n return logits\n\n def sample(self, batch_size=1, replay = None):\n \"\"\"Samples a set of `args.num_blocks` many computational nodes from the\n controller, where each node is made up of an activation 
function, and\n each node except the last also includes a previous node.\n \"\"\"\n\n # [B, L, H]\n inputs = torch.zeros(batch_size, self.input_size).cuda()\n log_probs = []\n actions = []\n\n\n total_logits = self.forward(inputs)\n\n\n if replay == None:\n for block_idx in range(total_num_blocks):\n logits = total_logits[:,(2*block_idx):(2*block_idx+1+1)]\n \n # print(logits.size()) # batch size * 2\n probs = F.softmax(logits, dim=-1) # batch size * 2\n log_prob = F.log_softmax(logits, dim=-1)\n\n action = probs.multinomial(num_samples=1).data\n # print(action.size()) # batch size * 1\n selected_log_prob = log_prob.gather(\n 1, tools.get_variable(action, requires_grad=False))\n # print(selected_log_prob.size()) # batch size * 1\n\n log_probs.append(selected_log_prob[:, 0:1])\n inputs = tools.get_variable(action[:, 0], requires_grad=False)\n actions.append(action[:, 0])\n\n\n return actions, torch.cat(log_probs, 1)\n\n\n\n else:\n\n r_actions, r_rewards, r_probs, r_steps = replay\n replay_actions, replay_rewards, replay_probs, replay_steps = copy.deepcopy(r_actions[0]),\\\n copy.deepcopy(r_rewards[0]), \\\n copy.deepcopy(r_probs[0]), \\\n copy.deepcopy(r_steps[0])\n ratio = []\n log_probs = []\n for block_idx in range(total_num_blocks):\n logits = total_logits[:,(2*block_idx):(2*block_idx+1+1)]\n\n probs = F.softmax(logits, dim=-1)\n log_prob = F.log_softmax(logits, dim=-1)\n\n action = probs.multinomial(num_samples=1).data\n\n val_size = args.validate_size + 1\n\n\n selected_log_prob = log_prob.gather(1, tools.get_variable(replay_actions[:,block_idx].view(1,val_size), requires_grad=False))\n temp_prob = replay_probs[:,block_idx].view(1,val_size)\n prob_ratio = torch.exp(selected_log_prob)/temp_prob\n \n\n ratio.append(prob_ratio)\n log_probs.append(selected_log_prob)\n inputs = tools.get_variable(action[:, 0], requires_grad=False)\n return torch.cat(log_probs, 0), replay_rewards, torch.cat(ratio,0)\n # torch.cat(log_probs, 0) 54*batch\n # torch.cat(ratio,0) 54*batch\n\n\n def init_hidden(self, batch_size):\n zeros = torch.zeros(batch_size, self.controller_hid)\n return (tools.get_variable(zeros, True, requires_grad=False),\n tools.get_variable(zeros.clone(), True, requires_grad=False))\n# reward\ndef get_reward(trainloader, testloader, model, optimizer, criterion, actions, step, hyperparams):\n \"\"\"Computes the perplexity of a single sampled model on a minibatch of\n validation data.\n \"\"\"\n # get the action code\n rewards_list, val_acc_list, val_loss_list, sparse_portion_list = [], [], [], []\n for i in range(actions[0].size(0)):\n binary_code = ''\n for action in actions:\n binary_code = binary_code + str(action[i].item())\n seg = np.cumsum(config_limit)\n code1 = int(binary_code[0:seg[0]],2)\n code2 = int(binary_code[seg[0]:seg[1]],2)\n code3 = int(binary_code[seg[1]:seg[2]],2)\n code4 = int(binary_code[seg[2]:seg[3]],2)\n config = (code1, code2, code3, code4)\n\n # update the model\n if trainloader is not None:\n train_loss, train_acc = train(config, trainloader, model, criterion, optimizer, step)\n val_loss, val_acc = test(config, testloader, model, criterion)\n sparse_portion = sum([i == '0' for i in binary_code])/len(binary_code)\n\n base = 54.0\n R = val_acc + sparse_portion*2 - base\n rewards = R\n rewards_list.append(rewards)\n val_acc_list.append(val_acc)\n val_loss_list.append(val_loss)\n sparse_portion_list.append(sparse_portion)\n return np.row_stack(rewards_list), np.row_stack(val_acc_list), np.row_stack(val_loss_list), np.row_stack(sparse_portion_list)\n\ndef 
get_action_code(actions):\n # get the action code (binary to decimal)\n binary_code = ''\n for action in actions:\n binary_code = binary_code + str(action.item())\n actions_code = int(binary_code, 2)\n return actions_code, binary_code\n\ndef train_controller(Controller, Controller_optim, rnd_fix, rnd_learn, rnd_learn_optim, trainloader, testloader, model, optimizer, criterion, hyperparams):\n \"\"\"\n The controller is updated with a score function gradient estimator\n (i.e., REINFORCE), with the reward being c/valid_ppl, where valid_ppl\n is computed on a minibatch of validation data.\n\n A moving average baseline is used.\n\n The controller is trained for 2000 steps per epoch.\n \"\"\"\n logger = Logger(os.path.join(hyperparams['checkpoint'], 'search_log.txt'), title='')\n logger.set_names(['Loss', 'Baseline', 'Reward', 'Action', 'Binary', 'Valid Loss', 'Valid Acc.', 'Sparse','rnd_loss','p'])\n\n logger_all = Logger(os.path.join(hyperparams['checkpoint'], 'search_log_all_sampled.txt'), title='')\n logger_all.set_names(['Loss', 'Baseline', 'Reward', 'Action', 'Binary', 'Valid Loss', 'Valid Acc.', 'Sparse'])\n \n model_fc = Controller\n model_fc.train()\n\n model_rnd_fix = rnd_fix\n model_rnd_fix.eval()\n model_rnd_learn = rnd_learn\n model_rnd_learn.train()\n\n baseline = None\n total_loss = 0\n buffer = ReplayBuffer(30)\n buffer_sparse = ReplayBuffer(30)\n update_mode = 'online'\n\n for step in range(hyperparams['controller_max_step']):\n print('************************* ('+str(step+1)+'/'+str(hyperparams['controller_max_step'])+')******')\n adjust_learning_rate(optimizer, step, args, hyperparams)\n actions, log_probs = model_fc.sample(replay = None)\n\n #sample N connection for val (for updating theta)\n actions_validate, log_probs_validate = model_fc.sample(batch_size=args.validate_size)\n\n decimal_code_all = []\n binary_code_all = []\n\n # get the action code (binary to decimal)\n actions_code_1, binary_code_1 = get_action_code(actions)\n decimal_code_all.append(actions_code_1)\n binary_code_all.append(binary_code_1)\n\n for i in range(actions_validate[0].size(0)):\n binary_code = ''\n for action in actions_validate:\n binary_code = binary_code + str(action[i].item())\n decimal_code = int(binary_code, 2)\n decimal_code_all.append(decimal_code)\n binary_code_all.append(binary_code)\n\n #get reward (train one \"step\")\n rewards_org, val_acc, val_loss, sparse_portion = get_reward(None, testloader, model, optimizer, criterion, actions, step, hyperparams)\n rewards_validate, val_acc_validate, val_loss_validate, sparse_portion_validate = get_reward(None, testloader, model, optimizer, criterion, actions_validate, step, hyperparams)\n\n val_acc = np.row_stack((val_acc, val_acc_validate))\n val_loss = np.row_stack((val_loss, val_loss_validate))\n #buf_rewards\n rewards = np.row_stack((rewards_org,rewards_validate)) \n \n # #buf_action\n temp_action = torch.cat(actions).view(1,-1)\n temp_actions_validate = torch.cat(actions_validate).view(args.validate_size,-1)\n buf_action = torch.cat((temp_action,temp_actions_validate),0)\n\n # #buf_prob\n temp_prob = torch.exp(log_probs).detach()\n temp_prob_val = torch.exp(log_probs_validate).detach()\n buf_prob = torch.cat((temp_prob,temp_prob_val),0)\n\n\n # #store - buffer\n buffer.add_new(buf_action, rewards, buf_prob, step)\n\n #cal rnd\n val_fix = model_rnd_fix(buf_action.float())\n val_learn = model_rnd_learn(buf_action.float())\n reward_rnd = ((val_fix - val_learn)**2).sum()\n rewards = rewards + reward_rnd.detach().cpu().data.numpy()\n # moving 
average baseline\n if baseline is None:\n baseline = rewards.mean()\n else:\n decay = hyperparams['ema_baseline_decay']\n baseline = decay * baseline + (1 - decay) * rewards.mean()\n\n\n \n adv = rewards - baseline\n adv = adv \n log_probs = torch.cat((log_probs, log_probs_validate), 0)\n loss = -log_probs * tools.get_variable(adv,True,requires_grad=False) \n\n loss = loss.mean(dim = 0, keepdim = True)\n \n \n loss = loss.sum()\n\n # update\n Controller_optim.zero_grad()\n rnd_learn_optim.zero_grad()\n loss.backward()\n reward_rnd.mean().backward()\n if hyperparams['controller_grad_clip'] > 0:\n torch.nn.utils.clip_grad_norm(model_fc.parameters(),\n hyperparams['controller_grad_clip'])\n rnd_learn_optim.step()\n Controller_optim.step()\n\n\n\n log = 'Step: {step}| Loss: {loss:.4f}| Action: {act} |Baseline: {base:.4f}| ' \\\n 'Reward {re:.4f}| Valid Acc {acc:.4f}'.format(loss=loss.item(), base=baseline, act = binary_code,\n re=rewards[0].item(), acc=val_acc[0].item(), step=step)\n print(log)\n logger.append([loss.item(), baseline, rewards[0].item(), actions_code_1, binary_code_1, \\\n val_loss[0].item(), val_acc[0].item(), (binary_code.count('0')/len(binary_code)), reward_rnd.item(), torch.exp(log_probs).mean().item()])\n for i in range(len(binary_code_all)):\n logger_all.append([loss.item(), baseline, rewards[i].item(), decimal_code_all[i], binary_code_all[i],\n val_loss[i].item(), val_acc[i].item(), (binary_code_all[i].count('0') / len(binary_code_all[i]))])\n\n save_checkpoint({\n 'iters': step + 1,\n 'state_dict': model_fc.state_dict(),\n 'optimizer' : Controller_optim.state_dict(),\n }, checkpoint=hyperparams['checkpoint'])\n save_checkpoint({'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict()},\n checkpoint=hyperparams['checkpoint'],\n filename='model.pth.tar')\n\n\n if step >= 10:\n\n # use old data (buff)\n replay = buffer.sample(num = 1)\n log_probs, replay_rewards, prob_ratio = model_fc.sample(replay = replay)\n adv = replay_rewards - baseline\n eps_clip = 0.1\n\n temp_surr1 = (prob_ratio.detach() * log_probs).mm(tools.get_variable(adv,True,requires_grad=False))\n temp_surr2 = (torch.clamp(prob_ratio.detach(), 1-eps_clip, 1+eps_clip) * log_probs).mm(tools.get_variable(adv,True,requires_grad=False))\n surr1 = temp_surr1.sum()/(args.validate_size + 1)\n surr2 = temp_surr2.sum()/(args.validate_size + 1)\n\n loss = 0.1 * -torch.min(surr1, surr2) \n\n\n # update\n Controller_optim.zero_grad()\n loss.backward()\n\n if hyperparams['controller_grad_clip'] > 0:\n torch.nn.utils.clip_grad_norm(model_fc.parameters(),\n hyperparams['controller_grad_clip'])\n Controller_optim.step()\n\n\n\n\n\n\ndef save_checkpoint(state, checkpoint='checkpoint', filename='controller.pth.tar'):\n filepath = os.path.join(checkpoint, filename)\n torch.save(state, filepath)\n\ndef adjust_learning_rate(optimizer, epoch, args, hyperparams):\n lr = 0.5 * args.lr * (math.cos(math.pi * epoch / hyperparams['controller_max_step']) + 1)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\ndef main():\n\n\n # Data loading code\n traindir = os.path.join(args.data, 'train')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n\n val_dataset_from_train = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.Resize(256),\n 
transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ]))\n\n print('train_datatset_len', len(train_dataset))\n\n train_subset = []\n val_subset = []\n # for class_idx in range(1, 1001):\n for class_idx in range(1000):\n class_sample_size = len([e for e, i in enumerate(train_dataset.targets) if i == class_idx])\n class_sample_index = [e for e, i in enumerate(train_dataset.targets) if i == class_idx]\n split = 100\n val_subset.append(class_sample_index[:split])\n train_subset.append(class_sample_index[split:])\n\n flattened_val_subset = [val for sublist in val_subset for val in sublist]\n flattened_train_subset = [val for sublist in train_subset for val in sublist]\n\n print(len(flattened_train_subset))\n print(len(flattened_val_subset))\n print('sum:', len(flattened_val_subset) + len(flattened_train_subset))\n\n train_sampler = SubsetRandomSampler(flattened_train_subset)\n valid_sampler = SubsetRandomSampler(flattened_val_subset)\n\n\n valid_from_train_loader = torch.utils.data.DataLoader(\n val_dataset_from_train, batch_size=args.batch_size, shuffle=(valid_sampler is None),\n num_workers=args.workers, pin_memory=True, sampler=valid_sampler)\n\n model = models.__dict__[args.arch](pretrained=False)\n model = torch.nn.DataParallel(model).cuda()\n\n print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n checkpoint = torch.load(args.resume)\n model.load_state_dict(checkpoint['state_dict'])\n\n controller = Controller().cuda()\n rnd_fix = RND_fix().cuda()\n rnd_learn = RND_learn().cuda()\n\n rnd_learn_optim = torch.optim.Adam(rnd_learn.parameters(), lr=hyperparams['controller_lr'])\n controller_optim = torch.optim.Adam(controller.parameters(), lr=hyperparams['controller_lr'])\n train_controller(controller, controller_optim, rnd_fix, rnd_learn, rnd_learn_optim, None, valid_from_train_loader, model, optimizer, criterion, hyperparams)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.cuda.is_available", "torch.load", "numpy.cumsum", "torch.utils.data.sampler.SubsetRandomSampler", "torch.exp", "torch.nn.CrossEntropyLoss", "torch.nn.DataParallel", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.nn.functional.tanh", "torch.zeros", "torch.cuda.manual_seed_all", "torch.min", "torch.save", "torch.nn.Module.__init__", "torch.nn.functional.log_softmax", "torch.nn.ReLU", "torch.nn.functional.softmax", "numpy.random.seed", "numpy.row_stack" ] ]
S-Yuan137/COMAPreduce
[ "bcb8b171ae3a226b10978e83f85ec72f538466ff" ]
[ "comancpipeline/Analysis/CreateLevel3.py" ]
[ "# Routines for downsampling the level 2 data to wide band chunks for continuum data analysis\n# Data will be cleaned such that it is ready to enter the destriper:\n# 1) From Statistics : Remove atmosphere and baselines \n# 2) From astrocalibration : Apply Jupiter calibration to each feed\n\nimport numpy as np\nimport h5py\nfrom astropy import wcs\nfrom matplotlib import pyplot\nfrom tqdm import tqdm\nfrom scipy import linalg as la\nimport healpy as hp\nfrom comancpipeline.Tools.median_filter import medfilt\nfrom comancpipeline.Tools import binFuncs, stats\n\nfrom comancpipeline.Analysis.BaseClasses import DataStructure\nfrom comancpipeline.Analysis.FocalPlane import FocalPlane\nfrom comancpipeline.Analysis import SourceFitting\nfrom comancpipeline.Analysis import Statistics\nfrom scipy import signal\n\nimport time\nimport os\n\nimport shutil\n\nfrom tqdm import tqdm\n\n__level3_version__='v2'\n\nclass CreateLevel3(DataStructure):\n def __init__(self,level2='level2',level3='level3',output_dir = None,\n channel_mask=None, gain_mask=None, calibration_factors=None, **kwargs):\n \"\"\"\n \"\"\"\n super().__init__(**kwargs)\n self.name = 'CreateLevel3'\n # READ ANY ANCILLARY DATA: MASKS/CALIBRATION FACTORS\n if not isinstance(channel_mask,type(None)):\n self.channelmask = np.load(channel_mask,allow_pickle=True).astype(bool)\n else:\n self.channelmask = None\n\n if not isinstance(gain_mask,type(None)):\n self.gainmask = np.load(gain_mask,allow_pickle=True).astype(bool)\n else:\n self.gainmask = None\n\n if not isinstance(calibration_factors, type(None)):\n self.calfactors = np.load(calibration_factors)\n else:\n self.calfactors = None\n\n self.output_dir = output_dir\n\n self.level2=level2\n self.level3=level3\n\n def __str__(self):\n return \"Creating Level 3\"\n\n def __call__(self,data):\n \"\"\"\n \"\"\"\n\n assert isinstance(data, h5py._hl.files.File), 'Data is not a h5py file structure'\n fname = data.filename.split('/')[-1]\n data_dir = data.filename.split(fname)[0]\n if isinstance(self.output_dir,type(None)):\n self.output_dir = f'{data_dir}/{self.level3}'\n self.outfile = '{}/{}_{}'.format(self.output_dir,self.level3,fname)\n\n self.logger(f' ')\n self.logger(f'{fname}:{self.name}: Starting. 
(overwrite = {self.overwrite})')\n\n comment = self.getComment(data)\n\n if 'Sky nod' in comment:\n return data\n\n if not self.level2 in data.keys():\n self.logger(f'{fname}:{self.name}:Error: No {self.level2} data found?')\n return data\n\n if not 'Statistics' in data[self.level2].keys():\n self.logger(f'{fname}:{self.name}:Error: No {self.level2}/Statistics found?')\n return data\n\n if not 'scan_edges' in data[f'{self.level2}/Statistics'].keys():\n self.logger(f'{fname}:{self.name}:Error: No {self.level2}/Statistics/scan_edges found?')\n return data\n\n \n\n if os.path.exists(self.outfile) & (not self.overwrite):\n self.logger(f'{fname}:{self.name}: {self.level3}_{fname} exists, ignoring (overwrite = {self.overwrite})')\n return data\n \n\n self.logger(f'{fname}:{self.name}: Creating {self.level3} data.')\n self.run(data)\n # Want to ensure the data file is read/write\n data = self.setReadWrite(data)\n\n self.logger(f'{fname}:{self.name}: Writing to {self.outfile}')\n self.write(data)\n self.logger(f'{fname}:{self.name}: Done.')\n\n return data\n\n\n def run(self, d):\n \"\"\"\n Expects a level2 file structure to be passed.\n \"\"\"\n tod_shape = d[f'{self.level2}/averaged_tod'].shape\n\n scan_edges = d[f'{self.level2}/Statistics/scan_edges'][...]\n nchannels = 8\n self.all_tod = np.zeros((tod_shape[0], nchannels, tod_shape[-1])) \n self.all_weights = np.zeros((tod_shape[0], nchannels, tod_shape[-1])) \n frequency = d['level1/spectrometer/frequency'][...]\n frequency = np.mean(np.reshape(frequency,(frequency.shape[0],frequency.shape[1]//16,16)) ,axis=-1).flatten()\n feeds = d['level1/spectrometer/feeds'][...]\n\n # Read in data from each feed\n for index, ifeed in enumerate(range(tod_shape[0])):\n if feeds[ifeed] == 20:\n continue\n todin = d[f'{self.level2}/averaged_tod'][ifeed,:,:,:]\n az = d['level1/spectrometer/pixel_pointing/pixel_az'][ifeed,:]\n el = d['level1/spectrometer/pixel_pointing/pixel_el'][ifeed,:]\n\n # Statistics for this feed \n medfilt_coefficient = d[f'{self.level2}/Statistics/filter_coefficients'][ifeed,...]\n atmos = d[f'{self.level2}/Statistics/atmos'][ifeed,...]\n atmos_coefficient = d[f'{self.level2}/Statistics/atmos_coefficients'][ifeed,...]\n wnoise_auto = d[f'{self.level2}/Statistics/wnoise_auto'][ifeed,...]\n\n # Create gain masks/channel masks/calfactors\n if isinstance(self.gainmask, type(None)):\n self.gainmask = np.zeros((tod_shape[0],tod_shape[1],tod_shape[2])).astype(bool)\n if isinstance(self.channelmask, type(None)):\n self.channelmask = np.zeros((tod_shape[0],tod_shape[1],tod_shape[2])).astype(bool)\n if isinstance(self.calfactors, type(None)):\n self.calfactors = np.ones((tod_shape[0],tod_shape[1],tod_shape[2])).astype(bool)\n\n self.channelmask = self.channelmask | self.gainmask\n\n\n\n # then the data for each scan\n last = 0\n for iscan,(start,end) in enumerate(scan_edges):\n median_filter = d[f'{self.level2}/Statistics/FilterTod_Scan{iscan:02d}'][ifeed,...]\n N = int((end-start))\n end = start+N\n tod = todin[...,start:end]\n\n # Subtract atmospheric fluctuations per channel\n for iband in range(4):\n for ichannel in range(64):\n if self.channelmask[ifeed,iband,ichannel] == False:\n amdl = Statistics.AtmosGroundModel(atmos[iband,iscan],az[start:end],el[start:end]) *\\\n atmos_coefficient[iband,ichannel,iscan,0]\n tod[iband,ichannel,:] -= median_filter[iband,:N] * medfilt_coefficient[iband,ichannel,iscan,0]\n tod[iband,ichannel,:] -= amdl\n tod[iband,ichannel,:] -= np.nanmedian(tod[iband,ichannel,:])\n tod /= self.calfactors[ifeed,:,:,None] # 
Calibrate to Jupiter temperature scale\n # Then average together the channels\n wnoise = wnoise_auto[:,:,iscan,:]\n channels = (self.channelmask[ifeed].flatten() == False)\n channels = np.where((channels))[0]\n\n tod = np.reshape(tod,(tod.shape[0]*tod.shape[1], tod.shape[2]))\n wnoise = np.reshape(wnoise,(wnoise.shape[0]*wnoise.shape[1], wnoise.shape[2]))\n\n nancheck = np.sum(tod[channels,:],axis=1)\n channels = channels[np.isfinite(nancheck) & (nancheck != 0)]\n nancheck = np.sum(wnoise[channels,:],axis=1)\n channels = channels[np.isfinite(nancheck) & (nancheck != 0)]\n\n tod, wnoise = tod[channels,:], wnoise[channels,:]\n freq = frequency[channels]\n\n for ichan, (flow,fhigh) in enumerate(zip([26,27,28,29,30,31,32],[28,29,30,31,32,33,34])):\n sel = np.where(((freq >= flow) & (freq < fhigh)))[0]\n top = np.sum(tod[sel,:]/wnoise[sel,:]**2,axis=0)\n bot = np.sum(1/wnoise[sel,:]**2,axis=0)\n\n self.all_tod[index,ichan,start:end] = top/bot\n self.all_weights[index,ichan,start:end] = bot\n\n def write(self,data):\n \"\"\"\n Write out the averaged TOD to a Level2 continuum file with an external link to the original level 1 data\n \"\"\" \n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n # We will store these in a separate file and link them to the level2s\n fname = data.filename.split('/')[-1]\n if os.path.exists(self.outfile):\n os.remove(self.outfile)\n output = h5py.File(self.outfile,'a')\n\n # Set permissions and group\n os.chmod(self.outfile,0o664)\n shutil.chown(self.outfile, group='comap')\n\n # Store datasets in root\n dnames = ['tod','weights']\n dsets = [self.all_tod, self.all_weights]\n\n for (dname, dset) in zip(dnames, dsets):\n if dname in output:\n del output[dname]\n output.create_dataset(dname, data=dset)\n\n output.attrs['version'] = __level3_version__\n \n output.close()\n \n if self.level3 in data.keys():\n del data[self.level3]\n data[self.level3] = h5py.ExternalLink(self.outfile,'/')\n" ]
[ [ "numpy.reshape", "numpy.zeros", "numpy.sum", "numpy.ones", "numpy.load", "numpy.where", "numpy.isfinite", "numpy.nanmedian" ] ]
m4ln/image_processing_scripts
[ "3274e01869be9c7a845a4bb76aa02b3cf0023b50" ]
[ "util/print_numpy.py" ]
[ "import numpy as np\n\n\ndef print_numpy(array, shape=True, val=True):\n \"\"\"\n Prints the mean, min, max, median, std, and size of a numpy array\n\n Args:\n array: numpy array\n shape: if True prints the shape of the numpy array\n val: if True prints the values of the numpy array\n\n Returns:\n\n \"\"\"\n\n array = array.astype(np.float64)\n\n if shape:\n print('shape =', array.shape)\n if val:\n array = array.flatten()\n print(\n 'mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n np.mean(array), np.min(array), np.max(array), np.median(array),\n np.std(array)))\n" ]
[ [ "numpy.max", "numpy.median", "numpy.min", "numpy.mean", "numpy.std" ] ]
eigenholser/image-highlight
[ "61705dd797605c2cb3eac74c5e5d62a94c54a68e" ]
[ "highlight.py" ]
[ "from __future__ import print_function\nimport argparse\nimport json\nimport logging\nimport numpy as np\nimport os\nfrom PIL import Image\nimport sys\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Highlighter(object):\n \"\"\"\n Methods for applying highlights to image from JSON description.\n \"\"\"\n\n def __init__(self, image_filename, highlights_filename):\n self.image_filename = image_filename\n self.image = Image.open(image_filename).convert('RGB')\n\n with open(highlights_filename) as f:\n self.highlights = json.load(f)\n\n self.render_highlights()\n self.display_highlighted_image()\n self.save_highlighted_image()\n\n def render_highlights(self):\n \"\"\"\n Apply highlighting to all described areas.\n \"\"\"\n for highlight in self.highlights:\n box = self.compute_highlight_corners(highlight)\n region = self.image.crop(box)\n region.load()\n logger.debug(\"{}: {} - {} - {}\".format(highlight['comment'],\n region.format, region.size, region.mode))\n color = self.compute_normalized_color(highlight)\n data2 = self.color_transform(region, color)\n img2 = Image.fromarray(data2, mode='RGB')\n self.image.paste(img2, box)\n\n def display_highlighted_image(self):\n \"\"\"\n Display highlighted image.\n \"\"\"\n self.image.load()\n self.image.show()\n\n def save_highlighted_image(self):\n \"\"\"\n Save image.\n \"\"\"\n self.image.save(self.compute_highlighted_filename())\n\n def compute_highlighted_filename(self):\n \"\"\"\n Compute new filename.\n \"\"\"\n (image_filename_base, image_filename_ext) = os.path.splitext(\n self.image_filename)\n image_filename_new = \"{}_HIGHLIGHTED{}\".format(\n image_filename_base, image_filename_ext)\n logger.debug(\"Writing highlighted image to: {}\".format(\n image_filename_new))\n return image_filename_new\n\n def compute_highlight_corners(self, highlight):\n \"\"\"\n Given x, y, width, height, compute upper left and lower right corners.\n \"\"\"\n x1 = highlight['x']\n y1 = highlight['y']\n x2 = x1 + highlight['width']\n y2 = y1 + highlight['height']\n return (x1, y1, x2, y2)\n\n def compute_normalized_color(self, highlight):\n \"\"\"\n Compute normalized colors from hex colors.\n \"\"\"\n (r, g, b) = (0, 0, 0)\n if 'color' in highlight:\n color = highlight['color']\n # TODO: Support 3 character colors?\n if len(color) != 6:\n raise Exception('Requires hex RGB colors in format 112233.')\n r = int(\"0x{}\".format(\"\".join(list(color)[:2])), 16) / 255.0\n g = int(\"0x{}\".format(\"\".join(list(color)[2:4])), 16) / 255.0\n b = int(\"0x{}\".format(\"\".join(list(color)[4:6])), 16) / 255.0\n return [r, g, b]\n\n def normalize(self, im):\n \"\"\"\n Normalize color values.\n \"\"\"\n return -np.log(1/((1 + im)/257) - 1)\n\n def denormalize(self, im):\n \"\"\"\n Restore color values.\n \"\"\"\n return (1 + 1/(np.exp(-im) + 1) * 257).astype(\"uint8\")\n\n def color_transform(self, region, color):\n \"\"\"\n Apply color highlighting transform to image.\n \"\"\"\n data = np.array(region)\n data_normed = self.normalize(data)\n data_xform = np.multiply(data_normed, np.array(color))\n data2 = self.denormalize(data_xform)\n return data2\n\n\nclass CustomArgumentParser(argparse.ArgumentParser): # pragma: no cover\n \"\"\"\n Custom argparser.\n \"\"\"\n def error(self, message):\n sys.stderr.write('error: {}\\n'.format(message))\n self.print_help()\n sys.exit(2)\n\n def usage_message(self):\n \"\"\"\n Print a message and exit.\n \"\"\"\n sys.stderr.write(\"error: Missing required arguments.\\n\")\n self.print_help()\n sys.exit(3)\n\n\ndef main():\n \"\"\"\n 
Parse command-line arguments. Initiate file processing.\n \"\"\"\n parser = CustomArgumentParser()\n parser.add_argument(\"-i\", \"--image\",\n help=\"Source file to highlight.\")\n parser.add_argument(\"-d\", \"--highlights\",\n help=\"JSON highlights description filename.\")\n parser.add_argument(\"-v\", \"--verbose\", help=\"Log level to DEBUG.\",\n action=\"store_true\")\n args = parser.parse_args()\n\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n error = False\n\n image_filename = args.image\n highlights_filename = args.highlights\n if not image_filename or not highlights_filename:\n error = True\n logger.error(\"Invalid arguments.\")\n\n if error:\n logger.error(\"Exiting due to errors.\")\n parser.usage_message()\n sys.exit(1)\n else:\n highlighter = Highlighter(image_filename, highlights_filename)\n\n\nif __name__ == '__main__': # pragma: no cover\n main()\n\n" ]
[ [ "numpy.array", "numpy.exp", "numpy.log" ] ]
charan223/alstm
[ "33fe910a6c170547debbd5cbed254594183b8a0b" ]
[ "examples/model.py" ]
[ "\"\"\"PyTorch Language Model\n\nGeneric PyTorch Language Model that can runs on top of an RNN class.\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom embed_regularize import embedded_dropout\nfrom locked_dropout import LockedDropout\nfrom weight_drop import WeightDrop\n\n\ndef get_model(args):\n \"\"\"Return the specified model\"\"\"\n return RNNModel(\n rnn_type=args.model,\n ntoken=args.ntokens,\n ninp=args.emsize,\n nhid=args.nhid,\n npar=args.npar,\n nlayers=args.nlayers,\n dropouth=args.dropouth,\n dropouti=args.dropouti,\n dropoute=args.dropoute,\n dropouto=args.dropouto,\n dropouta=args.dropouta,\n tie_weights=args.tied\n )\n\n\nclass RNNModel(nn.Module):\n\n \"\"\"RNN Language Model\n\n Container module with an encoder, a recurrent module, and a decoder.\n \"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, npar, nlayers, dropoutl=0,\n dropouto=0.6, dropouth=0.5, dropouti=0.5, dropoute=0.1,\n dropouta=0.1, wdrop=0, tie_weights=False):\n super(RNNModel, self).__init__()\n self.rnn_type = rnn_type\n self.ntoken = ntoken\n self.ninp = ninp\n self.nhid = nhid\n self.npar = npar\n self.nlayers = nlayers\n self.dropoutl = dropoutl\n self.dropouti = dropouti\n self.dropouth = dropouth\n self.dropoute = dropoute\n self.dropouto = dropouto\n self.dropouta = dropouta\n self.wdrop = wdrop\n self.tie_weights = tie_weights\n\n self.lockdrop = LockedDropout()\n self.edrop = nn.Dropout(dropoute)\n self.idrop = nn.Dropout(dropouti)\n self.hdrop = nn.Dropout(dropouth)\n self.ldrop = nn.Dropout(dropoutl)\n self.odrop = nn.Dropout(dropouto)\n self.encoder = nn.Embedding(ntoken, ninp)\n\n if rnn_type == 'ALSTM':\n from alstm import aLSTM\n self.rnns = aLSTM(ninp, nhid, npar, ninp, nlayers,\n dropout_alstm=dropouth, dropout_adapt=dropouta)\n\n elif rnn_type == 'LSTM':\n self.rnns = [nn.LSTM(ninp if l == 0 else nhid,\n nhid if l != nlayers - 1 else ninp, 1,\n dropout=0)\n for l in range(nlayers)]\n if wdrop:\n self.rnns = [WeightDrop(rnn, ['weight_hh_l0'], dropout=wdrop)\n for rnn in self.rnns]\n\n elif rnn_type == 'GRU':\n self.rnns = [nn.GRU(ninp if l == 0 else nhid,\n nhid if l != nlayers - 1 else ninp, 1,\n dropout=0)\n for l in range(nlayers)]\n if wdrop:\n self.rnns = [WeightDrop(rnn, ['weight_hh_l0'], dropout=wdrop)\n for rnn in self.rnns]\n\n elif rnn_type == 'QRNN':\n from torchqrnn import QRNNLayer\n self.rnns = [QRNNLayer(input_size=ninp if l == 0 else nhid,\n hidden_size=nhid if l != nlayers - 1 else ninp,\n save_prev_x=False, zoneout=0,\n window=1 if l == 0 else 1, output_gate=True)\n for l in range(nlayers)]\n else:\n raise NotImplementedError(\"Model type not implemented\")\n\n if rnn_type != 'ALSTM':\n self.rnns = torch.nn.ModuleList(self.rnns)\n\n self.decoder = nn.Linear(nhid, ntoken)\n\n # Optionally tie weights as in:\n # \"Using the Output Embedding to Improve Language Models\" (Press & Wolf 2016)\n # https://arxiv.org/abs/1608.05859\n # and\n # \"Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling\" (Inan et al. 
2016)\n # https://arxiv.org/abs/1611.01462\n if tie_weights:\n self.decoder.weight = self.encoder.weight\n\n self.init_weights()\n\n def reset(self):\n if self.rnn_type == 'QRNN':\n [r.reset() for r in self.rnns]\n\n def init_weights(self):\n \"\"\"Initialize Embedding weights\"\"\"\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.fill_(0)\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, input, hidden, return_h=False):\n \"\"\"Run Language model on given input\"\"\"\n emb = embedded_dropout(self.encoder, input, dropout=self.dropoute if self.training else 0)\n emb = self.lockdrop(emb, self.dropouti)\n\n output, hidden, raw_outputs, outputs = self._forward(emb, hidden)\n\n output = self.lockdrop(output, self.dropouto)\n outputs.append(output)\n\n decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))\n result = decoded.view(output.size(0), output.size(1), decoded.size(1))\n\n if return_h:\n return result, hidden, raw_outputs, outputs\n return result, hidden\n\n def _forward(self, emb, hidden):\n if self.rnn_type == 'ALSTM':\n output, hidden, output_all, output_all_raw = self.rnns(emb, hidden, return_all=True)\n return output, hidden, output_all_raw, output_all\n\n # Original AWD-LSTM code\n raw_output = emb\n new_hidden = []\n raw_outputs = []\n outputs = []\n for l, rnn in enumerate(self.rnns):\n raw_output, new_h = rnn(raw_output, hidden[l])\n new_hidden.append(new_h)\n raw_outputs.append(raw_output)\n if l != self.nlayers - 1:\n raw_output = self.lockdrop(raw_output, self.dropouth)\n outputs.append(raw_output)\n hidden = new_hidden\n return output, hidden, raw_outputs, outputs\n\n def init_hidden(self, bsz):\n if self.rnn_type == 'ALSM':\n return None\n\n weight = next(self.parameters()).data\n if self.rnn_type == 'LSTM':\n return [(Variable(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else self.ninp).zero_()),\n Variable(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else self.ninp).zero_()))\n for l in range(self.nlayers)]\n elif self.rnn_type == 'QRNN' or self.rnn_type == 'GRU' or self.rnn_type == 'RNN':\n return [Variable(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else self.ninp).zero_())\n for l in range(self.nlayers)]\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.LSTM", "torch.nn.GRU", "torch.nn.ModuleList", "torch.nn.Embedding" ] ]
KiUngSong/Generative-Models
[ "a86edb7a29daf97239f7bdde72cd9c9bffbc9c6c" ]
[ "Vanilla GAN & cGAN/GAN_pytorch.py" ]
[ "import torch\r\nimport numpy as np\r\nfrom torch import nn\r\nfrom tqdm.notebook import tqdm\r\n\r\n\r\nclass Generator(nn.Module):\r\n def __init__(self, img_shape, dim_latent, g_dims=[128,256,512,1024]):\r\n super(Generator, self).__init__()\r\n self.dim_latent = int(dim_latent)\r\n self.img_shape = img_shape\r\n\r\n def block(in_feat, out_feat, normalize=True):\r\n layers = [nn.Linear(in_feat, out_feat)]\r\n if normalize:\r\n layers.append(nn.BatchNorm1d(out_feat, 0.8))\r\n layers.append(nn.LeakyReLU(0.2, inplace=True))\r\n return layers\r\n\r\n self._blocks = []\r\n self._blocks += block(self.dim_latent, g_dims[0], normalize=False)\r\n for i in range(len(g_dims)-1):\r\n self._blocks += block(g_dims[i], g_dims[i+1])\r\n self._blocks = np.reshape(self._blocks, -1).tolist()\r\n self.total_block = nn.Sequential(*self._blocks)\r\n\r\n self.fc = nn.Sequential(\r\n nn.Linear(g_dims[-1], int(np.prod(img_shape))),\r\n nn.Tanh())\r\n\r\n def forward(self, x):\r\n x = self.total_block(x)\r\n img = self.fc(x)\r\n img = img.view(img.size(0), *self.img_shape)\r\n return img\r\n\r\n\r\nclass Discriminator(nn.Module):\r\n def __init__(self, img_shape, d_dims=[512, 256]):\r\n super(Discriminator, self).__init__()\r\n self.img_shape = img_shape\r\n\r\n def block(in_feat, out_feat):\r\n layers = [nn.Linear(in_feat, out_feat)]\r\n layers.append(nn.LeakyReLU(0.2, inplace=True))\r\n return layers\r\n\r\n self._blocks = []\r\n self._blocks += block(int(np.prod(self.img_shape)), d_dims[0])\r\n for i in range(len(d_dims)-1):\r\n self._blocks += block(d_dims[i], d_dims[i+1])\r\n self.total_block = nn.Sequential(*self._blocks)\r\n\r\n self.fc = nn.Sequential(nn.Linear(d_dims[-1], 1), nn.Sigmoid())\r\n\r\n def forward(self, x):\r\n x = x.view(x.size(0), -1)\r\n x = self.total_block(x)\r\n pred = self.fc(x)\r\n return pred\r\n\r\n\r\ndef Train(epoch, dataloader, device, G, D, optimizer_G, optimizer_D):\r\n Tensor = torch.FloatTensor\r\n dim_latent = G.dim_latent\r\n adversarial_loss = torch.nn.BCELoss()\r\n\r\n for j in tqdm(range(epoch)):\r\n for _, (imgs, labels) in enumerate(dataloader):\r\n batch_size = imgs.size(0)\r\n \r\n # Adversarial ground truths\r\n y_valid = torch.ones(batch_size, 1).to(device)\r\n y_fake = torch.zeros(batch_size, 1).to(device)\r\n \r\n # Configure input\r\n real_imgs = imgs.type(Tensor).to(device)\r\n \r\n # Sample noise as generator input\r\n z = torch.rand(batch_size, dim_latent).to(device)\r\n\r\n # Generate a batch of images\r\n gen_imgs = G(z)\r\n\r\n # Train Discriminator\r\n optimizer_D.zero_grad()\r\n real_loss = adversarial_loss(D(real_imgs), y_valid)\r\n fake_loss = adversarial_loss(D(gen_imgs.detach()), y_fake)\r\n d_loss = real_loss + fake_loss\r\n d_loss.backward()\r\n optimizer_D.step()\r\n\r\n # Train Generator : \r\n optimizer_G.zero_grad()\r\n g_loss = adversarial_loss(D(gen_imgs), y_valid)\r\n g_loss.backward()\r\n optimizer_G.step()\r\n\r\n if (j+1)%5 == 0:\r\n print(f\"Epoch {j+1} / D loss: {d_loss.item():.4f} / G loss: {g_loss.item():.4f}\")\r\n " ]
[ [ "torch.nn.Linear", "torch.zeros", "torch.rand", "numpy.reshape", "torch.nn.Sigmoid", "torch.nn.Sequential", "torch.nn.Tanh", "torch.nn.LeakyReLU", "torch.ones", "numpy.prod", "torch.nn.BatchNorm1d", "torch.nn.BCELoss" ] ]
kevinyamauchi/skan
[ "df59b90279d14f31d767d00d74fcac2b763c6975" ]
[ "skan/csr.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom scipy import sparse, ndimage as ndi\nfrom scipy.sparse import csgraph\nfrom scipy import spatial\nimport numba\n\nfrom .nputil import pad, raveled_steps_to_neighbors\n\n\n## NBGraph and Numba-based implementation\n\ncsr_spec = [\n ('indptr', numba.int32[:]),\n ('indices', numba.int32[:]),\n ('data', numba.float64[:]),\n ('shape', numba.int32[:]),\n ('node_properties', numba.float64[:])\n]\n\[email protected](csr_spec)\nclass NBGraph:\n def __init__(self, indptr, indices, data, shape, node_props):\n self.indptr = indptr\n self.indices = indices\n self.data = data\n self.shape = shape\n self.node_properties = node_props\n\n def edge(self, i, j):\n return _csrget(self.indices, self.indptr, self.data, i, j)\n\n def neighbors(self, row):\n loc, stop = self.indptr[row], self.indptr[row+1]\n return self.indices[loc:stop]\n\n @property\n def has_node_props(self):\n return self.node_properties.strides != (0,)\n\n\ndef csr_to_nbgraph(csr, node_props=None):\n if node_props is None:\n node_props = np.broadcast_to(1., csr.shape[0])\n node_props.flags.writeable = True\n return NBGraph(csr.indptr, csr.indices, csr.data,\n np.array(csr.shape, dtype=np.int32), node_props)\n\n\ndef _pixel_graph(image, steps, distances, num_edges, height=None):\n row = np.empty(num_edges, dtype=int)\n col = np.empty(num_edges, dtype=int)\n data = np.empty(num_edges, dtype=float)\n if height is None:\n k = _write_pixel_graph(image, steps, distances, row, col, data)\n else:\n k = _write_pixel_graph_height(image, height, steps, distances,\n row, col, data)\n graph = sparse.coo_matrix((data[:k], (row[:k], col[:k]))).tocsr()\n return graph\n\n\[email protected](nopython=True, cache=True, nogil=True)\ndef _write_pixel_graph(image, steps, distances, row, col, data):\n \"\"\"Step over `image` to build a graph of nonzero pixel neighbors.\n\n Parameters\n ----------\n image : int array\n The input image.\n steps : int array, shape (N,)\n The raveled index steps to find a pixel's neighbors in `image`.\n distances : float array, shape (N,)\n The euclidean distance from a pixel to its corresponding\n neighbor in `steps`.\n row : int array\n Output array to be filled with the \"center\" pixel IDs.\n col : int array\n Output array to be filled with the \"neighbor\" pixel IDs.\n data : float array\n Output array to be filled with the distances from center to\n neighbor pixels.\n\n Returns\n -------\n k : int\n The number of entries written to row, col, and data.\n\n Notes\n -----\n No size or bounds checking is performed. 
Users should ensure that\n - No index in `indices` falls on any edge of `image` (or the\n neighbor computation will fail or segfault).\n - The `steps` and `distances` arrays have the same shape.\n - The `row`, `col`, `data` are long enough to hold all of the\n edges.\n \"\"\"\n image = image.ravel()\n n_neighbors = steps.size\n start_idx = np.max(steps)\n end_idx = image.size + np.min(steps)\n k = 0\n for i in range(start_idx, end_idx + 1):\n if image[i] != 0:\n for j in range(n_neighbors):\n n = steps[j] + i\n if image[n] != 0 and image[n] != image[i]:\n row[k] = image[i]\n col[k] = image[n]\n data[k] = distances[j]\n k += 1\n return k\n\[email protected](nopython=True, cache=True, nogil=True)\ndef _write_pixel_graph_height(image, height, steps, distances, row, col, data):\n \"\"\"Step over `image` to build a graph of nonzero pixel neighbors.\n\n Parameters\n ----------\n image : int array\n The input image.\n height : float array, same shape as `image`\n This is taken to be a height map along an additional\n dimension (in addition to the image dimensions), so the distance\n between two neighbors `i` and `n` separated by `j` is given by:\n\n `np.sqrt(distances[j]**2 + (height[i] - height[n])**2)`\n\n steps : int array, shape (N,)\n The raveled index steps to find a pixel's neighbors in `image`.\n distances : float array, shape (N,)\n The euclidean distance from a pixel to its corresponding\n neighbor in `steps`.\n row : int array\n Output array to be filled with the \"center\" pixel IDs.\n col : int array\n Output array to be filled with the \"neighbor\" pixel IDs.\n data : float array\n Output array to be filled with the distances from center to\n neighbor pixels.\n\n Returns\n -------\n k : int\n The number of entries written to row, col, and data.\n\n Notes\n -----\n No size or bounds checking is performed. 
Users should ensure that\n - No index in `indices` falls on any edge of `image` (or the\n neighbor computation will fail or segfault).\n - The `steps` and `distances` arrays have the same shape.\n - The `row`, `col`, `data` are long enough to hold all of the\n edges.\n \"\"\"\n image = image.ravel()\n height = height.ravel()\n n_neighbors = steps.size\n start_idx = np.max(steps)\n end_idx = image.size + np.min(steps)\n k = 0\n for i in range(start_idx, end_idx + 1):\n if image[i] != 0:\n for j in range(n_neighbors):\n n = steps[j] + i\n if image[n] != 0 and image[n] != image[i]:\n row[k] = image[i]\n col[k] = image[n]\n data[k] = np.sqrt(distances[j] ** 2 +\n (height[i] - height[n]) ** 2)\n k += 1\n return k\n\n\[email protected](nopython=True, cache=False) # change this to True with Numba 1.0\ndef _build_paths(jgraph, indptr, indices, path_data, visited, degrees):\n indptr_i = 0\n indices_j = 0\n # first, process all nodes in a path to an endpoint or junction\n for node in range(1, jgraph.shape[0]):\n if degrees[node] > 2 or degrees[node] == 1 and not visited[node]:\n for neighbor in jgraph.neighbors(node):\n if not visited[neighbor]:\n n_steps = _walk_path(jgraph, node, neighbor, visited,\n degrees, indices, path_data,\n indices_j)\n visited[node] = True\n indptr[indptr_i + 1] = indptr[indptr_i] + n_steps\n indptr_i += 1\n indices_j += n_steps\n # everything else is by definition in isolated cycles\n for node in range(1, jgraph.shape[0]):\n if degrees[node] > 0:\n if not visited[node]:\n visited[node] = True\n neighbor = jgraph.neighbors(node)[0]\n n_steps = _walk_path(jgraph, node, neighbor, visited, degrees,\n indices, path_data, indices_j)\n indptr[indptr_i + 1] = indptr[indptr_i] + n_steps\n indptr_i += 1\n indices_j += n_steps\n return indptr_i + 1, indices_j\n\n\[email protected](nopython=True, cache=False) # change this to True with Numba 1.0\ndef _walk_path(jgraph, node, neighbor, visited, degrees, indices, path_data,\n startj):\n indices[startj] = node\n path_data[startj] = jgraph.node_properties[node]\n j = startj + 1\n while degrees[neighbor] == 2 and not visited[neighbor]:\n indices[j] = neighbor\n path_data[j] = jgraph.node_properties[neighbor]\n n1, n2 = jgraph.neighbors(neighbor)\n nextneighbor = n1 if n1 != node else n2\n node, neighbor = neighbor, nextneighbor\n visited[node] = True\n j += 1\n indices[j] = neighbor\n path_data[j] = jgraph.node_properties[neighbor]\n visited[neighbor] = True\n return j - startj + 1\n\n\ndef _build_skeleton_path_graph(graph, *, _buffer_size_offset=None):\n if _buffer_size_offset is None:\n max_num_cycles = graph.indices.size // 4\n _buffer_size_offset = max_num_cycles\n degrees = np.diff(graph.indptr)\n visited = np.zeros(degrees.shape, dtype=bool)\n endpoints = (degrees != 2)\n endpoint_degrees = degrees[endpoints]\n num_paths = np.sum(endpoint_degrees)\n path_indptr = np.zeros(num_paths + _buffer_size_offset, dtype=int)\n # the number of points that we need to save to store all skeleton\n # paths is equal to the number of pixels plus the sum of endpoint\n # degrees minus one (since the endpoints will have been counted once\n # already in the number of pixels) *plus* the number of isolated\n # cycles (since each cycle has one index repeated). 
We don't know\n # the number of cycles ahead of time, but it is bounded by one quarter\n # of the number of points.\n n_points = (graph.indices.size + np.sum(endpoint_degrees - 1) +\n _buffer_size_offset)\n path_indices = np.zeros(n_points, dtype=int)\n path_data = np.zeros(path_indices.shape, dtype=float)\n m, n = _build_paths(graph, path_indptr, path_indices, path_data,\n visited, degrees)\n paths = sparse.csr_matrix((path_data[:n], path_indices[:n],\n path_indptr[:m]), shape=(m-1, n))\n return paths\n\n\nclass Skeleton:\n \"\"\"Object to group together all the properties of a skeleton.\n\n In the text below, we use the following notation:\n\n - N: the number of points in the pixel skeleton,\n - ndim: the dimensionality of the skeleton\n - P: the number of paths in the skeleton (also the number of links in the\n junction graph).\n - J: the number of junction nodes\n - Sd: the sum of the degrees of all the junction nodes\n - [Nt], [Np], Nr, Nc: the dimensions of the source image\n\n Parameters\n ----------\n skeleton_image : array\n The input skeleton (1-pixel/voxel thick skeleton, all other values 0).\n\n Other Parameters\n ----------------\n spacing : float or array of float, shape ``(ndim,)``\n The scale of the pixel spacing along each axis.\n source_image : array of float, same shape as `skeleton_image`\n The image that `skeleton_image` represents / summarizes / was generated\n from. This is used to produce visualizations as well as statistical\n properties of paths.\n keep_images : bool\n Whether or not to keep the original input images. These can be useful\n for visualization, but they may take up a lot of memory.\n unique_junctions : bool, optional\n If True, adjacent junction nodes get collapsed into a single\n conceptual node, with position at the centroid of all the connected\n initial nodes.\n\n Attributes\n ----------\n graph : scipy.sparse.csr_matrix, shape (N + 1, N + 1)\n The skeleton pixel graph, where each node is a non-zero pixel in the\n input image, and each edge connects adjacent pixels. The graph is\n represented as an adjacency matrix in SciPy sparse matrix format. For\n more information see the ``scipy.sparse`` documentation as well as\n ``scipy.sparse.csgraph``. Note: pixel numbering starts at 1, so the\n shape of this matrix is ``(N + 1, N + 1)`` instead of ``(N, N)``.\n nbgraph : NBGraph\n A thin Numba wrapper around the ``csr_matrix`` format, this provides\n faster graph methods. For example, it is much faster to get a list of\n neighbors, or test for the presence of a specific edge.\n coordinates : array, shape (N, ndim)\n The image coordinates of each pixel in the skeleton.\n paths : scipy.sparse.csr_matrix, shape (P, N + 1)\n A csr_matrix where element [i, j] is on if node j is in path i. This\n includes path endpoints. The number of nonzero elements is N - J + Sd.\n n_paths : int\n The number of paths, P. This is redundant information given `n_paths`,\n but it is used often enough that it is worth keeping around.\n distances : array of float, shape (P,)\n The distance of each path. Note: not initialized until `path_lengths()`\n is called on the skeleton; use path_lengths() instead\n skeleton_image : array or None\n The input skeleton image. Only present if `keep_images` is True. Set to\n False to preserve memory.\n source_image : array or None\n The image from which the skeleton was derived. Only present if\n `keep_images` is True. 
This is useful for visualization.\n \"\"\"\n def __init__(self, skeleton_image, *, spacing=1, source_image=None,\n _buffer_size_offset=None, keep_images=True,\n unique_junctions=True):\n graph, coords, degrees = skeleton_to_csgraph(skeleton_image,\n spacing=spacing,\n unique_junctions=unique_junctions)\n if np.issubdtype(skeleton_image.dtype, np.float_):\n pixel_values = ndi.map_coordinates(skeleton_image, coords.T,\n order=3)\n else:\n pixel_values = None\n self.graph = graph\n self.nbgraph = csr_to_nbgraph(graph, pixel_values)\n self.coordinates = coords\n self.paths = _build_skeleton_path_graph(self.nbgraph,\n _buffer_size_offset=_buffer_size_offset)\n self.n_paths = self.paths.shape[0]\n self.distances = np.empty(self.n_paths, dtype=float)\n self._distances_initialized = False\n self.skeleton_image = None\n self.source_image = None\n self.degrees_image = degrees\n self.degrees = np.diff(self.graph.indptr)\n self.spacing = (np.asarray(spacing) if not np.isscalar(spacing)\n else np.full(skeleton_image.ndim, spacing))\n if keep_images:\n self.skeleton_image = skeleton_image\n self.source_image = source_image\n\n def path(self, index):\n \"\"\"Return the pixel indices of path number `index`.\n\n Parameters\n ----------\n index : int\n The desired path.\n\n Returns\n -------\n path : array of int\n The indices of the pixels belonging to the path, including\n endpoints.\n \"\"\"\n # The below is equivalent to `self.paths[index].indices`, which is much\n # more elegant. However the below version is about 25x faster!\n # In [14]: %timeit mat[1].indices\n # 128 µs ± 421 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)\n # In [16]: %%timeit\n # ...: start, stop = mat.indptr[1:3]\n # ...: mat.indices[start:stop]\n # ...:\n # 5.05 µs ± 77.2 ns per loop (mean ± std. dev. 
of 7 runs, 100000 loops each)\n start, stop = self.paths.indptr[index:index+2]\n return self.paths.indices[start:stop]\n\n def path_coordinates(self, index):\n \"\"\"Return the image coordinates of the pixels in the path.\n\n Parameters\n ----------\n index : int\n The desired path.\n\n Returns\n -------\n path_coords : array of float\n The (image) coordinates of points on the path, including endpoints.\n \"\"\"\n path_indices = self.path(index)\n return self.coordinates[path_indices]\n\n def path_with_data(self, index):\n \"\"\"Return pixel indices and corresponding pixel values on a path.\n\n Parameters\n ----------\n index : int\n The desired path.\n\n Returns\n -------\n path : array of int\n The indices of pixels on the path, including endpoints.\n data : array of float\n The values of pixels on the path.\n \"\"\"\n start, stop = self.paths.indptr[index:index+2]\n return self.paths.indices[start:stop], self.paths.data[start:stop]\n\n def path_lengths(self):\n \"\"\"Return the length of each path on the skeleton.\n\n Returns\n -------\n lengths : array of float\n The length of all the paths in the skeleton.\n \"\"\"\n if not self._distances_initialized:\n _compute_distances(self.nbgraph, self.paths.indptr,\n self.paths.indices, self.distances)\n self._distances_initialized = True\n return self.distances\n\n def paths_list(self):\n \"\"\"List all the paths in the skeleton, including endpoints.\n\n Returns\n -------\n paths : list of array of int\n The list containing all the paths in the skeleton.\n \"\"\"\n return [list(self.path(i)) for i in range(self.n_paths)]\n\n def path_means(self):\n \"\"\"Compute the mean pixel value along each path.\n\n Returns\n -------\n means : array of float\n The average pixel value along each path in the skeleton.\n \"\"\"\n sums = np.add.reduceat(self.paths.data, self.paths.indptr[:-1])\n lengths = np.diff(self.paths.indptr)\n return sums / lengths\n\n def path_stdev(self):\n \"\"\"Compute the standard deviation of values along each path.\n\n Returns\n -------\n stdevs : array of float\n The standard deviation of pixel values along each path.\n \"\"\"\n data = self.paths.data\n sumsq = np.add.reduceat(data * data, self.paths.indptr[:-1])\n lengths = np.diff(self.paths.indptr)\n means = self.path_means()\n return np.sqrt(np.clip(sumsq/lengths - means*means, 0, None))\n\n\ndef summarize(skel: Skeleton):\n \"\"\"Compute statistics for every skeleton and branch in ``skel``.\n\n Parameters\n ----------\n skel : skan.csr.Skeleton\n A Skeleton object.\n\n Returns\n -------\n summary : pandas.DataFrame\n A summary of the branches including branch length, mean branch value,\n branch euclidean distance, etc.\n \"\"\"\n summary = {}\n ndim = skel.coordinates.shape[1]\n _, skeleton_ids = csgraph.connected_components(skel.graph,\n directed=False)\n endpoints_src = skel.paths.indices[skel.paths.indptr[:-1]]\n endpoints_dst = skel.paths.indices[skel.paths.indptr[1:] - 1]\n summary['skeleton-id'] = skeleton_ids[endpoints_src]\n summary['node-id-src'] = endpoints_src\n summary['node-id-dst'] = endpoints_dst\n summary['branch-distance'] = skel.path_lengths()\n deg_src = skel.degrees[endpoints_src]\n deg_dst = skel.degrees[endpoints_dst]\n kind = np.full(deg_src.shape, 2) # default: junction-to-junction\n kind[(deg_src == 1) | (deg_dst == 1)] = 1 # tip-junction\n kind[(deg_src == 1) & (deg_dst == 1)] = 0 # tip-tip\n kind[endpoints_src == endpoints_dst] = 3 # cycle\n summary['branch-type'] = kind\n summary['mean-pixel-value'] = skel.path_means()\n 
summary['stdev-pixel-value'] = skel.path_stdev()\n for i in range(ndim): # keep loops separate for best insertion order\n summary[f'image-coord-src-{i}'] = skel.coordinates[endpoints_src, i]\n for i in range(ndim):\n summary[f'image-coord-dst-{i}'] = skel.coordinates[endpoints_dst, i]\n coords_real_src = skel.coordinates[endpoints_src] * skel.spacing\n for i in range(ndim):\n summary[f'coord-src-{i}'] = coords_real_src[:, i]\n coords_real_dst = skel.coordinates[endpoints_dst] * skel.spacing\n for i in range(ndim):\n summary[f'coord-dst-{i}'] = coords_real_dst[:, i]\n summary['euclidean-distance'] = (\n np.sqrt((coords_real_dst - coords_real_src)**2 @ np.ones(ndim))\n )\n df = pd.DataFrame(summary)\n return df\n\n\[email protected](nopython=True, nogil=True, cache=False) # cache with Numba 1.0\ndef _compute_distances(graph, path_indptr, path_indices, distances):\n for i in range(len(distances)):\n start, stop = path_indptr[i:i+2]\n path = path_indices[start:stop]\n distances[i] = _path_distance(graph, path)\n\n\[email protected](nopython=True, nogil=True, cache=False) # cache with Numba 1.0\ndef _path_distance(graph, path):\n d = 0.\n n = len(path)\n for i in range(n - 1):\n u, v = path[i], path[i+1]\n d += graph.edge(u, v)\n return d\n\n\ndef _uniquify_junctions(csmat, pixel_indices, junction_labels,\n junction_centroids, *, spacing=1):\n \"\"\"Replace clustered pixels with degree > 2 by a single \"floating\" pixel.\n\n Parameters\n ----------\n csmat : NBGraph\n The input graph.\n pixel_indices : array of int\n The raveled index in the image of every pixel represented in csmat.\n spacing : float, or array-like of float, shape `len(shape)`, optional\n The spacing between pixels in the source image along each dimension.\n\n Returns\n -------\n final_graph : NBGraph\n The output csmat.\n \"\"\"\n junctions = np.unique(junction_labels)[1:] # discard 0, background\n junction_centroids_real = junction_centroids * spacing\n for j, jloc in zip(junctions, junction_centroids_real):\n loc, stop = csmat.indptr[j], csmat.indptr[j+1]\n neighbors = csmat.indices[loc:stop]\n neighbor_locations = pixel_indices[neighbors]\n neighbor_locations *= spacing\n distances = np.sqrt(np.sum((neighbor_locations - jloc)**2, axis=1))\n csmat.data[loc:stop] = distances\n tdata = csmat.T.tocsr().data\n csmat.data = np.maximum(csmat.data, tdata)\n\n\ndef skeleton_to_csgraph(skel, *, spacing=1, value_is_height=False,\n unique_junctions=True):\n \"\"\"Convert a skeleton image of thin lines to a graph of neighbor pixels.\n\n Parameters\n ----------\n skel : array\n An input image in which every nonzero pixel is considered part of\n the skeleton, and links between pixels are determined by a full\n n-dimensional neighborhood.\n spacing : float, or array-like of float, shape `(skel.ndim,)`\n A value indicating the distance between adjacent pixels. This can\n either be a single value if the data has the same resolution along\n all axes, or it can be an array of the same shape as `skel` to\n indicate spacing along each axis.\n\n Other Parameters\n ----------------\n value_is_height : bool, optional\n If `True`, the pixel value at each point of the skeleton will be\n considered to be a height measurement, and this height will be\n incorporated into skeleton branch lengths. 
Used for analysis of\n atomic force microscopy (AFM) images.\n unique_junctions : bool, optional\n If True, adjacent junction nodes get collapsed into a single\n conceptual node, with position at the centroid of all the connected\n initial nodes.\n\n Returns\n -------\n graph : sparse.csr_matrix\n A graph of shape (Nnz + 1, Nnz + 1), where Nnz is the number of\n nonzero pixels in `skel`. The value graph[i, j] is the distance\n between adjacent pixels i and j. In a 2D image, that would be\n 1 for immediately adjacent pixels and sqrt(2) for diagonally\n adjacent ones.\n pixel_coordinates : array of float\n An array of shape (Nnz + 1, skel.ndim), mapping indices in `graph`\n to pixel coordinates in `degree_image` or `skel`. Array entry\n (0,:) contains currently always zeros to index the pixels, which\n start at 1, directly to the coordinates.\n degree_image : array of int, same shape as skel\n An image where each pixel value contains the degree of its\n corresponding node in `graph`. This is useful to classify nodes.\n \"\"\"\n height = pad(skel, 0.) if value_is_height else None\n # ensure we have a bool image, since we later use it for bool indexing\n skel = skel.astype(bool)\n ndim = skel.ndim\n spacing = np.ones(ndim, dtype=float) * spacing\n\n pixel_indices = np.concatenate(([[0.] * ndim],\n np.transpose(np.nonzero(skel))), axis=0)\n skelint = np.zeros(skel.shape, dtype=int)\n skelint[tuple(pixel_indices[1:].T.astype(int))] = \\\n np.arange(pixel_indices.shape[0])[1:]\n\n degree_kernel = np.ones((3,) * ndim)\n degree_kernel[(1,) * ndim] = 0 # remove centre pixel\n degree_image = ndi.convolve(skel.astype(int), degree_kernel,\n mode='constant') * skel\n\n if unique_junctions:\n # group all connected junction nodes into \"meganodes\".\n junctions = degree_image > 2\n junction_ids = skelint[junctions]\n labeled_junctions, centroids = compute_centroids(junctions)\n labeled_junctions[junctions] = \\\n junction_ids[labeled_junctions[junctions] - 1]\n skelint[junctions] = labeled_junctions[junctions]\n pixel_indices[np.unique(labeled_junctions)[1:]] = centroids\n\n num_edges = np.sum(degree_image) # *2, which is how many we need to store\n skelint = pad(skelint, 0) # pad image to prevent looparound errors\n steps, distances = raveled_steps_to_neighbors(skelint.shape, ndim,\n spacing=spacing)\n graph = _pixel_graph(skelint, steps, distances, num_edges, height)\n\n if unique_junctions:\n _uniquify_junctions(graph, pixel_indices,\n labeled_junctions, centroids, spacing=spacing)\n return graph, pixel_indices, degree_image\n\n\[email protected](nopython=True, cache=True)\ndef _csrget(indices, indptr, data, row, col):\n \"\"\"Fast lookup of value in a scipy.sparse.csr_matrix format table.\n\n Parameters\n ----------\n indices, indptr, data : numpy arrays of int, int, float\n The CSR format data.\n row, col : int\n The matrix coordinates of the desired value.\n\n Returns\n -------\n dat: float\n The data value in the matrix.\n \"\"\"\n start, end = indptr[row], indptr[row+1]\n for i in range(start, end):\n if indices[i] == col:\n return data[i]\n return 0.\n\n\[email protected](nopython=True)\ndef _expand_path(graph, source, step, visited, degrees):\n \"\"\"Walk a path on a graph until reaching a tip or junction.\n\n A path is a sequence of degree-2 nodes.\n\n Parameters\n ----------\n graph : NBGraph\n A graph encoded identically to a SciPy sparse compressed sparse\n row matrix. See the documentation of `NBGraph` for details.\n source : int\n The starting point of the walk. 
This must be a path node, or\n the function's behaviour is undefined.\n step : int\n The initial direction of the walk. Must be a neighbor of\n `source`.\n visited : array of bool\n An array mapping node ids to `False` (unvisited node) or `True`\n (previously visited node).\n degrees : array of int\n An array mapping node ids to their degrees in `graph`.\n\n Returns\n -------\n dest : int\n The tip or junction node at the end of the path.\n d : float\n The distance travelled from `source` to `dest`.\n n : int\n The number of pixels along the path followed (excluding the source).\n s : float\n The sum of the pixel values along the path followed (also excluding\n the source).\n deg : int\n The degree of `dest`.\n \"\"\"\n d = graph.edge(source, step)\n s = 0.\n n = 0\n while degrees[step] == 2 and not visited[step]:\n n1, n2 = graph.neighbors(step)\n nextstep = n1 if n1 != source else n2\n source, step = step, nextstep\n d += graph.edge(source, step)\n visited[source] = True\n s += graph.node_properties[source]\n n += 1\n visited[step] = True\n return step, d, n, s, degrees[step]\n\n\[email protected](nopython=True, nogil=True)\ndef _branch_statistics_loop(jgraph, degrees, visited, result):\n num_results = 0\n for node in range(1, jgraph.shape[0]):\n if not visited[node]:\n if degrees[node] == 2:\n visited[node] = True\n left, right = jgraph.neighbors(node)\n id0, d0, n0, s0, deg0 = _expand_path(jgraph, node, left,\n visited, degrees)\n if id0 == node: # standalone cycle\n id1, d1, n1, s1, deg1 = node, 0, 0, 0., 2\n kind = 3\n else:\n id1, d1, n1, s1, deg1 = _expand_path(jgraph, node, right,\n visited, degrees)\n kind = 2 # default: junction-to-junction\n if deg0 == 1 and deg1 == 1: # tip-tip\n kind = 0\n elif deg0 == 1 or deg1 == 1: # tip-junct, tip-path impossible\n kind = 1\n counts = n0 + n1 + 1\n values = s0 + s1 + jgraph.node_properties[node]\n result[num_results, :] = (float(id0), float(id1), d0 + d1,\n float(kind), values / counts)\n num_results += 1\n elif degrees[node] == 1:\n visited[node] = True\n neighbor = jgraph.neighbors(node)[0]\n id0, d0, n0, s0, deg0 = _expand_path(jgraph, node, neighbor,\n visited, degrees)\n kind = 1 if deg0 > 2 else 0 # tip-junct / tip-tip\n counts = n0\n values = s0\n avg_value = np.nan if counts == 0 else values / counts\n result[num_results, :] = (float(node), float(id0), d0,\n float(kind), avg_value)\n num_results += 1\n return num_results\n\n\ndef branch_statistics(graph, pixel_values=None, *,\n buffer_size_offset=0):\n \"\"\"Compute the length and type of each branch in a skeleton graph.\n\n Parameters\n ----------\n graph : sparse.csr_matrix, shape (N, N)\n A skeleton graph.\n pixel_values : array of float, shape (N,)\n A value for each pixel in the graph. Used to compute total\n intensity statistics along each branch.\n buffer_size_offset : int, optional\n The buffer size is given by the sum of the degrees of non-path\n nodes. This is usually 2x the amount needed, allowing room for\n extra cycles of path-only nodes. However, if the image consists\n *only* of such cycles, the buffer size will be 0, resulting in\n an error. 
Until a more sophisticated, expandable-buffer\n solution is implemented, you can manually set a bigger buffer\n size using this parameter.\n\n Returns\n -------\n branches : array of float, shape (N, {4, 5})\n An array containing branch endpoint IDs, length, and branch type.\n The types are:\n - tip-tip (0)\n - tip-junction (1)\n - junction-junction (2)\n - path-path (3) (This can only be a standalone cycle)\n Optionally, the last column contains the average pixel value\n along each branch (not including the endpoints).\n \"\"\"\n jgraph = csr_to_nbgraph(graph, pixel_values)\n degrees = np.diff(graph.indptr)\n visited = np.zeros(degrees.shape, dtype=bool)\n endpoints = (degrees != 2)\n num_paths = np.sum(degrees[endpoints])\n result = np.zeros((num_paths + buffer_size_offset, 5), dtype=float)\n num_results = _branch_statistics_loop(jgraph, degrees, visited, result)\n num_columns = 5 if jgraph.has_node_props else 4\n return result[:num_results, :num_columns]\n\n\ndef submatrix(M, idxs):\n \"\"\"Return the outer-index product submatrix, `M[idxs, :][:, idxs]`.\n\n Parameters\n ----------\n M : scipy.sparse.spmatrix\n Input (square) matrix\n idxs : array of int\n The indices to subset. No index in `idxs` should exceed the\n number of rows of `M`.\n\n Returns\n -------\n Msub : scipy.sparse.spmatrix\n The subsetted matrix.\n\n Examples\n --------\n >>> Md = np.arange(16).reshape((4, 4))\n >>> M = sparse.csr_matrix(Md)\n >>> print(submatrix(M, [0, 2]).toarray())\n [[ 0 2]\n [ 8 10]]\n \"\"\"\n Msub = M[idxs, :][:, idxs]\n return Msub\n\n\ndef summarise(image, *, spacing=1, using_height=False):\n \"\"\"Compute statistics for every disjoint skeleton in `image`.\n\n **Note: this function is deprecated. Prefer** :func:`.summarize`.\n\n Parameters\n ----------\n image : array, shape (M, N, ..., P)\n N-dimensional array, where nonzero entries correspond to an\n object's single-pixel-wide skeleton. If the image is of type 'float',\n the values are taken to be the height at that pixel, which is used\n to compute the skeleton distances.\n spacing : float, or array-like of float, shape `(skel.ndim,)`\n A value indicating the distance between adjacent pixels. This can\n either be a single value if the data has the same resolution along\n all axes, or it can be an array of the same shape as `skel` to\n indicate spacing along each axis.\n using_height : bool, optional\n If `True`, the pixel value at each point of the skeleton will be\n considered to be a height measurement, and this height will be\n incorporated into skeleton branch lengths, endpoint coordinates,\n and euclidean distances. 
Used for analysis of atomic force\n microscopy (AFM) images.\n\n Returns\n -------\n df : pandas DataFrame\n A data frame summarising the statistics of the skeletons in\n `image`.\n \"\"\"\n ndim = image.ndim\n spacing = np.ones(ndim, dtype=float) * spacing\n g, coords_img, degrees = skeleton_to_csgraph(image, spacing=spacing,\n value_is_height=using_height)\n num_skeletons, skeleton_ids = csgraph.connected_components(g,\n directed=False)\n if np.issubdtype(image.dtype, np.float_) and not using_height:\n pixel_values = ndi.map_coordinates(image, coords_img.T, order=3)\n value_columns = ['mean pixel value']\n value_column_types = [float]\n else:\n pixel_values = None\n value_columns = []\n value_column_types = []\n stats = branch_statistics(g, pixel_values)\n indices0 = stats[:, 0].astype(int)\n indices1 = stats[:, 1].astype(int)\n coords_img0 = coords_img[indices0]\n coords_img1 = coords_img[indices1]\n coords_real0 = coords_img0 * spacing\n coords_real1 = coords_img1 * spacing\n if using_height:\n height_coords0 = ndi.map_coordinates(image, coords_img0.T, order=3)\n coords_real0 = np.column_stack((height_coords0, coords_real0))\n height_coords1 = ndi.map_coordinates(image, coords_img1.T, order=3)\n coords_real1 = np.column_stack((height_coords1, coords_real1))\n distances = np.sqrt(np.sum((coords_real0 - coords_real1)**2, axis=1))\n skeleton_id = skeleton_ids[stats[:, 0].astype(int)]\n table = np.column_stack((skeleton_id, stats, coords_img0, coords_img1,\n coords_real0, coords_real1, distances))\n height_ndim = ndim if not using_height else (ndim + 1)\n columns = (['skeleton-id', 'node-id-0', 'node-id-1', 'branch-distance',\n 'branch-type'] +\n value_columns +\n ['image-coord-src-%i' % i for i in range(ndim)] +\n ['image-coord-dst-%i' % i for i in range(ndim)] +\n ['coord-src-%i' % i for i in range(height_ndim)] +\n ['coord-dst-%i' % i for i in range(height_ndim)] +\n ['euclidean-distance'])\n column_types = ([int, int, int, float, int] + value_column_types +\n 2 * ndim * [int] +\n 2 * height_ndim * [float] +\n [float])\n data_dict = {col: dat.astype(dtype)\n for col, dat, dtype in zip(columns, table.T, column_types)}\n df = pd.DataFrame(data_dict)\n return df\n\n\ndef compute_centroids(image):\n \"\"\"Find the centroids of all nonzero connected blobs in `image`.\n\n Parameters\n ----------\n image : ndarray\n The input image.\n\n Returns\n -------\n label_image : ndarray of int\n The input image, with each connected region containing a different\n integer label.\n\n Examples\n --------\n >>> image = np.array([[1, 0, 1, 0, 0, 1, 1],\n ... [1, 0, 0, 1, 0, 0, 0]])\n >>> labels, centroids = compute_centroids(image)\n >>> print(labels)\n [[1 0 2 0 0 3 3]\n [1 0 0 2 0 0 0]]\n >>> centroids\n array([[0.5, 0. ],\n [0.5, 2.5],\n [0. , 5.5]])\n \"\"\"\n connectivity = np.ones((3,) * image.ndim)\n labeled_image = ndi.label(image, connectivity)[0]\n nz = np.nonzero(labeled_image)\n nzpix = labeled_image[nz]\n sizes = np.bincount(nzpix)\n coords = np.transpose(nz)\n grouping = np.argsort(nzpix)\n sums = np.add.reduceat(coords[grouping], np.cumsum(sizes)[:-1])\n means = sums / sizes[1:, np.newaxis]\n return labeled_image, means\n" ]
[ [ "numpy.add.reduceat", "scipy.sparse.csgraph.connected_components", "numpy.min", "numpy.cumsum", "numpy.issubdtype", "numpy.broadcast_to", "numpy.max", "numpy.full", "numpy.bincount", "numpy.empty", "pandas.DataFrame", "scipy.ndimage.map_coordinates", "numpy.nonzero", "numpy.transpose", "numpy.arange", "numpy.sqrt", "numpy.column_stack", "scipy.sparse.csr_matrix", "scipy.sparse.coo_matrix", "numpy.array", "numpy.zeros", "numpy.diff", "numpy.isscalar", "numpy.argsort", "numpy.clip", "numpy.asarray", "numpy.sum", "scipy.ndimage.label", "numpy.ones", "numpy.unique", "numpy.maximum" ] ]
lgbouma/tessmaps
[ "569bbc24f2fb9b00c537329a6cb919200dd258f3" ]
[ "src/tessmaps.py" ]
[ "# -*- coding: utf-8 -*-\n'''\nfrom tessmaps import make_rect_map\nmake_rect_map(sector_number, coords, names=names\n annotate_bools=annotate_bools)\n'''\nfrom __future__ import division, print_function\n\nimport os, argparse\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport pandas as pd, numpy as np\n\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\n\n__location__ = os.path.realpath(os.path.join(os.getcwd(),\n os.path.dirname(__file__)))\n\ndef _get_TIC_coords_count(sector_number):\n\n # if we are running as a package, then __location__ is the header\n # directory. if we're running a shell script from source, we need a\n # different relative path.\n try:\n ticpath = os.path.join(__location__, 'data',\n 'TIC71_prioritycut_tess_sectors.csv')\n df = pd.read_csv(ticpath)\n except FileNotFoundError:\n try:\n ticpath = os.path.join(__location__.replace('src', 'data'),\n 'TIC71_prioritycut_tess_sectors.csv')\n df = pd.read_csv(ticpath)\n except FileNotFoundError:\n raise FileNotFoundError(\n 'poorly defined TIC path \\n{:s}'.format(ticpath))\n\n ra, dec = np.array(df['RA']), np.array(df['DEC'])\n elon, elat = np.array(df['ECLONG']), np.array(df['ECLAT'])\n totsectors = np.array(df['total_sectors_obsd'])\n this_sector = np.array(df['sector_{:d}'.format(sector_number)])\n sel = this_sector > 0\n\n return elon[sel], elat[sel], totsectors[sel]\n\n\ndef make_rect_map(sector_number, coords,\n names=None, annotate_bools=None,\n title=None, bkgnd_cmap='Paired',\n savname='tess_rectmap_TEMP.png', savdir='../results/',\n plotknownpoints=False, ms=10):\n '''\n Make a polar sky map of what TESS looks at in a given sector. The\n background is the TESS footprint for the sector; overplotted are any\n targets of interest, passed through as RAs, decs, and optionally names.\n\n NB. the TESS footprint, and the \"on silicon\" calculator differ slightly\n from Mr Tommy B's.\n\n args:\n sector_number: int (0-12) of TESS observing sector\n coords: np arrays of astroy SkyCoords you want to overplot against the\n TESS footprint. (decimal degrees)\n optional kwargs:\n names: names of the coordinates. especially useful if there are some\n you want annotate, using\n annotate_bools: np array with same length as elon/elat/names, with\n boolean T/F values for whether you want those elons, elats, and names\n to be annotated if they do fall on silicon. They will also get a\n highlighted color (green, instead of orange).\n plotknownpoints (bool): option if you want to see locations of\n (elon,elat) = (-15,-50),(15,-45),(-30,-15),(3,-80),(130,-80) on\n map.\n bkgnd_cmap: 'Paired' or 'Blues'.\n ms: marker size for overplotted points.\n '''\n ticelon, ticelat, tictotsectors = _get_TIC_coords_count(sector_number)\n\n import cartopy.crs as ccrs\n\n fig = plt.figure(figsize=(4,4))\n center_long = 0\n proj = ccrs.SouthPolarStereo(central_longitude=center_long,\n true_scale_latitude=True)\n ax = fig.add_subplot(1, 1, 1, projection=proj)\n\n # the map size is set by where the scatter points are. 
(but try anyway).\n minlat, maxlat = -90, -10\n ax.set_extent([-180, 180, minlat, maxlat], ccrs.PlateCarree())\n\n if plotknownpoints:\n ksize = 30\n klon = np.array([-15,15,-30,3,130])\n klat = np.array([-50, -45, -15, -80,-80])\n knsectors = np.array([5,2,3,4,12])\n\n # make colormap\n import seaborn as sns\n if bkgnd_cmap=='Paired':\n rgbs = sns.color_palette(bkgnd_cmap, n_colors=13, desat=0.9)\n cbarbounds = list(np.arange(0.5,14.5,1))\n bounds= list(np.arange(0.5,14.5,1))\n elif bkgnd_cmap=='Blues':\n rgbs = sns.color_palette(bkgnd_cmap, n_colors=10, desat=1.)\n rgbs = rgbs[3:]\n cbarbounds = list(np.arange(0.5,14.5,1))\n bounds= list(np.arange(0.5,4.5,1))\n else:\n raise NotImplementedError\n cmap = mpl.colors.ListedColormap(rgbs)\n norm = mpl.colors.BoundaryNorm(bounds, cmap.N)\n\n if plotknownpoints:\n cax = ax.scatter(klon, klat, c=knsectors, s=ksize, lw=0, zorder=2,\n cmap=cmap, norm=norm, rasterized=True,\n transform=ccrs.PlateCarree())\n\n # plot the TIC stars in this sector\n ticelon[ticelon>180] -= 360\n cax = ax.scatter(ticelon, ticelat, c=tictotsectors, s=0.25, lw=0, zorder=2,\n cmap=cmap, norm=norm, rasterized=True,\n transform=ccrs.PlateCarree())\n\n try:\n # a somewhat hacky relative import solution\n from . import get_time_on_silicon\n except ImportError:\n try:\n import get_time_on_silicon\n except:\n raise ImportError\n\n # get sectors where passed coords are observed\n df = get_time_on_silicon.get_time_on_silicon(coords)\n\n sel = df['total_sectors_obsd']>0\n sel &= df['sector_{:d}'.format(sector_number)]>0\n\n # plot the positions of passed coords\n elon, elat = df['elon'], df['elat']\n elon[elon>180] -= 360\n\n if len(elon[sel]) == 0:\n print('did not get any objects on silicon. skipping map-making!')\n return 0\n\n _ = ax.scatter(elon[sel], elat[sel], c='darkorange', s=ms, lw=0, zorder=4,\n rasterized=True, transform=ccrs.PlateCarree())\n _ = ax.scatter(elon, elat, c='lightgray', s=1, lw=0, zorder=3,\n rasterized=True, transform=ccrs.PlateCarree())\n\n if type(annotate_bools)==np.ndarray:\n\n # first, highlight annotated coords\n sel &= annotate_bools\n _ = ax.scatter(elon[sel], elat[sel], c='lime', s=ms, lw=0,\n zorder=5, rasterized=True,\n transform=ccrs.PlateCarree())\n\n subsel = sel\n # calculate the positions of annotation labels\n middle_elon = np.mean(ticelon[np.abs(ticelat)<45])\n diff = 45\n elon_start= np.remainder(middle_elon + diff, 360)\n elon_stop = np.remainder(middle_elon - diff, 360)\n text_elon = np.linspace(elon_start,elon_stop,len(elat[subsel]))\n text_elat = -20*np.ones_like(text_elon)\n if sector_number in [4,5,6,7,9,10,11,12]:\n text_elon = np.remainder(\n np.linspace(elon_start,elon_stop+360,len(elat[subsel])),\n 360)\n\n transform = ccrs.PlateCarree()._as_mpl_transform(ax)\n arrowprops = dict(facecolor='gray', edgecolor='gray', arrowstyle='->',\n linewidth=0.5, connectionstyle='arc3,rad=-0.05')\n bbox = dict(facecolor='white',edgecolor='gray',\n alpha=0.95,linewidth=0.5,pad=0.2)\n\n for ix, sname, alon, alat in list(\n zip(range(len(names[subsel])), names[subsel],\n elon[subsel], elat[subsel])):\n\n ax.annotate(sname, xy=(alon,alat), xycoords=transform,\n xytext=(text_elon[ix], text_elat[ix]),\n textcoords=transform, ha='center', va='top',\n arrowprops=arrowprops, bbox=bbox, fontsize='xx-small',\n zorder=4)\n else:\n pass\n\n # set up colorbar\n cbar = fig.colorbar(cax, cmap=cmap, norm=norm, boundaries=cbarbounds,\n fraction=0.04, pad=0.03, ticks=np.arange(13)+1,\n orientation='vertical')\n ylabels = np.arange(1,14,1)\n 
cbar.ax.set_yticklabels(map(str, ylabels))\n cbar.set_label('number of pointings', rotation=270, labelpad=10)\n cbar.ax.tick_params(direction='in')\n\n # make grid lines and label the spiral\n ax.gridlines(linewidth=0.5, linestyle='--', color='lightgray', zorder=-1)\n lonlabels = np.arange(-120,180+60,60)\n latlabels = -10*np.ones_like(lonlabels)\n ix = 0\n for lon, lat in list(zip(lonlabels, latlabels)):\n if lon >= 0:\n ax.text(lon, lat-ix*10, str('({:d},{:d})'.format(lon,lat-ix*10)),\n fontsize='xx-small', transform=ccrs.PlateCarree(),\n ha='center', va='center')\n else:\n ax.text(lon, lat-ix*10,\n str('({:d},{:d})'.format(lon+360,lat-ix*10)),\n fontsize='xx-small', transform=ccrs.PlateCarree(),\n ha='center', va='center')\n ix += 1\n ax.text(0.99,0.01,'ecliptic coords',\n fontsize='xx-small',transform=ax.transAxes, ha='right',va='bottom')\n ax.text(0.01,0.01,'github.com/lgbouma/tessmaps',\n fontsize='xx-small',transform=ax.transAxes, ha='left',va='bottom')\n\n ax.set_title(title, y=1.0, fontsize='xx-small')\n\n fig.tight_layout()\n\n fig.savefig(savdir+savname.replace('pdf','png'),dpi=400,\n bbox_inches='tight')\n print('made {:s}'.format(savdir+savname.replace('pdf','png')))\n" ]
[ [ "matplotlib.use", "numpy.array", "numpy.ones_like", "matplotlib.pyplot.figure", "matplotlib.colors.BoundaryNorm", "numpy.arange", "numpy.remainder", "numpy.abs", "matplotlib.colors.ListedColormap", "pandas.read_csv" ] ]
Cogito2012/DRIVE
[ "dba7e72ae3d1a5c2841d39d8fc51af48dd161f9e" ]
[ "src/saliency/mlnet.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import models, transforms\nimport math\n\nclass ResNet_FPN(torch.nn.Module):\n def __init__(self, n_layers=50, preTrained=False):\n super(ResNet_FPN, self).__init__()\n if n_layers == 50:\n self.net = models.resnet50(pretrained=preTrained)\n else:\n raise NotImplementedError\n self.top_layer = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0) # reduce channel\n\n self.lat_layer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)\n self.lat_layer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0)\n self.lat_layer3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)\n\n self.smooth_layer1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)\n self.smooth_layer2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)\n self.smooth_layer3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)\n\n self.init_new_modules()\n\n def init_new_modules(self):\n def normal_init(m, mean, stddev, truncated=False):\n if truncated:\n m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n else:\n m.weight.data.normal_(mean, stddev)\n m.bias.data.zero_()\n normal_init(self.top_layer, 0, 0.01)\n normal_init(self.smooth_layer1, 0, 0.01)\n normal_init(self.smooth_layer2, 0, 0.01)\n normal_init(self.smooth_layer3, 0, 0.01)\n normal_init(self.lat_layer1, 0, 0.01)\n normal_init(self.lat_layer2, 0, 0.01)\n normal_init(self.lat_layer3, 0, 0.01)\n\n \n def forward(self, im):\n # block 1\n x = self.net.conv1(im) # (B, 64, 112, 112)\n x = self.net.bn1(x)\n x = self.net.relu(x)\n c1 = self.net.maxpool(x) # (B, 64, 56, 56)\n # block 2, 3, 4, 5\n c2 = self.net.layer1(c1) # (B, 256, 56, 56)\n c3 = self.net.layer2(c2) # (B, 512, 28, 28)\n c4 = self.net.layer3(c3) # (B, 1024, 14, 14)\n c5 = self.net.layer4(c4) # (B, 2048, 7, 7)\n # Top down fusion\n p5 = self.top_layer(c5) # (B, 256, 7, 7)\n # P4 block\n c4_lat = self.lat_layer1(c4)\n p4 = F.interpolate(p5, size=(c4_lat.size(2), c4_lat.size(3)), mode='bilinear') + c4_lat\n p4 = self.smooth_layer1(p4) # (B, 256, 14, 14)\n # P3 block\n c3_lat = self.lat_layer2(c3)\n p3 = F.interpolate(p4, size=(c3_lat.size(2), c3_lat.size(3)), mode='bilinear') + c3_lat\n p3 = self.smooth_layer2(p3) # (B, 256, 28, 28)\n # P2 block\n c2_lat = self.lat_layer3(c2)\n p2 = F.interpolate(p3, size=(c2_lat.size(2), c2_lat.size(3)), mode='bilinear') + c2_lat\n p2 = self.smooth_layer3(p2) # (B, 256, 56, 56)\n return p2\n\n\nclass MLNet(nn.Module):\n \"\"\"\n Referenced from: https://github.com/immortal3/MLNet-Pytorch/blob/master/MLNet_Pytorch.ipynb\n \"\"\"\n def __init__(self, input_shape):\n super(MLNet, self).__init__()\n self.input_shape = input_shape\n self.output_shape = [int(input_shape[0] / 8), int(input_shape[1] / 8)]\n self.scale_factor = 10\n self.prior_size = [int(self.output_shape[0] / self.scale_factor), int(self.output_shape[1] / self.scale_factor)]\n\n # loading pre-trained vgg16 model and removing last max pooling layer (Conv5-3 pooling)\n # 16: conv3-3 pool (1/8), 23: conv4-3 pool (1/16), 30: conv5-3 (1/16)\n vgg16_model = models.vgg16(pretrained = True)\n self.freeze_params(vgg16_model, 21)\n features = list(vgg16_model.features)[:-1]\n \n # making same spatial size by calculation :) \n # in pytorch there was problem outputing same size in maxpool2d\n features[23].stride = 1\n features[23].kernel_size = 5\n features[23].padding = 2\n\n self.features = nn.ModuleList(features).eval()\n # adding dropout layer\n self.fddropout = 
nn.Dropout2d(p=0.5)\n # adding convolution layer to down number of filters 1280 ==> 64\n self.int_conv = nn.Conv2d(1280, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.pre_final_conv = nn.Conv2d(64, 1, kernel_size=(1, 1), stride=(1, 1) ,padding=(0, 0))\n # prior initialized to ones\n self.prior = nn.Parameter(torch.ones((1, 1, self.prior_size[0], self.prior_size[1]), requires_grad=True))\n \n # bilinear upsampling layer\n self.bilinearup = torch.nn.UpsamplingBilinear2d(scale_factor=self.scale_factor)\n\n # initialize new parameters\n self.init_new_params()\n \n\n def freeze_params(self, model, last_freeze_layer):\n # freezing Layer\n for i, param in enumerate(model.parameters()):\n if i <= last_freeze_layer:\n param.requires_grad = False\n\n\n def init_new_params(self):\n\n def zero_params(tensor):\n if tensor is not None:\n tensor.data.fill_(0)\n \n nn.init.kaiming_normal_(self.int_conv.weight, mode='fan_out', nonlinearity='relu')\n zero_params(self.int_conv.bias)\n nn.init.kaiming_normal_(self.pre_final_conv.weight, mode='fan_out', nonlinearity='relu')\n zero_params(self.pre_final_conv.bias)\n torch.nn.init.xavier_normal_(self.prior)\n\n\n def forward(self, x, return_bottom=False):\n results = []\n for ii, model in enumerate(self.features):\n # model = model.to(x.device)\n x = model(x)\n if ii in {16,23,29}:\n results.append(x)\n \n # concat to get 1280 = 512 + 512 + 256\n x = torch.cat((results[0],results[1],results[2]),1) \n \n # adding dropout layer with dropout set to 0.5 (default)\n x = self.fddropout(x)\n \n # 64 filters convolution layer\n bottom = self.int_conv(x)\n # 1*1 convolution layer\n x = self.pre_final_conv(bottom)\n\n upscaled_prior = self.bilinearup(self.prior)\n\n # dot product with prior\n x = x * upscaled_prior\n # x = torch.sigmoid(x)\n x = torch.nn.functional.relu(x,inplace=True)\n\n if return_bottom:\n return x, bottom\n return x\n\n \n# Modified MSE Loss Function\nclass ModMSELoss(torch.nn.Module):\n def __init__(self, shape_gt):\n super(ModMSELoss, self).__init__()\n self.shape_r_gt = shape_gt[0]\n self.shape_c_gt = shape_gt[1]\n \n def forward(self, output , label , prior):\n prior_size = prior.shape\n output_max = torch.max(torch.max(output,2)[0],2)[0].unsqueeze(2).unsqueeze(2).expand(output.shape[0],output.shape[1],self.shape_r_gt,self.shape_c_gt)\n reg = ( 1.0/(prior_size[0]*prior_size[1]) ) * ( 1 - prior)**2 # (1, 1, 6, 8)\n loss = torch.mean( ((output / (output_max + 1e-6) - label) / (1 - label + 0.1))**2) + torch.sum(reg)\n return loss" ]
[ [ "torch.cat", "torch.nn.ModuleList", "torch.nn.UpsamplingBilinear2d", "torch.max", "torch.nn.init.kaiming_normal_", "torch.ones", "torch.nn.Conv2d", "torch.nn.functional.relu", "torch.nn.init.xavier_normal_", "torch.nn.Dropout2d", "torch.mean", "torch.sum" ] ]
npfp/agents
[ "21ae8a6b7ccf80b7c693cf34debe68a616c8387e" ]
[ "tf_agents/agents/ppo/ppo_policy_test.py" ]
[ "# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf_agents.policies.actor_policy.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom tf_agents.agents.ppo import ppo_policy\nfrom tf_agents.environments import time_step as ts\nfrom tf_agents.networks import network\nfrom tf_agents.specs import distribution_spec\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.utils import test_utils\n\n\nclass DummyActorNet(network.Network):\n\n def __init__(self, action_spec, name=None):\n super(DummyActorNet, self).__init__(name, (), 'DummyActorNet')\n self._action_spec = action_spec\n self._flat_action_spec = tf.nest.flatten(self._action_spec)[0]\n\n self._layers.append(\n tf.keras.layers.Dense(\n self._flat_action_spec.shape.num_elements(),\n kernel_initializer=tf.compat.v1.initializers.constant([2, 1]),\n bias_initializer=tf.compat.v1.initializers.constant([5]),\n activation=tf.keras.activations.tanh,\n ))\n\n def call(self, inputs, unused_step_type=None, network_state=()):\n hidden_state = tf.cast(tf.nest.flatten(inputs), tf.float32)[0]\n for layer in self.layers:\n hidden_state = layer(hidden_state)\n\n means = tf.reshape(hidden_state,\n [-1] + self._flat_action_spec.shape.as_list())\n spec_means = (\n self._flat_action_spec.maximum + self._flat_action_spec.minimum) / 2.0\n spec_ranges = (\n self._flat_action_spec.maximum - self._flat_action_spec.minimum) / 2.0\n action_means = spec_means + spec_ranges * means\n\n return tf.nest.pack_sequence_as(self._action_spec,\n [action_means]), network_state\n\n\nclass DummyActorDistributionNet(network.DistributionNetwork):\n\n def __init__(self, action_spec, name=None):\n output_spec = tf.nest.map_structure(self._get_normal_distribution_spec,\n action_spec)\n super(DummyActorDistributionNet, self).__init__(\n name,\n (),\n output_spec=output_spec,\n name='DummyActorDistributionNet')\n self._action_net = DummyActorNet(action_spec)\n\n def _get_normal_distribution_spec(self, sample_spec):\n input_param_shapes = tfp.distributions.Normal.param_static_shapes(\n sample_spec.shape)\n input_param_spec = tf.nest.map_structure(\n lambda tensor_shape: tensor_spec.TensorSpec( # pylint: disable=g-long-lambda\n shape=tensor_shape,\n dtype=sample_spec.dtype),\n input_param_shapes)\n\n return distribution_spec.DistributionSpec(\n tfp.distributions.Normal, input_param_spec, sample_spec=sample_spec)\n\n def call(self, inputs, unused_step_type=None, network_state=()):\n action_means, network_state = self._action_net(inputs, network_state)\n\n def _action_distribution(action_mean):\n action_std = tf.ones_like(action_mean)\n return tfp.distributions.Normal(action_mean, action_std)\n\n return tf.nest.map_structure(_action_distribution,\n action_means), network_state\n\n\nclass 
DummyValueNet(network.Network):\n\n def __init__(self, name=None):\n super(DummyValueNet, self).__init__(name, (), 'DummyValueNet')\n self._layers.append(\n tf.keras.layers.Dense(\n 1,\n kernel_initializer=tf.compat.v1.initializers.constant([2, 1]),\n bias_initializer=tf.compat.v1.initializers.constant([5])))\n\n def call(self, inputs, unused_step_type=None, network_state=()):\n hidden_state = tf.cast(tf.nest.flatten(inputs), tf.float32)[0]\n for layer in self.layers:\n hidden_state = layer(hidden_state)\n return hidden_state, network_state\n\n\ndef _test_cases(prefix=''):\n return [{\n 'testcase_name': '%s0' % prefix,\n 'network_cls': DummyActorNet,\n }, {\n 'testcase_name': '%s1' % prefix,\n 'network_cls': DummyActorDistributionNet,\n }]\n\n\nclass PPOPolicyTest(parameterized.TestCase, test_utils.TestCase):\n\n def setUp(self):\n super(PPOPolicyTest, self).setUp()\n self._obs_spec = tensor_spec.TensorSpec([2], tf.float32)\n self._time_step_spec = ts.time_step_spec(self._obs_spec)\n self._action_spec = tensor_spec.BoundedTensorSpec([1], tf.float32, 2, 3)\n\n @property\n def _time_step(self):\n return ts.TimeStep(\n step_type=tf.constant([1], dtype=tf.int32),\n reward=tf.constant([1], dtype=tf.float32),\n discount=tf.constant([1], dtype=tf.float32),\n observation=tf.constant([[1, 2]], dtype=tf.float32))\n\n @property\n def _time_step_batch(self):\n return ts.TimeStep(\n tf.constant(\n ts.StepType.FIRST, dtype=tf.int32, shape=[2], name='step_type'),\n tf.constant(0.0, dtype=tf.float32, shape=[2], name='reward'),\n tf.constant(1.0, dtype=tf.float32, shape=[2], name='discount'),\n tf.constant([[1, 2], [3, 4]], dtype=tf.float32, name='observation'))\n\n @parameterized.named_parameters(*_test_cases('test_build'))\n def testBuild(self, network_cls):\n actor_network = network_cls(self._action_spec)\n value_network = DummyValueNet()\n\n policy = ppo_policy.PPOPolicy(\n self._time_step_spec,\n self._action_spec,\n actor_network=actor_network,\n value_network=value_network)\n\n self.assertEqual(policy.time_step_spec, self._time_step_spec)\n self.assertEqual(policy.action_spec, self._action_spec)\n\n @parameterized.named_parameters(*_test_cases('test_reset'))\n def testReset(self, network_cls):\n actor_network = network_cls(self._action_spec)\n value_network = DummyValueNet()\n\n policy = ppo_policy.PPOPolicy(\n self._time_step_spec,\n self._action_spec,\n actor_network=actor_network,\n value_network=value_network)\n\n policy_state = policy.get_initial_state(batch_size=1)\n\n # Dummy network has no policy_state so expect empty tuple from reset.\n self.assertEqual((), policy_state)\n\n @parameterized.named_parameters(*_test_cases('test_action'))\n def testAction(self, network_cls):\n actor_network = network_cls(self._action_spec)\n value_network = DummyValueNet()\n\n policy = ppo_policy.PPOPolicy(\n self._time_step_spec,\n self._action_spec,\n actor_network=actor_network,\n value_network=value_network)\n\n action_step = policy.action(self._time_step)\n self.assertEqual(action_step.action.shape.as_list(), [1, 1])\n self.assertEqual(action_step.action.dtype, tf.float32)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n actions_ = self.evaluate(action_step.action)\n self.assertTrue(np.all(actions_ >= self._action_spec.minimum))\n self.assertTrue(np.all(actions_ <= self._action_spec.maximum))\n\n @parameterized.named_parameters(*_test_cases('test_action_list'))\n def testActionList(self, network_cls):\n action_spec = [self._action_spec]\n actor_network = network_cls(action_spec)\n 
value_network = DummyValueNet()\n\n policy = ppo_policy.PPOPolicy(\n self._time_step_spec,\n action_spec,\n actor_network=actor_network,\n value_network=value_network)\n\n action_step = policy.action(self._time_step)\n self.assertIsInstance(action_step.action, list)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n actions_ = self.evaluate(action_step.action)\n self.assertTrue(np.all(actions_ >= action_spec[0].minimum))\n self.assertTrue(np.all(actions_ <= action_spec[0].maximum))\n\n @parameterized.named_parameters(*_test_cases('test_action_batch'))\n def testActionBatch(self, network_cls):\n actor_network = network_cls(self._action_spec)\n value_network = DummyValueNet()\n\n policy = ppo_policy.PPOPolicy(\n self._time_step_spec,\n self._action_spec,\n actor_network=actor_network,\n value_network=value_network)\n\n action_step = policy.action(self._time_step_batch)\n self.assertEqual(action_step.action.shape.as_list(), [2, 1])\n self.assertEqual(action_step.action.dtype, tf.float32)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n actions_ = self.evaluate(action_step.action)\n self.assertTrue(np.all(actions_ >= self._action_spec.minimum))\n self.assertTrue(np.all(actions_ <= self._action_spec.maximum))\n\n @parameterized.named_parameters(*_test_cases('test_action'))\n def testValue(self, network_cls):\n actor_network = network_cls(self._action_spec)\n value_network = DummyValueNet()\n\n policy = ppo_policy.PPOPolicy(\n self._time_step_spec,\n self._action_spec,\n actor_network=actor_network,\n value_network=value_network)\n\n batch_size = tf.compat.dimension_value(self._time_step.step_type.shape[0])\n policy_state = policy.get_initial_state(batch_size=batch_size)\n value_pred, unused_policy_state = policy.apply_value_network(\n self._time_step.observation, self._time_step.step_type, policy_state)\n self.assertEqual(value_pred.shape.as_list(), [1, 1])\n self.assertEqual(value_pred.dtype, tf.float32)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(value_pred)\n\n def testUpdate(self):\n tf.compat.v1.set_random_seed(1)\n actor_network = DummyActorNet(self._action_spec)\n value_network = DummyValueNet()\n\n policy = ppo_policy.PPOPolicy(\n self._time_step_spec,\n self._action_spec,\n actor_network=actor_network,\n value_network=value_network)\n new_policy = ppo_policy.PPOPolicy(\n self._time_step_spec,\n self._action_spec,\n actor_network=actor_network,\n value_network=value_network)\n\n action_step = policy.action(self._time_step)\n new_action_step = new_policy.action(self._time_step)\n\n self.assertEqual(action_step.action.shape, new_action_step.action.shape)\n self.assertEqual(action_step.action.dtype, new_action_step.action.dtype)\n\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(new_policy.update(policy))\n actions_, new_actions_ = self.evaluate(\n [action_step.action, new_action_step.action])\n self.assertAllEqual(actions_, new_actions_)\n\n def testDeterministicDistribution(self):\n actor_network = DummyActorNet(self._action_spec)\n value_network = DummyValueNet()\n\n policy = ppo_policy.PPOPolicy(\n self._time_step_spec,\n self._action_spec,\n actor_network=actor_network,\n value_network=value_network)\n\n action_step = policy.action(self._time_step)\n distribution_step = policy.distribution(self._time_step)\n self.assertIsInstance(distribution_step.action,\n tfp.distributions.Deterministic)\n distribution_mean = distribution_step.action.mean()\n 
self.evaluate(tf.compat.v1.global_variables_initializer())\n actions_ = self.evaluate(action_step.action)\n distribution_mean_ = self.evaluate(distribution_mean)\n self.assertNear(actions_, distribution_mean_, 1e-6)\n\n def testGaussianDistribution(self):\n actor_network = DummyActorDistributionNet(self._action_spec)\n value_network = DummyValueNet()\n\n policy = ppo_policy.PPOPolicy(\n self._time_step_spec,\n self._action_spec,\n actor_network=actor_network,\n value_network=value_network)\n\n distribution_step = policy.distribution(self._time_step)\n self.assertIsInstance(distribution_step.action, tfp.distributions.Normal)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.nest.pack_sequence_as", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.ones_like", "tensorflow.nest.flatten", "tensorflow.nest.map_structure", "tensorflow.constant", "tensorflow.compat.v1.initializers.constant", "tensorflow.test.main", "numpy.all", "tensorflow.compat.v1.set_random_seed", "tensorflow.compat.dimension_value" ] ]
juholeinonen/ParlAI
[ "612daf902d9194cead7b380f39ffbcc738c36a88" ]
[ "projects/blenderbot2/agents/blenderbot2.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nBlenderBot2 Agent Code.\n\nBlenderBot 2 combines a long-term memory module with a retriever module.\n\nThe Search Query Generator generates a query that tells BB2 to either access\nits memory or access the internet.\n\nThe Memory Decoder examines the context and generates memories to write to\nthe long-term memory module.\n\"\"\"\nimport torch\nimport torch.nn\nimport torch.nn.functional as F\nfrom typing import Union, Dict, List, Tuple, Optional, Any\n\nfrom parlai.agents.fid.fid import FidAgent\nfrom parlai.agents.rag.args import DPR_ZOO_MODEL, QUERY_MODEL_TYPES\nfrom parlai.agents.rag.rag import RagAgent\nfrom parlai.agents.rag.model_types import (\n RagTurn,\n RagSequence,\n RagToken,\n RagModelInterface,\n)\nfrom parlai.core.message import Message\nfrom parlai.core.metrics import AverageMetric\nfrom parlai.core.opt import Opt\nfrom parlai.core.params import ParlaiParser\nfrom parlai.core.torch_agent import Batch\nfrom parlai.tasks.wizard_of_internet.constants import (\n SELECTED_DOCS,\n SELECTED_DOCS_TITLES,\n SELECTED_SENTENCES,\n)\nfrom parlai.utils.torch import padded_3d\n\nfrom .modules import (\n BlenderBot2RagModel,\n T5BlenderBot2RagModel,\n BlenderBot2FidModel,\n T5BlenderBot2FidModel,\n)\nfrom .sub_modules import RetrievalType, KnowledgeAccessMethod\nfrom parlai.agents.fid.fid import SearchQuerySearchEngineFiDAgent\nfrom parlai.utils.fsdp import is_fsdp\n\n\nZOO_QUERY_GENERATOR = 'zoo:blenderbot2/query_generator/model'\nZOO_MEMORY_DECODER = 'zoo:blenderbot2/memory_decoder/model'\n\n\nclass BlenderBot2ModelTypeMixin(RagModelInterface):\n \"\"\"\n Override Normal RAG Model Types, in case we retrieve from both memory and search.\n \"\"\"\n\n def __init__(self, opt: Opt, null_idx: int):\n super().__init__(opt, null_idx)\n if (\n KnowledgeAccessMethod(opt['knowledge_access_method'])\n is KnowledgeAccessMethod.ALL\n ):\n self.n_docs *= 2\n\n\nclass BlenderBot2RagSequence(BlenderBot2ModelTypeMixin, RagSequence):\n def augment_batch_for_generation(\n self, batch: Batch, model: BlenderBot2RagModel\n ) -> Batch:\n \"\"\"\n Augment batch for generation.\n\n For RAG Sequence, we retrieve prior to generation, as we do not consider the\n document probabilities until after generating all of the beams.\n\n :param batch:\n batch to augment\n :param model:\n model to possibly help with augmenting\n\n :return batch:\n return batch with text vec swapped out.\n \"\"\"\n (expanded_input, _, doc_scores) = model.retrieve_and_concat(\n batch.text_vec,\n batch.text_vec.ne(self.null_idx).sum(1),\n batch.query_generator_vec,\n batch.query_vec,\n batch.input_turn_cnt_vec,\n batch.memory_vec,\n batch.num_memories,\n batch.gold_doc_vec,\n batch.gold_doc_title_vec,\n batch.num_gold_docs,\n batch.memory_decoder_vec,\n batch.num_memory_decoder_vecs,\n )\n doc_log_probs = F.log_softmax(doc_scores, dim=1)\n batch.src_text_vec = batch.text_vec\n batch.text_vec = expanded_input\n batch.doc_log_probs = doc_log_probs\n batch.batchsize = batch.text_vec.size(0)\n\n return batch\n\n def get_generation_input(\n self, batch: Batch\n ) -> Tuple[\n torch.LongTensor,\n Optional[torch.LongTensor],\n Optional[torch.Tensor],\n Optional[torch.Tensor],\n Optional[torch.Tensor],\n Optional[torch.Tensor],\n Optional[torch.Tensor],\n Optional[torch.Tensor],\n Optional[torch.Tensor],\n Optional[torch.Tensor],\n 
Optional[torch.Tensor],\n Optional[torch.Tensor],\n ]:\n \"\"\"\n For RAG Sequence, we retrieve prior to generation.\n \"\"\"\n assert batch.text_vec is not None\n return (\n batch.text_vec,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n )\n\n\nclass BlenderBot2RagToken(BlenderBot2ModelTypeMixin, RagToken):\n pass\n\n\nclass BlenderBot2RagTurn(BlenderBot2ModelTypeMixin, RagTurn):\n pass\n\n\nRAG_MODELS = {\n 'sequence': BlenderBot2RagSequence,\n 'token': BlenderBot2RagToken,\n 'turn': BlenderBot2RagTurn,\n}\n\n\nclass BlenderBot2RagAgent(RagAgent):\n \"\"\"\n Subclass RagAgent to provide BlenderBot2Model with appropriate inputs (specifically,\n memory vectors).\n \"\"\"\n\n model: BlenderBot2RagModel\n\n ##########################\n # Housekeeping functions #\n ##########################\n @classmethod\n def add_cmdline_args(\n cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None\n ) -> ParlaiParser:\n \"\"\"\n Add RAG Args.\n \"\"\"\n RagAgent.add_cmdline_args(parser, partial_opt)\n SearchQuerySearchEngineFiDAgent.add_cmdline_args(parser, partial_opt)\n bb2_group = parser.add_argument_group('BlenderBot2 Args')\n bb2_group.add_argument(\n '--knowledge-access-method',\n type=str,\n default=KnowledgeAccessMethod.CLASSIFY.value,\n choices=[r.value for r in KnowledgeAccessMethod],\n help='How to access knowledge for BlenderBot2 '\n 'classify => classify the input text, determine which knowledge to access\\n'\n 'memory_only => only access memories\\n'\n 'search_only => only access search\\n'\n 'all => for each input, access from memories and search\\n'\n 'none => do not access any knowledge.\\n',\n )\n bb2_group.add_argument(\n '--memory-key',\n type=str,\n default='full_text',\n help='Field in the observation from which to read memories.',\n )\n bb2_group.add_argument(\n '--query-generator-key',\n type=str,\n default='full_text',\n help='Field for input to the knowledge access classifier.',\n )\n bb2_group.add_argument(\n '--gold-document-key',\n type=str,\n default=SELECTED_DOCS,\n help='Field for selected docs.',\n )\n bb2_group.add_argument(\n '--gold-sentence-key',\n type=str,\n default=SELECTED_SENTENCES,\n help='Field for selected sentences',\n )\n bb2_group.add_argument(\n '--gold-document-titles-key',\n type=str,\n default=SELECTED_DOCS_TITLES,\n help='Field for selected docs titles.',\n )\n bb2_group.add_argument(\n '--insert-gold-docs',\n type='bool',\n default=False,\n help='Set true to insert gold docs into retrieved docs.',\n )\n bb2_group.add_argument(\n '--memory-extractor-phrase',\n type=str,\n default='persona:',\n help=\"phrase used to extract memories from `--memory-key` in the observation. 
\"\n \"For example, set to 'your persona:' to limit memories to only lines that \"\n \"contain 'your persona:'\",\n )\n bb2_group.add_argument(\n '--retriever-ignore-phrase',\n type=str,\n default='persona:',\n help='filter input to the global knowledge retriever such that any utterance containing '\n 'the phrase will not be given as input.',\n )\n q_gen_group = parser.add_argument_group('BlenderBot2 Query Generator Args')\n q_gen_group.add_argument(\n '--query-generator-ignore-phrase',\n type=str,\n default='persona:',\n help='filter input to the query generator such that any utterance containing '\n 'the phrase will not be given as input.',\n )\n q_gen_group.add_argument(\n '--query-generator-model-file',\n type=str,\n default=ZOO_QUERY_GENERATOR,\n help='path to a query generator; specify if searching OR classifying inputs.',\n )\n q_gen_group.add_argument(\n '--query-generator-delimiter',\n type=str,\n default='\\n',\n help='delimiter for the query generator',\n )\n q_gen_group.add_argument(\n '--query-generator-inference',\n type=str,\n default='beam',\n help='query generator inference type',\n )\n q_gen_group.add_argument(\n '--query-generator-beam-size', type=int, default=1, help='SQ Gen Beam Size'\n )\n q_gen_group.add_argument(\n '--query-generator-beam-min-length',\n type=int,\n default=2,\n help='SQ Gen Beam Min Length',\n )\n q_gen_group.add_argument(\n '--query-generator-truncate',\n type=int,\n default=-1,\n help='Specify >0 for truncation to SQ generator',\n )\n bb2_group.add_argument(\n '--memory-retriever-truncate',\n type=int,\n default=-1,\n help='Specify >0 for truncation to the memory retriever.',\n )\n bb2_group.add_argument(\n '--retriever-delimiter',\n type=str,\n default='\\n',\n help='delimiter for the retriever',\n )\n bb2_group.add_argument(\n '--share-search-and-memory-query-encoder',\n type='bool',\n default=False,\n help='if true, query encoder is shared between search and memory retrievers.',\n )\n bb2_group.add_argument(\n '--memory-reader-model',\n type=str,\n default=None,\n choices=QUERY_MODEL_TYPES,\n help='Model for accessing the memory',\n )\n bb2_group.add_argument(\n '--memory-doc-title-delimiter',\n type=str,\n default=' / ',\n help='title delimiter for memory docs',\n )\n bb2_group.add_argument(\n '--memory-writer-model',\n type=str,\n default='bert',\n hidden=True,\n help='model for writing the memories',\n )\n bb2_group.add_argument(\n '--memory-writer-model-file',\n type=str,\n default=DPR_ZOO_MODEL,\n hidden=True,\n help='model file for memory writer',\n )\n memory_decoder = parser.add_argument_group('BlenderBot2 Memory Decoder Args')\n memory_decoder.add_argument(\n '--memory-decoder-key',\n type=str,\n default='full_text',\n help='key of the observation for the memory decoder',\n )\n memory_decoder.add_argument(\n '--memory-decoder-ignore-phrase',\n type=str,\n default='persona:',\n help='filter input to the memory decoder such that any utterance containing '\n 'the phrase will not be given as input.',\n )\n memory_decoder.add_argument(\n '--memory-decoder-model-file',\n type=str,\n default=ZOO_MEMORY_DECODER,\n help='path to a memory decoder.',\n )\n memory_decoder.add_argument(\n '--memory-decoder-delimiter',\n type=str,\n default='\\n',\n help='delimiter for the memory decoder',\n )\n memory_decoder.add_argument(\n '--memory-decoder-beam-size',\n type=int,\n default=3,\n help='memory decoder Beam Size',\n )\n memory_decoder.add_argument(\n '--memory-decoder-beam-min-length',\n type=int,\n default=10,\n help='memory decoder Beam Min 
Length',\n )\n memory_decoder.add_argument(\n '--memory-decoder-truncate',\n type=int,\n default=-1,\n help='Specify >0 for truncation to memory decoder',\n )\n memory_decoder.add_argument(\n '--memory-decoder-one-line-memories',\n type='bool',\n default=False,\n help='specify to combine memories on one line, rather than several.',\n )\n return parser\n\n @property\n def rag_model_type(self) -> str:\n return self._rag_model_type\n\n @rag_model_type.setter\n def rag_model_type(self, model: str):\n self._rag_model_type = model\n self._rag_model_interface = RAG_MODELS[model](self.opt, self.NULL_IDX)\n\n @property\n def model_api(self) -> BlenderBot2RagModel:\n if hasattr(self.model, 'module') and not is_fsdp(self.model):\n return self.model.module\n else:\n return self.model\n\n def build_model(self) -> BlenderBot2RagModel:\n \"\"\"\n Build and return BlenderBot2RagModel.\n \"\"\"\n if self.generation_model == 't5':\n model = T5BlenderBot2RagModel(self.opt, self.dict)\n else:\n model = BlenderBot2RagModel(self.opt, self.dict)\n if self.opt['embedding_type'] != 'random':\n self._copy_embeddings(\n model.encoder.embeddings.weight, self.opt['embedding_type']\n )\n return model\n\n @classmethod\n def upgrade_opt(cls, opt_from_disk: Opt):\n # call the parent upgrades\n opt_from_disk = super().upgrade_opt(opt_from_disk)\n\n if 'memory_doc_delimiter' not in opt_from_disk:\n # 2020-06-22 old delimiter was ':'\n opt_from_disk['memory_doc_delimiter'] = ':'\n\n return opt_from_disk\n\n @staticmethod\n def update_state_dict(\n opt: Opt, state_dict: Dict[str, torch.Tensor], model: torch.nn.Module\n ):\n \"\"\"\n Override RagAgent.update_state_dict to store long term memory state.\n \"\"\"\n state_dict = RagAgent.update_state_dict(opt, state_dict, model)\n # 1. Retriever state\n if not [k for k in state_dict if 'long_term_memory' in k]:\n long_term_memory_state = {\n f\"long_term_memory.{k}\": v\n for k, v in model.long_term_memory.state_dict().items() # type: ignore\n }\n state_dict.update(long_term_memory_state)\n return state_dict\n\n ###############################\n # Text/Tokenization Overrides #\n ###############################\n def observe(self, observation: Union[Dict, Message]) -> Message:\n \"\"\"\n Overrides TA.observe to tokenize various additional vectors.\n \"\"\"\n observation = super().observe(observation)\n if 'memory_vec' not in observation and self.opt['memory_key'] in observation:\n self._set_memory_vec(observation)\n if (\n 'query_generator_vec' not in observation\n and self.opt['query_generator_key'] in observation\n ):\n self._set_query_generator_vec(observation)\n if 'gold_doc_vec' not in observation and all(\n k in observation\n for k in [\n self.opt['gold_document_key'],\n self.opt['gold_sentence_key'],\n self.opt['gold_document_titles_key'],\n ]\n ):\n self._set_gold_doc_vec(observation)\n if (\n 'memory_decoder_vec' not in observation\n and self.opt['memory_decoder_key'] in observation\n ):\n self._set_memory_decoder_vec(observation)\n return observation\n\n def _filter_text(self, text: str, filter_phrase: str, delimiter: str = '\\n') -> str:\n \"\"\"\n Filter text such that utterances containing a filter phrase are removed.\n\n :param text:\n text to filter\n :param filter_phrase:\n phrase on which to filter\n :param delimiter:\n optional extra delimiter on which to split\n\n :return text:\n return the text after filtering (including or excluding) turns with the filter phrase.\n \"\"\"\n split_text = [\n t\n for tt in text.split(self.opt.get('delimiter', '\\n'))\n for t in 
tt.split('\\n')\n ]\n turns = [t for t in split_text if filter_phrase not in t]\n if not turns:\n new_text = text\n else:\n new_text = delimiter.join(turns)\n return new_text\n\n def _remove_person_tokens(self, text: str) -> str:\n \"\"\"\n Remove person tokens from a text input.\n \"\"\"\n return text.replace(f'{self.P1_TOKEN} ', '').replace(f'{self.P2_TOKEN} ', '')\n\n def _set_query_vec(self, observation: Message) -> Message:\n \"\"\"\n Override RAG.set_query_vec to optionally filter phrases.\n \"\"\"\n query_str = observation[self._query_key]\n if self.opt['retriever_ignore_phrase']:\n query_str = self._filter_text(\n query_str,\n self.opt['retriever_ignore_phrase'],\n delimiter=self.opt['retriever_delimiter'],\n )\n if self.add_person_tokens:\n query_str = self._remove_person_tokens(query_str)\n observation['query_vec'] = self.model_api.tokenize_query(query_str)\n return observation\n\n def _set_memory_vec(self, observation: Message) -> Message:\n \"\"\"\n Tokenize the memories for use in long-term memory scoring.\n\n :param observation:\n observation with input text.\n\n :return observation:\n return observation with memory vec.\n \"\"\"\n mem_vecs = None\n method = KnowledgeAccessMethod(self.opt['knowledge_access_method'])\n if method in [\n KnowledgeAccessMethod.ALL,\n KnowledgeAccessMethod.CLASSIFY,\n KnowledgeAccessMethod.MEMORY_ONLY,\n ]:\n memories = observation[self.opt['memory_key']]\n if isinstance(memories, str):\n memories = [\n t\n for tt in memories.split(self.opt.get('delimiter', '\\n'))\n for t in tt.split('\\n')\n ]\n assert isinstance(memories, list)\n if self.opt['memory_extractor_phrase']:\n # extract text lines only containing the memory extractor phrase\n memories = [\n m for m in memories if self.opt['memory_extractor_phrase'] in m\n ]\n if memories:\n mem_vecs = [self.model_api.tokenize_memory(mem) for mem in memories]\n\n observation['memory_vec'] = mem_vecs\n return observation\n\n def _set_query_generator_vec(self, observation: Message) -> Message:\n \"\"\"\n Tokenize text for use in the query generator.\n\n :param observation:\n observation with input text.\n\n :return observation:\n return observation with query generator vec.\n \"\"\"\n query_generator_vec = None\n method = KnowledgeAccessMethod(self.opt['knowledge_access_method'])\n if (\n method\n in [\n KnowledgeAccessMethod.ALL,\n KnowledgeAccessMethod.CLASSIFY,\n KnowledgeAccessMethod.SEARCH_ONLY,\n ]\n and self.model_api.has_query_generator()\n ):\n query_generator_input = observation[self.opt['query_generator_key']]\n if self.opt['query_generator_ignore_phrase']:\n query_generator_input = self._filter_text(\n query_generator_input,\n self.opt['query_generator_ignore_phrase'],\n self.opt['query_generator_delimiter'],\n )\n if self.add_person_tokens:\n query_generator_input = self._remove_person_tokens(\n query_generator_input\n )\n query_generator_vec = self.model_api.tokenize_query_generator_input(\n query_generator_input\n )\n\n observation['query_generator_vec'] = query_generator_vec\n return observation\n\n def _set_gold_doc_vec(self, observation: Message) -> Message:\n \"\"\"\n Tokenize the gold documents, in case we want to include in retrieved documents.\n\n We chunk up the docs and try to find the chunk that contains the selected sentence.\n\n If we can't find it, we just use the first chunk.\n\n :param observation:\n observation with input text.\n\n :return observation:\n return observation with gold doc vec.\n \"\"\"\n if not observation[self.opt['gold_document_key']]:\n return 
observation\n doc_vecs = None\n doc_title_vecs = None\n method = KnowledgeAccessMethod(self.opt['knowledge_access_method'])\n chunk_len = self.opt.get(\"splitted_chunk_length\", 256)\n if method in [\n KnowledgeAccessMethod.ALL,\n KnowledgeAccessMethod.CLASSIFY,\n KnowledgeAccessMethod.SEARCH_ONLY,\n ]:\n selected_documents = observation[self.opt['gold_document_key']]\n sentences = observation[self.opt['gold_sentence_key']]\n document_titles = observation[self.opt['gold_document_titles_key']]\n if isinstance(selected_documents, str):\n documents = [selected_documents]\n assert isinstance(selected_documents, list)\n\n documents = []\n for doc in selected_documents:\n # Try to find the chunk with the selected sentence\n used_chunk = None\n words = doc.split(' ')\n chunks = [\n ' '.join(words[i : i + chunk_len])\n for i in range(0, len(words), chunk_len)\n ]\n for chunk in chunks:\n if any(s in chunk for s in sentences):\n used_chunk = chunk\n break\n if not used_chunk:\n used_chunk = chunks[0]\n documents.append(used_chunk)\n\n if documents:\n doc_vecs = [self.dict.txt2vec(doc) for doc in documents]\n doc_title_vecs = [self.dict.txt2vec(title) for title in document_titles]\n\n observation['gold_doc_vec'] = doc_vecs\n observation['gold_doc_title_vec'] = doc_title_vecs\n return observation\n\n def _set_memory_decoder_vec(self, observation: Message) -> Message:\n \"\"\"\n Tokenize the input to the memory decoder.\n\n :param observation:\n observation with input text.\n\n :return observation:\n return observation with memory vec.\n \"\"\"\n memory_decoder_vec = None\n method = KnowledgeAccessMethod(self.opt['knowledge_access_method'])\n if (\n method\n in [\n KnowledgeAccessMethod.ALL,\n KnowledgeAccessMethod.CLASSIFY,\n KnowledgeAccessMethod.MEMORY_ONLY,\n ]\n and self.model_api.has_memory_decoder()\n ):\n memory_decoder_input = observation[self.opt['memory_decoder_key']]\n if self.opt['memory_decoder_ignore_phrase']:\n memory_decoder_input = self._filter_text(\n memory_decoder_input,\n self.opt['memory_decoder_ignore_phrase'],\n self.opt['memory_decoder_delimiter'],\n )\n if self.add_person_tokens:\n memory_decoder_input = self._remove_person_tokens(memory_decoder_input)\n conv_lines = [\n t\n for tt in memory_decoder_input.split(self.opt.get('delimiter', '\\n'))\n for t in tt.split('\\n')\n ]\n memory_decoder_vec = [\n self.model_api.tokenize_memory_decoder_input(i) for i in conv_lines\n ]\n\n observation['memory_decoder_vec'] = memory_decoder_vec\n return observation\n\n def batchify(self, obs_batch: List[Message], sort: bool = False) -> Batch:\n \"\"\"\n Overrides RagAgent.batchify to add several input vectors.\n \"\"\"\n batch = super().batchify(obs_batch, sort)\n valid_exs = [ex for ex in obs_batch if self.is_valid(ex)]\n batch.memory_vec = None\n batch.num_memories = None\n batch.query_generator_vec = None\n batch.gold_doc_vec = None\n batch.gold_doc_title_vec = None\n batch.num_gold_docs = None\n batch.memory_decoder_vec = None\n batch.num_memory_decoder_vecs = None\n if any(ex.get('memory_vec') is not None for ex in valid_exs):\n batch = self._set_batch_memory_vec(valid_exs, batch)\n if any(ex.get('query_generator_vec') is not None for ex in valid_exs):\n batch = self._set_batch_query_generator_vec(valid_exs, batch)\n if any(ex.get('gold_doc_vec') is not None for ex in valid_exs):\n batch = self._set_batch_gold_doc_vec(valid_exs, batch)\n if any(ex.get('memory_decoder_vec') is not None for ex in valid_exs):\n batch = self._set_batch_memory_decoder_vec(valid_exs, batch)\n return 
batch\n\n def _set_batch_memory_vec(self, valid_exs: List[Message], batch: Batch) -> Batch:\n \"\"\"\n Set the memory vec for the batch.\n \"\"\"\n mems = []\n num_mems = []\n for ex in valid_exs:\n if ex.get('memory_vec') is not None:\n ms, _ = self._pad_tensor(ex['memory_vec'])\n mems.append(ms)\n num_mems.append(len(ex['memory_vec']))\n else:\n num_mems.append(0)\n batch.memory_vec = padded_3d(mems)\n batch.num_memories = torch.LongTensor(num_mems)\n return batch\n\n def _set_batch_query_generator_vec(\n self, valid_exs: List[Message], batch: Batch\n ) -> Batch:\n \"\"\"\n Set the query generator vec for the batch.\n \"\"\"\n _q_gens = [ex.get('query_generator_vec', self.EMPTY) for ex in valid_exs]\n q_gen_vecs, _lens = self._pad_tensor(_q_gens)\n batch.query_generator_vec = q_gen_vecs\n return batch\n\n def _set_batch_gold_doc_vec(self, valid_exs: List[Message], batch: Batch) -> Batch:\n \"\"\"\n Set the gold docs vecs for the batch.\n \"\"\"\n docs = []\n titles = []\n num_docs = []\n for ex in valid_exs:\n if ex.get('gold_doc_vec') is not None:\n ds, _ = self._pad_tensor(ex['gold_doc_vec'])\n ts, _ = self._pad_tensor(ex['gold_doc_title_vec'])\n docs.append(ds)\n titles.append(ts)\n num_docs.append(len(ex['gold_doc_vec']))\n else:\n docs.append(self.EMPTY.unsqueeze(0))\n titles.append(self.EMPTY.unsqueeze(0))\n num_docs.append(0)\n batch.gold_doc_vec = padded_3d(docs)\n batch.gold_doc_title_vec = padded_3d(titles)\n batch.num_gold_docs = torch.LongTensor(num_docs)\n return batch\n\n def _set_batch_memory_decoder_vec(\n self, valid_exs: List[Message], batch: Batch\n ) -> Batch:\n \"\"\"\n Set the memory decoder vec for the batch.\n \"\"\"\n memory_dec_toks = []\n num_memory_dec_toks = []\n for ex in valid_exs:\n if ex.get('memory_decoder_vec') is not None:\n p_sum_vecs, _lens = self._pad_tensor(ex['memory_decoder_vec'])\n memory_dec_toks.append(p_sum_vecs)\n num_memory_dec_toks.append(len(ex['memory_decoder_vec']))\n else:\n num_memory_dec_toks.append(0)\n batch.memory_decoder_vec = padded_3d(memory_dec_toks)\n batch.num_memory_decoder_vecs = torch.LongTensor(num_memory_dec_toks)\n return batch\n\n def eval_step(self, batch):\n output = super().eval_step(batch)\n if output is None or not hasattr(self.model, 'retriever'):\n return output\n if hasattr(self.model_api.retriever, 'top_docs'):\n output.top_docs = self.model_api.retriever.top_docs\n if hasattr(self.model_api.retriever, 'search_queries'):\n output.search_queries = self.model_api.retriever.search_queries\n return output\n\n def _model_input(\n self, batch: Batch\n ) -> Tuple[\n torch.LongTensor,\n torch.LongTensor,\n torch.LongTensor,\n torch.LongTensor,\n torch.LongTensor,\n torch.LongTensor,\n torch.LongTensor,\n torch.LongTensor,\n torch.LongTensor,\n torch.LongTensor,\n torch.LongTensor,\n torch.LongTensor,\n ]:\n \"\"\"\n Override RagAgent._model_input to include several more input vectors.\n\n See BlenderBot2RagModel.encoder for details.\n \"\"\"\n return (\n batch.text_vec,\n batch.text_vec.ne(self.NULL_IDX).sum(1),\n batch.query_vec,\n batch.input_turn_cnt_vec,\n batch.memory_vec,\n batch.num_memories,\n batch.query_generator_vec,\n batch.gold_doc_vec,\n batch.gold_doc_title_vec,\n batch.num_gold_docs,\n batch.memory_decoder_vec,\n batch.num_memory_decoder_vecs,\n )\n\n def compute_loss(\n self, batch: Batch, return_output: bool = False\n ) -> Union[torch.Tensor, Tuple[torch.Tensor, Any]]:\n \"\"\"\n Override Rag.compute_loss to add some additional metrics.\n \"\"\"\n loss, output = super().compute_loss(batch, 
return_output=True)\n assert isinstance(self.model, BlenderBot2RagModel)\n if (\n KnowledgeAccessMethod(self.opt['knowledge_access_method'])\n is KnowledgeAccessMethod.CLASSIFY\n and self.model_api.has_query_generator()\n ):\n _scores, _preds, enc_state, *_ = output\n _, _, input_turns_cnt, _, _ = enc_state\n retrieval_type = self.model_api.get_retrieval_type()\n assert isinstance(retrieval_type, torch.Tensor)\n if input_turns_cnt is not None:\n new_ret_type = torch.zeros(input_turns_cnt.size(0))\n offset = 0\n for i in range(input_turns_cnt.size(0)):\n new_ret_type[i] = retrieval_type[offset]\n offset += input_turns_cnt[i]\n retrieval_type = new_ret_type\n self.record_local_metric(\n 'search_class',\n AverageMetric.many(\n retrieval_type.eq(RetrievalType.SEARCH.value).int().tolist(),\n [1] * retrieval_type.size(0),\n ),\n )\n self.record_local_metric(\n 'memory_class',\n AverageMetric.many(\n retrieval_type.eq(RetrievalType.MEMORY.value).int().tolist(),\n [1] * retrieval_type.size(0),\n ),\n )\n self.record_local_metric(\n 'none_class',\n AverageMetric.many(\n retrieval_type.eq(RetrievalType.NONE.value).int().tolist(),\n [1] * retrieval_type.size(0),\n ),\n )\n if return_output:\n return loss, output\n else:\n return loss\n\n\nclass BlenderBot2FidAgent(FidAgent, BlenderBot2RagAgent):\n model: BlenderBot2FidModel\n\n def build_model(self) -> Union[BlenderBot2FidModel, T5BlenderBot2FidModel]:\n if self.generation_model == 't5':\n model = T5BlenderBot2FidModel(self.opt, self.dict)\n else:\n model = BlenderBot2FidModel(self.opt, self.dict)\n if self.opt['embedding_type'] != 'random':\n self._copy_embeddings(\n model.encoder.embeddings.weight, self.opt['embedding_type']\n )\n return model\n" ]
[ [ "torch.LongTensor", "torch.nn.functional.log_softmax" ] ]
whwu95/DSANet
[ "5c877ebdc2126f1e170b1f4079451110cf4422d7" ]
[ "codes/models/builder.py" ]
[ "import torch.nn as nn\n\nfrom ..utils import Registry, build_from_cfg\n\nRECOGNIZERS = Registry('recognizer')\nBACKBONES = Registry('backbone')\nHEADS = Registry('head')\nSPATIAL_TEMPORAL_MODULES = Registry('spatial_temporal_module')\nSEGMENTAL_CONSENSUSES = Registry('segmental_consensus')\n\n\ndef build(cfg, registry, default_args=None):\n if isinstance(cfg, list):\n modules = [\n build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg\n ]\n return nn.Sequential(*modules)\n else:\n return build_from_cfg(cfg, registry, default_args)\n\n\ndef build_recognizer(cfg, train_cfg=None, test_cfg=None):\n return build(cfg, RECOGNIZERS,\n dict(train_cfg=train_cfg, test_cfg=test_cfg))\n\n\ndef build_backbone(cfg):\n return build(cfg, BACKBONES)\n\n\ndef build_head(cfg):\n return build(cfg, HEADS)\n\n\ndef build_spatial_temporal_module(cfg):\n return build(cfg, SPATIAL_TEMPORAL_MODULES)\n\n\ndef build_segmental_consensus(cfg):\n return build(cfg, SEGMENTAL_CONSENSUSES)\n" ]
[ [ "torch.nn.Sequential" ] ]
zhengxuanyu/NNByPytorch
[ "f90b807a44234a48f914e03e45c9691f17eb3189" ]
[ "NN_PyTorch/09_optimizer.py" ]
[ "import torch\r\nimport torch.nn as nn\r\nimport torch.utils.data as data\r\nimport torch.nn.functional as F\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# hyper parameters\r\nLR = 0.01\r\nBATCH_SIZE = 32\r\nEPOCH = 12\r\n\r\nx = torch.unsqueeze(torch.linspace(-1, 1, 1000), dim=1)\r\ny = x.pow(2) + 0.1*torch.normal(torch.zeros(*x.size()))\r\n\r\ntorch_dataset = data.TensorDataset(x, y)\r\nloader = data.DataLoader(\r\n dataset=torch_dataset,\r\n batch_size=BATCH_SIZE,\r\n shuffle=True,\r\n num_workers=2,\r\n)\r\n\r\n\r\n# initialize four neural networks\r\nnet_SGD = nn.Sequential(\r\n nn.Linear(1, 20),\r\n nn.ReLU(),\r\n nn.Linear(20, 1)\r\n)\r\n\r\nnet_Momentum = nn.Sequential(\r\n nn.Linear(1, 20),\r\n nn.ReLU(),\r\n nn.Linear(20, 1)\r\n)\r\n\r\nnet_RMSprop = nn.Sequential(\r\n nn.Linear(1, 20),\r\n nn.ReLU(),\r\n nn.Linear(20, 1)\r\n)\r\n\r\nnet_Adam = nn.Sequential(\r\n nn.Linear(1, 20),\r\n nn.ReLU(),\r\n nn.Linear(20, 1)\r\n)\r\n\r\nif __name__ == '__main__':\r\n nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]\r\n\r\n # different optimizers\r\n opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR)\r\n opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)\r\n opt_RMSprop = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)\r\n opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))\r\n optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]\r\n\r\n loss_func = torch.nn.MSELoss()\r\n losses_his = [[], [], [], []] # record loss\r\n\r\n # training\r\n for epoch in range(EPOCH):\r\n print('Epoch: ', epoch)\r\n for step, (batch_x, batch_y) in enumerate(loader):\r\n for net, opt, l_his in zip(nets, optimizers, losses_his):\r\n prediction = net(batch_x)\r\n loss = loss_func(prediction, batch_y)\r\n opt.zero_grad()\r\n loss.backward()\r\n opt.step()\r\n l_his.append(loss.data.numpy())\r\n\r\n labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']\r\n for i, l_his in enumerate(losses_his):\r\n plt.plot(l_his, label=labels[i])\r\n plt.legend(loc='best')\r\n plt.xlabel('Steps')\r\n plt.ylabel('Loss')\r\n plt.ylim((0, 0.2))\r\n plt.show()\r\n" ]
[ [ "torch.nn.Linear", "torch.nn.MSELoss", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "torch.linspace", "torch.nn.ReLU", "torch.utils.data.DataLoader", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "torch.utils.data.TensorDataset" ] ]
Jan20/mnist
[ "00d3e2be1887206218ccdabb07052e7a36d10731" ]
[ "app/learning/visualization.py" ]
[ "import keras.callbacks\nimport matplotlib.pyplot as plt\nfrom keras import models\nfrom keras.models import load_model\nfrom keras.preprocessing import image\nfrom numpy import expand_dims, ndarray, zeros, clip\n\n\ndef display_progress(history: keras.callbacks.History):\n \"\"\"\n Visualizes the training and validation progress.\n\n @param history:\n @return: None\n \"\"\"\n acc = history.history['accuracy']\n val_acc = history.history['val_acc']\n\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n epochs = range(1, len(acc) + 1)\n\n plt.plot(epochs, acc, 'bo', label='Training acc')\n plt.plot(epochs, val_acc, 'b', label='Validation acc')\n plt.title('Training and validation accuracy')\n plt.legend()\n\n plt.figure()\n\n plt.plot(epochs, loss, 'bo', label='Training loss')\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n\n plt.show()\n\n\ndef visualize_graph(image_path: str) -> ndarray:\n\n img = image.load_img(path=image_path, target_size=(500, 500))\n\n image_tensor = image.img_to_array(img)\n\n image_tensor = expand_dims(image_tensor, axis=0)\n\n image_tensor /= 255.\n\n print(image_tensor.shape)\n\n # plt.imshow(image_tensor[0])\n # plt.show()\n\n return image_tensor\n\n\ndef visualize_feature_map(image_path: str, model_path: str) -> None:\n\n model = load_model(model_path)\n\n image_tensor = visualize_graph(image_path=image_path)\n\n layer_outputs = [layer.output for layer in model.layers[:8]]\n\n activation_model = models.Model(inputs=model.input, outputs=layer_outputs)\n\n activations = activation_model.predict(image_tensor)\n\n first_layer_activation = activations[0]\n\n print(first_layer_activation.shape)\n\n plt.matshow(first_layer_activation[0, :, :, 2], cmap='viridis')\n plt.show()\n\n\ndef visualize_feature_maps(image_path: str, model_path: str) -> None:\n\n model = load_model(model_path)\n\n layer_names = []\n\n image_tensor = visualize_graph(image_path=image_path)\n\n layer_outputs = [layer.output for layer in model.layers[:8]]\n\n activation_model = models.Model(inputs=model.input, outputs=layer_outputs)\n\n activations = activation_model.predict(image_tensor)\n\n for layer in model.layers[:12]:\n\n layer_names.append(layer.name)\n\n images_per_row = 16\n\n test = zip(layer_names, activations)\n\n for layer_name, layer_activation in zip(layer_names, activations):\n\n n_features = layer_activation.shape[-1]\n\n size = layer_activation.shape[1]\n\n n_cols = n_features // images_per_row\n\n display_grid = zeros((size * n_cols, images_per_row * size))\n\n for col in range(n_cols):\n\n for row in range(images_per_row):\n\n channel_image = layer_activation[0, :, :, col * images_per_row + row]\n\n channel_image -= channel_image.mean()\n channel_image /= channel_image.std()\n channel_image *= 64\n channel_image += 128\n channel_image = clip(channel_image, 0, 255).astype('uint8')\n\n display_grid[col * size : (col + 1) * size,\n row * size : (row + 1) * size] = channel_image\n\n scale = 1. / size\n\n plt.figure(figsize=(scale * display_grid.shape[1],\n scale * display_grid.shape[0]))\n\n plt.title(layer_name)\n plt.grid(False)\n plt.imshow(display_grid, aspect='auto', cmap='viridis')\n plt.show()\n" ]
[ [ "matplotlib.pyplot.matshow", "numpy.zeros", "matplotlib.pyplot.grid", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "numpy.clip", "matplotlib.pyplot.show", "numpy.expand_dims", "matplotlib.pyplot.imshow" ] ]
arokem/ffn
[ "95d55f77e9560a8d34715ec2bdfef554b205824d" ]
[ "compute_partitions.py" ]
[ "r\"\"\"Computes the partition map for a segmentation.\n\nFor every labeled voxel of the input volume, computes the fraction of identically\nlabeled voxels within a neighborhood of radius `lom_radius`, and then quantizes\nthat number according to `thresholds`.\n\nSample invocation:\n python compute_partitions.py \\\n --input_volume third_party/neuroproof_examples/training_sample2/groundtruth.h5:stack \\\n --output_volume af.h5:af \\\n --thresholds 0.025,0.05,0.075,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9 \\\n --lom_radius 16,16,16 \\\n --min_size 10000\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nfrom ffn.inference import segmentation\nfrom ffn.inference import storage\nfrom ffn.utils import bounding_box\n\nimport h5py\nimport numpy as np\nfrom scipy.ndimage import filters\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('input_volume', None,\n 'Segmentation volume as <volume_path>:<dataset>, where'\n 'volume_path points to a HDF5 volume.')\nflags.DEFINE_string('output_volume', None,\n 'Volume in which to save the partition map, as '\n '<volume_path>:<dataset>.')\nflags.DEFINE_list('thresholds', None,\n 'List of activation voxel fractions used for partitioning.')\nflags.DEFINE_list('lom_radius', None,\n 'Local Object Mask (LOM) radii as (x, y, z).')\nflags.DEFINE_list('id_whitelist', None,\n 'Whitelist of object IDs for which to compute the partition '\n 'numbers.')\nflags.DEFINE_list('exclusion_regions', None,\n 'List of (x, y, z, r) tuples specifying spherical regions to '\n 'mark as excluded (i.e. set the output value to 255).')\nflags.DEFINE_string('mask_configs', None,\n 'MaskConfigs proto in text foramt. Any locations where at '\n 'least one voxel of the LOM is masked will be marked as '\n 'excluded.')\nflags.DEFINE_integer('min_size', 10000,\n 'Minimum number of voxels for a segment to be considered for '\n 'partitioning.')\n\n\ndef _summed_volume_table(val):\n \"\"\"Computes a summed volume table of 'val'.\"\"\"\n val = val.astype(np.int32)\n svt = val.cumsum(axis=0).cumsum(axis=1).cumsum(axis=2)\n return np.pad(svt, [[1, 0], [1, 0], [1, 0]], mode='constant')\n\n\ndef _query_summed_volume(svt, diam):\n \"\"\"Queries a summed volume table.\n\n Operates in 'VALID' mode, i.e. 
only computes the sums for voxels where the\n full diam // 2 context is available.\n\n Args:\n svt: summed volume table (see _summed_volume_table)\n diam: diameter (z, y, x tuple) of the area within which to compute sums\n\n Returns:\n sum of all values within a diam // 2 radius (under L1 metric) of every voxel\n in the array from which 'svt' was built.\n \"\"\"\n return (\n svt[diam[0]:, diam[1]:, diam[2]:] - svt[diam[0]:, diam[1]:, :-diam[2]] -\n svt[diam[0]:, :-diam[1], diam[2]:] - svt[:-diam[0], diam[1]:, diam[2]:] +\n svt[:-diam[0], :-diam[1], diam[2]:] + svt[:-diam[0], diam[1]:, :-diam[2]]\n + svt[diam[0]:, :-diam[1], :-diam[2]] -\n svt[:-diam[0], :-diam[1], :-diam[2]])\n\n\ndef load_mask(mask_configs, box, lom_diam_zyx):\n if mask_configs is None:\n return None\n\n mask = storage.build_mask(self.mask_config.masks, box.start[::-1],\n box.size[::-1])\n svt = _summed_volume_table(mask)\n mask = _query_summed_volume(svt, lom_diam_zyx) >= 1\n return mask\n\n\ndef compute_partitions(seg_array,\n thresholds,\n lom_radius,\n id_whitelist=None,\n exclusion_regions=None,\n mask_configs=None,\n min_size=10000):\n \"\"\"Computes quantized fractions of active voxels in a local object mask.\n\n Args:\n thresholds: list of activation voxel fractions to use for partitioning.\n lom_radius: LOM radii as [x, y, z]\n id_whitelist: (optional) whitelist of object IDs for which to compute the\n partition numbers\n exclusion_regions: (optional) list of x, y, z, r tuples specifying regions\n to mark as excluded (with 255). The regions are spherical, with\n (x, y, z) definining the center of the sphere and 'r' specifying its\n radius. All values are in voxels.\n mask_configs: (optional) MaskConfigs proto; any locations where at least\n one voxel of the LOM is masked will be marked as excluded (255).\n\n Returns:\n tuple of:\n corner of output subvolume as (x, y, z)\n uint8 ndarray of active fraction voxels\n \"\"\"\n seg_array = segmentation.clear_dust(seg_array, min_size=min_size)\n assert seg_array.ndim == 3\n\n lom_radius = np.array(lom_radius)\n lom_radius_zyx = lom_radius[::-1]\n lom_diam_zyx = 2 * lom_radius_zyx + 1\n\n def _sel(i):\n if i == 0:\n return slice(None)\n else:\n return slice(i, -i)\n\n valid_sel = [_sel(x) for x in lom_radius_zyx]\n output = np.zeros(seg_array[valid_sel].shape, dtype=np.uint8)\n corner = lom_radius\n\n if exclusion_regions is not None:\n sz, sy, sx = output.shape\n hz, hy, hx = np.mgrid[:sz, :sy, :sx]\n\n hz += corner[2]\n hy += corner[1]\n hx += corner[0]\n\n for x, y, z, r in exclusion_regions:\n mask = (hx - x)**2 + (hy - y)**2 + (hz - z)**2 <= r**2\n output[mask] = 255\n\n labels = set(np.unique(seg_array))\n logging.info('Labels to process: %d', len(labels))\n\n if id_whitelist is not None:\n labels &= set(id_whitelist)\n\n mask = load_mask(mask_configs,\n bounding_box.BoundingBox(\n start=(0, 0, 0), size=seg_array.shape[::-1]),\n lom_diam_zyx)\n if mask is not None:\n output[mask] = 255\n\n fov_volume = np.prod(lom_diam_zyx)\n for l in labels:\n # Don't create a mask for the background component.\n if l == 0:\n continue\n\n object_mask = (seg_array == l)\n\n svt = _summed_volume_table(object_mask)\n active_fraction = _query_summed_volume(svt, lom_diam_zyx) / fov_volume\n assert active_fraction.shape == output.shape\n\n # Drop context that is only necessary for computing the active fraction\n # (i.e. 
one LOM radius in every direction).\n object_mask = object_mask[valid_sel]\n\n # TODO(mjanusz): Use np.digitize here.\n for i, th in enumerate(thresholds):\n output[object_mask & (active_fraction < th) & (output == 0)] = i + 1\n\n output[object_mask & (active_fraction >= thresholds[-1]) &\n (output == 0)] = len(thresholds) + 1\n\n logging.info('Done processing %d', l)\n\n logging.info('Nonzero values: %d', np.sum(output > 0))\n\n return corner, output\n\n\ndef adjust_bboxes(bboxes, lom_radius):\n ret = []\n\n for bbox in bboxes:\n bbox = bbox.adjusted_by(start=lom_radius, end=-lom_radius)\n if np.all(bbox.size > 0):\n ret.append(bbox)\n\n return ret\n\n\ndef main(argv):\n del argv # Unused.\n path, dataset = FLAGS.input_volume.split(':')\n with h5py.File(path) as f:\n segmentation = f[dataset]\n bboxes = []\n for name, v in segmentation.attrs.items():\n if name.startswith('bounding_boxes'):\n for bbox in v:\n bboxes.append(bounding_box.BoundingBox(bbox[0], bbox[1]))\n\n if not bboxes:\n bboxes.append(\n bounding_box.BoundingBox(\n start=(0, 0, 0), size=segmentation.shape[::-1]))\n\n shape = segmentation.shape\n lom_radius = [int(x) for x in FLAGS.lom_radius]\n corner, partitions = compute_partitions(\n segmentation[...], [float(x) for x in FLAGS.thresholds], lom_radius,\n FLAGS.id_whitelist, FLAGS.exclusion_regions, FLAGS.mask_configs,\n FLAGS.min_size)\n\n bboxes = adjust_bboxes(bboxes, np.array(lom_radius))\n\n path, dataset = FLAGS.output_volume.split(':')\n with h5py.File(path, 'w') as f:\n ds = f.create_dataset(dataset, shape=shape, dtype=np.uint8, fillvalue=255,\n chunks=True, compression='gzip')\n s = partitions.shape\n ds[corner[2]:corner[2] + s[0],\n corner[1]:corner[1] + s[1],\n corner[0]:corner[0] + s[2]] = partitions\n ds.attrs['bounding_boxes'] = [(b.start, b.size) for b in bboxes]\n ds.attrs['partition_counts'] = np.array(np.unique(partitions,\n return_counts=True))\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('input_volume')\n flags.mark_flag_as_required('output_volume')\n flags.mark_flag_as_required('thresholds')\n flags.mark_flag_as_required('lom_radius')\n app.run(main)\n" ]
[ [ "numpy.array", "numpy.pad", "numpy.zeros", "numpy.sum", "numpy.prod", "numpy.all", "numpy.unique" ] ]
Wisc-HCI/wiscutils
[ "fd53f06d3d3e70aef7e8a7a7121edfc02acf0220" ]
[ "wisc_tools/src/wisc_tools/structures/structures.py" ]
[ "import numpy as np\nimport math\nfrom scipy import interpolate\nfrom pyquaternion import Quaternion as pyQuaternion\nfrom wisc_tools.convenience import pairwise\nfrom wisc_tools.conversions import transformations\nfrom wisc_msgs.msg import Euler, EulerPose, EEPoseGoals\nfrom geometry_msgs.msg import Vector3 as rosVector3\nfrom geometry_msgs.msg import Point as rosPoint\nfrom geometry_msgs.msg import Quaternion as rosQuaternion\nfrom geometry_msgs.msg import Pose as rosPose\nfrom abc import abstractmethod\nimport json\n\nclass Mode(object):\n '''\n Mode Class\n Itty bitty mode object that handles override and deferred values\n '''\n def __init__(self, override_value=None, deferred_value=None):\n self.deferred_value = deferred_value\n self.override_value = override_value\n\n @property\n def empty(self):\n return self.override_value == None and self.deferred_value == None\n\n @property\n def has_override(self):\n return self.override_value != None\n\n @property\n def has_deferred(self):\n return self.deferred_value != None\n\n def __repr__(self):\n return '[override:{0},defer:{1}]'.format(self.override_value,self.deferred_value)\n\nclass Position(object):\n def __init__(self,x,y,z):\n self.x = x\n self.y = y\n self.z = z\n\n @property\n def ros_vector3(self):\n return rosVector3(x=self.x,y=self.y,z=self.z)\n\n @property\n def ros_point(self):\n return rosPoint(x=self.x,y=self.y,z=self.z)\n\n @property\n def array(self):\n return np.array([self.x,self.y,self.z])\n\n @property\n def dict(self):\n return {'x':self.x,'y':self.y,'z':self.z}\n\n @classmethod\n def from_ros_vector3(cls,vector3):\n return Position(x=vector3.x,y=vector3.y,z=vector3.z)\n\n @classmethod\n def from_ros_point(cls,point):\n return Position(x=point.x,y=point.y,z=point.z)\n\n def distance_to(self,other):\n return math.sqrt(math.pow(self.x-other.x,2)+math.pow(self.y-other.y,2)+math.pow(self.z-other.z,2))\n\n def __repr__(self):\n return '[x:{0},y:{1},z:{2}]'.format(self.x,self.y,self.z)\n\nclass Quaternion(pyQuaternion):\n\n @property\n def ros_quaternion(self):\n return rosQuaternion(x=self.x,y=self.y,z=self.z,w=self.w)\n\n @property\n def ros_euler(self):\n (r,p,y) = transformations.euler_from_quaternion([self.w,self.x,self.y,self.z],'szxy')\n return Euler(r=r,p=p,y=y)\n\n @property\n def dict(self):\n (r,p,y) = transformations.euler_from_quaternion([self.w,self.x,self.y,self.z],'szxy')\n return {'r':r,'p':p,'y':y}\n\n @classmethod\n def from_vector_quaternion(self,vector):\n return Quaternion(w=vector[0],x=vector[1],y=vector[2],z=vector[3])\n\n @classmethod\n def from_py_quaternion(self,pyquaternion):\n return Quaternion(x=pyquaternion.x,y=pyquaternion.y,z=pyquaternion.z,w=pyquaternion.w)\n\n @classmethod\n def from_ros_quaternion(self,quaternion):\n return Quaternion(x=quaternion.x,y=quaternion.y,z=quaternion.z,w=quaternion.w)\n\n @classmethod\n def from_ros_euler(self,euler):\n tf_quat = transformations.quaternion_from_euler(euler.r,euler.p,euler.y,'szxy')\n return Quaternion.from_vector_quaternion(tf_quat)\n\n @classmethod\n def from_euler_dict(self,dict):\n tf_quat = transformations.quaternion_from_euler(dict['r'],dict['p'],dict['y'],'szxy')\n return Quaternion.from_vector_quaternion(tf_quat)\n\n def distance_to(self,other):\n return pyQuaternion.distance(self,other)\n\nclass Pose(object):\n def __init__(self,position,quaternion):\n self.position = position\n self.quaternion = quaternion\n\n @property\n def ros_pose(self):\n return 
rosPose(position=self.position.ros_point,orientation=self.quaternion.ros_quaternion)\n\n @property\n def ros_eulerpose(self):\n return EulerPose(position=self.position.ros_point,orientation=self.orientation.ros_euler)\n\n @classmethod\n def from_ros_eulerpose(self,eulerpose):\n pass\n\n @classmethod\n def from_eulerpose_dict(cls,dict):\n position = Position(**dict['position'])\n quaternion = Quaternion.from_euler_dict(dict['rotation'])\n return cls(position,quaternion)\n\n @classmethod\n def from_pose_dict(cls,dict):\n position = Position(**dict['position'])\n quaternion = Quaternion(**dict['quaternion'])\n return cls(position,quaternion)\n\n @classmethod\n def from_ros_pose(self,pose):\n return Pose(position=Position.from_ros_point(pose.position),orientation=Quaternion.from_ros_quaternion(pose.orientation))\n\n @property\n def dict(self):\n return {'position':self.position.dict,'rotation':self.quaternion.dict}\n\n def distance_to(self,pose):\n return (self.position.distance_to(pose.position),self.quaternion.distance_to(pose.quaternion))\n\n def __repr__(self):\n return '({0}, {1})'.format(self.position,self.quaternion)\n\nclass Trajectory(object):\n\n def __init__(self,waypoints,kind='slinear',circuit=False,min_value=None,max_value=None):\n self.wps = waypoints\n self.kind = kind\n self.circuit = circuit\n self.min_value = min_value\n self.max_Value = max_value\n self.__interpolate__()\n\n @property\n def t(self):\n if len(self.wps) < 4:\n base = self.wps[0]['time']\n return [base-20,base-15,base-10,base-5] + [wp['time'] for wp in self.wps] + [self.wps[-1]['time']+5, self.wps[-1]['time']+10, self.wps[-1]['time']+15]\n else:\n return [wp['time'] for wp in self.wps] + [self.wps[-1]['time']+5, self.wps[-1]['time']+10, self.wps[-1]['time']+15]\n\n def __len__(self):\n t = self.t\n start = min(t)\n stop = max(t)\n return stop-start\n\n def __pad__(self,vals):\n if len(vals) < 4:\n return [vals[0],vals[0],vals[0],vals[0]] + vals + [vals[-1],vals[-1],vals[-1]]\n else:\n return vals + [vals[-1],vals[-1],vals[-1]]\n\n def __iter__(self):\n return self.wps.__iter__()\n\n @abstractmethod\n def __filter__(self,value):\n return value\n\n @abstractmethod\n def __interpolate__(self):\n pass\n\n def __repr__(self):\n return json.dumps(self.wps)\n\nclass ModeTrajectory(Trajectory):\n\n def __init__(self,waypoints,fill='interpolate',kind='slinear',circuit=False,min_value=None,max_value=None):\n super(ModeTrajectory,self).__init__(waypoints,kind='slinear',circuit=False,min_value=None,max_value=None)\n\n @property\n def v(self):\n return self.__pad__([wp['mode'] for wp in self.wps])\n\n def __getitem__(self,time):\n if self.circuit:\n start = min(self.t)\n time = time - start % (len(self) + start)\n return self.__filter__(self.vfn(time))\n\n def __filter__(self,value):\n if type(value) == np.ndarray:\n value = float(value)\n if min(self.v) > value:\n return min(self.v)\n elif max(self.v) < value:\n return max(self.v)\n else:\n return value\n\n def __interpolate__(self):\n assert len(self.wps) > 0\n t = self.t\n v = self.v\n if not self.circuit:\n self.vfn = interpolate.interp1d(t,v,kind=self.kind,fill_value='extrapolate')\n # self.vfn = interpolate.UnivariateSpline(t,v,k=self.kind,ext='const')\n else:\n tp = [t[-2]-t[-1]]+t+[t[1]+t[-1]]\n vp = [v[-2]-v[-1]]+v+[v[1]+v[-1]]\n self.vfn = interpolate.interp1d(tp,vp,kind=self.kind,fill_value='extrapolate')\n # self.vfn = interpolate.UnivariateSpline(t,v,k=self.kind,ext='const')\n\nclass AnnotationTrajectory(Trajectory):\n\n @property\n def a(self):\n return 
[wp['annotation'] for wp in self.wps]\n\n def __getitem__(self,time):\n if self.circuit:\n start = min(self.t)\n time = time - start % (len(self) + start)\n if time in self.t:\n return [event['annotation'] for event in self.wps][0]\n else:\n return None\n\n def __filter__(self,value):\n return value\n\n def __interpolate__(self):\n pass\n\n\nclass PoseTrajectory(Trajectory):\n\n @property\n def x(self):\n return self.__pad__([wp['pose'].position.x for wp in self.wps])\n\n @property\n def y(self):\n return self.__pad__([wp['pose'].position.y for wp in self.wps])\n\n @property\n def z(self):\n return self.__pad__([wp['pose'].position.z for wp in self.wps])\n\n @property\n def q(self):\n return self.__pad__([wp['pose'].quaternion for wp in self.wps])\n\n def __filter__(self,value):\n if type(value) == np.ndarray:\n value = float(value)\n return value\n\n def __getitem__(self,time):\n times = self.t\n if self.circuit:\n start = min(times)\n time = time - start % (len(self) + start)\n x = self.__filter__(self.xfn(time))\n y = self.__filter__(self.yfn(time))\n z = self.__filter__(self.zfn(time))\n q = self.q\n pos = Position(x,y,z)\n\n start = min(times)\n stop = max(times)\n if time < start:\n quat = self.wps[0]['pose'].quaternion\n elif time > stop:\n quat = self.wps[-1]['pose'].quaternion\n else:\n for start_idx,pair in enumerate(pairwise(times)):\n if pair[0] <= time <= pair[1]:\n quat1 = q[start_idx]\n quat2 = q[start_idx+1]\n quat_times = (pair[0],pair[1])\n percent = (time - quat_times[0]) / (quat_times[1] - quat_times[0])\n quat = Quaternion.from_py_quaternion(pyQuaternion.slerp(quat1,quat2,percent))\n return Pose(pos,quat)\n\n def __interpolate__(self):\n assert len(self.wps) > 0\n times = self.t\n if not self.circuit:\n self.xfn = interpolate.interp1d(times,self.x,kind=self.kind,fill_value='extrapolate')\n self.yfn = interpolate.interp1d(times,self.y,kind=self.kind,fill_value='extrapolate')\n self.zfn = interpolate.interp1d(times,self.z,kind=self.kind,fill_value='extrapolate')\n # TODO: Test whether the code below produces better results\n # self.xfn = interpolate.UnivariateSpline(times,self.x,k=self.kind,ext='const')\n # self.yfn = interpolate.UnivariateSpline(times,self.y,k=self.kind,ext='const')\n # self.zfn = interpolate.UnivariateSpline(times,self.z,k=self.kind,ext='const')\n else:\n xs = self.x\n ys = self.y\n zs = self.z\n tp = [times[-2]-times[-1]]+t+[times[1]+times[-1]]\n xp = [xs[-2]-xs[-1]]+xs+[xs[1]+xs[-1]]\n yp = [ys[-2]-ys[-1]]+ys+[ys[1]+ys[-1]]\n zp = [zs[-2]-zs[-1]]+zs+[zs[1]+zs[-1]]\n self.xfn = interpolate.interp1d(tp,xp,kind=self.kind,fill_value='extrapolate')\n self.yfn = interpolate.interp1d(tp,yp,kind=self.kind,fill_value='extrapolate')\n self.zfn = interpolate.interp1d(tp,zp,kind=self.kind,fill_value='extrapolate')\n\n # self.xfn = interpolate.UnivariateSpline(tp,xp,k=self.kind,ext='const')\n # self.yfn = interpolate.UnivariateSpline(tp,yp,k=self.kind,ext='const')\n # self.zfn = interpolate.UnivariateSpline(tp,zp,k=self.kind,ext='const')\n" ]
[ [ "numpy.array", "scipy.interpolate.interp1d" ] ]
magicake/2017-2018
[ "2c795e47e0036e73ff9f88dcb0972cf6c0ec2a91" ]
[ "hsv_sprite_shifter/color_change.py" ]
[ "#SY Dec 2018\n#github.com/magicake\n\nfrom PIL import Image\nimport numpy as np\nfrom random import randint\nfrom timeit import default_timer as timer\n\ndef rgb_to_hsv(rgb):\n rgb = rgb.astype('float')\n hsv = np.zeros_like(rgb)\n hsv[..., 3:] = rgb[..., 3:]\n r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]\n maxc = np.max(rgb[..., :3], axis=-1)\n minc = np.min(rgb[..., :3], axis=-1)\n hsv[..., 2] = maxc\n mask = maxc != minc\n hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask]\n rc = np.zeros_like(r)\n gc = np.zeros_like(g)\n bc = np.zeros_like(b)\n rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask]\n gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask]\n bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask]\n hsv[..., 0] = np.select(\n [r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc)\n hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0\n return hsv\n\ndef hsv_to_rgb(hsv):\n rgb = np.empty_like(hsv)\n rgb[..., 3:] = hsv[..., 3:]\n h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2]\n i = (h * 6.0).astype('uint8')\n f = (h * 6.0) - i\n p = v * (1.0 - s)\n q = v * (1.0 - s * f)\n t = v * (1.0 - s * (1.0 - f))\n i = i % 6\n conditions = [s == 0.0, i == 1, i == 2, i == 3, i == 4, i == 5]\n rgb[..., 0] = np.select(conditions, [v, q, p, p, t, v], default=v)\n rgb[..., 1] = np.select(conditions, [v, v, v, q, p, p], default=t)\n rgb[..., 2] = np.select(conditions, [v, p, t, v, v, q], default=p)\n return rgb.astype('uint8')\n\n\ndef shift_thing(arr,hout,thing, random):\n hsv=rgb_to_hsv(arr)\n if random:\n hsv[..., 0]=(180-(randint(0, 180)))/360.0\n hsv[..., 1]=(180-(randint(0, 180)))/360.0\n hsv[..., 1]=(180-(randint(0, 10)))/360.0\n else:\n hsv[...,thing]=hout\n rgb=hsv_to_rgb(hsv)\n return rgb\n\ndemo = int(raw_input(\"which demo? (2 is the star dude)\"))\nr_inq = int(raw_input(\"random? 1 for yes 2 for no\"))\nrandom = False\nthing = 0\nif r_inq == 1:\n random = True\nelse:\n thing = int(raw_input(\"1: hue, 2: saturation, 3: value\")) - 1\niter = int(raw_input(\"how many iterations??\"))\nog = Image.open('demo'+str(demo)+'/og.png').convert('RGBA')\narr = np.array(og)\nbase = Image.open('demo'+str(demo)+'/base.png').convert('RGBA')\nif __name__=='__main__':\n #deviation = int(raw_input(\"pick a number between 0 and 180\"))\n #modified_hue = (180-deviation)/360.0\n\n #new_img = Image.fromarray(shift_hue(arr,modified_hue), 'RGBA')\n start = timer()\n for i in range (0, iter):\n deviation = int(randint(0, 180))\n modifier = (180-deviation)/360.0\n new_img = Image.fromarray(shift_thing(arr,modifier, thing, random), 'RGBA')\n result = Image.alpha_composite(base, new_img)\n result.save('opt_test/result'+str(i)+'.png')\n print(timer() - start)\n end = timer()\n print(\"Done: \" + str(end-start))\n" ]
[ [ "numpy.max", "numpy.zeros_like", "numpy.array", "numpy.min", "numpy.select", "numpy.empty_like" ] ]
ronekko/adaptive_computation_time
[ "eff1b9d9ae0205668db72f29e54c3e6a63640876" ]
[ "links/stateless_simple_act.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 10 19:42:51 2017\n\n@author: sakurai\n\"\"\"\n\nimport numpy as np\nimport chainer\nimport chainer.functions as F\nimport chainer.functions as L\nimport six\n\n\nclass StatelessSimpleACT(chainer.Chain):\n def __init__(self, in_size, s_size, out_size,\n epsilon=0.01, max_ponder_steps=100):\n super(StatelessSimpleACT, self).__init__(\n l_xs=L.Linear(in_size + 1, s_size),\n l_ss=L.Linear(s_size, s_size),\n l_sh=L.Linear(s_size, 1),\n l_sy=L.Linear(s_size, out_size))\n self.in_size = in_size\n self.s_size = s_size\n self.out_size = out_size\n self.epsilon = epsilon\n self.max_ponder_steps = max_ponder_steps\n\n def __call__(self, x_t, s_t):\n xp = self.xp\n batch_size = x_t.shape[0]\n\n ponder_costs = []\n s_t_n = s_t\n\n s_t_ns = []\n y_t_ns = []\n p_t_ns = []\n r_t_n = chainer.Variable(xp.ones((batch_size, 1), np.float32))\n r_t = [r_t_n]\n n_t = xp.full((batch_size, 1), -1, np.int32)\n already_halted = xp.full((batch_size, 1), False, np.bool)\n\n n = 0\n x_t_n = F.hstack((x_t, xp.ones((batch_size, 1), x_t.dtype)))\n x_t_n = self.l_xs(x_t_n)\n\n for n in six.moves.range(self.max_ponder_steps):\n if xp.all(already_halted):\n break\n\n s_t_n = F.tanh(x_t_n + self.l_ss(s_t_n))\n y_t_n = self.l_sy(s_t_n)\n h_t_n = F.sigmoid(self.l_sh(s_t_n))\n\n if n < self.max_ponder_steps - 1: # normal case\n halt = r_t_n.data - h_t_n.data < self.epsilon\n else: # truncation by max ponder steps\n halt = np.full((batch_size, 1), True)\n p_t_n = F.where(already_halted,\n xp.zeros((batch_size, 1), xp.float32),\n F.where(halt,\n r_t_n,\n h_t_n))\n\n s_t_ns.append(s_t_n)\n y_t_ns.append(y_t_n)\n p_t_ns.append(p_t_n)\n r_t_n -= p_t_n\n r_t.append(r_t_n)\n\n now_halted = xp.logical_and(r_t_n.data < self.epsilon,\n xp.logical_not(already_halted))\n n_t[now_halted] = n\n already_halted = xp.logical_or(already_halted, now_halted)\n\n # compute x_t_n for n > 1 once\n if n == 0:\n x_t_n = F.hstack(\n (x_t, xp.zeros((batch_size, 1), x_t.dtype)))\n x_t_n = self.l_xs(x_t_n)\n print(n + 1, end=', ')\n\n s_t_ns = F.stack(s_t_ns, 1)\n y_t_ns = F.stack(y_t_ns, 1)\n p_t_ns = F.stack(p_t_ns, 1)\n s_t = F.batch_matmul(p_t_ns, s_t_ns, transa=True)\n y_t = F.batch_matmul(p_t_ns, y_t_ns, transa=True)\n\n s_t = s_t.reshape(batch_size, -1)\n y_t = y_t.reshape(batch_size, -1)\n remainders_at_halt = F.concat(r_t)[np.arange(batch_size), n_t.ravel()]\n ponder_costs = n_t.ravel().astype(xp.float32) + remainders_at_halt\n ponder_cost = F.sum(ponder_costs)\n\n return y_t, s_t, ponder_cost\n" ]
[ [ "numpy.full", "numpy.arange" ] ]
JeongsooHa/ray
[ "cc93fee4a47dc9b9f754d0b53ae2f1e4f598aeb1" ]
[ "rllib/agents/es/es_tf_policy.py" ]
[ "# Code in this file is copied and adapted from\n# https://github.com/openai/evolution-strategies-starter.\n\nimport gym\nimport numpy as np\n\nimport ray\nimport ray.experimental.tf_utils\nfrom ray.rllib.evaluation.sampler import _unbatch_tuple_actions\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.utils.filter import get_filter\nfrom ray.rllib.utils.framework import try_import_tf\n\ntf = try_import_tf()\n\n\ndef rollout(policy, env, timestep_limit=None, add_noise=False, offset=0.0):\n \"\"\"Do a rollout.\n\n If add_noise is True, the rollout will take noisy actions with\n noise drawn from that stream. Otherwise, no action noise will be added.\n\n Args:\n policy (Policy): Rllib Policy from which to draw actions.\n env (gym.Env): Environment from which to draw rewards, done, and\n next state.\n timestep_limit (Optional[int]): Steps after which to end the rollout.\n If None, use `env.spec.max_episode_steps` or 999999.\n add_noise (bool): Indicates whether exploratory action noise should be\n added.\n offset (float): Value to subtract from the reward (e.g. survival bonus\n from humanoid).\n \"\"\"\n max_timestep_limit = 999999\n env_timestep_limit = env.spec.max_episode_steps if (\n hasattr(env, \"spec\") and hasattr(env.spec, \"max_episode_steps\")) \\\n else max_timestep_limit\n timestep_limit = (env_timestep_limit if timestep_limit is None else min(\n timestep_limit, env_timestep_limit))\n rewards = []\n t = 0\n observation = env.reset()\n for _ in range(timestep_limit or max_timestep_limit):\n ac = policy.compute_actions(\n observation, add_noise=add_noise, update=True)[0]\n observation, r, done, _ = env.step(ac)\n if offset != 0.0:\n r -= np.abs(offset)\n rewards.append(r)\n t += 1\n if done:\n break\n rewards = np.array(rewards, dtype=np.float32)\n return rewards, t\n\n\ndef make_session(single_threaded):\n if not single_threaded:\n return tf.Session()\n return tf.Session(\n config=tf.ConfigProto(\n inter_op_parallelism_threads=1, intra_op_parallelism_threads=1))\n\n\nclass ESTFPolicy:\n def __init__(self, obs_space, action_space, config):\n self.action_space = action_space\n self.action_noise_std = config[\"action_noise_std\"]\n self.preprocessor = ModelCatalog.get_preprocessor_for_space(obs_space)\n self.observation_filter = get_filter(config[\"observation_filter\"],\n self.preprocessor.shape)\n self.single_threaded = config.get(\"single_threaded\", False)\n self.sess = make_session(single_threaded=self.single_threaded)\n self.inputs = tf.placeholder(tf.float32,\n [None] + list(self.preprocessor.shape))\n\n # Policy network.\n dist_class, dist_dim = ModelCatalog.get_action_dist(\n self.action_space, config[\"model\"], dist_type=\"deterministic\")\n model = ModelCatalog.get_model({\n SampleBatch.CUR_OBS: self.inputs\n }, obs_space, action_space, dist_dim, config[\"model\"])\n dist = dist_class(model.outputs, model)\n self.sampler = dist.sample()\n\n self.variables = ray.experimental.tf_utils.TensorFlowVariables(\n model.outputs, self.sess)\n\n self.num_params = sum(\n np.prod(variable.shape.as_list())\n for _, variable in self.variables.variables.items())\n self.sess.run(tf.global_variables_initializer())\n\n def compute_actions(self, observation, add_noise=False, update=True):\n observation = self.preprocessor.transform(observation)\n observation = self.observation_filter(observation[None], update=update)\n action = self.sess.run(\n self.sampler, feed_dict={self.inputs: observation})\n action = 
_unbatch_tuple_actions(action)\n if add_noise and isinstance(self.action_space, gym.spaces.Box):\n action += np.random.randn(*action.shape) * self.action_noise_std\n return action\n\n def set_flat_weights(self, x):\n self.variables.set_flat(x)\n\n def get_flat_weights(self):\n return self.variables.get_flat()\n" ]
[ [ "numpy.array", "numpy.random.randn", "numpy.abs" ] ]
Multilevel-NN/deepxde
[ "10c7c6e0de610ee560089eb11e01496b84383721" ]
[ "examples/Beltrami_flow.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nimport deepxde as dde\n\n\ndef main():\n a = 1\n d = 1\n Re = 1\n\n def pde(x, u):\n u_vel, v_vel, w_vel, p = u[:, 0:1], u[:, 1:2], u[:, 2:3], u[:, 3:4]\n\n u_vel_x = dde.grad.jacobian(u, x, i=0, j=0)\n u_vel_y = dde.grad.jacobian(u, x, i=0, j=1)\n u_vel_z = dde.grad.jacobian(u, x, i=0, j=2)\n u_vel_t = dde.grad.jacobian(u, x, i=0, j=3)\n u_vel_xx = dde.grad.hessian(u, x, component=0, i=0, j=0)\n u_vel_yy = dde.grad.hessian(u, x, component=0, i=1, j=1)\n u_vel_zz = dde.grad.hessian(u, x, component=0, i=2, j=2)\n\n v_vel_x = dde.grad.jacobian(u, x, i=1, j=0)\n v_vel_y = dde.grad.jacobian(u, x, i=1, j=1)\n v_vel_z = dde.grad.jacobian(u, x, i=1, j=2)\n v_vel_t = dde.grad.jacobian(u, x, i=1, j=3)\n v_vel_xx = dde.grad.hessian(u, x, component=1, i=0, j=0)\n v_vel_yy = dde.grad.hessian(u, x, component=1, i=1, j=1)\n v_vel_zz = dde.grad.hessian(u, x, component=1, i=2, j=2)\n\n w_vel_x = dde.grad.jacobian(u, x, i=2, j=0)\n w_vel_y = dde.grad.jacobian(u, x, i=2, j=1)\n w_vel_z = dde.grad.jacobian(u, x, i=2, j=2)\n w_vel_t = dde.grad.jacobian(u, x, i=2, j=3)\n w_vel_xx = dde.grad.hessian(u, x, component=2, i=0, j=0)\n w_vel_yy = dde.grad.hessian(u, x, component=2, i=1, j=1)\n w_vel_zz = dde.grad.hessian(u, x, component=2, i=2, j=2)\n\n p_x = dde.grad.jacobian(u, x, i=3, j=0)\n p_y = dde.grad.jacobian(u, x, i=3, j=1)\n p_z = dde.grad.jacobian(u, x, i=3, j=2)\n\n momentum_x = (\n u_vel_t\n + (u_vel * u_vel_x + v_vel * u_vel_y + w_vel * u_vel_z)\n + p_x\n - 1 / Re * (u_vel_xx + u_vel_yy + u_vel_zz)\n )\n momentum_y = (\n v_vel_t\n + (u_vel * v_vel_x + v_vel * v_vel_y + w_vel * v_vel_z)\n + p_y\n - 1 / Re * (v_vel_xx + v_vel_yy + v_vel_zz)\n )\n momentum_z = (\n w_vel_t\n + (u_vel * w_vel_x + v_vel * w_vel_y + w_vel * w_vel_z)\n + p_z\n - 1 / Re * (w_vel_xx + w_vel_yy + w_vel_zz)\n )\n continuity = u_vel_x + v_vel_y + w_vel_z\n\n return [momentum_x, momentum_y, momentum_z, continuity]\n\n def u_func(x):\n return (\n -a\n * (\n np.exp(a * x[:, 0:1]) * np.sin(a * x[:, 1:2] + d * x[:, 2:3])\n + np.exp(a * x[:, 2:3]) * np.cos(a * x[:, 0:1] + d * x[:, 1:2])\n )\n * np.exp(-(d ** 2) * x[:, 3:4])\n )\n\n def v_func(x):\n return (\n -a\n * (\n np.exp(a * x[:, 1:2]) * np.sin(a * x[:, 2:3] + d * x[:, 0:1])\n + np.exp(a * x[:, 0:1]) * np.cos(a * x[:, 1:2] + d * x[:, 2:3])\n )\n * np.exp(-(d ** 2) * x[:, 3:4])\n )\n\n def w_func(x):\n return (\n -a\n * (\n np.exp(a * x[:, 2:3]) * np.sin(a * x[:, 0:1] + d * x[:, 1:2])\n + np.exp(a * x[:, 1:2]) * np.cos(a * x[:, 2:3] + d * x[:, 0:1])\n )\n * np.exp(-(d ** 2) * x[:, 3:4])\n )\n\n def p_func(x):\n return (\n -0.5\n * a ** 2\n * (\n np.exp(2 * a * x[:, 0:1])\n + np.exp(2 * a * x[:, 0:1])\n + np.exp(2 * a * x[:, 2:3])\n + 2\n * np.exp(a * x[:, 0:1] + d * x[:, 1:2])\n * np.cos(a * x[:, 2:3] + d * x[:, 0:1])\n * np.exp(a * (x[:, 1:2] + x[:, 2:3]))\n + 2\n * np.exp(a * x[:, 1:2] + d * x[:, 2:3])\n * np.cos(a * x[:, 0:1] + d * x[:, 1:2])\n * np.exp(a * (x[:, 2:3] + x[:, 0:1]))\n + 2\n * np.exp(a * x[:, 2:3] + d * x[:, 0:1])\n * np.cos(a * x[:, 1:2] + d * x[:, 2:3])\n * np.exp(a * (x[:, 0:1] + x[:, 1:2]))\n )\n * np.exp(-2 * d ** 2 * x[:, 3:4])\n )\n\n spatial_domain = dde.geometry.Cuboid(xmin=[-1, -1, -1], xmax=[1, 1, 1])\n temporal_domain = dde.geometry.TimeDomain(0, 1)\n spatio_temporal_domain = dde.geometry.GeometryXTime(spatial_domain, temporal_domain)\n\n boundary_condition_u = dde.DirichletBC(\n spatio_temporal_domain, u_func, 
lambda _, on_boundary: on_boundary, component=0\n )\n boundary_condition_v = dde.DirichletBC(\n spatio_temporal_domain, v_func, lambda _, on_boundary: on_boundary, component=1\n )\n boundary_condition_w = dde.DirichletBC(\n spatio_temporal_domain, w_func, lambda _, on_boundary: on_boundary, component=2\n )\n\n initial_condition_u = dde.IC(\n spatio_temporal_domain, u_func, lambda _, on_initial: on_initial, component=0\n )\n initial_condition_v = dde.IC(\n spatio_temporal_domain, v_func, lambda _, on_initial: on_initial, component=1\n )\n initial_condition_w = dde.IC(\n spatio_temporal_domain, w_func, lambda _, on_initial: on_initial, component=2\n )\n\n data = dde.data.TimePDE(\n spatio_temporal_domain,\n pde,\n [\n boundary_condition_u,\n boundary_condition_v,\n boundary_condition_w,\n initial_condition_u,\n initial_condition_v,\n initial_condition_w,\n ],\n num_domain=50000,\n num_boundary=5000,\n num_initial=5000,\n num_test=10000,\n )\n\n net = dde.maps.FNN([4] + 4 * [50] + [4], \"tanh\", \"Glorot normal\")\n\n model = dde.Model(data, net)\n\n model.compile(\n \"adam\", lr=1e-3, loss_weights=[1, 1, 1, 1, 100, 100, 100, 100, 100, 100]\n )\n model.train(epochs=30000)\n model.compile(\"L-BFGS-B\", loss_weights=[1, 1, 1, 1, 100, 100, 100, 100, 100, 100])\n losshistory, train_state = model.train()\n\n x, y, z = np.meshgrid(\n np.linspace(-1, 1, 10),\n np.linspace(-1, 1, 10),\n np.linspace(-1, 1, 10),\n )\n\n X = np.vstack((np.ravel(x), np.ravel(y), np.ravel(z))).T\n\n t_0 = np.zeros(1000).reshape(1000, 1)\n t_1 = np.ones(1000).reshape(1000, 1)\n\n X_0 = np.hstack((X, t_0))\n X_1 = np.hstack((X, t_1))\n\n output_0 = model.predict(X_0)\n output_1 = model.predict(X_1)\n\n u_pred_0 = output_0[:, 0].reshape(-1)\n v_pred_0 = output_0[:, 1].reshape(-1)\n w_pred_0 = output_0[:, 2].reshape(-1)\n p_pred_0 = output_0[:, 3].reshape(-1)\n\n u_exact_0 = u_func(X_0).reshape(-1)\n v_exact_0 = v_func(X_0).reshape(-1)\n w_exact_0 = w_func(X_0).reshape(-1)\n p_exact_0 = p_func(X_0).reshape(-1)\n\n u_pred_1 = output_1[:, 0].reshape(-1)\n v_pred_1 = output_1[:, 1].reshape(-1)\n w_pred_1 = output_1[:, 2].reshape(-1)\n p_pred_1 = output_1[:, 3].reshape(-1)\n\n u_exact_1 = u_func(X_1).reshape(-1)\n v_exact_1 = v_func(X_1).reshape(-1)\n w_exact_1 = w_func(X_1).reshape(-1)\n p_exact_1 = p_func(X_1).reshape(-1)\n\n f_0 = model.predict(X_0, operator=pde)\n f_1 = model.predict(X_1, operator=pde)\n\n l2_difference_u_0 = dde.metrics.l2_relative_error(u_exact_0, u_pred_0)\n l2_difference_v_0 = dde.metrics.l2_relative_error(v_exact_0, v_pred_0)\n l2_difference_w_0 = dde.metrics.l2_relative_error(w_exact_0, w_pred_0)\n l2_difference_p_0 = dde.metrics.l2_relative_error(p_exact_0, p_pred_0)\n residual_0 = np.mean(np.absolute(f_0))\n\n l2_difference_u_1 = dde.metrics.l2_relative_error(u_exact_1, u_pred_1)\n l2_difference_v_1 = dde.metrics.l2_relative_error(v_exact_1, v_pred_1)\n l2_difference_w_1 = dde.metrics.l2_relative_error(w_exact_1, w_pred_1)\n l2_difference_p_1 = dde.metrics.l2_relative_error(p_exact_1, p_pred_1)\n residual_1 = np.mean(np.absolute(f_1))\n\n print(\"Accuracy at t = 0:\")\n print(\"Mean residual:\", residual_0)\n print(\"L2 relative error in u:\", l2_difference_u_0)\n print(\"L2 relative error in v:\", l2_difference_v_0)\n print(\"L2 relative error in w:\", l2_difference_w_0)\n print(\"\\n\")\n print(\"Accuracy at t = 1:\")\n print(\"Mean residual:\", residual_1)\n print(\"L2 relative error in u:\", l2_difference_u_1)\n print(\"L2 relative error in v:\", l2_difference_v_1)\n print(\"L2 relative error in 
w:\", l2_difference_w_1)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.sin", "numpy.zeros", "numpy.ones", "numpy.exp", "numpy.ravel", "numpy.cos", "numpy.absolute", "numpy.hstack", "numpy.linspace" ] ]
yuriok/QGrain
[ "39a136d4e89716a26a88d68ffa00d36ef6036279" ]
[ "QGrain/kernels.py" ]
[ "import numpy as np\nimport torch\nfrom QGrain import DistributionType\nfrom QGrain.distributions import BaseDistribution\nfrom torch.nn import Module, Parameter, ReLU, Softmax\n\n\ndef normal_pdf(x, loc, scale):\n pdf = 1 / (scale*np.sqrt(2*np.pi)) * torch.exp(-torch.square(x - loc) / (2*scale**2))\n return pdf\n\ndef std_normal_cdf(x):\n cdf = 0.5 * (1 + torch.erf(x / np.sqrt(2)))\n return cdf\n\ndef weibull_pdf(x, shape, loc, scale):\n y = x - loc\n key = torch.greater_equal(y, 0)\n pdf = torch.zeros_like(y)\n pdf[key] = (shape / scale) * (y[key] / scale) ** (shape-1) * torch.exp(-(y[key]/scale)**shape)\n return pdf\n\ndef skew_normal_pdf(x, shape, loc, scale):\n pdf = 2 * normal_pdf(x, loc, scale) * std_normal_cdf(shape*(x-loc)/scale)\n return pdf\n\nclass NonparametricKernel(Module):\n def __init__(self, n_classes):\n super().__init__()\n self.distribution = Parameter(torch.rand(n_classes), requires_grad=True)\n self.softmax = Softmax(dim=0)\n\n def forward(self, _):\n frequency = self.softmax(self.distribution)\n return frequency\n\n @property\n def frequency(self):\n with torch.no_grad():\n frequency = self.softmax(self.distribution)\n return frequency\n\nclass NormalKernel(Module):\n def __init__(self, loc=None, scale=None):\n super().__init__()\n self.__relu = ReLU()\n self.__loc = Parameter(torch.rand(1)+6 if loc is None else torch.Tensor([loc]), requires_grad=True)\n self.__scale = Parameter(torch.rand(1)+1 if scale is None else torch.Tensor([scale]), requires_grad=True)\n\n @property\n def loc(self) -> float:\n return self.__loc.item()\n\n @property\n def scale(self) -> float:\n with torch.no_grad():\n return self.__relu(self.__scale).item()\n\n @property\n def params(self):\n return self.loc, self.scale\n\n def forward(self, classes_φ):\n interval = torch.abs((classes_φ[0]-classes_φ[-1]) / (classes_φ.shape[0]-1)).item()\n loc = self.__loc\n scale = self.__relu(self.__scale)\n x = classes_φ\n pdf = normal_pdf(x, loc, scale)\n # scale pdf to frequency\n frequency = pdf * interval\n return frequency\n\nclass WeibullKernel(Module):\n def __init__(self, shape=None, loc=None, scale=None):\n super().__init__()\n self.__relu = ReLU()\n self.__shape = Parameter(torch.rand(1)+3 if shape is None else torch.Tensor([shape]), requires_grad=True)\n self.__loc = Parameter(torch.rand(1)+4 if loc is None else torch.Tensor([loc]), requires_grad=True)\n self.__scale = Parameter(torch.rand(1)+1 if scale is None else torch.Tensor([scale]), requires_grad=True)\n\n @property\n def shape(self) -> float:\n with torch.no_grad():\n return self.__relu(self.__shape).item()\n\n @property\n def loc(self) -> float:\n return self.__loc.item()\n\n @property\n def scale(self) -> float:\n with torch.no_grad():\n return self.__relu(self.__scale).item()\n\n @property\n def params(self):\n return self.shape, self.loc, self.scale\n\n def forward(self, classes_φ):\n interval = torch.abs((classes_φ[0]-classes_φ[-1]) / (classes_φ.shape[0]-1)).item()\n shape = self.__relu(self.__shape)\n loc = self.__loc\n scale = self.__relu(self.__scale)\n x = classes_φ\n pdf = weibull_pdf(x, shape, loc, scale)\n # scale pdf to frequency\n frequency = pdf * interval\n return frequency\n\nclass SkewNormalKernel(Module):\n def __init__(self, shape=None, loc=None, scale=None):\n super().__init__()\n self.__relu = ReLU()\n self.__shape = Parameter(torch.rand(1)*0.1 if shape is None else torch.Tensor([shape]), requires_grad=True)\n self.__loc = Parameter(torch.rand(1)+6 if loc is None else torch.Tensor([loc]), requires_grad=True)\n 
self.__scale = Parameter(torch.rand(1)+1 if scale is None else torch.Tensor([scale]), requires_grad=True)\n\n @property\n def shape(self) -> float:\n return self.__shape.item()\n\n @property\n def loc(self) -> float:\n return self.__loc.item()\n\n @property\n def scale(self) -> float:\n with torch.no_grad():\n return self.__relu(self.__scale).item()\n\n @property\n def params(self):\n return self.shape, self.loc, self.scale\n\n def forward(self, classes_φ):\n interval = torch.abs((classes_φ[0]-classes_φ[-1]) / (classes_φ.shape[0]-1)).item()\n shape = self.__shape\n loc = self.__loc\n scale = self.__relu(self.__scale)\n x = classes_φ\n pdf = skew_normal_pdf(x, shape, loc, scale)\n # scale pdf to frequency\n frequency = pdf * interval\n return frequency\n\ndef log10MSE_distance(values: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:\n return torch.log10(torch.mean(torch.square(values - targets)))\n\ndef MSE_distance(values: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:\n return torch.mean(torch.square(values - targets))\n\ndef p_norm(values: torch.Tensor, targets: torch.Tensor, p=2) -> torch.Tensor:\n return torch.sum(torch.abs(values - targets) ** p) ** (1 / p)\n\ndef cosine_distance(values: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:\n cosine = torch.sum(values * targets) / (torch.sqrt(torch.sum(torch.square(values))) * torch.sqrt(torch.sum(torch.square(targets))))\n return torch.abs(cosine)\n\ndef angular_distance(values: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:\n cosine = cosine_distance(values, targets)\n angular = 2 * torch.arccos(cosine) / np.pi\n return angular\n\ndef get_distance_func_by_name(distance: str):\n if distance[-4:] == \"norm\":\n p = int(distance[0])\n return lambda x, y: p_norm(x, y, p)\n elif distance == \"MSE\":\n return lambda x, y: MSE_distance(x, y)\n elif distance == \"log10MSE\":\n return lambda x, y: log10MSE_distance(x, y)\n elif distance == \"angular\":\n return lambda x, y: angular_distance(x, y)\n else:\n raise NotImplementedError(distance)\n\ndef get_initial_guess(distribution_type: DistributionType, reference):\n return BaseDistribution.get_initial_guess(distribution_type, reference)\n\n\nKERNEL_MAP = {DistributionType.Normal: NormalKernel,\n DistributionType.Weibull: WeibullKernel,\n DistributionType.SkewNormal: SkewNormalKernel}\n\nN_PARAMS_MAP = {DistributionType.Normal: 2,\n DistributionType.Weibull: 3,\n DistributionType.SkewNormal: 3}\n" ]
[ [ "torch.rand", "torch.arccos", "torch.nn.Softmax", "torch.greater_equal", "torch.square", "torch.no_grad", "torch.abs", "torch.nn.ReLU", "numpy.sqrt", "torch.zeros_like", "torch.Tensor", "torch.exp", "torch.sum" ] ]
adityapb/compyle
[ "0aab06b48eefb2b3cf4689ec35b11e3d55771d5a" ]
[ "compyle/jit.py" ]
[ "from textwrap import dedent\n\nimport numpy as np\nimport inspect\nimport ast\nimport importlib\nimport warnings\nimport time\nfrom pytools import memoize\nfrom .config import get_config\nfrom .cython_generator import CythonGenerator\nfrom .transpiler import Transpiler, BUILTINS\nfrom .types import (dtype_to_ctype, get_declare_info,\n dtype_to_knowntype, annotate, BITS)\nfrom .extern import Extern\nfrom .utils import getsourcelines\nfrom .profile import profile\n\nfrom . import array\nfrom . import parallel\n\n\ndef memoize_kernel(key=lambda *args: args):\n def memoize_deco(method):\n def wrapper(*args):\n f = args[0].func\n key_val = key(*args)\n if not hasattr(f, 'cached_kernel'):\n setattr(f, 'cached_kernel', {key_val: method(*args)})\n elif key_val not in f.cached_kernel:\n f.cached_kernel[key_val] = method(*args)\n return f.cached_kernel[key_val]\n return wrapper\n return memoize_deco\n\n\ndef get_ctype_from_arg(arg, backend=None):\n if isinstance(arg, array.Array):\n return arg.gptr_type\n elif isinstance(arg, np.ndarray) or isinstance(arg, np.floating):\n return dtype_to_ctype(arg.dtype, backend=backend)\n else:\n if isinstance(arg, float):\n return 'double'\n else:\n if arg > 2147483648:\n return 'long long' if BITS.startswith('32') else 'long'\n else:\n return 'int'\n\n\ndef kernel_cache_key_args(obj, *args):\n key = [get_ctype_from_arg(arg, backend=obj.backend) for arg in args]\n key.append(obj.func)\n key.append(obj.name)\n return tuple(key + list(parallel.get_common_cache_key(obj)))\n\n\ndef kernel_cache_key_kwargs(obj, **kwargs):\n key = [get_ctype_from_arg(arg, backend=obj.backend)\n for arg in kwargs.values()]\n key.append(obj.input_func)\n key.append(obj.output_func)\n key.append(obj.scan_expr)\n return tuple(key + list(parallel.get_common_cache_key(obj)))\n\n\ndef getargspec(f):\n getargspec_f = getattr(inspect, 'getfullargspec',\n getattr(inspect, 'getargspec'))\n return getargspec_f(f)[0]\n\n\ndef get_signed_type(a):\n return a[1:] if a.startswith('u') else a\n\n\ndef get_binop_return_type(a, b):\n int_types = ['short', 'int', 'long']\n float_types = ['float', 'double']\n\n if a is None or b is None:\n return None\n\n if a.endswith('p') and get_signed_type(b) in int_types:\n return a\n if b.endswith('p') and get_signed_type(a) in int_types:\n return b\n\n preference_order = int_types + float_types\n\n unsigned_a = unsigned_b = False\n if a.startswith('u'):\n unsigned_a = True\n a = a[1:]\n if b.startswith('u'):\n unsigned_b = True\n b = b[1:]\n\n idx_a = preference_order.index(a)\n idx_b = preference_order.index(b)\n return_type = preference_order[idx_a] if idx_a > idx_b else \\\n preference_order[idx_b]\n if unsigned_a and unsigned_b:\n return_type = 'u%s' % return_type\n return return_type\n\n\nclass AnnotationHelper(ast.NodeVisitor):\n def __init__(self, func, arg_types):\n self.func = func\n self.name = self.func.__name__\n self.arg_types = {name: self.get_declare_type(type_str)\n for name, type_str in arg_types.items()}\n self.var_types = self.arg_types.copy()\n self.undecl_var_types = {}\n self.external_funcs = {}\n self.external_missing_decl = {}\n self.warning_msg = ('''\n Function called is not marked by the annotate decorator. Argument\n type defaulting to 'double'. 
If the type is not 'double', store\n the value in a variable of appropriate type and use the variable\n '''\n )\n\n def get_declare_type(self, type_str):\n kind, address_space, ctype, shape = get_declare_info(type_str)\n if 'unsigned' in ctype:\n ctype = ctype.replace('unsigned ', 'u')\n if kind == 'matrix':\n ctype = '%sp' % ctype\n return ctype\n\n def get_missing_declarations(self, undecl_var_types):\n declarations = {}\n for var_name, dtype in undecl_var_types.items():\n declarations[var_name] = '%s %s;' % (dtype, var_name)\n missing_decl = {self.func.__name__: declarations}\n missing_decl.update(self.external_missing_decl)\n return missing_decl\n\n def record_var_type(self, name, dtype):\n self.var_types[name] = self.get_declare_type(dtype)\n\n def record_undecl_var_type(self, name, dtype):\n if name not in self.var_types and name not in self.undecl_var_types:\n self.undecl_var_types[name] = self.get_declare_type(dtype)\n\n def get_var_type(self, name):\n return self.var_types.get(\n name, self.undecl_var_types.get(name, 'double'))\n\n def get_return_type(self):\n return self.arg_types.get('return_', 'double')\n\n def annotate(self):\n src = dedent('\\n'.join(getsourcelines(self.func)[0]))\n self._src = src.splitlines()\n code = ast.parse(src)\n self.visit(code)\n self.func = annotate(self.func, **self.arg_types)\n return self.get_missing_declarations(self.undecl_var_types)\n\n def recursive_annotate(self, f, node):\n arg_types = {}\n f_arg_names = getargspec(f)\n for f_arg, arg in zip(f_arg_names, node.args):\n arg_type = self.visit(arg)\n if not arg_type:\n arg_type = 'double'\n arg_types[f_arg] = arg_type\n f_helper = AnnotationHelper(f, arg_types)\n self.external_missing_decl.update(f_helper.annotate())\n self.external_funcs[node.func.id] = f_helper\n return f_helper\n\n def error(self, message, node):\n msg = '\\nError in code in line %d:\\n' % node.lineno\n if self._src: # pragma: no branch\n if node.lineno > 1: # pragma no branch\n msg += self._src[node.lineno - 2] + '\\n'\n msg += self._src[node.lineno - 1] + '\\n'\n msg += ' ' * node.col_offset + '^' + '\\n\\n'\n msg += message\n raise NotImplementedError(msg)\n\n def warn(self, message, node):\n msg = '\\nIn code in line %d:\\n' % node.lineno\n if self._src: # pragma: no branch\n if node.lineno > 1: # pragma no branch\n msg += self._src[node.lineno - 2] + '\\n'\n msg += self._src[node.lineno - 1] + '\\n'\n msg += ' ' * node.col_offset + '^' + '\\n\\n'\n msg += message\n warnings.warn(msg)\n\n def visit_declare(self, node):\n if not isinstance(node.args[0], ast.Str):\n self.error(\"Argument to declare should be a string.\", node)\n type_str = node.args[0].s\n return self.get_declare_type(type_str)\n\n def visit_cast(self, node):\n if not isinstance(node.args[1], ast.Str):\n self.error(\"Cast type should be a string.\", node)\n return node.args[1].s\n\n def visit_address(self, node):\n base_type = self.visit(node.args[0])\n if base_type.endswith('p'):\n self.error(\"Cannot find address of a pointer\", node)\n if isinstance(node.args[0], ast.Subscript):\n array_type = self.visit(node.args[0].value)\n if array_type.startswith('g'):\n base_type = 'g' + base_type\n return base_type + 'p'\n\n def visit_For(self, node):\n self.record_undecl_var_type(node.target.id, 'int')\n for stmt in node.body:\n self.visit(stmt)\n\n def visit_IfExp(self, node):\n return self.visit(node.body)\n\n def visit_Call(self, node):\n # FIXME: External functions have to be at the module level\n # for this to work. 
Pass list of external functions to\n # make this work\n if node.func.id == 'annotate':\n return\n mod = importlib.import_module(self.func.__module__)\n f = getattr(mod, node.func.id, None)\n if node.func.id == 'declare':\n return self.visit_declare(node)\n if node.func.id == 'cast':\n return self.visit_cast(node)\n if node.func.id == 'atomic_inc':\n return self.visit(node.args[0])\n if node.func.id == 'address':\n return self.visit_address(node)\n if node.func.id in self.external_funcs:\n return self.external_funcs[node.func.id].get_return_type()\n if isinstance(node.func, ast.Name) and node.func.id not in BUILTINS:\n if f is None or isinstance(f, Extern):\n self.warn(\"%s could not be found or is an external function\"\n \"and cannot be handled by JIT\" % node.func.id)\n return 'double'\n else:\n f_helper = self.recursive_annotate(f, node)\n return f_helper.get_return_type()\n self.warn(dedent(self.warning_msg), node.func)\n return 'double'\n\n def visit_Subscript(self, node):\n base_type = self.visit(node.value)\n if base_type.startswith('g'):\n base_type = base_type[1:]\n return base_type[:-1]\n\n def visit_Name(self, node):\n return self.get_var_type(node.id)\n\n def visit_Assign(self, node):\n if len(node.targets) != 1:\n self.error(\"Assignments can have only one target.\", node)\n left, right = node.targets[0], node.value\n right_type = self.visit(right)\n if isinstance(right, ast.Call) and right.func.id == 'declare':\n if isinstance(left, ast.Name):\n self.record_var_type(left.id, right_type)\n elif isinstance(left, ast.Tuple):\n names = [x.id for x in left.elts]\n for name in names:\n self.record_var_type(name, right_type)\n elif isinstance(left, ast.Name):\n self.record_undecl_var_type(left.id, right_type)\n\n def visit_Compare(self, node):\n return 'int'\n\n def visit_BinOp(self, node):\n if isinstance(node.op, ast.Pow):\n return self.visit(node.left)\n else:\n return get_binop_return_type(self.visit(node.left),\n self.visit(node.right))\n\n def visit_Num(self, node):\n return get_ctype_from_arg(node.n)\n\n def visit_UnaryOp(self, node):\n return self.visit(node.operand)\n\n def visit_Return(self, node):\n if node and node.value:\n result_type = self.visit(node.value)\n if result_type:\n self.arg_types['return_'] = result_type\n return result_type\n\n\nclass ElementwiseJIT(parallel.ElementwiseBase):\n def __init__(self, func, backend=None):\n backend = array.get_backend(backend)\n self.tp = Transpiler(backend=backend)\n self.backend = backend\n self.name = 'elwise_%s' % func.__name__\n self.func = func\n self._config = get_config()\n self.cython_gen = CythonGenerator()\n self.source = '# Code jitted, call the function to generate the code.'\n self.all_source = self.source\n if backend == 'opencl':\n from .opencl import get_context, get_queue\n self.queue = get_queue()\n\n def get_type_info_from_args(self, *args):\n type_info = {}\n arg_names = getargspec(self.func)\n if 'i' in arg_names:\n arg_names.remove('i')\n type_info['i'] = 'int'\n for arg, name in zip(args, arg_names):\n arg_type = get_ctype_from_arg(arg, backend=self.backend)\n if not arg_type:\n arg_type = 'double'\n type_info[name] = arg_type\n return type_info\n\n @memoize_kernel(key=kernel_cache_key_args)\n def _generate_kernel(self, *args):\n if self.func is not None:\n arg_types = self.get_type_info_from_args(*args)\n helper = AnnotationHelper(self.func, arg_types)\n declarations = helper.annotate()\n self.func = helper.func\n return self._generate(declarations=declarations)\n\n def _massage_arg(self, x):\n if 
isinstance(x, array.Array):\n return x.dev\n elif self.backend != 'cuda' or isinstance(x, np.ndarray):\n return x\n else:\n return np.asarray(x)\n\n @profile\n def __call__(self, *args, **kw):\n c_func = self._generate_kernel(*args)\n c_args = [self._massage_arg(x) for x in args]\n\n if self.backend == 'cython':\n size = len(c_args[0])\n c_args.insert(0, size)\n c_func(*c_args, **kw)\n elif self.backend == 'opencl':\n c_func(*c_args, **kw)\n self.queue.finish()\n elif self.backend == 'cuda':\n import pycuda.driver as drv\n event = drv.Event()\n c_func(*c_args, **kw)\n event.record()\n event.synchronize()\n\n\nclass ReductionJIT(parallel.ReductionBase):\n def __init__(self, reduce_expr, map_func=None, dtype_out=np.float64,\n neutral='0', backend='cython'):\n backend = array.get_backend(backend)\n self.tp = Transpiler(backend=backend)\n self.backend = backend\n self.func = map_func\n if map_func is not None:\n self.name = 'reduce_' + map_func.__name__\n else:\n self.name = 'reduce'\n self.reduce_expr = reduce_expr\n self.dtype_out = dtype_out\n self.type = dtype_to_ctype(dtype_out, backend)\n if backend == 'cython':\n # On Windows, INFINITY is not defined so we use INFTY which we\n # internally define.\n self.neutral = neutral.replace('INFINITY', 'INFTY')\n else:\n self.neutral = neutral\n self._config = get_config()\n self.cython_gen = CythonGenerator()\n self.source = '# Code jitted, call the function to generate the code.'\n self.all_source = self.source\n if backend == 'opencl':\n from .opencl import get_context, get_queue\n self.queue = get_queue()\n\n def get_type_info_from_args(self, *args):\n type_info = {}\n arg_names = getargspec(self.func)\n if 'i' in arg_names:\n arg_names.remove('i')\n type_info['i'] = 'int'\n for arg, name in zip(args, arg_names):\n arg_type = get_ctype_from_arg(arg, backend=self.backend)\n if not arg_type:\n arg_type = 'double'\n type_info[name] = arg_type\n return type_info\n\n @memoize_kernel(key=kernel_cache_key_args)\n def _generate_kernel(self, *args):\n if self.func is not None:\n arg_types = self.get_type_info_from_args(*args)\n helper = AnnotationHelper(self.func, arg_types)\n declarations = helper.annotate()\n self.func = helper.func\n return self._generate(declarations=declarations)\n\n def _massage_arg(self, x):\n if isinstance(x, array.Array):\n return x.dev\n elif self.backend != 'cuda' or isinstance(x, np.ndarray):\n return x\n else:\n return np.asarray(x)\n\n @profile\n def __call__(self, *args, **kw):\n c_func = self._generate_kernel(*args)\n c_args = [self._massage_arg(x) for x in args]\n\n if self.backend == 'cython':\n size = len(c_args[0])\n c_args.insert(0, size)\n return c_func(*c_args, **kw)\n elif self.backend == 'opencl':\n result = c_func(*c_args, **kw)\n self.queue.finish()\n return result.get()\n elif self.backend == 'cuda':\n import pycuda.driver as drv\n event = drv.Event()\n result = c_func(*c_args, **kw)\n event.record()\n event.synchronize()\n return result.get()\n\n\nclass ScanJIT(parallel.ScanBase):\n def __init__(self, input=None, output=None, scan_expr=\"a+b\",\n is_segment=None, dtype=np.float64, neutral='0',\n complex_map=False, backend='opencl'):\n backend = array.get_backend(backend)\n self.tp = Transpiler(backend=backend, incl_cluda=False)\n self.backend = backend\n self.input_func = input\n self.output_func = output\n self.is_segment_func = is_segment\n self.complex_map = complex_map\n if input is not None:\n self.name = 'scan_' + input.__name__\n else:\n self.name = 'scan'\n self.scan_expr = scan_expr\n self.dtype = 
dtype\n self.type = dtype_to_ctype(dtype, backend)\n if backend == 'cython':\n # On Windows, INFINITY is not defined so we use INFTY which we\n # internally define.\n self.neutral = neutral.replace('INFINITY', 'INFTY')\n else:\n self.neutral = neutral\n self._config = get_config()\n self.source = '# Code jitted, call the function to generate the code.'\n self.all_source = self.source\n self.cython_gen = CythonGenerator()\n if backend == 'opencl':\n from .opencl import get_context, get_queue\n self.queue = get_queue()\n builtin_symbols = ['item', 'prev_item', 'last_item']\n self.builtin_types = {'i': 'int', 'N': 'int'}\n for sym in builtin_symbols:\n self.builtin_types[sym] = dtype_to_knowntype(\n self.dtype, backend=backend\n )\n\n def get_type_info_from_kwargs(self, func, **kwargs):\n type_info = {}\n arg_names = getargspec(func)\n for name in arg_names:\n arg = kwargs.get(name, None)\n if arg is None and name not in self.builtin_types:\n raise ValueError(\"Argument %s not found\" % name)\n if name in self.builtin_types:\n arg_type = self.builtin_types[name]\n else:\n arg_type = get_ctype_from_arg(arg, backend=self.backend)\n if not arg_type:\n arg_type = 'double'\n type_info[name] = arg_type\n return type_info\n\n @memoize(key=kernel_cache_key_kwargs, use_kwargs=True)\n def _generate_kernel(self, **kwargs):\n declarations = {}\n if self.input_func is not None:\n arg_types = self.get_type_info_from_kwargs(\n self.input_func, **kwargs)\n arg_types['return_'] = dtype_to_knowntype(\n self.dtype, backend=self.backend\n )\n helper = AnnotationHelper(self.input_func, arg_types)\n declarations.update(helper.annotate())\n self.input_func = helper.func\n\n if self.output_func is not None:\n arg_types = self.get_type_info_from_kwargs(\n self.output_func, **kwargs)\n helper = AnnotationHelper(self.output_func, arg_types)\n declarations.update(helper.annotate())\n self.output_func = helper.func\n\n if self.is_segment_func is not None:\n arg_types = self.get_type_info_from_kwargs(\n self.is_segment_func, **kwargs)\n arg_types['return_'] = 'int'\n helper = AnnotationHelper(self.is_segment_func, arg_types)\n declarations.update(helper.annotate())\n self.is_segment_func = helper.func\n\n return self._generate(declarations=declarations)\n\n def _massage_arg(self, x):\n if isinstance(x, array.Array):\n return x.dev\n elif self.backend != 'cuda' or isinstance(x, np.ndarray):\n return x\n else:\n return np.asarray(x)\n\n @profile\n def __call__(self, **kwargs):\n c_func = self._generate_kernel(**kwargs)\n c_args_dict = {k: self._massage_arg(x) for k, x in kwargs.items()}\n if self._get_backend_key() in self.output_func.arg_keys:\n output_arg_keys = self.output_func.arg_keys[\n self._get_backend_key()]\n else:\n raise ValueError(\"No kernel arguments found for backend = %s, \"\n \"use_openmp = %s, use_double = %s\" %\n self._get_backend_key())\n\n if self.backend == 'cython':\n size = len(c_args_dict[output_arg_keys[1]])\n c_args_dict['SIZE'] = size\n c_func(*[c_args_dict[k] for k in output_arg_keys])\n elif self.backend == 'opencl':\n c_func(*[c_args_dict[k] for k in output_arg_keys])\n self.queue.finish()\n elif self.backend == 'cuda':\n import pycuda.driver as drv\n event = drv.Event()\n c_func(*[c_args_dict[k] for k in output_arg_keys])\n event.record()\n event.synchronize()\n" ]
[ [ "numpy.asarray" ] ]
supriya-gdptl/IM-NET-pytorch
[ "c64895471a6b2e60189ae643c045fb6c2eb69a5c" ]
[ "utils.py" ]
[ "import numpy as np \nimport math\n\n\ndef write_ply_point(name, vertices):\n\tfout = open(name, 'w')\n\tfout.write(\"ply\\n\")\n\tfout.write(\"format ascii 1.0\\n\")\n\tfout.write(\"element vertex \"+str(len(vertices))+\"\\n\")\n\tfout.write(\"property float x\\n\")\n\tfout.write(\"property float y\\n\")\n\tfout.write(\"property float z\\n\")\n\tfout.write(\"end_header\\n\")\n\tfor ii in range(len(vertices)):\n\t\tfout.write(str(vertices[ii,0])+\" \"+str(vertices[ii,1])+\" \"+str(vertices[ii,2])+\"\\n\")\n\tfout.close()\n\n\ndef write_ply_point_normal(name, vertices, normals=None):\n\tfout = open(name, 'w')\n\tfout.write(\"ply\\n\")\n\tfout.write(\"format ascii 1.0\\n\")\n\tfout.write(\"element vertex \"+str(len(vertices))+\"\\n\")\n\tfout.write(\"property float x\\n\")\n\tfout.write(\"property float y\\n\")\n\tfout.write(\"property float z\\n\")\n\tfout.write(\"property float nx\\n\")\n\tfout.write(\"property float ny\\n\")\n\tfout.write(\"property float nz\\n\")\n\tfout.write(\"end_header\\n\")\n\tif normals is None:\n\t\tfor ii in range(len(vertices)):\n\t\t\tfout.write(str(vertices[ii,0])+\" \"+str(vertices[ii,1])+\" \"+str(vertices[ii,2])+\" \"+str(vertices[ii,3])+\" \"+str(vertices[ii,4])+\" \"+str(vertices[ii,5])+\"\\n\")\n\telse:\n\t\tfor ii in range(len(vertices)):\n\t\t\tfout.write(str(vertices[ii,0])+\" \"+str(vertices[ii,1])+\" \"+str(vertices[ii,2])+\" \"+str(normals[ii,0])+\" \"+str(normals[ii,1])+\" \"+str(normals[ii,2])+\"\\n\")\n\tfout.close()\n\n\ndef write_ply_triangle(name, vertices, triangles):\n\tfout = open(name, 'w')\n\tfout.write(\"ply\\n\")\n\tfout.write(\"format ascii 1.0\\n\")\n\tfout.write(\"element vertex \"+str(len(vertices))+\"\\n\")\n\tfout.write(\"property float x\\n\")\n\tfout.write(\"property float y\\n\")\n\tfout.write(\"property float z\\n\")\n\tfout.write(\"element face \"+str(len(triangles))+\"\\n\")\n\tfout.write(\"property list uchar int vertex_index\\n\")\n\tfout.write(\"end_header\\n\")\n\tfor ii in range(len(vertices)):\n\t\tfout.write(str(vertices[ii,0])+\" \"+str(vertices[ii,1])+\" \"+str(vertices[ii,2])+\"\\n\")\n\tfor ii in range(len(triangles)):\n\t\tfout.write(\"3 \"+str(triangles[ii,0])+\" \"+str(triangles[ii,1])+\" \"+str(triangles[ii,2])+\"\\n\")\n\tfout.close()\n\n\ndef sample_points_triangle(vertices, triangles, num_of_points):\n\tepsilon = 1e-6\n\ttriangle_area_list = np.zeros([len(triangles)],np.float32)\n\ttriangle_normal_list = np.zeros([len(triangles),3],np.float32)\n\tfor i in range(len(triangles)):\n\t\t#area = |u x v|/2 = |u||v|sin(uv)/2\n\t\ta,b,c = vertices[triangles[i,1]]-vertices[triangles[i,0]]\n\t\tx,y,z = vertices[triangles[i,2]]-vertices[triangles[i,0]]\n\t\tti = b*z-c*y\n\t\ttj = c*x-a*z\n\t\ttk = a*y-b*x\n\t\tarea2 = math.sqrt(ti*ti+tj*tj+tk*tk)\n\t\tif area2<epsilon:\n\t\t\ttriangle_area_list[i] = 0\n\t\t\ttriangle_normal_list[i,0] = 0\n\t\t\ttriangle_normal_list[i,1] = 0\n\t\t\ttriangle_normal_list[i,2] = 0\n\t\telse:\n\t\t\ttriangle_area_list[i] = area2\n\t\t\ttriangle_normal_list[i,0] = ti/area2\n\t\t\ttriangle_normal_list[i,1] = tj/area2\n\t\t\ttriangle_normal_list[i,2] = tk/area2\n\t\n\ttriangle_area_sum = np.sum(triangle_area_list)\n\tsample_prob_list = (num_of_points/triangle_area_sum)*triangle_area_list\n\n\ttriangle_index_list = np.arange(len(triangles))\n\n\tpoint_normal_list = np.zeros([num_of_points,6],np.float32)\n\tcount = 0\n\twatchdog = 0\n\n\twhile(count<num_of_points):\n\t\tnp.random.shuffle(triangle_index_list)\n\t\twatchdog += 1\n\t\tif watchdog>100:\n\t\t\tprint(\"infinite loop 
here!\")\n\t\t\treturn point_normal_list\n\t\tfor i in range(len(triangle_index_list)):\n\t\t\tif count>=num_of_points: break\n\t\t\tdxb = triangle_index_list[i]\n\t\t\tprob = sample_prob_list[dxb]\n\t\t\tprob_i = int(prob)\n\t\t\tprob_f = prob-prob_i\n\t\t\tif np.random.random()<prob_f:\n\t\t\t\tprob_i += 1\n\t\t\tnormal_direction = triangle_normal_list[dxb]\n\t\t\tu = vertices[triangles[dxb,1]]-vertices[triangles[dxb,0]]\n\t\t\tv = vertices[triangles[dxb,2]]-vertices[triangles[dxb,0]]\n\t\t\tbase = vertices[triangles[dxb,0]]\n\t\t\tfor j in range(prob_i):\n\t\t\t\t#sample a point here:\n\t\t\t\tu_x = np.random.random()\n\t\t\t\tv_y = np.random.random()\n\t\t\t\tif u_x+v_y>=1:\n\t\t\t\t\tu_x = 1-u_x\n\t\t\t\t\tv_y = 1-v_y\n\t\t\t\tppp = u*u_x+v*v_y+base\n\t\t\t\t\n\t\t\t\tpoint_normal_list[count,:3] = ppp\n\t\t\t\tpoint_normal_list[count,3:] = normal_direction\n\t\t\t\tcount += 1\n\t\t\t\tif count>=num_of_points: break\n\n\treturn point_normal_list" ]
[ [ "numpy.sum", "numpy.random.shuffle", "numpy.zeros", "numpy.random.random" ] ]
KyleKing/PersonalFinanceExplorer
[ "e46e139e006788b8e942a4b4b72c80c8a8623fd6" ]
[ "pfe/profile.py" ]
[ "\"\"\"Profile Settings Page.\"\"\"\n\nimport dash_html_components as html\nimport dash_table\nimport pandas as pd\nfrom dash.dependencies import Input, Output\nfrom dash_charts import appUtils\nfrom icecream import ic\n\nfrom .plaidWrapper import PlaidDashWrapper\n\n\nclass TabProfile(appUtils.TabBase):\n \"\"\"Profile Page.\"\"\"\n\n NAME = 'Profile'\n\n df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/solar.csv')\n\n def __init__(self, app):\n \"\"\"Initialize the tab and verify data members.\n\n app -- Dash application instance\n\n \"\"\"\n super().__init__(app)\n self.pdw = PlaidDashWrapper(app)\n\n def createLayout(self):\n \"\"\"Return the Dash layout components.\"\"\"\n return html.Div(className='section', children=[\n html.H1('Manage User Profile'),\n html.H2('Edit Linked Accounts'),\n # TODO: Add confirmation modal when deleting an account\n dash_table.DataTable(\n id='table',\n columns=[{\"name\": i, \"id\": i} for i in self.df.columns],\n data=self.df.to_dict('records'),\n row_deletable=True,\n ),\n self.pdw.createLayout(),\n ])\n\n def registerCallbacks(self):\n \"\"\"Register callbacks necessary for this tab.\"\"\"\n self._edits()\n self.pdw.registerCallbacks()\n\n def _edits(self):\n \"\"\"Read changes to the data table.\"\"\"\n @self.app.callback(\n Output('table', 'figure'),\n [Input('table', 'data'), Input('table', 'columns')])\n def readTableChanges(rows, columns):\n self.df = pd.DataFrame(rows, columns=[c['name'] for c in columns])\n return {} # Make no changes to table\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
RitchieLabIGH/IRFinder
[ "89fc165c1f8f5ffbbbb766f67c3f7d79e266f8f8" ]
[ "src/cnnfilter/testCNN/actions/extract.py" ]
[ "import numpy as np\nfrom scipy.interpolate import interp1d\n\n\n\n\n\ndef getImageArrayFromRegion(region, img_size=None):\n '''\n Return the numpy array representing the image from a given region\n '''\n\n\n read_img, ann_img = generateImagesArrayGreyFromRegion(region,img_size)\n\n return read_img\n\n\n\ndef generateImagesArrayGreyFromRegion(region, img_size=None):\n\n\n '''\n Return the arrays composing an image from a given region\n '''\n\n region_size = len(region)\n\n depth = max([sum(i) for i in region])\n if depth == 0:\n raise ArithmeticError(\"Error! trying to generate an image with zero depth.\")\n reads_img = (np.array(region)[:, :] / depth) * 255\n\n\n if region_size < img_size:\n kindinterp = \"nearest\"\n else:\n kindinterp = \"zero\" #\"linear\"\n\n\n f0 = interp1d(np.arange(0, region_size-30), reads_img[15:-15,0], kind=kindinterp)\n f1 = interp1d(np.arange(0, region_size-30), reads_img[15:-15,1], kind=kindinterp)\n\n reads_imgd1 = np.array([np.array(reads_img[0:15, 0])])\n reads_imgd1 = np.append(reads_imgd1, f0(np.arange(0, (img_size - 30)) * ((region_size - 31) / (img_size - 30))))\n reads_imgd1 = np.append(reads_imgd1, reads_img[-15:, 0])\n\n reads_imgd2 = np.array([np.array(reads_img[0:15, 1]+reads_img[0:15, 0])])\n\n\n reads_imgd2 = np.append(reads_imgd2, f1(np.arange(0, (img_size - 30)) * ((region_size - 31) / (img_size - 30)))+reads_imgd1[15:-15])\n\n reads_imgd2 = np.append(reads_imgd2, reads_img[ -15:,1]+reads_img[-15:, 0])\n\n reads_img2 = np.array([reads_imgd1,reads_imgd2])\n\n reads_img2 = np.expand_dims(np.rot90(np.round(reads_img2).astype(\"float32\"), k=3), axis=2)\n\n return reads_img2, None\n\n" ]
[ [ "numpy.round", "numpy.array", "numpy.arange", "numpy.append" ] ]
Jackzhou1999/Mask_detection
[ "41b1995591e419a35e75c8ea8508f62db30de1b7" ]
[ "train.py" ]
[ "#-------------------------------------#\r\n# 对数据集进行训练\r\n#-------------------------------------#\r\nimport os\r\nimport numpy as np\r\nimport time\r\nimport torch\r\nfrom torch.autograd import Variable\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nimport torch.backends.cudnn as cudnn\r\nfrom utils.config import Config\r\nfrom nets.yolo_training import YOLOLoss, Generator\r\nfrom nets.yolo3 import YoloBody\r\nimport matplotlib.pyplot as plt\r\n\r\ndef fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,genval,Epoch):\r\n total_loss = 0\r\n val_loss = 0\r\n for iteration in range(epoch_size):\r\n start_time = time.time()\r\n images, targets = next(gen)\r\n with torch.no_grad():\r\n images = Variable(torch.from_numpy(images).cuda().type(torch.FloatTensor))\r\n targets = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets]\r\n\r\n optimizer.zero_grad()\r\n outputs = net(images)\r\n losses = []\r\n for i in range(3):\r\n loss_item = yolo_losses[i](outputs[i], targets)\r\n losses.append(loss_item[0])\r\n loss = sum(losses)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n total_loss += loss\r\n waste_time = time.time() - start_time\r\n print('\\nEpoch:'+ str(epoch+1) + '/' + str(Epoch))\r\n print('iter:' + str(iteration) + '/' + str(epoch_size) + ' || Total Loss: %.4f || %.4fs/step' % (total_loss/(iteration+1),waste_time))\r\n\r\n\r\n print('Start Validation')\r\n for iteration in range(epoch_size_val):\r\n images_val, targets_val = next(gen_val)\r\n\r\n with torch.no_grad():\r\n images_val = Variable(torch.from_numpy(images_val).cuda().type(torch.FloatTensor))\r\n targets_val = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets_val]\r\n optimizer.zero_grad()\r\n outputs = net(images_val)\r\n losses = []\r\n for i in range(3):\r\n loss_item = yolo_losses[i](outputs[i], targets_val)\r\n losses.append(loss_item[0])\r\n loss = sum(losses)\r\n val_loss += loss\r\n print('Finish Validation')\r\n print('\\nEpoch:' + str(epoch+1) + '/' + str(Epoch))\r\n print('Total Loss: %.4f || Val Loss: %.4f ' % (total_loss/(epoch_size+1), val_loss/(epoch_size_val+1)))\r\n losslist.append(total_loss/(epoch_size+1))\r\n valloss.append(val_loss/(epoch_size_val+1))\r\n\r\n print('Saving state, iter:', str(epoch+1))\r\n if epoch % 4 == 0:\r\n torch.save(model.state_dict(), 'logs/Epoch%d-Total_Loss%.4f-Val_Loss%.4f.pth' % ((epoch+1),total_loss/(epoch_size+1),val_loss/(epoch_size_val+1)))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # 参数初始化\r\n annotation_path = '/home/jackzhou/PycharmProjects/mask_detection/Data/Trainset.txt'\r\n model = YoloBody(Config)\r\n\r\n print('Loading weights into state dict...')\r\n model_dict = model.state_dict()\r\n pretrained_dict = torch.load(\"/home/jackzhou/Downloads/yolo_weights.pth\")\r\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}\r\n model_dict.update(pretrained_dict)\r\n model.load_state_dict(model_dict)\r\n print('Finished!')\r\n\r\n net = model\r\n\r\n net = torch.nn.DataParallel(model)\r\n cudnn.benchmark = True\r\n net = net.cuda().train()\r\n\r\n # 建立loss函数\r\n yolo_losses = []\r\n for i in range(3):\r\n yolo_losses.append(YOLOLoss(np.reshape(Config[\"yolo\"][\"anchors\"],[-1,2]),\r\n Config[\"yolo\"][\"classes\"], (Config[\"img_w\"], Config[\"img_h\"])))\r\n losslist = []\r\n valloss = []\r\n # 0.1用于验证,0.9用于训练\r\n val_split = 0.1\r\n with open(annotation_path) as f:\r\n lines = f.readlines()\r\n 
np.random.seed(10101)\r\n np.random.shuffle(lines)\r\n np.random.seed(None)\r\n num_val = int(len(lines)*val_split)\r\n num_train = len(lines) - num_val\r\n \r\n\r\n if True:\r\n # 最开始使用1e-3的学习率可以收敛的更快\r\n lr = 1e-3\r\n Batch_size = 8\r\n Init_Epoch = 0\r\n Freeze_Epoch = 25\r\n \r\n optimizer = optim.Adam(net.parameters(), lr)\r\n lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.95)\r\n\r\n gen = Generator(Batch_size, lines[:num_train],\r\n (Config[\"img_h\"], Config[\"img_w\"])).generate()\r\n gen_val = Generator(Batch_size, lines[num_train:],\r\n (Config[\"img_h\"], Config[\"img_w\"])).generate()\r\n \r\n epoch_size = num_train//Batch_size\r\n epoch_size_val = num_val//Batch_size\r\n #------------------------------------#\r\n # 冻结一定部分训练\r\n #------------------------------------#\r\n for param in model.backbone.parameters():\r\n param.requires_grad = False\r\n\r\n for epoch in range(Init_Epoch,Freeze_Epoch):\r\n fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,gen_val,Freeze_Epoch)\r\n lr_scheduler.step()\r\n \r\n if True:\r\n lr = 1e-4\r\n Batch_size = 4\r\n Freeze_Epoch = 25\r\n Unfreeze_Epoch = 150\r\n\r\n optimizer = optim.Adam(net.parameters(),lr)\r\n lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.95)\r\n gen = Generator(Batch_size, lines[:num_train],\r\n (Config[\"img_h\"], Config[\"img_w\"])).generate()\r\n gen_val = Generator(Batch_size, lines[num_train:],\r\n (Config[\"img_h\"], Config[\"img_w\"])).generate()\r\n \r\n epoch_size = num_train//Batch_size\r\n epoch_size_val = num_val//Batch_size\r\n #------------------------------------#\r\n # 解冻后训练\r\n #------------------------------------#\r\n for param in model.backbone.parameters():\r\n param.requires_grad = True\r\n\r\n for epoch in range(Freeze_Epoch,Unfreeze_Epoch):\r\n fit_ont_epoch(net, yolo_losses,epoch,epoch_size,epoch_size_val,gen,gen_val,Unfreeze_Epoch)\r\n lr_scheduler.step()\r\n\r\n plt.subplot()\r\n plt.plot(range(len(losslist)), losslist, 'black', label='train')\r\n plt.plot(range(len(valloss)), valloss, 'red', label='val')\r\n plt.legend()\r\n plt.ylabel(\"loss\")\r\n plt.grid(True)\r\n plt.show()\r\n" ]
[ [ "torch.optim.lr_scheduler.StepLR", "numpy.reshape", "numpy.random.seed", "matplotlib.pyplot.grid", "torch.no_grad", "matplotlib.pyplot.legend", "numpy.random.shuffle", "numpy.shape", "torch.from_numpy", "torch.load", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "torch.nn.DataParallel", "matplotlib.pyplot.subplot" ] ]
gbellandi/pystran
[ "6a283a0a5b2c3dbeb52e8f7f64170be2d829063f" ]
[ "pystran/distributions.py" ]
[ "\"\"\"\r\nCreated on Wed Oct 10 16:37:52 2012\r\n@author: VHOEYS\r\n\r\nPROBABILITY DISTRIBUTIONS\r\n\r\nA more extended set of distributions is available in the scipy.stats library,\r\nhttp://docs.scipy.org/doc/scipy/reference/stats.html\r\nor by pymc: https://pymc-devs.github.io/pymc/distributions.html\r\n\"\"\"\r\n\r\n#Import general packages\r\nimport math\r\nimport numpy as np\r\n\r\ndef NormalizePDF(inputs):\r\n '''\r\n Sum of all elements becomes 1\r\n '''\r\n normed = inputs/inputs.sum()\r\n return normed\r\n\r\ndef Normalize(inputs):\r\n '''\r\n Normalization, values recalculated between 0 and 1\r\n '''\r\n Normed=(inputs-min(inputs))/(max(inputs)-min(inputs))\r\n return Normed\r\n\r\n#########################################################################\r\n# PDF Get probability value for a certain input! ##\r\n#########################################################################\r\n\r\ndef UniformDistribution(x, left=0.0, right=1.0, val=1.0):\r\n '''\r\n Probability is 1. when in the region\r\n '''\r\n if left<=x<=right:\r\n px=val\r\n else:\r\n px=0.0\r\n return px\r\n\r\ndef TriangularDistribution(x,left,mode,right):\r\n '''\r\n Calculates the \"weight factor\" of triangular, based on a certain inputvalue\r\n\r\n see numpy manual (or Beven_book: left=0, right=1)\r\n '''\r\n if mode>right:\r\n print('right en mode zijn omgewisseld!!')\r\n if left<=x<=mode:\r\n px=2*(x-left)/((right-left)*(mode-left))\r\n elif mode<=x<=right:\r\n px=2*(right-x)/((right-left)*(right-mode))\r\n else:\r\n px=0.0\r\n return px\r\n\r\ndef TrapezoidalDistribution(x,left,mode1,mode2,right):\r\n '''\r\n Calculates the \"weight factor\" of trapezoidal\r\n based on a certain inputvalue\r\n '''\r\n if mode1>right:\r\n print('right en mode1 zijn omgewisseld!!')\r\n if mode2>right:\r\n print('right en mode1 zijn omgewisseld!!')\r\n if mode1>mode2:\r\n print('right en mode1 zijn omgewisseld!!')\r\n\r\n u=2/(right+mode2-mode1-left)\r\n\r\n if left<=x<=mode1:\r\n px=u*(x-left)/(mode1-left)\r\n elif mode1<=x<=mode2:\r\n px=u\r\n elif mode2<=x<=right:\r\n px=u*(right-x)/(right-mode2)\r\n else:\r\n px=0.0\r\n return px\r\n\r\ndef NormalDistribution(x, mu=0.0, sigma=1.0, wholePDF=True,left=None,right=None):\r\n if wholePDF == True:\r\n px=1*np.exp(- (x - mu)**2 / (2 * sigma**2)) /(sigma * np.sqrt(2 * np.pi))\r\n else: #cut boundaries to use it as membership function\r\n if left<=x<=right:\r\n px=1*np.exp(- (x - mu)**2 / (2 * sigma**2)) /(sigma * np.sqrt(2 * np.pi))\r\n else:\r\n px=0.0\r\n return px\r\n\r\ndef LogNormalDistribution(x, mu=0.0, sigma=1.0, wholePDF=True,left=0.0,right=None):\r\n if wholePDF == True:\r\n px = np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))/(x * sigma * np.sqrt(2 * np.pi))\r\n else:\r\n if left<=x<=right:\r\n px = np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))/(x * sigma * np.sqrt(2 * np.pi))\r\n else:\r\n px=0.0\r\n return px\r\n\r\n##########################################################################\r\n##RANDOM SAMPLING ##\r\n##########################################################################\r\n\r\n#Uniform\r\ndef randomUniform(left=0.0,right=1.0,rnsize=None):\r\n '''\r\n link to uniform sampling of numpy, to remain consistency in names\r\n of the pyFUSE module\r\n\r\n Parameters\r\n ----------\r\n left: float\r\n lower value\r\n right: float\r\n upper value\r\n rnsize: int\r\n number of samples\r\n\r\n See Also\r\n ---------\r\n numpy.random.uniform\r\n '''\r\n rn=np.random.uniform(left,right,rnsize)\r\n return rn\r\n\r\n#triangular\r\ndef 
randomTriangular(left=0.0, mode=None, right=1.0, rnsize=None):\r\n '''\r\n link to triangular sampling of numpy, to remain consistency in names\r\n of the pyFUSE module\r\n\r\n Parameters\r\n ----------\r\n left: float\r\n lower value\r\n mode: float\r\n value between left and right, highest probability\r\n right: float\r\n upper value\r\n rnsize: int\r\n number of samples\r\n\r\n See Also\r\n ---------\r\n numpy.random.triangular\r\n '''\r\n\r\n if mode==None:\r\n print('Triangular needs mode-value')\r\n rn=np.random.triangular(left, mode, right, rnsize)\r\n return rn\r\n\r\n#trapezoidal\r\ndef randomTrapezoidal(left=0.0, mode1=None, mode2=None,\r\n right=1.0, rnsize=None):\r\n '''\r\n random sampling from trapezoidal function\r\n\r\n Parameters\r\n ----------\r\n left: float\r\n lower value\r\n mode1: float\r\n value between left and right, highest probability left side\r\n mode2: float\r\n value between left and right, highest probability right side\r\n right: float\r\n upper value\r\n rnsize: int\r\n number of samples\r\n\r\n '''\r\n if mode1==None:\r\n print('Triangular needs 2 mode-values')\r\n if mode1==None:\r\n print('Triangular needs 2 mode-values')\r\n\r\n rn=np.zeros(rnsize)\r\n for i in range(np.size(rn)):\r\n y = np.random.uniform(0.0,1.0,1)\r\n h=2/(right+mode2-mode1-left)\r\n\r\n a=left\r\n b=right\r\n c=mode1\r\n d=mode2\r\n\r\n if 0.0<=y<=(h*(c-a)/2):\r\n rn[i]=a+np.sqrt(2*(c-a)/h)*np.sqrt(y)\r\n elif (h*(c-a)/2)<=y<=(1-h*(b-d)/2):\r\n rn[i]=(a+c)/2 + y/h\r\n elif (1-h*(b-d)/2)<=y<=1.0:\r\n rn[i]=b-np.sqrt(2*(b-d)/h)*np.sqrt(1-y)\r\n else:\r\n print('not in correct range')\r\n return rn\r\n\r\n#Normal\r\ndef randomNormal(mu=0.0, sigma=1.0, rnsize=None):\r\n '''\r\n link to sampling of normal distribution of numpy, to remain consistency in names\r\n of the pyFUSE module\r\n\r\n Parameters\r\n ----------\r\n mu: float\r\n mean value\r\n sigma: float\r\n Standard deviation (spread or 'width') of the distribution\r\n rnsize: int\r\n number of samples\r\n\r\n See Also\r\n ---------\r\n numpy.random.normal\r\n '''\r\n\r\n rn=np.random.normal(mu, sigma, rnsize)\r\n return rn\r\n\r\n#lognormal\r\ndef randomLogNormal(mu=0.0, sigma=1.0, rnsize=None):\r\n '''\r\n link to sampling of lognormal distribution of numpy, to remain consistency in names\r\n of the pyFUSE module\r\n\r\n Parameters\r\n ----------\r\n mu: float\r\n Mean value of the underlying normal distribution\r\n sigma: float\r\n Standard deviation of the underlying normal distribution\r\n rnsize: int\r\n number of samples\r\n\r\n See Also\r\n ---------\r\n numpy.random.lognormal\r\n '''\r\n\r\n rn=np.random.lognormal(mu, sigma, rnsize)\r\n return rn\r\n\r\n#distribution selector\r\ndef DistSelector(args,distname='randomUniform'):\r\n if distname =='randomUniform':\r\n return randomUniform(left=args[0],right=args[1],rnsize=args[2])\r\n elif distname =='randomTriangular':\r\n return randomTriangular(left=args[0], mode=args[1], right=args[2], rnsize=args[3])\r\n elif distname =='randomTrapezoidal':\r\n return randomTrapezoidal(left=args[0],mode1=args[1],mode2=args[2],right=args[3],rnsize=args[4])\r\n elif distname =='randomNormal':\r\n return randomNormal(mu=args[0], sigma=args[1], rnsize=args[2])\r\n elif distname =='randomLogNormal':\r\n return randomLogNormal(mu=args[0], sigma=args[0], rnsize=args[0])\r\n else:\r\n raise Exception(\"\"\"\r\n Wrong ditribution name; choose from:\r\n randomUniform, randomTriangular,\r\n randomTrapezoidal, randomNormal,\r\n randomLogNormal\r\n 
\"\"\")\r\n\r\n##########################################################################\r\n##INVERSE DISTRIBUTIONS (from uniform to other distributions sampling) ##\r\n##########################################################################\r\n\r\ndef stnorm2norm(stn,mu,sigma):\r\n return sigma*stn + mu\r\n\r\ndef ltqnorm(p):\r\n \"\"\"\r\n Modified from the author's original perl code (original comments follow below)\r\n by [email protected]. May 3, 2004.\r\n\r\n Lower tail quantile for standard normal distribution function.\r\n\r\n This function returns an approximation of the inverse cumulative\r\n standard normal distribution function. I.e., given P, it returns\r\n an approximation to the X satisfying P = Pr{Z <= X} where Z is a\r\n random variable from the standard normal distribution.\r\n\r\n The algorithm uses a minimax approximation by rational functions\r\n and the result has a relative error whose absolute value is less\r\n than 1.15e-9.\r\n\r\n Author: Peter John Acklam\r\n Time-stamp: 2000-07-19 18:26:14\r\n E-mail: [email protected]\r\n WWW URL: http://home.online.no/~pjacklam\r\n \"\"\"\r\n\r\n if p <= 0 or p >= 1:\r\n # The original perl code exits here, we'll throw an exception instead\r\n raise ValueError( \"Argument to ltqnorm %f must be in open interval (0,1)\" % p )\r\n\r\n # Coefficients in rational approximations.\r\n a = (-3.969683028665376e+01, 2.209460984245205e+02, \\\r\n -2.759285104469687e+02, 1.383577518672690e+02, \\\r\n -3.066479806614716e+01, 2.506628277459239e+00)\r\n b = (-5.447609879822406e+01, 1.615858368580409e+02, \\\r\n -1.556989798598866e+02, 6.680131188771972e+01, \\\r\n -1.328068155288572e+01 )\r\n c = (-7.784894002430293e-03, -3.223964580411365e-01, \\\r\n -2.400758277161838e+00, -2.549732539343734e+00, \\\r\n 4.374664141464968e+00, 2.938163982698783e+00)\r\n d = ( 7.784695709041462e-03, 3.224671290700398e-01, \\\r\n 2.445134137142996e+00, 3.754408661907416e+00)\r\n\r\n # Define break-points.\r\n plow = 0.02425\r\n phigh = 1 - plow\r\n\r\n # Rational approximation for lower region:\r\n if p < plow:\r\n q = math.sqrt(-2*math.log(p))\r\n return (((((c[0]*q+c[1])*q+c[2])*q+c[3])*q+c[4])*q+c[5]) / \\\r\n ((((d[0]*q+d[1])*q+d[2])*q+d[3])*q+1)\r\n\r\n # Rational approximation for upper region:\r\n if phigh < p:\r\n q = math.sqrt(-2*math.log(1-p))\r\n return -(((((c[0]*q+c[1])*q+c[2])*q+c[3])*q+c[4])*q+c[5]) / \\\r\n ((((d[0]*q+d[1])*q+d[2])*q+d[3])*q+1)\r\n\r\n # Rational approximation for central region:\r\n q = p - 0.5\r\n r = q*q\r\n return (((((a[0]*r+a[1])*r+a[2])*r+a[3])*r+a[4])*r+a[5])*q / \\\r\n (((((b[0]*r+b[1])*r+b[2])*r+b[3])*r+b[4])*r+1)\r\n\r\ndef ltqnormarr(parr, mu=0.0, sigma=1.):\r\n stnorm=np.array([ltqnorm(p) for p in parr])\r\n print((type(stnorm)))\r\n return stnorm2norm(stnorm, mu, sigma)\r\n" ]
[ [ "numpy.random.normal", "numpy.random.lognormal", "numpy.zeros", "numpy.log", "numpy.exp", "numpy.random.triangular", "numpy.random.uniform", "numpy.size", "numpy.sqrt" ] ]
antoinedemathelin/adapt
[ "cae888b1a0ae2d82772ae8575457f5ad7799a8b7" ]
[ "tests/test_utils.py" ]
[ "\"\"\"\nTest functions for utils module.\n\"\"\"\n\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom sklearn.linear_model import LinearRegression, LogisticRegression, Ridge\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostRegressor\nfrom sklearn.multioutput import MultiOutputRegressor\nfrom sklearn.compose import TransformedTargetRegressor\nfrom sklearn.base import BaseEstimator, RegressorMixin, ClassifierMixin\nfrom sklearn.tree._tree import Tree\nfrom tensorflow.keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor\nfrom tensorflow.keras import Model, Sequential\nfrom tensorflow.keras.layers import Input, Dense, Flatten, Reshape\nfrom tensorflow.python.keras.engine.input_layer import InputLayer\n\nfrom adapt.utils import *\n\n\ndef is_equal_estimator(v1, v2):\n assert type(v2) == type(v1)\n if isinstance(v1, np.ndarray):\n assert np.array_equal(v1, v2)\n elif isinstance(v1, (BaseEstimator, KerasClassifier, KerasRegressor)):\n assert is_equal_estimator(v1.__dict__, v2.__dict__)\n elif isinstance(v1, Model):\n assert is_equal_estimator(v1.get_config(),\n v2.get_config())\n elif isinstance(v1, dict):\n diff_keys = ((set(v1.keys())-set(v2.keys())) |\n (set(v2.keys())-set(v1.keys())))\n for k in diff_keys:\n assert \"input_shape\" in k\n for k1_i, v1_i in v1.items():\n # Avoid exception due to new input layer name\n if k1_i != \"name\" and not \"input_shape\" in str(k1_i):\n v2_i = v2[k1_i]\n assert is_equal_estimator(v1_i, v2_i)\n elif isinstance(v1, (list, tuple)):\n assert len(v1) == len(v2)\n for v1_i, v2_i in zip(v1, v2):\n assert is_equal_estimator(v1_i, v2_i)\n elif isinstance(v1, Tree):\n pass # TODO create a function to check if two tree are equal\n else:\n if not \"input\" in str(v1):\n assert v1 == v2\n return True\n\n \n\nclass CustomEstimator(BaseEstimator):\n \n def __init__(self):\n pass\n \n def fit(self, X, y):\n pass\n\n \nclass DummyModel(Model):\n \n def __init__(self):\n pass\n \n\nclass CantBeDeepCopied(BaseEstimator):\n \n def __init__(self):\n pass\n \n def __deepcopy__(self):\n raise ValueError(\"Can not be deep copied!\")\n\n\ndef _get_model_Model(compiled=True, custom_loss=False):\n inputs = Input((10,))\n output = Dense(1)(inputs)\n model = Model(inputs, output)\n if custom_loss:\n loss = K.mean(output)\n model.add_loss(loss)\n if compiled:\n model.compile(loss=\"mse\", optimizer=\"adam\")\n return model\n\n\ndef _get_model_Sequential(input_shape=None, compiled=True):\n model = Sequential()\n if input_shape is not None:\n model.add(Dense(1, input_shape=input_shape))\n else:\n model.add(Dense(1))\n if compiled:\n model.compile(loss=\"mse\", optimizer=\"adam\")\n return model\n\n\narrays_nd = [np.ones((10, 1)), np.zeros((10, 10)),\n np.zeros((10, 5, 1)), np.full((10, 20), -5.5),\n np.ones((1, 1)), np.random.randn(1, 5, 5, 1)]\n\[email protected](\"z\", arrays_nd)\ndef test_check_arrays_nd(z):\n Xs, ys, Xt, yt = check_arrays(z, z, z, z)\n assert np.array_equal(Xs, z)\n assert np.array_equal(ys, z)\n assert np.array_equal(Xt, z)\n assert np.array_equal(yt, z)\n \n \ndef test_check_arrays_diff_input():\n Xs, ys, Xt, yt = arrays_nd[:4]\n assert np.array_equal(Xs, arrays_nd[0])\n assert np.array_equal(ys, arrays_nd[1])\n assert np.array_equal(Xt, arrays_nd[2])\n assert np.array_equal(yt, arrays_nd[3])\n \n \narrays_1d = [np.ones((10,)), np.ones((1,))]\narrays_2d = 
[np.ones((10, 1)), np.ones((1, 1))]\n\[email protected](\"z, zz\", zip(arrays_1d, arrays_2d))\ndef test_check_arrays_1d(z, zz):\n Xs, ys, Xt, yt = check_arrays(z, z, z, z)\n assert np.array_equal(Xs, zz)\n assert np.array_equal(ys, zz)\n assert np.array_equal(Xt, zz)\n assert np.array_equal(yt, zz)\n\n\ndef test_check_arrays_no_yt():\n z = arrays_nd[0]\n Xs, ys, Xt, yt = check_arrays(z, z, z)\n assert yt is None\n assert np.array_equal(Xs, z)\n assert np.array_equal(ys, z)\n assert np.array_equal(Xt, z)\n\n\ndef test_check_arrays_length_error():\n z = arrays_nd[0]\n with pytest.raises(ValueError) as excinfo:\n Xs, ys, Xt, yt = check_arrays(z, z[:5], z)\n assert \"Length of Xs and ys mismatch: 10 != 5\" in str(excinfo.value)\n with pytest.raises(ValueError) as excinfo:\n Xs, ys, Xt, yt = check_arrays(z, z, z, z[:5])\n assert \"Length of Xt and yt mismatch: 10 != 5\" in str(excinfo.value)\n \n \ndef test_check_arrays_no_array():\n z = np.array([1,2,3])\n with pytest.raises(TypeError) as excinfo:\n Xs, ys, Xt, yt = check_arrays(\"123\", z, z)\n \n\[email protected](\"X\", arrays_nd)\ndef test_check_one_array_nd(X):\n Xt = check_one_array(X)\n assert np.array_equal(Xt, X)\n \n \[email protected](\"X, Xtt\", zip(arrays_1d, arrays_2d))\ndef test_check_one_array_1d(X, Xtt):\n Xt = check_one_array(X)\n assert np.array_equal(Xt, Xtt)\n\n \nnetworks = [\n _get_model_Model(compiled=True, custom_loss=False),\n _get_model_Sequential(compiled=True, input_shape=(10,)),\n _get_model_Sequential(compiled=True, input_shape=None),\n _get_model_Model(compiled=False, custom_loss=False),\n _get_model_Model(compiled=False, custom_loss=True),\n _get_model_Sequential(compiled=False, input_shape=(10,)),\n _get_model_Sequential(compiled=False, input_shape=None)\n]\n \n\[email protected](\"net\", networks)\ndef test_check_network_network(net):\n new_net = check_network(net, compile_=False)\n assert is_equal_estimator(new_net, net)\n if net.built:\n for i in range(len(net.get_weights())):\n assert np.array_equal(net.get_weights()[i],\n new_net.get_weights()[i])\n net.predict(np.ones((10, 10)))\n new_net = check_network(net, compile_=False)\n assert is_equal_estimator(new_net, net)\n for i in range(len(net.get_weights())):\n assert np.array_equal(net.get_weights()[i],\n new_net.get_weights()[i])\n\n\[email protected](\"net\", networks)\ndef test_check_network_copy(net):\n new_net = check_network(net, copy=True, compile_=False)\n assert hex(id(new_net)) != hex(id(net))\n new_net = check_network(net, copy=False, compile_=False)\n assert hex(id(new_net)) == hex(id(net))\n \n\nno_networks = [\"lala\", Ridge(), 123, np.ones((10, 10))]\n\[email protected](\"no_net\", no_networks)\ndef test_check_network_no_model(no_net):\n with pytest.raises(ValueError) as excinfo:\n new_net = check_network(no_net)\n assert (\"Expected `network` argument \"\n \"to be a `Model` instance,\"\n \" got: %s\"%str(no_net) in str(excinfo.value))\n with pytest.raises(ValueError) as excinfo:\n new_net = check_network(no_net, display_name=\"tireli\")\n assert (\"Expected `tireli` argument \"\n \"to be a `Model` instance,\"\n \" got: %s\"%str(no_net) in str(excinfo.value))\n \n\ndef test_check_network_force_copy():\n model = DummyModel()\n with pytest.raises(ValueError) as excinfo:\n new_net = check_network(model, copy=True, force_copy=True)\n assert (\"`network` argument can't be duplicated. 
\"\n \"Recorded exception: \" in str(excinfo.value))\n with pytest.raises(ValueError) as excinfo:\n new_net = check_network(model, copy=True, force_copy=True,\n display_name=\"tireli\")\n assert (\"`tireli` argument can't be duplicated. \"\n \"Recorded exception: \" in str(excinfo.value))\n \n with pytest.warns(UserWarning) as record:\n new_net = check_network(model, copy=True, force_copy=False,\n compile_=False)\n assert (\"`network` argument can't be duplicated. \"\n \"Recorded exception: \" in str(record[0].message))\n with pytest.warns(UserWarning) as record:\n new_net = check_network(model, copy=True, force_copy=False,\n compile_=False,\n display_name=\"tireli\")\n assert (\"`tireli` argument can't be duplicated. \"\n \"Recorded exception: \" in str(record[0].message))\n \n new_net = check_network(model, copy=False, force_copy=True)\n \n \ndef test_check_network_compile():\n net = _get_model_Sequential(compiled=False)\n with pytest.raises(ValueError) as excinfo:\n new_net = check_network(net, copy=True, compile_=True)\n assert (\"The given `network` argument is not compiled yet. \"\n \"Please use `model.compile(optimizer, loss)`.\" \n in str(excinfo.value))\n with pytest.raises(ValueError) as excinfo:\n new_net = check_network(net, copy=True, compile_=True,\n display_name=\"tireli\")\n assert (\"The given `tireli` argument is not compiled yet. \"\n \"Please use `model.compile(optimizer, loss)`.\" \n in str(excinfo.value))\n \n \ndef test_check_network_high_dataset():\n Xs, ys, Xt, yt = make_regression_da(100000, 1001)\n net = _get_model_Sequential(compiled=True)\n new_net = check_network(net, copy=True, compile_=True)\n new_net.predict(Xs)\n \n\nestimators = [\n Ridge(),\n Ridge(alpha=10, fit_intercept=False, tol=0.1),\n DecisionTreeClassifier(max_depth=10),\n AdaBoostRegressor(Ridge(alpha=0.01)),\n TransformedTargetRegressor(regressor=Ridge(alpha=25), transformer=StandardScaler()),\n MultiOutputRegressor(Ridge(alpha=0.3)),\n make_pipeline(StandardScaler(), Ridge(alpha=0.2)),\n KerasClassifier(_get_model_Sequential, input_shape=(1,)),\n CustomEstimator()\n]\n\[email protected](\"est\", estimators)\ndef test_check_estimator_estimators(est):\n new_est = check_estimator(est, copy=True, force_copy=True)\n assert is_equal_estimator(est, new_est)\n if isinstance(est, MultiOutputRegressor):\n est.fit(np.linspace(0, 1, 10).reshape(-1, 1),\n np.stack([np.linspace(0, 1, 10)<0.5]*2, -1).astype(float))\n else:\n est.fit(np.linspace(0, 1, 10).reshape(-1, 1),\n (np.linspace(0, 1, 10)<0.5).astype(float))\n if isinstance(est, KerasClassifier):\n new_est = check_estimator(est, copy=False)\n else:\n new_est = check_estimator(est, copy=True, force_copy=True)\n assert is_equal_estimator(est, new_est)\n \n \[email protected](\"est\", networks[:3])\ndef test_check_estimator_networks(est):\n new_est = check_estimator(est)\n assert is_equal_estimator(est, new_est)\n \n \nno_estimators = [\"lala\", 123, np.ones((10, 10))]\n\[email protected](\"no_est\", no_estimators)\ndef test_check_estimator_no_estimators(no_est):\n with pytest.raises(ValueError) as excinfo:\n new_est = check_estimator(no_est)\n assert (\"`estimator` argument is neither a sklearn `BaseEstimator` \"\n \"instance nor a tensorflow Model instance. \"\n \"Given argument, %s\"%str(no_est) in str(excinfo.value))\n with pytest.raises(ValueError) as excinfo:\n new_est = check_estimator(no_est, display_name=\"tireli\")\n assert (\"`tireli` argument is neither a sklearn `BaseEstimator` \"\n \"instance nor a tensorflow Model instance. 
\"\n \"Given argument, %s\"%str(no_est) in str(excinfo.value))\n \n\[email protected](\"est\", estimators)\ndef test_check_estimator_copy(est):\n new_est = check_estimator(est, copy=True)\n assert hex(id(new_est)) != hex(id(est))\n new_est = check_estimator(est, copy=False)\n assert hex(id(new_est)) == hex(id(est))\n \n \ndef test_check_estimator_force_copy():\n est = CantBeDeepCopied()\n with pytest.raises(ValueError) as excinfo:\n new_est = check_estimator(est, copy=True, force_copy=True)\n assert (\"`estimator` argument can't be duplicated. \"\n \"Recorded exception: \" in str(excinfo.value))\n with pytest.raises(ValueError) as excinfo:\n new_est = check_estimator(est, copy=True, force_copy=True,\n display_name=\"tireli\")\n assert (\"`tireli` argument can't be duplicated. \"\n \"Recorded exception: \" in str(excinfo.value))\n \n with pytest.warns(UserWarning) as record:\n new_est = check_estimator(est, copy=True, force_copy=False)\n assert (\"`estimator` argument can't be duplicated. \"\n \"Recorded exception: \" in str(record[0].message))\n with pytest.warns(UserWarning) as record:\n new_est = check_estimator(est, copy=True, force_copy=False,\n display_name=\"tireli\")\n assert (\"`tireli` argument can't be duplicated. \"\n \"Recorded exception: \" in str(record[0].message))\n \n new_est = check_estimator(est, copy=False, force_copy=True)\n \n \ndef test_check_estimator_task():\n new_est = check_estimator()\n assert isinstance(new_est, LinearRegression)\n new_est = check_estimator(task=\"class\")\n assert isinstance(new_est, LogisticRegression)\n new_est = check_estimator(DecisionTreeClassifier(),\n task=\"class\")\n assert isinstance(new_est, DecisionTreeClassifier)\n new_est = check_estimator(Ridge(),\n task=\"reg\")\n assert isinstance(new_est, Ridge)\n \n with pytest.raises(ValueError) as excinfo:\n new_est = check_estimator(DecisionTreeClassifier(), task=\"reg\")\n assert (\"`estimator` argument is a sklearn `ClassifierMixin` instance \"\n \"whereas the considered object handles only regression task. \"\n \"Please provide a sklearn `RegressionMixin` instance or a \"\n \"tensorflow Model instance.\" in str(excinfo.value))\n with pytest.raises(ValueError) as excinfo:\n new_est = check_estimator(DecisionTreeClassifier(), task=\"reg\",\n display_name=\"tireli\")\n assert (\"`tireli` argument is a sklearn\"\n \" `ClassifierMixin` instance \" in str(excinfo.value))\n \n with pytest.raises(ValueError) as excinfo:\n new_est = check_estimator(Ridge(), task=\"class\")\n assert (\"`estimator` argument is a sklearn `RegressionMixin` instance \"\n \"whereas the considered object handles only classification task. 
\"\n \"Please provide a sklearn `ClassifierMixin` instance or a \"\n \"tensorflow Model instance.\" in str(excinfo.value))\n with pytest.raises(ValueError) as excinfo:\n new_est = check_estimator(Ridge(), task=\"class\",\n display_name=\"tireli\")\n assert (\"`tireli` argument is a sklearn\"\n \" `RegressionMixin` instance \" in str(excinfo.value))\n\n\ndef test_get_default_encoder():\n model = get_default_encoder()\n assert isinstance(model.layers[0], Flatten)\n assert isinstance(model.layers[1], Dense)\n assert model.layers[1].get_config()[\"units\"] == 10\n assert model.layers[1].get_config()[\"activation\"] == \"relu\"\n \n \ndef test_get_default_task():\n model = get_default_task()\n assert isinstance(model.layers[0], Flatten)\n assert isinstance(model.layers[1], Dense)\n assert isinstance(model.layers[2], Dense)\n assert isinstance(model.layers[3], Dense)\n assert model.layers[1].get_config()[\"units\"] == 10\n assert model.layers[1].get_config()[\"activation\"] == \"relu\"\n assert model.layers[2].get_config()[\"units\"] == 10\n assert model.layers[2].get_config()[\"activation\"] == \"relu\"\n assert model.layers[3].get_config()[\"units\"] == 1\n assert model.layers[3].get_config()[\"activation\"] == \"linear\"\n \n \ndef test_get_default_discriminator():\n model = get_default_discriminator()\n assert isinstance(model.layers[0], Flatten)\n assert isinstance(model.layers[1], Dense)\n assert isinstance(model.layers[2], Dense)\n assert isinstance(model.layers[3], Dense)\n assert model.layers[1].get_config()[\"units\"] == 10\n assert model.layers[1].get_config()[\"activation\"] == \"relu\"\n assert model.layers[2].get_config()[\"units\"] == 10\n assert model.layers[2].get_config()[\"activation\"] == \"relu\"\n assert model.layers[3].get_config()[\"units\"] == 1\n assert model.layers[3].get_config()[\"activation\"] == \"sigmoid\"\n\n\nscales = [-1, 0, 1., 0.1]\n\[email protected](\"lambda_\", scales)\ndef test_gradienthandler(lambda_):\n grad_handler = GradientHandler(lambda_)\n inputs = K.variable([1, 2, 3])\n assert np.all(grad_handler(inputs) == inputs)\n with tf.GradientTape() as tape:\n gradient = tape.gradient(grad_handler(inputs),\n inputs)\n assert np.all(gradient == lambda_ * np.ones(3))\n config = grad_handler.get_config()\n assert config['lambda_init'] == lambda_\n \n\n\ndef test_make_classification_da():\n Xs, ys, Xt, yt = make_classification_da()\n assert Xs.shape == (100, 2)\n assert len(ys) == 100\n assert Xt.shape == (100, 2)\n assert len(yt) == 100\n Xs, ys, Xt, yt = make_classification_da(1000, 10)\n assert Xs.shape == (1000, 10)\n assert len(ys) == 1000\n assert Xt.shape == (1000, 10)\n assert len(yt) == 1000\n\n\ndef test_make_regression_da():\n Xs, ys, Xt, yt = make_regression_da()\n assert Xs.shape == (100, 1)\n assert len(ys) == 100\n assert Xt.shape == (100, 1)\n assert len(yt) == 100\n Xs, ys, Xt, yt = make_regression_da(1000, 10)\n assert Xs.shape == (1000, 10)\n assert len(ys) == 1000\n assert Xt.shape == (1000, 10)\n assert len(yt) == 1000\n" ]
[ [ "numpy.full", "numpy.array", "numpy.array_equal", "tensorflow.GradientTape", "numpy.zeros", "tensorflow.keras.layers.Input", "tensorflow.keras.wrappers.scikit_learn.KerasClassifier", "tensorflow.keras.backend.variable", "sklearn.preprocessing.StandardScaler", "numpy.ones", "numpy.random.randn", "sklearn.linear_model.Ridge", "tensorflow.keras.Sequential", "tensorflow.keras.layers.Dense", "tensorflow.keras.Model", "tensorflow.keras.backend.mean", "sklearn.tree.DecisionTreeClassifier", "numpy.linspace" ] ]
Anon-Artist/autogluon
[ "f1f76b1b14feca29f39c3e3051070a260ee602aa" ]
[ "tabular/src/autogluon/tabular/models/tab_transformer/tab_transformer_model.py" ]
[ "\"\"\" TabTransformer model \"\"\"\nimport logging\nimport os\nimport time\n\nimport numpy as np\nimport pandas as pd\nfrom autogluon.core.utils.loaders import load_pkl\nfrom tqdm import tqdm\n\nfrom .hyperparameters.parameters import get_default_param\nfrom .hyperparameters.searchspaces import get_default_searchspace\nfrom ..abstract.abstract_model import AbstractNeuralNetworkModel\nfrom autogluon.core.utils import try_import_torch\nfrom ...features.feature_metadata import R_OBJECT, S_TEXT_NGRAM, S_TEXT_AS_CATEGORY\nfrom autogluon.core.constants import BINARY, REGRESSION, MULTICLASS\n\n\nlogger = logging.getLogger(__name__)\n\n\"\"\"\nTODO: Fix Mac OS X warning spam.\nThe error message is:\nCannot set number of intraop threads after parallel work has started or after set_num_threads call when using native parallel backend (function set_num_threads)\nThis has been investigated to be a harmless warning for training and running inference on TabTransformer.\nThis warning can occur with a very specific environment: torch 1.7, Mac OS X, Python 3.6/3.7, when using torch DataLoader.\nhttps://github.com/pytorch/pytorch/issues/46409\n\"\"\"\nclass TabTransformerModel(AbstractNeuralNetworkModel):\n \"\"\"\n Main TabTransformer model that inherits from AbstractModel.\n\n This model includes the full torch pipeline (TabNet) and the internal Transformer embeddings (TabTransformer).\n This file serves as the connection of all these internal models and architectures to AutoGluon.\n\n TabTransformer uses modifications to the typical Transformer architecture and the pretraining in BERT\n and applies them to the use case of tabular data. Specifically, this makes TabTransformer suitable for unsupervised\n training of Tabular data with a subsequent fine-tuning step on labeled data.\n \"\"\"\n params_file_name = \"tab_trans_params.pth\"\n\n def __init__(self, **kwargs):\n try_import_torch()\n super().__init__(**kwargs)\n import torch\n self._verbosity = None\n self._temp_file_name = \"tab_trans_temp.pth\"\n self._period_columns_mapping = None\n\n def _set_default_params(self):\n default_params = get_default_param()\n for param, val in default_params.items():\n self._set_default_param_value(param, val)\n\n def _get_default_auxiliary_params(self) -> dict:\n default_auxiliary_params = super()._get_default_auxiliary_params()\n extra_auxiliary_params = dict(\n ignored_type_group_raw=[R_OBJECT],\n ignored_type_group_special=[S_TEXT_NGRAM, S_TEXT_AS_CATEGORY],\n )\n default_auxiliary_params.update(extra_auxiliary_params)\n return default_auxiliary_params\n\n def _get_model(self):\n from .tab_model_base import TabNet\n # If we have already initialized the model, we don't need to do it again.\n model = TabNet(self.params['n_classes'], self.params['feature_dim'],\n self.params['num_output_layers'], self.device, self.params)\n if self.device.type == \"cuda\":\n model = model.cuda()\n\n return model\n\n # NOTE: Making an assumption that X_unlabeled will not have a different schema. Otherwise, we would need two\n # period_columns_mapping fields. One for X/X_val, another for X_unlabeled, which may have different columns.\n @staticmethod\n def _get_no_period_columns(columns):\n # Latest pytorch does not support . in module names. 
Therefore, we must replace the \".\".\n rename_columns = dict()\n for col in columns:\n new_col_name = col\n if \".\" in col:\n new_col_name = col.replace(\".\", \"_\")\n\n if new_col_name in rename_columns:\n for i in range(1, 100):\n append_col_name = new_col_name + \"_\" + str(i)\n if append_col_name not in rename_columns:\n new_col_name = append_col_name\n break\n else:\n raise RuntimeError(\"Tried 100 column renames to eliminate duplicates.\\n\"\n \"Please check similar columns with . or _ in them.\")\n\n # Mapping for every column\n rename_columns[col] = new_col_name\n\n return rename_columns\n\n def _preprocess(self, X, **kwargs):\n from .utils import TabTransformerDataset\n X = super()._preprocess(X=X, **kwargs)\n\n X = X.rename(columns=self._period_columns_mapping)\n encoders = self.params['encoders']\n data = TabTransformerDataset(X, encoders=encoders, problem_type=self.problem_type, col_info=self._types_of_features)\n data.encode(self.fe)\n\n return data\n\n def _preprocess_train(self, X, X_val=None, X_unlabeled=None, fe=None):\n \"\"\"\n Pre-processing specific to TabTransformer. Setting up feature encoders, renaming columns with periods in\n them (torch), and converting X, X_val, X_unlabeled into TabTransformerDataset's.\n \"\"\"\n from .utils import TabTransformerDataset\n\n X = self._preprocess_nonadaptive(X)\n if X_val is not None:\n X_val = self._preprocess_nonadaptive(X_val)\n if X_unlabeled is not None:\n X_unlabeled = self._preprocess_nonadaptive(X_unlabeled)\n\n\n self._period_columns_mapping = self._get_no_period_columns(X.columns)\n X = X.rename(columns=self._period_columns_mapping)\n\n if X_val is not None:\n X_val = X_val.rename(columns=self._period_columns_mapping)\n if X_unlabeled is not None:\n X_unlabeled = X_unlabeled.rename(columns=self._period_columns_mapping)\n\n self._types_of_features, _ = self._get_types_of_features(X, needs_extra_types=False)\n\n # Also need to rename the feature names in the types_of_features dictionary.\n for feature_dict in self._types_of_features:\n # Need to check that the value is in the mapping. 
Otherwise, we could be updating columns that have been dropped.\n feature_dict.update(('name', self._period_columns_mapping[v]) for k, v in feature_dict.items() if k == 'name' and v in self._period_columns_mapping)\n\n encoders = self.params['encoders']\n data = TabTransformerDataset(X, encoders=encoders, problem_type=self.problem_type, col_info=self._types_of_features)\n self.fe = fe\n if self.fe is not None:\n if X_unlabeled is None:\n unlab_data = None\n elif X_unlabeled is not None:\n unlab_data = TabTransformerDataset(X_unlabeled, encoders=encoders, problem_type=self.problem_type, col_info=self._types_of_features)\n if self.fe is None:\n if X_unlabeled is None:\n data.fit_feat_encoders()\n self.fe = data.feature_encoders\n unlab_data = None\n elif X_unlabeled is not None:\n unlab_data = TabTransformerDataset(X_unlabeled, encoders=encoders, problem_type=self.problem_type, col_info=self._types_of_features)\n unlab_data.fit_feat_encoders()\n self.fe = unlab_data.feature_encoders\n\n data.encode(self.fe)\n\n if X_val is not None:\n val_data = TabTransformerDataset(X_val, encoders=encoders, problem_type=self.problem_type, col_info=self._types_of_features)\n val_data.encode(self.fe)\n else:\n val_data = None\n\n if unlab_data is not None:\n unlab_data.encode(self.fe)\n\n return data, val_data, unlab_data\n\n def _epoch(self, net, loader_train, loader_val, y_val, optimizers, loss_criterion, pretext, state, scheduler, epoch,\n epochs, databar_disable, reporter, params):\n \"\"\"\n Helper function to run one epoch of training, essentially the \"inner loop\" of training.\n \"\"\"\n import torch\n from .utils import augmentation\n is_train = (optimizers is not None)\n net.train() if is_train else net.eval()\n total_loss, total_correct, total_num = 0.0, 0.0, 0\n data_bar = tqdm(loader_train, disable=databar_disable) if is_train else tqdm(loader_val, disable=databar_disable)\n\n with (torch.enable_grad() if is_train else torch.no_grad()):\n for data, target in data_bar:\n data, target = pretext.get(data, target)\n\n if self.device.type == \"cuda\":\n data, target = data.cuda(), target.cuda()\n pretext = pretext.cuda()\n\n if state in [None, 'finetune']:\n if self.params['num_augs'] > 0:\n data, target = augmentation(data, target, **params)\n out, _ = net(data)\n elif state == 'pretrain':\n _, out = net(data)\n else:\n raise NotImplementedError(\"state must be one of [None, 'pretrain', 'finetune']\")\n\n loss, correct = pretext(out, target)\n\n if is_train:\n for optimizer in optimizers:\n optimizer.zero_grad()\n loss.backward()\n for optimizer in optimizers:\n optimizer.step()\n\n total_num += 1\n total_loss += loss.item()\n\n if epochs == 1:\n train_test = 'Test'\n else:\n train_test = 'Train'\n\n val_metric = None\n if loader_val is not None and state != 'pretrain':\n val_metric = self.score(X=loader_val, y=y_val, eval_metric=self.stopping_metric,\n metric_needs_y_pred=self.stopping_metric.needs_pred)\n data_bar.set_description('{} Epoch: [{}/{}] Train Loss: {:.4f} Validation {}: {:.2f}'.format(\n train_test, epoch, epochs, total_loss / total_num, self.stopping_metric.name, val_metric))\n\n if reporter is not None:\n reporter(epoch=epoch+1, validation_performance=val_metric, train_loss=total_loss)\n\n else:\n data_bar.set_description(\n '{} Epoch: [{}/{}] Loss: {:.4f}'.format(train_test, epoch, epochs, total_loss / total_num))\n\n return total_loss / total_num, val_metric\n\n if scheduler is not None:\n scheduler.step()\n return total_loss / total_num\n\n def tt_fit(self, loader_train, 
loader_val=None, y_val=None, state=None, time_limit=None, reporter=None):\n \"\"\"\n Main training function for TabTransformer\n \"state\" must be one of [None, 'pretrain', 'finetune']\n None: corresponds to purely supervised learning\n pretrain: discriminative task will be a pretext task\n finetune: same as supervised learning except that the model base has\n exponentially decaying learning rate.\n \"\"\"\n import torch\n import torch.nn as nn\n import torch.optim as optim\n from . import pretexts\n\n start_time = time.time()\n pretext_tasks = pretexts.__dict__\n optimizers = []\n lr = self.params['lr']\n weight_decay = self.params['weight_decay']\n epochs = self.params['pretrain_epochs'] if state == 'pretrain' else self.params['epochs']\n epochs_wo_improve = self.params['epochs_wo_improve']\n\n if state is None:\n optimizers = [optim.Adam(self.model.parameters(), lr=lr, weight_decay=weight_decay)]\n pretext = pretext_tasks['SupervisedPretext'](self.problem_type, self.device)\n elif state == 'pretrain':\n optimizers = [optim.Adam(self.model.parameters(), lr=lr, weight_decay=weight_decay)]\n pretext = pretext_tasks['BERTPretext'](self.cat_feat_origin_cards, self.device, self.params['hidden_dim'])\n elif state == 'finetune':\n base_exp_decay = self.params['base_exp_decay']\n optimizer_fc = [optim.Adam(fc_layer.parameters(), lr=lr, weight_decay=weight_decay) for fc_layer in self.model.fc]\n optimizer_embeds = optim.Adam(self.model.embed.parameters(), lr=lr, weight_decay=weight_decay)\n scheduler = optim.lr_scheduler.ExponentialLR(optimizer_embeds, gamma=base_exp_decay) # TODO: Should we be using this in _epoch()?\n optimizers.extend(optimizer_fc)\n optimizers.append(optimizer_embeds)\n\n pretext = pretext_tasks['SupervisedPretext'](self.problem_type, self.device)\n\n else:\n raise NotImplementedError(\"state must be one of [None, 'pretrain', 'finetune']\")\n\n if self.problem_type == REGRESSION:\n loss_criterion = nn.MSELoss()\n else:\n loss_criterion = nn.CrossEntropyLoss()\n\n best_val_metric = -np.inf # higher = better\n best_val_epoch = 0\n best_loss = np.inf\n\n self._verbosity = self.params.get('verbosity', 2)\n if self._verbosity <= 1:\n verbose_eval = -1\n elif self._verbosity == 2:\n verbose_eval = 50\n elif self._verbosity == 3:\n verbose_eval = 10\n else:\n verbose_eval = 1\n\n for e in range(epochs):\n if e == 0:\n logger.log(15, \"TabTransformer architecture:\")\n logger.log(15, str(self.model))\n\n # Whether or not we want to suppress output based on our verbosity.\n databar_disable = e % verbose_eval != 0\n train_loss, val_metric = self._epoch(net=self.model, loader_train=loader_train, loader_val=loader_val, y_val=y_val,\n optimizers=optimizers, loss_criterion=loss_criterion, \\\n pretext=pretext, state=state, scheduler=None, epoch=e, epochs=epochs,\n databar_disable=databar_disable, reporter=reporter, params=self.params)\n\n # Early stopping for pretrain'ing based on loss.\n if state == 'pretrain':\n if train_loss < best_loss or e == 0:\n if train_loss < best_loss:\n best_loss = train_loss\n best_val_epoch = e\n else:\n if val_metric >= best_val_metric or e == 0:\n if loader_val is not None:\n if not np.isnan(val_metric):\n best_val_metric = val_metric\n\n best_val_epoch = e\n os.makedirs(os.path.dirname(self.path), exist_ok=True)\n torch.save(self.model, self.path + self._temp_file_name)\n\n # If time limit has exceeded or we haven't improved in some number of epochs, stop early.\n if e - best_val_epoch > epochs_wo_improve:\n break\n if time_limit:\n time_elapsed = 
time.time() - start_time\n time_left = time_limit - time_elapsed\n if time_left <= 0:\n logger.log(20, \"\\tRan out of time, stopping training early.\")\n break\n\n if loader_val is not None:\n try:\n self.model = torch.load(self.path + self._temp_file_name)\n os.remove(self.path + self._temp_file_name)\n except:\n pass\n logger.log(15, \"Best model found in epoch %d\" % best_val_epoch)\n\n def _fit(self, X_train, y_train, X_val=None, y_val=None, X_unlabeled=None, time_limit=None, reporter=None, **kwargs):\n import torch\n\n num_gpus = kwargs.get('num_gpus', None)\n if num_gpus is None:\n if torch.cuda.is_available():\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n elif num_gpus == 0:\n self.device = torch.device(\"cpu\")\n else:\n self.device = torch.device(\"cuda\")\n\n if num_gpus > 1:\n logger.warning(\"TabTransformer not yet configured to use more than 1 GPU. 'num_gpus' set to >1, but we will be using only 1 GPU.\")\n\n\n if self.problem_type ==REGRESSION:\n self.params['n_classes'] = 1\n elif self.problem_type ==BINARY:\n self.params['n_classes'] = 2\n elif self.problem_type ==MULTICLASS:\n self.params['n_classes'] = y_train.nunique()\n\n train, val, unlab = self._preprocess_train(X_train, X_val, X_unlabeled)\n\n num_cols = len(train.columns)\n if num_cols > self.params['max_columns']:\n raise NotImplementedError(\n f\"This dataset has {num_cols} columns and exceeds 'max_columns' == {self.params['max_columns']}.\\n\"\n f\"Which is set by default to ensure the TabTransformer model will not run out of memory.\\n\"\n f\"If you are confident you will have enough memory, set the 'max_columns' hyperparameter higher and try again.\\n\")\n\n if self.problem_type == REGRESSION:\n train.targets = torch.FloatTensor(list(y_train))\n val.targets = torch.FloatTensor(list(y_val))\n else:\n train.targets = torch.LongTensor(list(y_train))\n val.targets = torch.LongTensor(list(y_val))\n\n batch_size = self.params['batch_size']\n num_workers = self.params['num_workers']\n\n loader_train = train.build_loader(batch_size, num_workers, shuffle=True)\n loader_val = val.build_loader(batch_size, num_workers)\n loader_unlab = unlab.build_loader(batch_size, num_workers) if unlab is not None else None\n\n self.cat_feat_origin_cards = loader_train.cat_feat_origin_cards\n self.params['cat_feat_origin_cards'] = self.cat_feat_origin_cards\n\n self.model = self._get_model()\n\n if X_unlabeled is not None:\n # Can't spend all the time in pretraining, have to split it up.\n pretrain_time_limit = time_limit / 2 if time_limit is not None else time_limit\n pretrain_before_time = time.time()\n self.tt_fit(loader_unlab, loader_val, y_val, state='pretrain', time_limit=pretrain_time_limit, reporter=reporter)\n finetune_time_limit = time_limit - (time.time() - pretrain_before_time) if time_limit is not None else time_limit\n self.tt_fit(loader_train, loader_val, y_val, state='finetune', time_limit=finetune_time_limit, reporter=reporter)\n else:\n self.tt_fit(loader_train, loader_val, y_val, time_limit=time_limit, reporter=reporter)\n\n def _predict_proba(self, X, **kwargs):\n \"\"\"\n X (torch.tensor or pd.dataframe): data for model to give prediction probabilities\n returns: np.array of k-probabilities for each of the k classes. 
If k=2 we drop the second probability.\n \"\"\"\n import torch\n import torch.nn as nn\n from torch.utils.data import DataLoader\n from torch.autograd import Variable\n\n if isinstance(X, pd.DataFrame):\n # Preprocess here also calls our _preprocess, which creates a TTDataset.\n X = self.preprocess(X, **kwargs)\n loader = X.build_loader(self.params['batch_size'], self.params['num_workers'])\n elif isinstance(X, DataLoader):\n loader = X\n elif isinstance(X, torch.Tensor):\n X = X.rename(columns=self._get_no_period_columns(X))\n loader = X.build_loader(self.params['batch_size'], self.params['num_workers'])\n else:\n raise NotImplementedError(\n \"Attempting to predict against a non-supported data type. \\nNeeds to be a pandas DataFrame, torch DataLoader or torch Tensor.\")\n\n self.model.eval()\n softmax = nn.Softmax(dim=1)\n\n if self.problem_type == REGRESSION:\n outputs = torch.zeros([len(loader.dataset), 1])\n else:\n outputs = torch.zeros([len(loader.dataset), self.num_classes])\n\n iter = 0\n for data, _ in loader:\n if self.device.type == \"cuda\":\n data = data.cuda()\n with torch.no_grad():\n data = Variable(data)\n prob, _ = self.model(data)\n batch_size = len(prob)\n if self.problem_type != REGRESSION:\n prob = softmax(prob)\n\n outputs[iter:(iter + batch_size)] = prob\n iter += batch_size\n\n if self.problem_type == BINARY:\n return outputs[:, 1].cpu().numpy()\n elif self.problem_type == REGRESSION:\n outputs = outputs.flatten()\n\n return outputs.cpu().numpy()\n\n def _get_default_searchspace(self):\n return get_default_searchspace()\n\n # TODO: Consider HPO for pretraining with unlabeled data. (Potential future work)\n # TODO: Does not work correctly when cuda is enabled.\n def _hyperparameter_tune(self, X_train, y_train, X_val, y_val, scheduler_options, **kwargs):\n from .utils import tt_trial\n import torch\n\n time_start = time.time()\n self._set_default_searchspace()\n scheduler_func = scheduler_options[0]\n scheduler_options = scheduler_options[1]\n\n if scheduler_func is None or scheduler_options is None:\n raise ValueError(\"scheduler_func and scheduler_options cannot be None for hyperparameter tuning\")\n\n util_args = dict(\n X_train=X_train,\n y_train=y_train,\n X_val=X_val,\n y_val=y_val,\n model=self,\n time_start=time_start,\n time_limit=scheduler_options['time_out']\n )\n\n params_copy = self.params.copy()\n tt_trial.register_args(util_args=util_args, **params_copy)\n\n scheduler = scheduler_func(tt_trial, **scheduler_options)\n scheduler.run()\n scheduler.join_jobs()\n\n scheduler.get_training_curves(plot=False, use_legend=False)\n\n return self._get_hpo_results(scheduler=scheduler, scheduler_options=scheduler_options, time_start=time_start)\n\n def save(self, path: str = None, verbose=True) -> str:\n import torch\n if path is None:\n path = self.path\n\n params_filepath = path + self.params_file_name\n\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n temp_model = self.model\n if self.model is not None:\n torch.save(self.model, params_filepath)\n\n self.model = None # Avoiding pickling the weights.\n modelobj_filepath = super().save(path=path, verbose=verbose)\n\n self.model = temp_model\n\n return modelobj_filepath\n\n @classmethod\n def load(cls, path: str, reset_paths=False, verbose=True):\n import torch\n obj: TabTransformerModel = load_pkl.load(path=path + cls.model_file_name, verbose=verbose)\n if reset_paths:\n obj.set_contexts(path)\n\n obj.model = torch.load(path + cls.params_file_name)\n\n return obj\n\n \"\"\"\n List of features to add (Updated 
by Anthony Galczak 11-19-20):\n \n 1) Allow for saving of pretrained model for future use. This will be done in a future PR as the \n \"pretrain API change\".\n \n 2) Investigate options for when the unlabeled schema does not match the training schema. Currently,\n we do not allow such mismatches and the schemas must match exactly. We can investigate ways to use\n less or more columns from the unlabeled data. This will likely require a design meeting.\n \n 3) Bug where HPO doesn't work when cuda is enabled.\n \"RuntimeError: Cannot re-initialize CUDA in forked subprocess. To use CUDA with multiprocessing, you must use the 'spawn' start method\"\n Update: This will likely be fixed in a future change to HPO in AutoGluon.\n \"\"\"\n" ]
[ [ "torch.device", "numpy.isnan", "torch.nn.MSELoss", "torch.nn.Softmax", "torch.autograd.Variable", "torch.save", "torch.enable_grad", "torch.no_grad", "torch.optim.lr_scheduler.ExponentialLR", "torch.cuda.is_available", "torch.load", "torch.nn.CrossEntropyLoss" ] ]
LuisFMCuriel/ai-traineree
[ "121da3ea48992d9db3ede3634e4e5f48f50f4cc3", "121da3ea48992d9db3ede3634e4e5f48f50f4cc3" ]
[ "ai_traineree/runners/env_runner.py", "ai_traineree/policies.py" ]
[ "import json\nimport logging\nimport os\nimport sys\nimport time\nfrom collections import deque\nfrom pathlib import Path\nfrom typing import Any, Iterable, List, Optional, Tuple\n\nimport numpy as np\nimport torch.multiprocessing as mp\n\nfrom ai_traineree.agents import AgentBase\nfrom ai_traineree.experience import Experience\nfrom ai_traineree.loggers import DataLogger\nfrom ai_traineree.types import RewardType, TaskType\nfrom ai_traineree.utils import save_gif\n\nFRAMES_PER_SEC = 45\n\nlogging.basicConfig(\n stream=sys.stdout, level=logging.INFO, format=\"%(asctime)s %(levelname)-8s %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\"\n)\n\n\nclass EnvRunner:\n \"\"\"\n EnvRunner, short for Environment Runner, is meant to be used as module that runs your experiments.\n It's expected that the environments are wrapped in a Task which has typical step and act methods.\n The agent can be any agent which *makes sense* as there aren't any checks like whether the output is discrete.\n\n Examples:\n >>> env_runner = EnvRunner(task, agent)\n >>> env_runner.run()\n \"\"\"\n\n logger = logging.getLogger(\"EnvRunner\")\n\n def __init__(self, task: TaskType, agent: AgentBase, max_iterations: int = int(1e5), **kwargs):\n \"\"\"\n Expects the environment to come as the TaskType and the agent as the AgentBase.\n\n Keyword Arguments:\n window_len (int): Length of the score averaging window.\n data_logger: An instance of Data Logger, e.g. TensorboardLogger.\n\n \"\"\"\n self.task = task\n self.agent = agent\n self.max_iterations = max_iterations\n self.model_path = f\"{task.name}_{agent.model}\"\n self.state_dir = \"run_states\"\n\n self.episode = 0\n self.iteration = 0\n self.all_scores = []\n self.all_iterations = []\n self.window_len = kwargs.get(\"window_len\", 100)\n self.scores_window = deque(maxlen=self.window_len)\n self.__images = []\n\n self.data_logger: Optional[DataLogger] = kwargs.get(\"data_logger\")\n self.logger.info(\"DataLogger: %s\", str(self.data_logger))\n if self.data_logger:\n self.data_logger.set_hparams(self.agent.hparams, {})\n\n self._debug_log: bool = bool(kwargs.get(\"debug_log\", False))\n self._actions: List[Any] = []\n self._states: List[Any] = []\n self._rewards: List[Any] = []\n self._dones: List[Any] = []\n\n self.seed(kwargs.get(\"seed\"))\n\n def __str__(self) -> str:\n return f\"EnvRunner<{self.task.name}, {self.agent.model}>\"\n\n def seed(self, seed):\n if isinstance(seed, (int, float)):\n self.agent.seed(seed)\n self.task.seed(seed)\n\n def reset(self):\n \"\"\"Resets the EnvRunner. 
The task env and the agent are preserved.\"\"\"\n self.episode = 0\n self.all_scores = []\n self.all_iterations = []\n self.scores_window = deque(maxlen=self.window_len)\n\n def interact_episode(\n self,\n eps: float = 0,\n max_iterations: Optional[int] = None,\n render: bool = False,\n render_gif: bool = False,\n log_interaction_freq: Optional[int] = 10,\n full_log_interaction_freq: Optional[int] = 1000,\n ) -> Tuple[RewardType, int]:\n score = 0\n obs = self.task.reset()\n iterations = 0\n max_iterations = max_iterations if max_iterations is not None else self.max_iterations\n done = False\n\n # Only gifs require keeping (S, A, R) list\n if render_gif:\n self.__images = []\n\n while iterations < max_iterations and not done:\n iterations += 1\n self.iteration += 1\n if render:\n self.task.render(\"human\")\n time.sleep(1.0 / FRAMES_PER_SEC)\n\n experience = Experience(obs=obs)\n experience = self.agent.act(experience, eps)\n assert experience.get(\"action\") is not None, \"Act method should update action on experience\"\n action = experience.action\n\n next_obs, reward, done, _ = self.task.step(action)\n self._rewards.append((self.iteration, reward))\n\n if self._debug_log:\n self._actions.append((self.iteration, action))\n self._states.append((self.iteration, obs))\n self._dones.append((self.iteration, done))\n\n score += float(reward)\n if render_gif:\n # OpenAI gym still renders the image to the screen even though it shouldn't. Eh.\n img = self.task.render(mode=\"rgb_array\")\n self.__images.append(img)\n\n experience.update(action=action, reward=reward, next_obs=next_obs, done=done)\n self.agent.step(experience)\n\n # Plot interactions every `log_interaction_freq` iterations.\n # Plot full log (including weights) every `full_log_interaction_freq` iterations.\n if (log_interaction_freq and (iterations % log_interaction_freq) == 0) or (\n full_log_interaction_freq and (self.iteration % full_log_interaction_freq) == 0\n ):\n full_log = full_log_interaction_freq and (iterations % full_log_interaction_freq) == 0\n self.log_data_interaction(full_log=full_log)\n\n # n -> n+1 => S(n) <- S(n+1)\n obs = next_obs\n\n return score, iterations\n\n def run(\n self,\n reward_goal: float = 100.0,\n max_episodes: int = 2000,\n eps_start: float = 1.0,\n eps_end: float = 0.01,\n eps_decay: float = 0.995,\n log_episode_freq: int = 1,\n log_interaction_freq: int = 10,\n gif_every_episodes: Optional[int] = None,\n checkpoint_every=200,\n force_new: bool = False,\n ) -> List[float]:\n \"\"\"\n Evaluates the agent in the environment.\n The evaluation will stop when the agent reaches the `reward_goal` in the averaged last `self.window_len`, or\n when the number of episodes reaches the `max_episodes`.\n\n To help debugging one can set the `gif_every_episodes` to a positive integer which relates to how often a gif\n of the episode evaluation is written to the disk.\n\n Every `checkpoint_every` (default: 200) iterations the Runner will store current state of the runner\n and the agent. These states can be used to resume previous run. 
By default the runner checks whether\n there is ongoing run for the combination of the environment and the agent.\n\n Parameters:\n reward_goal: Goal to achieve on the average reward.\n max_episode: After how many episodes to stop regardless of the score.\n eps_start: Epsilon-greedy starting value.\n eps_end: Epislon-greeedy lowest value.\n eps_decay: Epislon-greedy decay value, eps[i+1] = eps[i] * eps_decay.\n log_episode_freq: Number of episodes between state logging.\n gif_every_episodes: Number of episodes between storing last episode as a gif.\n checkpoint_every: Number of episodes between storing the whole state, so that\n in case of failure it can be safely resumed from it.\n force_new: Flag whether to resume from previously stored state (False), or to\n start learning from a clean state (True).\n\n Returns:\n All obtained scores from all episodes.\n\n \"\"\"\n self.epsilon = eps_start\n self.reset()\n if not force_new:\n self.load_state(file_prefix=self.model_path)\n mean_scores = []\n epsilons = []\n\n while self.episode < max_episodes:\n self.episode += 1\n render_gif = gif_every_episodes is not None and (self.episode % gif_every_episodes) == 0\n score, iterations = self.interact_episode(\n self.epsilon, render_gif=render_gif, log_interaction_freq=log_interaction_freq\n )\n\n self.scores_window.append(score)\n self.all_iterations.append(iterations)\n self.all_scores.append(score)\n\n mean_scores.append(sum(self.scores_window) / len(self.scores_window))\n epsilons.append(self.epsilon)\n\n self.epsilon = max(eps_end, eps_decay * self.epsilon)\n\n if self.episode % log_episode_freq == 0:\n last_episodes = [self.episode - i for i in range(log_episode_freq)[::-1]]\n self.info(\n episodes=last_episodes,\n iterations=self.all_iterations[-log_episode_freq:],\n scores=self.all_scores[-log_episode_freq:],\n mean_scores=mean_scores[-log_episode_freq:],\n epsilons=epsilons[-log_episode_freq:],\n loss=self.agent.loss,\n )\n\n if render_gif and len(self.__images):\n gif_path = \"gifs/{}_e{}.gif\".format(self.model_path, str(self.episode).zfill(len(str(max_episodes))))\n save_gif(gif_path, self.__images)\n self.__images = []\n\n if mean_scores[-1] >= reward_goal and len(self.scores_window) == self.window_len:\n print(f\"Environment solved after {self.episode} episodes!\\tAverage Score: {mean_scores[-1]:.2f}\")\n self.save_state(self.model_path)\n self.agent.save_state(f\"{self.model_path}_agent.net\")\n break\n\n if self.episode % checkpoint_every == 0:\n self.save_state(self.model_path)\n\n # Store hyper parameters and experiment metrics in logger so that it's easier to compare runs\n if self.data_logger:\n end_metrics = {\n \"hparam/total_iterations\": sum(self.all_iterations),\n \"hparam/total_episodes\": len(self.all_iterations),\n \"hparam/score\": mean_scores[-1],\n }\n self.data_logger.set_hparams(self.agent.hparams, end_metrics, run_name=\"hparams\")\n\n return self.all_scores\n\n def info(self, **kwargs):\n \"\"\"\n Writes out current state into provided loggers.\n Writting to stdout is done through Python's logger, whereas all metrics\n are supposed to be handled via DataLogger. Currently supported are Tensorboard\n and Neptune (neptune.ai). 
To use one of these `data_logger` is expected.\n \"\"\"\n if self.data_logger is not None:\n self.log_episode_metrics(**kwargs)\n self.log_data_interaction(**kwargs)\n if self.logger is not None:\n self.log_logger(**kwargs)\n\n def log_logger(self, **kwargs):\n \"\"\"Writes out env logs via logger (either stdout or a file).\"\"\"\n episode = kwargs.get(\"episodes\")[-1]\n score = kwargs.get(\"scores\")[-1]\n iteration = kwargs.get(\"iterations\")[-1]\n mean_score = kwargs.get(\"mean_scores\")[-1]\n epsilon = kwargs.get(\"epsilons\")[-1]\n loss = kwargs.get(\"loss\", {})\n line_chunks = [f\"Episode {episode};\"]\n line_chunks += [f\"Iter: {iteration};\"]\n line_chunks += [f\"Current Score: {score:.2f};\"]\n line_chunks += [f\"Average Score: {mean_score:.2f};\"]\n line_chunks += [f\"{loss_name.capitalize()}: {loss_value:10.4f}\" for (loss_name, loss_value) in loss.items()]\n line_chunks += [f\"Epsilon: {epsilon:5.3f};\"]\n line = \"\\t\".join(line_chunks)\n self.logger.info(line.format(**kwargs))\n\n def log_episode_metrics(self, **kwargs):\n \"\"\"Uses DataLogger, e.g. TensorboardLogger, to store env metrics.\"\"\"\n episodes: List[int] = kwargs.get(\"episodes\", [])\n for episode, epsilon in zip(episodes, kwargs.get(\"epsilons\", [])):\n self.data_logger.log_value(\"episode/epsilon\", epsilon, episode)\n\n for episode, mean_score in zip(episodes, kwargs.get(\"mean_scores\", [])):\n self.data_logger.log_value(\"episode/avg_score\", mean_score, episode)\n\n for episode, score in zip(episodes, kwargs.get(\"scores\", [])):\n self.data_logger.log_value(\"episode/score\", score, episode)\n\n for episode, iteration in zip(episodes, kwargs.get(\"iterations\", [])):\n self.data_logger.log_value(\"episode/iterations\", iteration, episode)\n\n def log_data_interaction(self, **kwargs):\n if self.data_logger is None:\n return\n\n if hasattr(self.agent, \"log_metrics\"):\n self.agent.log_metrics(self.data_logger, self.iteration, full_log=kwargs.get(\"full_log\", False))\n else:\n for loss_name, loss_value in kwargs.get(\"loss\", {}).items():\n self.data_logger.log_value(f\"loss/{loss_name}\", loss_value, self.iteration)\n\n while self._debug_log and self._states:\n step, states = self._states.pop(0)\n states = states if isinstance(states, Iterable) else [states]\n self.data_logger.log_values_dict(\"env/states\", {str(i): a for i, a in enumerate(states)}, step)\n\n while self._debug_log and self._actions:\n step, actions = self._actions.pop(0)\n actions = actions if isinstance(actions, Iterable) else [actions]\n self.data_logger.log_values_dict(\"env/action\", {str(i): a for i, a in enumerate(actions)}, step)\n\n while self._debug_log and self._rewards:\n step, rewards = self._rewards.pop(0)\n self.data_logger.log_value(\"env/reward\", float(rewards), step)\n\n while self._debug_log and self._dones:\n step, dones = self._dones.pop(0)\n self.data_logger.log_value(\"env/done\", int(dones), step)\n\n def save_state(self, state_name: str):\n \"\"\"Saves the current state of the runner and the agent.\n\n Files are stored with appended episode number.\n Agents are saved with their internal saving mechanism.\n \"\"\"\n state = {\n \"tot_iterations\": sum(self.all_iterations),\n \"episode\": self.episode,\n \"epsilon\": self.epsilon,\n \"score\": self.all_scores[-1],\n \"average_score\": sum(self.scores_window) / len(self.scores_window),\n \"loss\": self.agent.loss,\n }\n\n Path(self.state_dir).mkdir(parents=True, exist_ok=True)\n self.agent.save_state(f\"{self.state_dir}/{state_name}_e{self.episode}.agent\")\n 
with open(f\"{self.state_dir}/{state_name}_e{self.episode}.json\", \"w\") as f:\n json.dump(state, f)\n\n def load_state(self, file_prefix: str):\n \"\"\"\n Loads state with the highest episode value for given agent and environment.\n \"\"\"\n try:\n state_files = list(\n filter(lambda f: f.startswith(file_prefix) and f.endswith(\"json\"), os.listdir(self.state_dir))\n )\n recent_episode_num = max([int(f[f.index(\"_e\") + 2 : f.index(\".\")]) for f in state_files])\n state_name = [n for n in state_files if n.endswith(f\"_e{recent_episode_num}.json\")][0][:-5]\n except Exception:\n self.logger.warning(\"Couldn't load state. Forcing restart.\")\n return\n\n self.logger.info(\"Loading saved state under: %s/%s.json\", self.state_dir, state_name)\n with open(f\"{self.state_dir}/{state_name}.json\", \"r\") as f:\n state = json.load(f)\n self.episode = state.get(\"episode\")\n self.epsilon = state.get(\"epsilon\")\n\n self.all_scores.append(state.get(\"score\"))\n self.all_iterations = []\n\n avg_score = state.get(\"average_score\")\n for _ in range(min(self.window_len, self.episode)):\n self.scores_window.append(avg_score)\n\n self.logger.info(\"Loading saved agent state: %s/%s.agent\", self.state_dir, state_name)\n self.agent.load_state(path=f\"{self.state_dir}/{state_name}.agent\")\n self.agent.loss = state.get(\"loss\", 0)\n\n\nclass MultiSyncEnvRunner:\n \"\"\"Execute multiple environments/tasks concurrently with sync steps.\n\n All environments are distributed to separate processes. The MultiSyncEnvRunner\n acts as a manager that sends data between processes.\n\n Currently this class only supports training one agent at a time. The agent\n is expected handle stepping multiple steps at a time.\n \"\"\"\n\n logger = logging.getLogger(\"MultiSyncEnvRunner\")\n\n def __init__(self, tasks: List[TaskType], agent: AgentBase, max_iterations: int = int(1e5), **kwargs):\n \"\"\"\n Expects the environment to come as the TaskType and the agent as the AgentBase.\n\n Keyword Arguments:\n window_len (int): Length of the score averaging window.\n data_logger: An instance of Data Logger, e.g. TensorboardLogger.\n \"\"\"\n self.tasks = tasks\n self.task_num = len(tasks)\n self.num_processes = int(kwargs.get(\"processes\", len(tasks)))\n self.processes = []\n self.parent_conns = []\n self.child_conns = []\n\n self.agent = agent\n self.max_iterations = max_iterations\n self.model_path = f\"{tasks[0].name}_{agent.model}\"\n self.state_dir = \"run_states\"\n\n self.episode = 0\n self.iteration = 0\n self.all_scores = []\n self.all_iterations = []\n self.window_len = kwargs.get(\"window_len\", 100)\n self.scores_window = deque(maxlen=self.window_len)\n\n self.data_logger: Optional[DataLogger] = kwargs.get(\"data_logger\")\n self.logger.info(\"DataLogger: %s\", str(self.data_logger))\n\n def __str__(self) -> str:\n return f\"MultiSyncEnvRunner<{[t.name for t in self.tasks]}, {self.agent.model}>\"\n\n def __del__(self):\n try:\n self.close_all()\n except Exception:\n pass\n\n def reset(self):\n \"\"\"Resets the EnvRunner. 
The task env and the agent are preserved.\"\"\"\n self.episode = 0\n self.all_scores = []\n self.all_iterations = []\n self.scores_window = deque(maxlen=self.window_len)\n\n @staticmethod\n def step_task(conn, task):\n iteration = 0\n task.reset()\n while True:\n received = conn.recv()\n\n if received == \"STOP\":\n conn.close()\n return\n\n if received == \"RESET\":\n conn.send(task.reset())\n iteration = 0\n continue\n\n t_idx, state, action = received\n iteration += 1\n task_out = task.step(action)\n next_state, reward, done, _ = task_out\n\n conn.send(\n {\n \"idx\": t_idx,\n \"state\": state,\n \"action\": action,\n \"next_state\": next_state,\n \"reward\": reward,\n \"done\": done,\n \"iteration\": iteration,\n }\n )\n\n def init_network(self):\n for p_idx in range(self.num_processes):\n parent_conn, child_conn = mp.Pipe()\n self.parent_conns.append(parent_conn)\n self.child_conns.append(child_conn)\n\n process = mp.Process(target=self.step_task, args=(child_conn, self.tasks[p_idx]))\n self.processes.append(process)\n\n def run(\n self,\n reward_goal: float = 100.0,\n max_episodes: int = 2000,\n max_iterations: int = int(1e6),\n eps_start: float = 1.0,\n eps_end: float = 0.01,\n eps_decay: float = 0.995,\n log_episode_freq: int = 1,\n checkpoint_every=200,\n force_new=False,\n ):\n \"\"\"\n Evaluates the agent in the environment.\n The evaluation will stop when the agent reaches the `reward_goal` in the averaged last `self.window_len`, or\n when the number of episodes reaches the `max_episodes`.\n\n To help debugging one can set the `gif_every_episodes` to a positive integer which relates to how often a gif\n of the episode evaluation is written to the disk.\n\n Parameters:\n reward_goal: Goal to achieve on the average reward.\n max_episode: After how many episodes to stop regardless of the score.\n eps_start: Epsilon-greedy starting value.\n eps_end: Epislon-greeedy lowest value.\n eps_decay: Epislon-greedy decay value, eps[i+1] = eps[i] * eps_decay.\n log_episode_freq: Number of episodes between state logging.\n checkpoint_every: Number of episodes between storing the whole state, so that\n in case of failure it can be safely resumed from it.\n force_new: Flag whether to resume from previously stored state (False), or to\n start learning from a clean state (True).\n\n Returns:\n All obtained scores from all episodes.\n\n \"\"\"\n\n # This method is mainly a wrapper around self._run to make it safer.\n # Somehow better option might be to add decorators but given unsure existance\n # of this class we'll refrain from doing so right now.\n try:\n # Initiate all processes and connections\n self.init_network()\n\n return self._run(\n reward_goal,\n max_episodes,\n max_iterations,\n eps_start,\n eps_end,\n eps_decay,\n log_episode_freq,\n checkpoint_every,\n force_new,\n )\n\n finally:\n # All connections and processes need to be closed regardless of any exception.\n # Don't let get away. 
Who knows what zombie process are capable of?!\n self.close_all()\n\n def _run(\n self,\n reward_goal: float = 100.0,\n max_episodes: int = 2000,\n max_iterations: int = 1000,\n eps_start: float = 1.0,\n eps_end: float = 0.01,\n eps_decay: float = 0.995,\n log_episode_freq: int = 1,\n checkpoint_every=200,\n force_new=False,\n ):\n self.epsilon = eps_start\n self.reset()\n if not force_new:\n self.load_state(file_prefix=self.model_path)\n\n scores = np.zeros(self.task_num)\n iterations = np.zeros(self.task_num)\n\n observations = np.empty((self.num_processes, self.tasks[0].obs_size), dtype=np.float32)\n next_states = observations.copy()\n actions = np.empty((len(self.tasks), self.tasks[0].action_size), dtype=np.float32)\n dones = np.empty(len(self.tasks))\n rewards = np.empty(len(self.tasks))\n\n # Start processes just before using them\n for process in self.processes:\n process.start()\n\n for idx, conn in enumerate(self.parent_conns):\n conn.send(\"RESET\")\n reset_state = conn.recv()\n observations[idx] = reset_state\n\n mean_scores = []\n epsilons = []\n mean_score = -float(\"inf\")\n\n while self.episode < max_episodes:\n self.iteration += self.task_num\n iterations += 1\n\n experience = Experience(obs=observations)\n experience = self.agent.act(experience, self.epsilon)\n actions = experience.action if self.task_num != 1 else [experience.action]\n assert isinstance(actions, list), \"For many agents needs to be list\"\n experience.update(action=actions)\n\n for t_idx in range(self.task_num):\n self.parent_conns[t_idx].send((t_idx, observations[t_idx], actions[t_idx]))\n\n for t_idx in range(self.task_num):\n obj = self.parent_conns[t_idx].recv()\n\n idx = obj[\"idx\"]\n rewards[idx] = obj[\"reward\"]\n observations[idx] = obj[\"state\"]\n actions[idx] = obj[\"action\"]\n next_states[idx] = obj[\"next_state\"]\n dones[idx] = obj[\"done\"]\n\n iterations[idx] = obj[\"iteration\"]\n scores[idx] += obj[\"reward\"]\n\n # All recently evaluated SARS are passed at the same time\n experience.update(reward=rewards, obs=observations, action=actions, next_obs=next_states, done=dones)\n self.agent.step(experience)\n\n for idx in range(self.task_num):\n if not (dones[idx] or iterations[idx] >= max_iterations):\n continue\n\n self.parent_conns[idx].send(\"RESET\")\n next_states[idx] = self.parent_conns[idx].recv()\n\n self.scores_window.append(scores[idx])\n self.all_scores.append(scores[idx])\n scores[idx] = 0\n\n self.all_iterations.append(iterations[idx])\n\n self.episode += 1\n mean_score: float = sum(self.scores_window) / len(self.scores_window)\n mean_scores.append(mean_score)\n epsilons.append(self.epsilon)\n\n # Log only once per evaluation, and outside s\n if self.episode % log_episode_freq == 0:\n last_episodes = [self.episode - i for i in range(log_episode_freq)[::-1]]\n self.info(\n episodes=last_episodes,\n iterations=self.all_iterations[-log_episode_freq:],\n scores=self.all_scores[-log_episode_freq:],\n mean_scores=mean_scores[-log_episode_freq:],\n epsilons=epsilons[-log_episode_freq:],\n loss=self.agent.loss,\n )\n\n if self.episode % checkpoint_every == 0:\n self.save_state(self.model_path)\n\n observations = next_states\n\n self.epsilon = max(eps_end, eps_decay * self.epsilon)\n\n if mean_score >= reward_goal and len(self.scores_window) == self.window_len:\n print(f\"Environment solved after {self.episode} episodes!\\tAverage Score: {mean_score:.2f}\")\n self.save_state(self.model_path)\n self.agent.save_state(f\"{self.model_path}_agent.net\")\n break\n return self.all_scores\n\n 
def close_all(self):\n while self.parent_conns:\n self.parent_conns.pop(0).close()\n\n while self.child_conns:\n self.child_conns.pop(0).close()\n\n while self.processes:\n p = self.processes.pop(0)\n p.terminate()\n p.join()\n\n def info(self, **kwargs):\n \"\"\"\n Writes out current state into provided loggers.\n Writting to stdout is done through Python's logger, whereas all metrics\n are supposed to be handled via DataLogger. Currently supported are Tensorboard\n and Neptune (neptune.ai). To use one of these `data_logger` is expected.\n \"\"\"\n if self.data_logger is not None:\n self.log_episode_metrics(**kwargs)\n self.log_data_interaction(**kwargs)\n if self.logger is not None:\n self.log_logger(**kwargs)\n\n def log_logger(self, **kwargs):\n \"\"\"Writes out env logs via logger (either stdout or a file).\"\"\"\n episode = kwargs.get(\"episodes\")[-1]\n score = kwargs.get(\"scores\")[-1]\n iteration = kwargs.get(\"iterations\")[-1]\n loss = kwargs.get(\"loss\", {})\n mean_score = kwargs.get(\"mean_scores\")[-1]\n epsilon = kwargs.get(\"epsilons\")[-1]\n line_chunks = [f\"Episode {episode};\"]\n line_chunks += [f\"Iter: {iteration};\"]\n line_chunks += [f\"Current Score: {score:.2f};\"]\n line_chunks += [f\"Average Score: {mean_score:.2f};\"]\n line_chunks += [f\"{loss_name.capitalize()}: {loss_value:10.4f}\" for (loss_name, loss_value) in loss.items()]\n line_chunks += [f\"Epsilon: {epsilon:5.3f};\"]\n line = \"\\t\".join(line_chunks)\n self.logger.info(line.format(**kwargs))\n\n def log_episode_metrics(self, **kwargs):\n \"\"\"Uses data_logger, e.g. Tensorboard, to store env metrics.\"\"\"\n episodes: List[int] = kwargs.get(\"episodes\", [])\n for episode, epsilon in zip(episodes, kwargs.get(\"epsilons\", [])):\n self.data_logger.log_value(\"episode/epsilon\", epsilon, episode)\n\n for episode, mean_score in zip(episodes, kwargs.get(\"mean_scores\", [])):\n self.data_logger.log_value(\"episode/avg_score\", mean_score, episode)\n\n for episode, score in zip(episodes, kwargs.get(\"scores\", [])):\n self.data_logger.log_value(\"episode/score\", score, episode)\n\n for episode, iteration in zip(episodes, kwargs.get(\"iterations\", [])):\n self.data_logger.log_value(\"episode/iterations\", iteration, episode)\n\n def log_data_interaction(self, **kwargs):\n if self.data_logger and hasattr(self.agent, \"log_metrics\"):\n self.agent.log_metrics(self.data_logger, self.iteration, full_log=kwargs.get(\"full_log\", False))\n\n def save_state(self, state_name: str):\n \"\"\"Saves the current state of the runner and the agent.\n\n Files are stored with appended episode number.\n Agents are saved with their internal saving mechanism.\n \"\"\"\n state = {\n \"tot_iterations\": sum(self.all_iterations),\n \"episode\": self.episode,\n \"epsilon\": self.epsilon,\n \"score\": self.all_scores[-1],\n \"average_score\": sum(self.scores_window) / len(self.scores_window),\n \"loss\": self.agent.loss,\n }\n\n Path(self.state_dir).mkdir(parents=True, exist_ok=True)\n self.agent.save_state(f\"{self.state_dir}/{state_name}_e{self.episode}.agent\")\n with open(f\"{self.state_dir}/{state_name}_e{self.episode}.json\", \"w\") as f:\n json.dump(state, f)\n\n def load_state(self, file_prefix: str):\n \"\"\"\n Loads state with the highest episode value for given agent and environment.\n \"\"\"\n try:\n state_files = list(\n filter(lambda f: f.startswith(file_prefix) and f.endswith(\"json\"), os.listdir(self.state_dir))\n )\n recent_episode_num = max([int(f[f.index(\"_e\") + 2 : f.index(\".\")]) for f in 
state_files])\n state_name = [n for n in state_files if n.endswith(f\"_e{recent_episode_num}.json\")][0][:-5]\n except Exception:\n self.logger.warning(\"Couldn't load state. Forcing restart.\")\n return\n\n self.logger.info(\"Loading saved state under: %s/%s.json\", self.state_dir, state_name)\n with open(f\"{self.state_dir}/{state_name}.json\", \"r\") as f:\n state = json.load(f)\n self.episode = state.get(\"episode\")\n self.epsilon = state.get(\"epsilon\")\n\n self.all_scores.append(state.get(\"score\"))\n self.all_iterations = []\n\n avg_score = state.get(\"average_score\")\n for _ in range(min(self.window_len, self.episode)):\n self.scores_window.append(avg_score)\n\n self.logger.info(\"Loading saved agent state: %s/%s.agent\", self.state_dir, state_name)\n self.agent.load_state(f\"{self.state_dir}/{state_name}.agent\")\n self.agent.loss = state.get(\"loss\", 0)\n", "import math\nfrom functools import lru_cache\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions import Beta, Dirichlet, MultivariateNormal, Normal\nfrom torch.distributions.distribution import Distribution\n\nfrom ai_traineree.networks import NetworkType\nfrom ai_traineree.networks.bodies import FcNet\nfrom ai_traineree.types import FeatureType\n\n\nclass PolicyType(NetworkType):\n param_dim: int\n\n\nclass MultivariateGaussianPolicySimple(PolicyType):\n \"\"\"\n Multivariate Gaussian (Normal) Policy.\n\n Simplicity of this class, compared to `MultivariateGaussianPolicy`, is due to\n the assumption that the covariance is sample independent and it is a trainable\n parameter.\n \"\"\"\n\n param_dim = 1\n\n def __init__(\n self, size: int, std_init: float = 1.0, std_min: float = 0.1, std_max: float = 3.0, device=None, **kwargs\n ):\n \"\"\"\n Parameters:\n size: Size of the observation.\n batch_size: Expected size of batch. Helps in memory assignments.\n std_init: (default 2) Initial value for covariance's diagonal. All values start the same.\n std_min: Minimum value for standard deviation.\n std_max: Maximum value for standard deviation.\n device: Device where to allocate memory. 
CPU or CUDA.\n\n \"\"\"\n super(MultivariateGaussianPolicySimple, self).__init__()\n self.size = size\n self.dist = Normal if size == 1 else MultivariateNormal\n self.std_min = std_min\n self.std_max = std_max\n std_init = float(max(min(std_max, std_init), std_min))\n self.std = nn.Parameter(torch.full((self.size,), std_init, device=device))\n\n @staticmethod\n @lru_cache(maxsize=10)\n def _empty_std(batch_size: int, size: int, device):\n return torch.zeros((batch_size, size, size), device=device)\n\n @staticmethod\n @lru_cache(maxsize=10)\n def diag_idx(batch_size: int, size: int, device):\n return torch.arange(size, device=device).repeat((batch_size, 1, 1))\n\n def forward(self, x) -> Distribution:\n \"\"\"Returns distribution\"\"\"\n self.std.data = torch.clamp(self.std, self.std_min, self.std_max)\n if self.size == 1:\n return self.dist(x.view(-1, 1), scale=self.std.view(-1, 1))\n\n # Distinction here is primarily performance optimization (though it isn't too optimal)\n batch_size = x.shape[0]\n if len(x.shape) == 1 or x.shape[0] == 1:\n new_shape = (1, self.size, 1)\n idx = torch.arange(self.size, device=x.device).view(new_shape)\n std = self._empty_std(batch_size, self.size, x.device).scatter(-1, idx, self.std.repeat(new_shape))\n else:\n std = self.std.repeat((batch_size, 1, 1))\n std = self._empty_std(batch_size, self.size, x.device).scatter(\n 1, self.diag_idx(batch_size, self.size, x.device), std\n )\n return self.dist(x, scale_tril=std)\n\n def act(self, x):\n return x\n\n @staticmethod\n def log_prob(dist, samples) -> torch.Tensor:\n return dist.log_prob(samples)\n\n\nclass MultivariateGaussianPolicy(PolicyType):\n \"\"\"\n Multivariate Gaussian (Normal) Policy.\n\n In contrast to `MultivariateGaussianPolicySimple` it assumes that\n distribution's characteristics are estimated by the network rather\n than optimized by the optimizer.\n Both location and covariance are assumed to be inputs into the policy.\n\n \"\"\"\n\n param_dim = 2\n\n def __init__(self, size: int, std_init: float = 1.0, std_min: float = 0.1, std_max: float = 3.0, device=None):\n \"\"\"\n Parameters:\n size: Observation's dimensionality upon sampling.\n batch_size: Expected size of batch.\n device: Device where to allocate memory. 
CPU or CUDA.\n \"\"\"\n super(MultivariateGaussianPolicy, self).__init__()\n self.size = size\n self.dist = Normal if size == 1 else MultivariateNormal\n self.std_init = std_init\n self.std_min = std_min\n self.std_max = std_max\n\n @staticmethod\n @lru_cache(maxsize=10)\n def _empty_std(batch_size: int, size: int, device):\n return torch.zeros((batch_size, size, size), device=device)\n\n @staticmethod\n @lru_cache(maxsize=10)\n def diag_idx(batch_size: int, size: int, device):\n return torch.arange(size, device=device).repeat((batch_size, 1, 1)).view(batch_size, size, 1)\n\n def forward(self, x) -> Distribution:\n \"\"\"Returns distribution\"\"\"\n x = x.view(-1, self.param_dim, self.size)\n mu = x[:, 0]\n std = torch.clamp(x[:, 1], self.std_min, self.std_max).unsqueeze(-1)\n if self.size == 1:\n return self.dist(mu.view(-1, 1), scale=std.view(-1, 1))\n\n batch_size = x.shape[0]\n if x.shape[0] == 1:\n idx = torch.arange(self.size, device=x.device).view(1, self.size, 1)\n std = torch.zeros((1, self.size, self.size), device=x.device).scatter(-1, idx, std)\n else:\n std = self._empty_std(batch_size, self.size, x.device).scatter(\n -1, self.diag_idx(batch_size, self.size, x.device), std\n )\n return self.dist(mu, scale_tril=std)\n\n def act(self, x) -> torch.Tensor:\n \"\"\"Deterministic pass. Ignores covariance and returns locations directly.\"\"\"\n return x.view(-1, self.size, self.param_dim)[..., 0]\n\n @staticmethod\n def log_prob(dist, samples) -> torch.Tensor:\n return dist.log_prob(samples)\n\n\nclass GaussianPolicy(PolicyType):\n \"\"\"\n Univariate Gaussian (Normal) Distribution.\n Has two heads; one for location estimate and one for standard deviation.\n \"\"\"\n\n def __init__(self, in_features: FeatureType, out_features: FeatureType, out_scale: float = 1, **kwargs):\n \"\"\"\n Parameters:\n size: Observation's dimensionality upon sampling.\n\n \"\"\"\n super(GaussianPolicy, self).__init__()\n\n self.in_features = in_features\n self.out_features = out_features\n self.out_scale = out_scale\n\n hidden_layers = kwargs.get(\"hidden_layers\")\n self.dist = Normal\n self.mu = FcNet(in_features, out_features, hidden_layers=hidden_layers, **kwargs)\n self.log_std = FcNet(in_features, out_features, hidden_layers=hidden_layers, **kwargs)\n\n self.log_std_min = -10\n self.log_std_max = 2\n\n self._last_dist: Optional[Distribution] = None\n self._last_samples: Optional[torch.Tensor] = None\n\n @property\n def logprob(self) -> Optional[torch.Tensor]:\n if self._last_dist is None or self._last_samples is None:\n return None\n\n # *Note*: The note below is borrowed from the SpinningUp implementation.\n # Please return once not needed.\n # Compute logprob from Gaussian, and then apply correction for Tanh squashing.\n # NOTE: The correction formula is a little bit magic. To get an understanding\n # of where it comes from, check out the original SAC paper (arXiv 1801.01290)\n # and look in appendix C. This is a more numerically-stable equivalent to Eq 21.\n # Try deriving it yourself as a (very difficult) exercise. 
:)\n actions = self._last_samples\n logprob = self._last_dist.log_prob(actions).sum(axis=-1)\n logprob -= 2 * (math.log(2) - actions - F.softplus(-2 * actions)).sum(axis=1)\n return logprob.view(-1, 1)\n\n def forward(self, x, deterministic=False) -> torch.Tensor:\n mu = self.mu(x)\n log_std = self.log_std(x)\n log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)\n std = torch.exp(log_std)\n\n if deterministic:\n self._last_dist, self._last_samples = (None, None)\n return mu\n\n self._last_dist = dist = self.dist(mu, std)\n self._last_samples = actions = dist.rsample()\n return self.out_scale * torch.tanh(actions)\n\n\nclass BetaPolicy(PolicyType):\n \"\"\"\n Multivarate generalized version of the Dirichlet (1D) distribution.\n\n Uses torch.distributions.Beta or torch.distributions.Dirichlet\n distirubitions depending on the input size.\n\n https://pytorch.org/docs/stable/distributions.html#beta\n https://pytorch.org/docs/stable/distributions.html#dirichlet\n \"\"\"\n\n param_dim = 2\n\n def __init__(self, size: int, bounds: Tuple[float, float] = (1, float(\"inf\"))):\n \"\"\"\n Parameters:\n size: Observation's dimensionality upon sampling.\n bounds: Beta dist input clamp for both alpha and betas.\n Both concentration are expected to be larger than 1.\n\n \"\"\"\n super(BetaPolicy, self).__init__()\n self.bounds = bounds\n self.action_size = size\n self.dist = Beta if size > 1 else Dirichlet\n\n def forward(self, x) -> Distribution:\n x = x.view(-1, self.action_size, self.param_dim)\n x = torch.clamp(x, self.bounds[0], self.bounds[1])\n dist = self.dist(x[..., 0], x[..., 1])\n return dist\n\n @staticmethod\n def log_prob(dist, samples):\n return dist.log_prob(samples).mean(dim=-1)\n\n\nclass DirichletPolicy(PolicyType):\n\n param_dim = 1\n\n def __init__(self, *, alpha_min: float = 0.05):\n super(DirichletPolicy, self).__init__()\n self.alpha_min = alpha_min\n\n def forward(self, x) -> Distribution:\n x = torch.clamp(x, self.alpha_min)\n return Dirichlet(x)\n\n def log_prob(self, dist: Dirichlet, samples) -> torch.Tensor:\n return dist.log_prob(samples)\n\n\nclass DeterministicPolicy(PolicyType):\n\n param_dim = 1\n\n def __init__(self, action_size):\n super(DeterministicPolicy, self).__init__()\n self.action_size = action_size\n\n def forward(self, x):\n return x\n" ]
[ [ "torch.multiprocessing.Process", "torch.multiprocessing.Pipe", "numpy.empty", "numpy.zeros" ], [ "torch.zeros", "torch.nn.functional.softplus", "torch.arange", "torch.clamp", "torch.distributions.Dirichlet", "torch.full", "torch.tanh", "torch.exp" ] ]
retip94/konin-flats-scraper
[ "7db18da33b858532f698ce20f3a450df68cf6d63" ]
[ "scraper/main.py" ]
[ "import os\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom functools import partial, reduce\nimport time\nimport multiprocessing\nfrom collections import defaultdict\nfrom gatheringMethods import *\nfrom time import localtime, strftime\nimport jinja2\nimport ftplib\nimport random\nimport config_local as cfg\nfrom config import *\nfrom pathlib import Path\n\nwebs = {\n 'sobieraj': {'url': 'http://www.sobieraj-nieruchomosci.pl/', 'url_suffix': '', 'func': sobieraj_parse,\n 'pagination': False},\n 'florczyk': {'url': 'http://florczyk.nieruchomosci.pl/category/mieszkania/', 'url_suffix': '',\n 'func': florczyk_parse,\n 'pagination': False},\n 'abakus': {'url': \"http://abakus.konin.pl/mieszkania\", 'url_suffix': \"\", 'func': abakus_parse, 'pagination': False},\n 'invicus': {'url': \"http://invicus.pl/pl/ogloszenia-w-serwisie/4/mieszkania/\", 'url_suffix': \"\",\n 'func': invicus_parse, 'pagination': True},\n 'lider': {'url': \"http://www.liderkonin.pl/100-mieszkania\", 'url_suffix': \"\", 'func': lider_parse,\n 'pagination': False},\n 'tok': {'url': \"https://www.toknieruchomosci.com.pl/mieszkania\", 'url_suffix': \"\", 'func': tok_parse,\n 'pagination': False},\n 'aba': {\n 'url': \"https://www.abanieruchomosci.pl/szukaj-oferty.html?estate_type=Mieszkania&ad_type=Sprzeda%C5%BC&locality=Konin&searching=yes&page_index=0\",\n 'url_suffix': \"\", 'func': aba_parse,\n 'pagination': False},\n 'zaroda': {\n 'url': \"http://www.zaroda-nieruchomosci.pl/oferta/szukaj?search%5Blocalization%5D=konin&search%5Btransaction%5D=1&search%5Btypes%5D=1\",\n 'url_suffix': \"\", 'func': zaroda_parse,\n 'pagination': False},\n 'trado': {\n 'url': \"http://tradonieruchomosci.pl/wyniki-wyszukiwania/?property_location=any&property_type=mieszkania&title=konin&property_feature=na-sprzedaz&search_nonce=479c85afba\",\n 'url_suffix': \"\", 'func': trado_parse, 'pagination': False},\n 'lm': {'url': \"https://www.lm.pl/ogloszenia/lista/85/\", 'url_suffix': \"/32454206\", 'func': lm_parse,\n 'pagination': True},\n}\n\n\ndef main():\n starting_time = time.time()\n try:\n data = pd.DataFrame()\n for web in webs:\n data = pd.concat([data, scrap(web)], ignore_index=True, sort=False)\n if len(data):\n save_new_data(data)\n render_html()\n except Exception as e:\n print(e)\n logging.error(e)\n logging.info(\"Running time %ds using %d threads\" % ((time.time() - starting_time), THREADS_COUNT))\n\n\n# execute all the functions for one website and return data in dataframe\ndef scrap(web):\n pages_urls = prepare_pages_urls(web)\n init_results = iterate_urls(pages_urls, web, 1)\n # go to more detailed results of just new content\n new_content = extract_new_content(init_results)\n if new_content.empty:\n return pd.DataFrame()\n ads_urls = new_content['Url'].tolist()\n detailed_ads = iterate_urls(ads_urls, web, 2)\n results = new_content.merge(detailed_ads, on=\"Url\")\n return results\n\n\n# get last page from website (if there is any pagination) else return 1\ndef get_last_page(web):\n if not webs[web]['pagination']:\n return 1\n url = webs[web]['url'] + \"1\" + webs[web]['url_suffix']\n try:\n r = requests.get(url, headers=HEADERS, timeout=10)\n if r.status_code != 200:\n raise r.raise_for_status()\n soup = BeautifulSoup(r.content, features=\"lxml\")\n if web == 'lm':\n last_page = soup.find(class_='multicont_news_stronicowanie').find_all('a')[-2].get_text()\n elif web == 'invicus':\n last_page = soup.find(class_='perPage clear').find(class_='links').find_all('a')[-1].get_text()\n else:\n last_page = 1\n 
return int(last_page)\n except Exception as e:\n logging.info(e)\n return 1\n\n\n# prepare list of urls for each page (if there is any pagination)\ndef prepare_pages_urls(web):\n url = webs[web]['url']\n url_suffix = webs[web]['url_suffix']\n last_page = get_last_page(web)\n if last_page == 1:\n return [url + url_suffix]\n else:\n pages = range(1, last_page + 1)\n return list(map(lambda x: url + str(x) + url_suffix, pages))\n\n\n# using multiprocessing scrap all the urls from list (loop until all are done)\ndef iterate_urls(urls, web, step):\n request_timeout = INIT_REQUEST_TIMEOUT\n results = []\n # breaks when there are no failed requests or timeout rise to maxReqTime\n while len(urls) != 0 and request_timeout <= MAX_REQUEST_TIMEOUT:\n logging.info(web)\n if len(urls) > 8:\n p = multiprocessing.Pool(THREADS_COUNT)\n results += p.map(partial(get_page_soup, timeout=request_timeout, web=web, step=step), urls,\n chunksize=CHUNK_SIZE) # play with chunksize for better performance\n p.terminate()\n p.join()\n else:\n results += map(lambda x: get_page_soup(x, request_timeout, web, step), urls)\n results = [r for r in results if type(r) != str]\n # try again with failed results\n urls = [r for r in results if type(r) == str]\n request_timeout += 1\n return merge_to_dataframe(results)\n\n\n# listOfListsOfDicts -> Dataframe\ndef merge_to_dataframe(lists):\n lists = [x for x in lists if x is not None]\n if len(lists) > 0:\n return pd.DataFrame(reduce(lambda x, y: x + y, lists))\n else:\n return pd.DataFrame(\n {'Url': [], 'Nazwa': [], 'Telefon': [], 'Cena': [], 'Zdjecie': [], 'Powierzchnia': 0.0, 'Piętro': [],\n 'Tresc': [], 'Zrodlo': []})\n\n\n# scraping data from single page (webfunc states for the function to be used to gather data)\ndef get_page_soup(page_url, timeout, web, step):\n gather_method = webs[web]['func']\n logging.info(page_url)\n # in case requests fails\n try:\n r = requests.get(page_url, headers=HEADERS, timeout=timeout)\n if r.status_code != 200:\n raise r.raise_for_status()\n soup = BeautifulSoup(r.content, features=\"lxml\")\n return gather_method(web, soup, step, page_url)\n except Exception as e:\n logging.info(e)\n logging.info('failed...')\n return page_url\n\n\ndef extract_new_content(new_data):\n old_data = pd.read_pickle(DATABASE_FILE)\n new_content = get_new_lines(new_data, old_data)\n new_content = new_content.dropna(axis=1, how='all')\n return new_content\n\n\n# get just new content from scraped data and save it on top of CSV file\ndef save_new_data(new_data: pd.DataFrame):\n timestamp = strftime(\"%H:%M %d-%m-%Y\", localtime())\n timestamp_row = pd.DataFrame(\n {'Url': [''], 'Nazwa': [''], 'Telefon': [''], 'Cena': [0], 'Zdjecie': [''], 'Powierzchnia': 0.0, 'Piętro': [''],\n 'Tresc': ['---'], 'Zrodlo': [timestamp], 'Galeria': [['']]})\n old_data = pd.read_pickle(DATABASE_FILE)\n data = pd.concat([timestamp_row, new_data, old_data], ignore_index=True, sort=False)\n data.to_pickle(DATABASE_FILE)\n return data\n\n\n# Merge data delivered in form of list of lists of dictionaries [[], [], [{},{},{}], [{},{},{}], ...]\ndef merge_results(ads: list):\n whole_data = defaultdict(list)\n for r in list(filter(lambda x: len(x) > 0, ads)):\n for dic in r:\n if type(dic) == dict:\n for key, value in dic.items():\n whole_data[key].append(value)\n df = pd.DataFrame(whole_data)\n return df\n\n\ndef export_to_excel(path: Path, data: pd.DataFrame):\n path = path.joinpath('output.xlsx')\n os.system(\"TASKKILL /F /IM excel.exe\")\n writer = pd.ExcelWriter(str(path), engine=\"xlsxwriter\", 
date_format='dd.mmm.yyyy')\n data.to_excel(writer, sheet_name='Sheet1', index=False, freeze_panes=(1, 0))\n workbook = writer.book\n worksheet = writer.sheets['Sheet1']\n\n # format excel\n format1 = workbook.add_format({'num_format': '000 000 000', 'align': 'center'})\n worksheet.set_column('A:A', 20) # id\n worksheet.set_column('B:B', 15) # adres\n worksheet.set_column('C:C', 16) # kategoria\n worksheet.set_column('D:D', 12, format1) # telefon\n worksheet.set_column('E:E', 50) # link\n worksheet.set_column('F:F', 50) # link google\n writer.save()\n\n\ndef get_new_lines(df1, df2):\n df3 = pd.concat([df1, df2, df2], ignore_index=True, sort=False).drop_duplicates(subset=['Url'], keep=False)\n df4 = pd.concat([df3, df2, df2], ignore_index=True, sort=False).drop_duplicates(subset=['Nazwa'], keep=False)\n logging.info(f'{len(df4)} new lines')\n return df4\n\n\ndef render_html():\n data = pd.read_pickle(DATABASE_FILE)\n data = data.dropna(axis=0, how='all', thresh=None, subset=None, inplace=False)\n data = data.fillna(\"\")\n # calc square meter unit prices\n m2_price = []\n for i, r in data.iterrows():\n try:\n price = float(r['Cena'])\n area = float(r['Powierzchnia'])\n if area > 0:\n m2_price.append(int(price / area))\n else:\n m2_price.append(\"\")\n except ValueError:\n m2_price.append(\"\")\n data['CenaMetr'] = m2_price\n\n # limit to 270 chars\n data['Tresc'] = data['Tresc'].str[:270]\n\n # if str consist 'ynaj' its probably rent ad\n data = data[~data.Nazwa.str.contains('ynaj')]\n\n template_loader = jinja2.FileSystemLoader(INDEX_TEMPLATE_PATH)\n template_env = jinja2.Environment(loader=template_loader)\n template = template_env.get_template('index_template.html')\n rendered = template.render(data=data.to_dict('records'),\n version=random.randint(0, 10000)) # version to avoid getting css and js from cache\n with open(OUTPUT_INDEX_PATH, \"w\", encoding='utf-8') as fp:\n fp.write(rendered)\n\n\n# used when script was running on local machine and results were sent to the server\ndef export_html_by_ftp():\n try:\n logging.info('Connecting FTP...')\n session = ftplib.FTP(cfg.FTP_SERVER, cfg.FTP_LOGIN, cfg.FTP_PASSWORD)\n logging.info('''Connected\n Sending file...''')\n file = open(OUTPUT_INDEX_PATH, 'rb') # file to send\n session.storbinary(f'STOR {cfg.FTP_PATH}index.html', file, 102400) # send the file\n file.close() # close file and FTP\n session.quit()\n except Exception as e:\n logging.info(e)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.read_pickle", "pandas.DataFrame", "pandas.concat" ] ]
shreyaskar123/EHR-Discontinuity
[ "8d2becfd784b9cbe697f8308d60023701971ef5d", "8d2becfd784b9cbe697f8308d60023701971ef5d" ]
[ "EHR_Claims/GBT/Hemorrhage_SMOTE_EHR_CLAIMS_GBT.py", "EHR_Only/BART/Hemorrhage_SMOTE.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n\n\n\n# In[1]:\n\n\nimport pandas as pd\nmedicare = pd.read_csv(\"/netapp2/home/se197/data/CMS/Data/medicare.csv\")\n\n\n\ntrain_set = medicare[medicare.Hospital != 'BWH'] # MGH; n = 204014\nvalidation_set = medicare[medicare.Hospital == 'BWH'] # BWH and Neither; n = 115726\nimport numpy as np\n\nfifty_perc_EHR_cont = np.percentile(medicare['Cal_MPEC_R0'],50)\ntrain_set_high = train_set[train_set.Cal_MPEC_R0 >= fifty_perc_EHR_cont]\ntrain_set_low= train_set[train_set.Cal_MPEC_R0 < fifty_perc_EHR_cont]\n\nvalidation_set_high = validation_set[validation_set.Cal_MPEC_R0 >= fifty_perc_EHR_cont]\nvalidation_set_low = validation_set[validation_set.Cal_MPEC_R0 < fifty_perc_EHR_cont]\n\n\n# In[3]:\n\n\npredictor_variable = [\n 'Co_CAD_RC0', 'Co_Embolism_RC0', 'Co_DVT_RC0', 'Co_PE_RC0', 'Co_AFib_RC0',\n 'Co_Hypertension_RC0', 'Co_Hyperlipidemia_RC0', 'Co_Atherosclerosis_RC0',\n 'Co_HF_RC0', 'Co_HemoStroke_RC0', 'Co_IscheStroke_RC0', 'Co_OthStroke_RC0',\n 'Co_TIA_RC0', 'Co_COPD_RC0', 'Co_Asthma_RC0', 'Co_Pneumonia_RC0', 'Co_Alcoholabuse_RC0',\n 'Co_Drugabuse_RC0', 'Co_Epilepsy_RC0', 'Co_Cancer_RC0', 'Co_MorbidObesity_RC0',\n 'Co_Dementia_RC0', 'Co_Depression_RC0', 'Co_Bipolar_RC0', 'Co_Psychosis_RC0',\n 'Co_Personalitydisorder_RC0', 'Co_Adjustmentdisorder_RC0', 'Co_Anxiety_RC0',\n 'Co_Generalizedanxiety_RC0', 'Co_OldMI_RC0', 'Co_AcuteMI_RC0', 'Co_PUD_RC0',\n 'Co_UpperGIbleed_RC0', 'Co_LowerGIbleed_RC0', 'Co_Urogenitalbleed_RC0',\n 'Co_Othbleed_RC0', 'Co_PVD_RC0', 'Co_LiverDisease_RC0', 'Co_MRI_RC0',\n 'Co_ESRD_RC0', 'Co_Obesity_RC0', 'Co_Sepsis_RC0', 'Co_Osteoarthritis_RC0',\n 'Co_RA_RC0', 'Co_NeuroPain_RC0', 'Co_NeckPain_RC0', 'Co_OthArthritis_RC0',\n 'Co_Osteoporosis_RC0', 'Co_Fibromyalgia_RC0', 'Co_Migraine_RC0', 'Co_Headache_RC0',\n 'Co_OthPain_RC0', 'Co_GeneralizedPain_RC0', 'Co_PainDisorder_RC0',\n 'Co_Falls_RC0', 'Co_CoagulationDisorder_RC0', 'Co_WhiteBloodCell_RC0', 'Co_Parkinson_RC0',\n 'Co_Anemia_RC0', 'Co_UrinaryIncontinence_RC0', 'Co_DecubitusUlcer_RC0',\n 'Co_Oxygen_RC0', 'Co_Mammography_RC0', 'Co_PapTest_RC0', 'Co_PSATest_RC0',\n 'Co_Colonoscopy_RC0', 'Co_FecalOccultTest_RC0', 'Co_FluShot_RC0', 'Co_PneumococcalVaccine_RC0' , 'Co_RenalDysfunction_RC0', 'Co_Valvular_RC0', 'Co_Hosp_Prior30Days_RC0',\n 'Co_RX_Antibiotic_RC0', 'Co_RX_Corticosteroid_RC0', 'Co_RX_Aspirin_RC0', 'Co_RX_Dipyridamole_RC0',\n 'Co_RX_Clopidogrel_RC0', 'Co_RX_Prasugrel_RC0', 'Co_RX_Cilostazol_RC0', 'Co_RX_Ticlopidine_RC0',\n 'Co_RX_Ticagrelor_RC0', 'Co_RX_OthAntiplatelet_RC0', 'Co_RX_NSAIDs_RC0',\n 'Co_RX_Opioid_RC0', 'Co_RX_Antidepressant_RC0', 'Co_RX_AAntipsychotic_RC0', 'Co_RX_TAntipsychotic_RC0',\n 'Co_RX_Anticonvulsant_RC0', 'Co_RX_PPI_RC0', 'Co_RX_H2Receptor_RC0', 'Co_RX_OthGastro_RC0',\n 'Co_RX_ACE_RC0', 'Co_RX_ARB_RC0', 'Co_RX_BBlocker_RC0', 'Co_RX_CCB_RC0', 'Co_RX_Thiazide_RC0',\n 'Co_RX_Loop_RC0', 'Co_RX_Potassium_RC0', 'Co_RX_Nitrates_RC0', 'Co_RX_Aliskiren_RC0',\n 'Co_RX_OthAntihypertensive_RC0', 'Co_RX_Antiarrhythmic_RC0', 'Co_RX_OthAnticoagulant_RC0',\n 'Co_RX_Insulin_RC0', 'Co_RX_Noninsulin_RC0', 'Co_RX_Digoxin_RC0', 'Co_RX_Statin_RC0',\n 'Co_RX_Lipid_RC0', 'Co_RX_Lithium_RC0', 'Co_RX_Benzo_RC0', 'Co_RX_ZDrugs_RC0',\n 'Co_RX_OthAnxiolytic_RC0', 'Co_RX_Dementia_RC0', 'Co_RX_Hormone_RC0',\n 'Co_RX_Osteoporosis_RC0', 'Co_N_Drugs_RC0', 'Co_N_Hosp_RC0', 'Co_Total_HospLOS_RC0',\n 'Co_N_MDVisit_RC0', 'Co_RX_AnyAspirin_RC0', 'Co_RX_AspirinMono_RC0', 'Co_RX_ClopidogrelMono_RC0',\n 'Co_RX_AspirinClopidogrel_RC0', 'Co_RX_DM_RC0', 
'Co_RX_Antipsychotic_RC0'\n]\n\nco_train_gpop = train_set[predictor_variable]\nco_train_high = train_set_high[predictor_variable]\nco_train_low = train_set_low[predictor_variable]\n\nco_validation_gpop = validation_set[predictor_variable]\nco_validation_high = validation_set_high[predictor_variable]\nco_validation_low = validation_set_low[predictor_variable]\n\n\n# In[4]:\n\n\nout_train_hemorrhage_gpop = train_set['Out_Hemorrhage_RC1']\nout_train_hemorrhage_high = train_set_high['Out_Hemorrhage_RC1']\nout_train_hemorrhage_low = train_set_low['Out_Hemorrhage_RC1']\n\nout_validation_hemorrhage_gpop = validation_set['Out_Hemorrhage_RC1']\nout_validation_hemorrhage_high = validation_set_high['Out_Hemorrhage_RC1']\nout_validation_hemorrhage_low = validation_set_low['Out_Hemorrhage_RC1']\n\n\n# In[5]:\n\n\n'''\nNOT USING THIS\nINSTEAD USING XGBOOST: A FASTER IMPLEMENTATION OF Gradient Boost \nhttps://github.com/dmlc/xgboost/tree/master/python-package\ndef GBT(X,y): \n from sklearn.model_selection import GridSearchCV\n from sklearn.ensemble import GradientBoostingRegressor\n from imblearn.over_sampling import SMOTE\n\n param_grid = [{\n 'learning_rate': [0.05,0.1,0.2],\n 'n_estimators': [100,150,200]\n }]\n \n boost_clf = GradientBoostingRegressor()\n boosting_grid_search = GridSearchCV(estimator = boost_clf, param_grid = param_grid)\n best_clf = boosting_grid_search.fit(X, y)\n return best_clf\n'''\n\n\n# In[6]:\n\n\ndef xgBoost(X,y):\n from xgboost import XGBClassifier\n from sklearn.model_selection import GridSearchCV\n model = XGBClassifier()\n param_grid = [{\n 'max_depth': [2,3],\n 'n_estimators': [60,160],\n }]\n grid_search = GridSearchCV(\n estimator=model,\n param_grid=param_grid,\n n_jobs = 10,\n cv = 5,\n verbose=True\n)\n best_clf = grid_search.fit(X,y)\n return best_clf\n\n\n# In[7]:\n\n\ndef scores(X,y):\n from sklearn.metrics import accuracy_score\n from sklearn.metrics import f1_score\n from sklearn.metrics import fbeta_score\n from sklearn.metrics import roc_auc_score \n from sklearn.metrics import log_loss\n\n pred = best_clf.predict(X)\n actual = y\n print(accuracy_score(actual,pred),file = open('hem_smote_gbt_ehrc.out', 'a'))\n print(f1_score(actual,pred),file = open('hem_smote_gbt_ehrc.out', 'a'))\n print(fbeta_score(actual,pred, average = 'macro', beta = 2),file = open('hem_smote_gbt_ehrc.out', 'a'))\n print(roc_auc_score(actual, best_clf.predict_proba(X)[:,1]),file = open('hem_smote_gbt_ehrc.out', 'a'))\n print(log_loss(actual,best_clf.predict_proba(X)[:,1]),file = open('hem_smote_gbt_ehrc.out', 'a'))\n\n\n# In[8]:\n\n\ndef cross_val(X,y):\n from sklearn.model_selection import KFold\n from sklearn.model_selection import cross_validate\n from sklearn.metrics import log_loss\n from sklearn.metrics import roc_auc_score\n from sklearn.metrics import fbeta_score\n import sklearn\n import numpy as np\n cv = KFold(n_splits=5, random_state=1, shuffle=True)\n log_loss = [] \n auc = [] \n accuracy = []\n f1 = []\n f2 = [] \n for train_index, test_index in cv.split(X):\n X_train, X_test, y_train, y_test = X.iloc[train_index], X.iloc[test_index], y.iloc[train_index], y.iloc[test_index]\n model = xgBoost(X_train, y_train)\n #prob = model.predict(X_test) # prob is a vector of probabilities \n #pred = np.round(model.predict(X_test)) # pred is the rounded predictions \n prob = model.predict_proba(X_test)[:,1] # prob is a vector of probabilities \n pred = np.round(model.predict_proba(X_test)[:,1]) # pred is the rounded predictions \n log_loss.append(sklearn.metrics.log_loss(y_test, 
prob))\n auc.append(sklearn.metrics.roc_auc_score(y_test, prob))\n accuracy.append(sklearn.metrics.accuracy_score(y_test, pred))\n f1.append(sklearn.metrics.f1_score(y_test, pred, average = 'macro'))\n f2.append(fbeta_score(y_test,pred, average = 'macro', beta = 2))\n print(np.mean(accuracy),file = open('hem_smote_gbt_ehrc.out', 'a'))\n print(np.mean(f1),file = open('hem_smote_gbt_ehrc.out', 'a'))\n print(np.mean(f2),file = open('hem_smote_gbt_ehrc.out', 'a'))\n print(np.mean(auc),file = open('hem_smote_gbt_ehrc.out', 'a'))\n print(np.mean(log_loss),file = open('hem_smote_gbt_ehrc.out', 'a'))\n\n\n# # General Population\n\n# In[9]:\n\n\nfrom imblearn.over_sampling import SMOTE\nsm = SMOTE(random_state = 42)\nco_train_gpop_sm,out_train_hemorrhage_gpop_sm = sm.fit_resample(co_train_gpop,out_train_hemorrhage_gpop)\n\nbest_clf = xgBoost(co_train_gpop_sm, out_train_hemorrhage_gpop_sm)\n\ncross_val(co_train_gpop_sm, out_train_hemorrhage_gpop_sm)\n\nprint(\"\",file = open('hem_smote_gbt_ehrc.out', 'a'))\n\nscores(co_train_gpop, out_train_hemorrhage_gpop)\n\nprint(\"\",file = open('hem_smote_gbt_ehrc.out', 'a'))\n\nscores(co_validation_gpop, out_validation_hemorrhage_gpop)\n\n\n# In[ ]:\n\n\n\n\n\n# # High Continuity \n\n# In[10]:\n\n\nfrom imblearn.over_sampling import SMOTE\nsm = SMOTE(random_state = 42)\nco_train_high_sm,out_train_hemorrhage_high_sm = sm.fit_resample(co_train_high,out_train_hemorrhage_high)\nprint(\"high\",file = open('hem_smote_gbt_ehrc.out', 'a'))\n\nbest_clf = xgBoost(co_train_high_sm, out_train_hemorrhage_high_sm)\n\ncross_val(co_train_high_sm, out_train_hemorrhage_high_sm)\n\nprint(\"\",file = open('hem_smote_gbt_ehrc.out', 'a'))\n\nscores(co_train_high, out_train_hemorrhage_high)\n\nprint()\n\nprint(\"\",file = open('hem_smote_gbt_ehrc.out', 'a'))\nscores(co_validation_high, out_validation_hemorrhage_high)\n\n\n# # Low Continuity\n# \n\n# In[11]:\n\n\nfrom imblearn.over_sampling import SMOTE\nsm = SMOTE(random_state = 42)\nco_train_low_sm,out_train_hemorrhage_low_sm = sm.fit_resample(co_train_low,out_train_hemorrhage_low)\n\nbest_clf = xgBoost(co_train_low_sm, out_train_hemorrhage_low_sm)\nprint(\"low\",file = open('hem_smote_gbt_ehrc.out', 'a'))\n\ncross_val(co_train_low_sm, out_train_hemorrhage_low_sm)\n\nprint(\"\",file = open('hem_smote_gbt_ehrc.out', 'a'))\n\nscores(co_train_low, out_train_hemorrhage_low)\n\nprint()\n\nprint(\"\",file = open('hem_smote_gbt_ehrc.out', 'a'))\nscores(co_validation_low, out_validation_hemorrhage_low)\n", "#!/usr/bin/env python\n# coding: utf-8\n\n# In[3]:\n\n\nimport pandas as pd\nmedicare = pd.read_csv(\"/netapp2/home/se197/data/CMS/Data/medicare.csv\")\n\n\n# In[4]:\n\n\ntrain_set = medicare[medicare.Hospital != 'BWH'] # MGH\nvalidation_set = medicare[medicare.Hospital == 'BWH'] # BWH and Neither \nimport numpy as np\n\nfifty_perc_EHR_cont = np.percentile(medicare['Cal_MPEC_R0'],50)\ntrain_set_high = train_set[train_set.Cal_MPEC_R0 >= fifty_perc_EHR_cont]\ntrain_set_low= train_set[train_set.Cal_MPEC_R0 < fifty_perc_EHR_cont]\n\nvalidation_set_high = validation_set[validation_set.Cal_MPEC_R0 >= fifty_perc_EHR_cont]\nvalidation_set_low = validation_set[validation_set.Cal_MPEC_R0 < fifty_perc_EHR_cont]\n\n\n# In[5]:\n\n\npredictor_variable = [\n 'Co_CAD_R0', 'Co_Embolism_R0', 'Co_DVT_R0', 'Co_PE_R0', 'Co_AFib_R0',\n 'Co_Hypertension_R0', 'Co_Hyperlipidemia_R0', 'Co_Atherosclerosis_R0',\n 'Co_HF_R0', 'Co_HemoStroke_R0', 'Co_IscheStroke_R0', 'Co_OthStroke_R0',\n 'Co_TIA_R0', 'Co_COPD_R0', 'Co_Asthma_R0', 'Co_Pneumonia_R0', 
'Co_Alcoholabuse_R0',\n 'Co_Drugabuse_R0', 'Co_Epilepsy_R0', 'Co_Cancer_R0', 'Co_MorbidObesity_R0',\n 'Co_Dementia_R0', 'Co_Depression_R0', 'Co_Bipolar_R0', 'Co_Psychosis_R0',\n 'Co_Personalitydisorder_R0', 'Co_Adjustmentdisorder_R0', 'Co_Anxiety_R0',\n 'Co_Generalizedanxiety_R0', 'Co_OldMI_R0', 'Co_AcuteMI_R0', 'Co_PUD_R0',\n 'Co_UpperGIbleed_R0', 'Co_LowerGIbleed_R0', 'Co_Urogenitalbleed_R0',\n 'Co_Othbleed_R0', 'Co_PVD_R0', 'Co_LiverDisease_R0', 'Co_MRI_R0',\n 'Co_ESRD_R0', 'Co_Obesity_R0', 'Co_Sepsis_R0', 'Co_Osteoarthritis_R0',\n 'Co_RA_R0', 'Co_NeuroPain_R0', 'Co_NeckPain_R0', 'Co_OthArthritis_R0',\n 'Co_Osteoporosis_R0', 'Co_Fibromyalgia_R0', 'Co_Migraine_R0', 'Co_Headache_R0',\n 'Co_OthPain_R0', 'Co_GeneralizedPain_R0', 'Co_PainDisorder_R0',\n 'Co_Falls_R0', 'Co_CoagulationDisorder_R0', 'Co_WhiteBloodCell_R0', 'Co_Parkinson_R0',\n 'Co_Anemia_R0', 'Co_UrinaryIncontinence_R0', 'Co_DecubitusUlcer_R0',\n 'Co_Oxygen_R0', 'Co_Mammography_R0', 'Co_PapTest_R0', 'Co_PSATest_R0',\n 'Co_Colonoscopy_R0', 'Co_FecalOccultTest_R0', 'Co_FluShot_R0', 'Co_PneumococcalVaccine_R0', 'Co_RenalDysfunction_R0', 'Co_Valvular_R0', 'Co_Hosp_Prior30Days_R0',\n 'Co_RX_Antibiotic_R0', 'Co_RX_Corticosteroid_R0', 'Co_RX_Aspirin_R0', 'Co_RX_Dipyridamole_R0',\n 'Co_RX_Clopidogrel_R0', 'Co_RX_Prasugrel_R0', 'Co_RX_Cilostazol_R0', 'Co_RX_Ticlopidine_R0',\n 'Co_RX_Ticagrelor_R0', 'Co_RX_OthAntiplatelet_R0', 'Co_RX_NSAIDs_R0',\n 'Co_RX_Opioid_R0', 'Co_RX_Antidepressant_R0', 'Co_RX_AAntipsychotic_R0', 'Co_RX_TAntipsychotic_R0',\n 'Co_RX_Anticonvulsant_R0', 'Co_RX_PPI_R0', 'Co_RX_H2Receptor_R0', 'Co_RX_OthGastro_R0',\n 'Co_RX_ACE_R0', 'Co_RX_ARB_R0', 'Co_RX_BBlocker_R0', 'Co_RX_CCB_R0', 'Co_RX_Thiazide_R0',\n 'Co_RX_Loop_R0', 'Co_RX_Potassium_R0', 'Co_RX_Nitrates_R0', 'Co_RX_Aliskiren_R0',\n 'Co_RX_OthAntihypertensive_R0', 'Co_RX_Antiarrhythmic_R0', 'Co_RX_OthAnticoagulant_R0',\n 'Co_RX_Insulin_R0', 'Co_RX_Noninsulin_R0', 'Co_RX_Digoxin_R0', 'Co_RX_Statin_R0',\n 'Co_RX_Lipid_R0', 'Co_RX_Lithium_R0', 'Co_RX_Benzo_R0', 'Co_RX_ZDrugs_R0',\n 'Co_RX_OthAnxiolytic_R0', 'Co_RX_Barbiturate_R0', 'Co_RX_Dementia_R0', 'Co_RX_Hormone_R0',\n 'Co_RX_Osteoporosis_R0', 'Co_N_Drugs_R0', 'Co_N_Hosp_R0', 'Co_Total_HospLOS_R0',\n 'Co_N_MDVisit_R0', 'Co_RX_AnyAspirin_R0', 'Co_RX_AspirinMono_R0', 'Co_RX_ClopidogrelMono_R0',\n 'Co_RX_AspirinClopidogrel_R0', 'Co_RX_DM_R0', 'Co_RX_Antipsychotic_R0'\n]\n\n\nco_train_gpop = train_set[predictor_variable]\n \nco_train_high = train_set_high[predictor_variable]\n\nco_train_low = train_set_low[predictor_variable]\n\nco_validation_gpop = validation_set[predictor_variable]\nco_validation_gpop_split = np.array_split(co_validation_gpop, 5) \n\nco_validation_high = validation_set_high[predictor_variable]\nco_validation_high_split = np.array_split(co_validation_high, 5) \n\nco_validation_low = validation_set_low[predictor_variable]\nco_validation_low_split = np.array_split(co_validation_low, 5) \n\n\n# In[6]:\n\n\nout_train_hemorrhage_gpop = train_set['Out_Hemorrhage_RC1']\n\nout_train_hemorrhage_high = train_set_high['Out_Hemorrhage_RC1']\nout_train_hemorrhage_low = train_set_low['Out_Hemorrhage_RC1']\n\nout_validation_hemorrhage_gpop_split = [] \nout_validation_hemorrhage_gpop = validation_set['Out_Hemorrhage_RC1']\nfor parts in co_validation_gpop_split:\n out_validation_hemorrhage_gpop_split.append(out_validation_hemorrhage_gpop[parts.index])\n\nout_validation_hemorrhage_high_split = [] \nout_validation_hemorrhage_high = validation_set_high['Out_Hemorrhage_RC1']\nfor parts in co_validation_high_split:\n 
out_validation_hemorrhage_high_split.append(out_validation_hemorrhage_high[parts.index])\n\nout_validation_hemorrhage_low_split = [] \nout_validation_hemorrhage_low = validation_set_low['Out_Hemorrhage_RC1']\nfor parts in co_validation_low_split:\n out_validation_hemorrhage_low_split.append(out_validation_hemorrhage_low[parts.index])\n\n\n# In[7]:\n\n\ndef bart(X_train, y_train):\n from bartpy.sklearnmodel import SklearnModel\n from sklearn.model_selection import GridSearchCV\n from bartpy.data import Data\n from bartpy.sigma import Sigma\n param_grid = [{\n 'n_trees': [10,30,50] #\n }]\n model = SklearnModel()\n clf = GridSearchCV(estimator = model, param_grid = param_grid, n_jobs = 10, verbose = True)\n best_clf = clf.fit(X_train, y_train.to_numpy())\n print(best_clf)\n return best_clf \n\n\n# In[8]:\n\n\ndef scores(X_train,y_train, best_clf):\n from sklearn.metrics import accuracy_score\n from sklearn.metrics import f1_score\n from sklearn.metrics import fbeta_score\n from sklearn.metrics import roc_auc_score \n from sklearn.metrics import log_loss\n import numpy as np\n pred = np.round(best_clf.predict(X_train))\n print(pred)\n actual = y_train\n print(accuracy_score(actual,pred))\n print(f1_score(actual,pred))\n print(fbeta_score(actual,pred, average = 'macro', beta = 2))\n print(roc_auc_score(actual, best_clf.predict(X_train)))\n print(log_loss(actual,best_clf.predict(X_train)))\n\n\n# In[9]:\n\n\ndef cross_val(X,y,Or_X, Or_y):\n from sklearn.model_selection import KFold\n from sklearn.model_selection import cross_validate\n from sklearn.metrics import log_loss\n from sklearn.metrics import roc_auc_score\n from sklearn.metrics import fbeta_score\n import sklearn\n import numpy as np\n cv = KFold(n_splits=5, random_state=1, shuffle=True)\n log_loss = [] \n auc = [] \n accuracy = [] \n f1 = [] \n f2 = [] \n iter = 0\n for train_index, test_index in cv.split(X):\n \n X_train, X_test, y_train, y_test = X.iloc[train_index], Or_X[iter], y.iloc[train_index], Or_y[iter]\n iter = iter + 1\n model = bart(X_train, y_train)\n prob = model.predict(X_test) # prob is a vector of probabilities \n print(prob)\n pred = np.round(prob) # pred is the rounded predictions \n \n log_loss.append(sklearn.metrics.log_loss(y_test, prob))\n auc.append(sklearn.metrics.roc_auc_score(y_test, prob))\n accuracy.append(sklearn.metrics.accuracy_score(y_test, pred))\n f1.append(sklearn.metrics.f1_score(y_test, pred, average = 'macro'))\n f2.append(fbeta_score(y_test,pred, average = 'macro', beta = 2))\n print(np.mean(accuracy),file = open('hrtemh_smote__ehr.out', 'a'))\n print(np.mean(f1),file = open('hrtemh_smote__ehr.out', 'a'))\n print(np.mean(f2),file = open('hrtemh_smote__ehr.out', 'a'))\n print(np.mean(auc),file = open('hrtemh_smote__ehr.out', 'a'))\n print(np.mean(log_loss),file = open('hrtemh_smote__ehr.out', 'a'))\n#co_train_gpop_sm,out_train_hemorrhage_gpop_sm, co_validation_gpop_split, out_validation_hemorrhage_gpop_split\n\n\n# In[10]:\n\n\n\nprint(\"Gpop\",file = open('hrtemh_smote__ehr.out', 'a')) \nimport datetime\nbegin_time = datetime.datetime.now()\n\nfrom imblearn.over_sampling import SMOTE\nsm = SMOTE(random_state = 42)\nco_train_gpop_sm,out_train_hemorrhage_gpop_sm = sm.fit_resample(co_train_gpop,out_train_hemorrhage_gpop)\n\nbest_clf = bart(co_train_gpop_sm,out_train_hemorrhage_gpop_sm)\n\ncross_val(co_train_gpop_sm,out_train_hemorrhage_gpop_sm, co_validation_gpop_split, out_validation_hemorrhage_gpop_split)\n\n\n# In[11]:\n\n\nscores(co_train_gpop, out_train_hemorrhage_gpop, 
best_clf)\n\nprint(\"\",file = open('hrtemh_smote__ehr.out', 'a')) \n\nscores(co_validation_gpop,out_validation_hemorrhage_gpop, best_clf)\n\n\n# In[12]:\n\n\nimport datetime\nbegin_time = datetime.datetime.now()\n\nfrom imblearn.over_sampling import SMOTE\nsm = SMOTE(random_state = 42)\nco_train_low_sm,out_train_hemorrhage_low_sm = sm.fit_resample(co_train_low,out_train_hemorrhage_low)\n\nbest_clf = bart(co_train_low_sm,out_train_hemorrhage_low_sm)\n\ncross_val(co_train_low_sm,out_train_hemorrhage_low_sm, co_validation_low_split, out_validation_hemorrhage_low_split)\n\nprint()\n\nscores(co_train_low, out_train_hemorrhage_low, best_clf)\n\nprint() \n\nscores(co_validation_low,out_validation_hemorrhage_low, best_clf)\n\n\nprint(datetime.datetime.now() - begin_time)\n\n\n# In[13]:\n\n\nimport datetime\nbegin_time = datetime.datetime.now()\n\nfrom imblearn.over_sampling import SMOTE\nsm = SMOTE(random_state = 42)\nco_train_high_sm,out_train_hemorrhage_high_sm = sm.fit_resample(co_train_high,out_train_hemorrhage_high)\n\nbest_clf = bart(co_train_high_sm,out_train_hemorrhage_high_sm)\n\ncross_val(co_train_high_sm,out_train_hemorrhage_high_sm, co_validation_high_split, out_validation_hemorrhage_high_split)\n\nprint()\n\nscores(co_train_high, out_train_hemorrhage_high, best_clf)\n\nprint() \n\nscores(co_validation_high,out_validation_hemorrhage_high, best_clf)\n\n\nprint(datetime.datetime.now() - begin_time)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "numpy.percentile", "numpy.mean", "sklearn.metrics.accuracy_score", "sklearn.model_selection.KFold", "sklearn.metrics.fbeta_score", "sklearn.metrics.log_loss", "sklearn.model_selection.GridSearchCV", "pandas.read_csv", "sklearn.metrics.f1_score", "sklearn.metrics.roc_auc_score" ], [ "numpy.round", "numpy.percentile", "numpy.mean", "sklearn.model_selection.KFold", "sklearn.metrics.accuracy_score", "sklearn.metrics.fbeta_score", "sklearn.metrics.log_loss", "sklearn.model_selection.GridSearchCV", "pandas.read_csv", "sklearn.metrics.f1_score", "sklearn.metrics.roc_auc_score", "numpy.array_split" ] ]
robinupham/gaussian_cl_likelihood
[ "91fb635f9360340555eb0c920925f37fda69a8a5" ]
[ "python/like_cl_wishart.py" ]
[ "\"\"\"\nLikelihood module to evaluate the joint likelihood of a set of tomographic 3x2pt power spectra on the full sky\nusing the exact Wishart likelihood.\n\nThe main functions are setup, which should be called once per analysis, and execute, which is called for every new\npoint in parameter space.\n\"\"\"\n\nimport os.path\nimport numpy as np\nimport scipy.stats as stats\n\n\ndef cl_matrix(cls_, n_fields):\n \"\"\"\n Convert a sequence of power spectra into a sequence of Cl matrices.\n\n Args:\n cls_ (2D numpy array): Cls to reshape, with input shape (n_spectra, n_ell).\n n_fields (int): Number of fields, such that n_spectra = n_fields * (n_fields + 1) / 2.\n\n Returns:\n 3D numpy array: Cls with shape (n_ell, n_fields, n_fields).\n \"\"\"\n\n cls_ = np.asarray(cls_)\n n_spectra = n_fields * (n_fields + 1) / 2.\n assert cls_.shape[0] == n_spectra, f'cls_.shape is {cls_.shape}; n_spectra is {n_spectra}'\n\n # Create output array\n n_ells = cls_.shape[1]\n cls_matrices = np.zeros((n_ells, n_fields, n_fields))\n\n # Rotate axes of input so it is now indexed (l index, spectrum index)\n cls_ = cls_.T\n\n # Fill matrices as appropriate\n for l_idx in range(n_ells):\n start_idx = 0\n for diag in range(n_fields):\n stop_idx = start_idx + n_fields - diag\n\n # Add the relevant diagonal, including to lower half for diag > 0\n cls_matrices[l_idx] += np.diag(cls_[l_idx, start_idx:stop_idx], k=diag)\n if diag > 0:\n cls_matrices[l_idx] += np.diag(cls_[l_idx, start_idx:stop_idx], k=-diag)\n\n start_idx = stop_idx\n\n return cls_matrices\n\n\ndef cl_vector(cl_matrices):\n \"\"\"\n The inverse of cl_matrix: takes a sequence of Cl matrices and returns a vector of power spectra.\n\n Args:\n cl_matrices (3D numpy array): Cls to reshape into a vector, with input shape (n_ell, n_fields, n_fields).\n\n Returns:\n 2D numpy array: Cls with shape (n_spectra, n_ell), with n_spectra = n_fields * (n_fields + 1) / 2.\n \"\"\"\n\n # Check input and form output array\n n_ells, n_fields, n_fields_test = cl_matrices.shape\n assert n_fields == n_fields_test\n n_cls = int(n_fields * (n_fields + 1) / 2)\n res = np.full((n_cls, n_ells), np.nan)\n\n # Extract each matrix in turn\n for l_idx, cl_mat in enumerate(cl_matrices):\n start_idx = 0\n for diag in range(n_fields):\n stop_idx = start_idx + n_fields - diag\n res[start_idx:stop_idx, l_idx] = np.diag(cl_mat, k=diag)\n start_idx = stop_idx\n\n # Check every element has been filled\n assert np.all(np.isfinite(res))\n return res\n\n\ndef log_likelihood_single_l(l, theory_cl_matrix, noise_cl_matrix, obs_cl_matrix):\n \"\"\"\n Returns the log-likelihood of a set of Cls for a single l according to the Wishart distribution.\n\n Args:\n l (int): Single l value.\n theory_cl_matrix (2D numpy array): Matrix of theory Cls for this l.\n noise_cl_matrix (2D numpy array): Matrix of noise Cls for this l.\n obs_cl_matrix (2D numpy array): Matrix of observed Cls for this l.\n\n Returns:\n float: Log-likelihood value.\n \"\"\"\n\n # Wishart parameters\n nu = 2 * l + 1\n scale = (theory_cl_matrix + noise_cl_matrix) * 1. 
/ nu\n\n # Wishart is only defined for df >= size of scale\n if nu < scale.shape[0]:\n return 0\n\n return stats.wishart.logpdf(obs_cl_matrix, df=nu, scale=scale)\n\n\ndef joint_log_likelihood(ells, theory_cl_matrices, noise_cl_matrices, obs_cl_matrices, lmax):\n \"\"\"\n Return the joint log-likelihood of a whole observed 3x2pt data vector.\n\n Args:\n ells (1D numpy array): All l values.\n theory_cl_matrices (3D numpy array): Theory Cl matrices, shape (n_ell, n_spectra, n_spectra).\n noise_cl_matrices (3D numpy array): Noise Cl matrices, shape (n_ell, n_spectra, n_spectra).\n obs_cl_matrices (3D numpy array): Observed Cl matrices, shape (n_ell, n_spectra, n_spectra).\n lmax (int): Maximum l to include in the likelihood.\n\n Returns:\n float: Log-likelihood value.\n \"\"\"\n\n log_like = 0\n for i, l in enumerate(ells[ells <= lmax]):\n log_like += log_likelihood_single_l(l, theory_cl_matrices[i], noise_cl_matrices[i], obs_cl_matrices[i])\n\n return log_like\n\n\ndef is_even(x):\n \"\"\"\n True if x is even, false otherwise.\n\n Args:\n x (float): Number to test.\n\n Returns:\n bool: True if even.\n \"\"\"\n return x % 2 == 0\n\n\ndef is_odd(x):\n \"\"\"\n True if x is odd, false otherwise.\n\n Args:\n x (float): Number to test.\n\n Returns:\n bool: True if odd.\n \"\"\"\n return x % 2 == 1\n\n\ndef load_cls(n_zbin, pos_pos_dir, she_she_dir, pos_she_dir, lmax=None, lmin=0):\n \"\"\"\n Given the number of redshift bins and relevant directories, load power spectra (position, shear, cross) in the\n correct order (diagonal / healpy new=True ordering).\n If lmin is supplied, the output will be padded to begin at l=0.\n\n Args:\n n_zbin (int): Number of redshift bins.\n pos_pos_dir (str): Path to directory containing position-position power spectra.\n she_she_dir (str): Path to directory containing shear-shear power spectra.\n pos_she_dir (str): Path to directory containing position-shear power spectra.\n lmax (int, optional): Maximum l to load - if not supplied, will load all lines, which requires the individual\n lmax of each file to be consistent.\n lmin (int, optional): Minimum l supplied. 
Output will be padded with zeros below this point.\n\n Returns:\n 2D numpy array: All Cls, with different spectra along the first axis and increasing l along the second.\n \"\"\"\n\n # Calculate number of fields assuming 1 position field and 1 shear field per redshift bin\n n_field = 2 * n_zbin\n\n # Load power spectra in 'diagonal order'\n spectra = []\n for diag in range(n_field):\n for row in range(n_field - diag):\n col = row + diag\n\n # Determine whether position-position, shear-shear or position-shear by whether the row and column are even,\n # odd or mixed\n if is_even(row) and is_even(col):\n cl_dir = pos_pos_dir\n elif is_odd(row) and is_odd(col):\n cl_dir = she_she_dir\n else:\n cl_dir = pos_she_dir\n\n # Extract the bins: for pos-pos and she-she the higher bin index goes first, for pos-she pos goes first\n bins = (row // 2 + 1, col // 2 + 1)\n if cl_dir in (pos_pos_dir, she_she_dir):\n bin1 = max(bins)\n bin2 = min(bins)\n else:\n if is_even(row): # even means pos\n bin1, bin2 = bins\n else:\n bin2, bin1 = bins\n\n cl_path = os.path.join(f'bin_{bin1}_{bin2}.txt')\n\n # Load with appropriate ell range\n max_rows = None if lmax is None else (lmax - lmin + 1)\n spec = np.concatenate((np.zeros(lmin), np.loadtxt(cl_path, max_rows=max_rows)))\n spectra.append(spec)\n\n return np.asarray(spectra)\n\n\ndef setup(n_zbin, obs_pos_pos_dir, obs_she_she_dir, obs_pos_she_dir, pos_nl_path, she_nl_path, noise_ell_path, lmax,\n leff_path=None):\n \"\"\"\n Load and precompute everything that is fixed throughout parameter space. This should be called once per analysis,\n prior to any calls to execute.\n\n Args:\n n_zbin (int): Number of redshift bins. It will be assumed that there is one position field and one shear field\n per redshift bin.\n obs_pos_pos_dir (str): Path to the directory containing the observed position-position power spectra.\n obs_she_she_dir (str): Path to the directory containing the observed shear-shear power spectra.\n obs_pos_she_dir (str): Path to the directory containing the observed position-shear power spectra.\n pos_nl_path (str): Path to the position noise power spectrum.\n she_nl_path (str): Path to the shear noise power spectrum.\n noise_ell_path (str): Path to the file containing the ells for the noise power spectra.\n lmax (int): Maximum l to use in the likelihood.\n leff_path (str, optional): Path to ell-ell_effective mapping, to replace each l with its corresponding l_eff\n when calculating the covariance.\n\n Returns:\n dict: Config dictionary to pass to execute.\n \"\"\"\n\n # Calculate number of fields assuming 2 per redshift bin\n n_fields = 2 * n_zbin\n\n # Load obs Cls & ells\n obs_cls = load_cls(n_zbin, obs_pos_pos_dir, obs_she_she_dir, obs_pos_she_dir)\n obs_ell_pos = np.loadtxt(os.path.join(obs_pos_pos_dir, 'ell.txt'))\n obs_ell_she = np.loadtxt(os.path.join(obs_she_she_dir, 'ell.txt'))\n obs_ell_shp = np.loadtxt(os.path.join(obs_pos_she_dir, 'ell.txt'))\n\n # Do some consistency checks within the obs Cls and ells\n assert np.allclose(obs_ell_pos, obs_ell_she)\n assert np.allclose(obs_ell_pos, obs_ell_shp)\n assert np.all([len(spec) == len(obs_ell_pos) for spec in obs_cls])\n\n # Load noise Cls & ells\n pos_nl = np.loadtxt(pos_nl_path)\n she_nl = np.loadtxt(she_nl_path)\n noise_ell = np.loadtxt(noise_ell_path)\n\n # Do some consistency checks within the noise Cls and ells\n assert len(noise_ell) == len(pos_nl)\n assert len(noise_ell) == len(she_nl)\n\n # Force consistent ell range between obs and noise Cls\n lmin = np.amax((np.amin(obs_ell_pos), 
np.amin(noise_ell)))\n obs_ell_trimmed = obs_ell_pos[(obs_ell_pos >= lmin) & (obs_ell_pos <= lmax)]\n obs_cls = obs_cls[:, (obs_ell_pos >= lmin) & (obs_ell_pos <= lmax)]\n noise_ell_trimmed = noise_ell[(noise_ell >= lmin) & (noise_ell <= lmax)]\n pos_nl = pos_nl[(noise_ell >= lmin) & (noise_ell <= lmax)]\n she_nl = she_nl[(noise_ell >= lmin) & (noise_ell <= lmax)]\n assert np.allclose(obs_ell_trimmed, noise_ell_trimmed)\n\n # Convert obs Cls and noise Cls to matrices\n obs_cl_matrices = cl_matrix(obs_cls, n_fields)\n\n # Convert noise Cls to matrices (all diagonal)\n nl_nonzero = np.array([pos_nl, she_nl]*n_zbin)\n n_cls = int(n_fields * (n_fields + 1) / 2)\n nl_zero = np.zeros((n_cls - n_fields, len(noise_ell_trimmed)))\n nl = np.concatenate((nl_nonzero, nl_zero))\n noise_cl_matrices = cl_matrix(nl, n_fields)\n\n # If a path to a leff mapping is provided, load it to get leff\n if leff_path:\n leff_map = np.loadtxt(leff_path)\n leff_map = leff_map[(leff_map[:, 0] >= lmin) & (leff_map[:, 0] <= lmax)]\n assert np.allclose(leff_map[:, 0], obs_ell_trimmed)\n leff = leff_map[:, 1]\n leff_max = np.amax(leff) + 1e-5 # to avoid floating point error when comparing\n else:\n leff = None\n leff_max = None\n\n # Form config dictionary\n config = {\n 'ells': obs_ell_trimmed,\n 'obs_cl_matrices': obs_cl_matrices,\n 'noise_cl_matrices': noise_cl_matrices,\n 'lmax': lmax,\n 'n_fields': n_fields,\n 'leff': leff,\n 'leff_max': leff_max\n }\n\n return config\n\n\ndef execute(theory_ells, theory_cls, config):\n \"\"\"\n Perform some consistency checks then evaluate the likelihood for particular theory Cls.\n\n Args:\n theory_ell (1D numpy array): Ell range for all of the theory spectra (must be consistent between spectra).\n theory_cl (2D numpy array): Theory power spectra, in diagonal ordering, with shape (n_spectra, n_ell).\n config (dict): Config dictionary returned by setup.\n\n Returns:\n float: log-likelihood value.\n \"\"\"\n\n # Pull fixed (model Cl-independent) parameters from config\n ells = config['ells']\n obs_cl_matrices = config['obs_cl_matrices']\n noise_cl_matrices = config['noise_cl_matrices']\n lmin = np.amax((np.amin(theory_ells), np.amin(ells)))\n lmax = config['lmax']\n n_fields = config['n_fields']\n leff = config['leff']\n leff_max = config['leff_max']\n\n # Convert theory Cls into matrices\n theory_cl_matrices = cl_matrix(theory_cls, n_fields)\n\n # Force the two ell ranges to match (or throw error)\n lmin = np.amax((np.amin(theory_ells), np.amin(ells)))\n ell_keep = ells >= lmin\n ells_trimmed = ells[ell_keep]\n if len(ells_trimmed) < len(ells):\n obs_cl_matrices = obs_cl_matrices[ell_keep]\n noise_cl_matrices = noise_cl_matrices[ell_keep]\n theory_ells_trimmed = theory_ells[(theory_ells >= lmin) & (theory_ells <= lmax)]\n theory_cl_matrices = theory_cl_matrices[(theory_ells >= lmin) & (theory_ells <= lmax)]\n assert np.allclose(ells_trimmed, theory_ells_trimmed)\n\n # Apply leff mapping\n if leff is not None:\n ells_trimmed = leff\n lmax = leff_max\n\n # Evaluate the likelihood\n return joint_log_likelihood(ells_trimmed, theory_cl_matrices, noise_cl_matrices, obs_cl_matrices, lmax)\n" ]
[ [ "numpy.concatenate", "numpy.full", "numpy.array", "numpy.asarray", "numpy.zeros", "numpy.allclose", "numpy.loadtxt", "numpy.amax", "numpy.isfinite", "numpy.amin", "scipy.stats.wishart.logpdf", "numpy.diag" ] ]
akutkin/SACA
[ "b866b6d9465310d4cd5bb4d2e92595d918b681d0" ]
[ "vlbi_errors/model_bllac_ra.py" ]
[ "import numpy as np\nfrom uv_data import UVData\nfrom components import ModelImageComponent\nfrom model import Model\nfrom from_fits import create_model_from_fits_file\nfrom utils import mas_to_rad\nfrom stats import LnLikelihood\nfrom spydiff import import_difmap_model\nfrom scipy.optimize import minimize, fmin\n\n\n# uv_file = '/home/ilya/github/bck/jetshow/uvf/0716+714_raks01xg_C_LL_0060s_uva.fits'\nuv_file = '/home/ilya/github/bck/jetshow/uvf/2200+420_K_SVLBI.uvf'\nuvdata_ext = UVData(uv_file)\nuvdata_orig = UVData(uv_file)\n# clean_difmap('2200+420_K_SVLBI.uvf', 'bllac_cc.fits', 'I', (8192, 0.0035),\n# path='/home/ilya/github/bck/jetshow/uvf/',\n# path_to_script='/home/ilya/github/vlbi_errors/difmap/final_clean_nw',\n# show_difmap_output=True)\ncomps = import_difmap_model('/home/ilya/github/bck/jetshow/uvf/ell_c_ell.mdl')\next_model = Model(stokes='I')\next_model.add_component(comps[-1])\n# cc_fits = '/home/ilya/github/vlbi_errors/vlbi_errors/bllac_cc.fits'\n# fig = uvdata_ext.uvplot()\n# ccmodel = create_model_from_fits_file(cc_fits)\n# ccmodel.filter_components_by_r(r_max_mas=0.15)\nuvdata_ext.substitute([ext_model])\nuvdata_core = uvdata_orig - uvdata_ext\n# uvdata_core.save('/home/ilya/github/vlbi_errors/vlbi_errors/bllac_core.uvf')\n\n# Set up ModelImage component\nimage = '/home/ilya/github/bck/jetshow/cmake-build-debug/map_i.txt'\nimage = np.loadtxt(image)\nimsize = 1734\nimsize = (imsize, imsize)\nmas_in_pix = 0.00253\ny, z = np.meshgrid(np.arange(imsize[0]), np.arange(imsize[1]))\ny = y - imsize[0] / 2. + 0.5\nz = z - imsize[0] / 2. + 0.5\ny_mas = y * mas_in_pix\nz_mas = z * mas_in_pix\ny_rad = mas_to_rad * y_mas\nz_rad = mas_to_rad * z_mas\nimage[image < 0] = 0\nimage[image > 10.0] = 0\nimage[image < np.percentile(image[image > 0].ravel(), 90)] = 0\nicomp = ModelImageComponent(image, y_rad[0, :], z_rad[:, 0])\nmodel = Model(stokes='I')\nmodel.add_component(icomp)\nuv = uvdata_core.uv\n\nlnlik = LnLikelihood(uvdata_core, model, average_freq=True, amp_only=False)\n\nimport emcee\n\n\ndef lnprior(p):\n if not 1.0 < p[0] < 5.0:\n return -np.inf\n if not -20 < p[1] < 20:\n return -np.inf\n if not -20 < p[2] < 20:\n return -np.inf\n if not 0.1 < p[3] < 2.0:\n return -np.inf\n if not 0.0 < p[4] < 2*np.pi:\n return -np.inf\n return 0.0\n\n\ndef lnpost(p):\n lp = lnprior(p)\n if not np.isfinite(lp):\n return -np.inf\n else:\n return lnlik(p) + lp\n\n\np0 = [1.0, 0.0, 0.0, 1.0, 3.14]\nfrom emcee.utils import sample_ball\nndim = 5\nnwalkers = 24\np = sample_ball(p0, [0.2, 3, 3, 0.2, 0.5], nwalkers)\nsampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost, threads=4)\npos, prob, state = sampler.run_mcmc(p, 20)\nprint(\"Reseting sampler\")\nsampler.reset()\npos, lnp, _ = sampler.run_mcmc(pos, 50)\n\n # for angle in np.linspace(0, 2*np.pi, 12):\n# print(angle, lnlik(np.array([1., 0, 0, 1., angle])))\n#\n# from hyperopt import hp, fmin, tpe, STATUS_OK, Trials\n# import hyperopt\n#\n#\n# def objective(space):\n# neglnlik = -lnlik(np.array([space['flux'], space['x'], space['y'], space['scale'], space['angle']]))\n# print(\"Negative lnlike: {}\".format(neglnlik))\n# return {'loss': neglnlik, 'status': STATUS_OK}\n#\n#\n# space = {'flux': hp.loguniform('flux', -0.69, 2.0),\n# 'x': hp.uniform('x', -20, 20),\n# 'y': hp.uniform('y', -20, 20),\n# 'scale': hp.loguniform('scale', -2.3, 0.69),\n# 'angle': hp.uniform('angle', 0, 2*np.pi)}\n#\n# trials = Trials()\n# best = fmin(fn=objective,\n# space=space,\n# algo=tpe.suggest,\n# max_evals=300,\n# trials=trials)\n#\n# print(hyperopt.space_eval(space, 
best))\n\n\n# p_ml = fmin(lambda p: -lnlik(p), model.p)\n\n# # TODO: Implement analitical grad of likelihood (it's gaussian)\n# fit = minimize(lambda p: -lnlik(p), model.p, method='L-BFGS-B',\n# options={'factr': 10**12, 'eps': 0.2, 'disp': True},\n# bounds=[(0.5, 2), (-20, 20), (-20, 20), (0.5, 2),\n# (2.4, 2.9)])\n# if fit['success']:\n# print(\"Succesful fit!\")\n# p_ml = fit['x']\n# print(p_ml)\n# fig.savefig('/home/ilya/github/bck/jetshow/uvf_mf_adds/ra.png',\n# bbox_inches='tight', dpi=300)" ]
[ [ "numpy.loadtxt", "numpy.arange", "numpy.isfinite" ] ]
Axeln78/ogb
[ "c7a3f06d1c9d1d506e52467ed07ec9dcdbc2b583" ]
[ "ogb/io/read_graph_raw.py" ]
[ "import pandas as pd\nimport os.path as osp\nimport os\nimport numpy as np\nfrom ogb.utils.url import decide_download, download_url, extract_zip\nfrom tqdm import tqdm\n\n### reading raw files from a directory.\ndef read_csv_graph_raw(raw_dir, add_inverse_edge = True, additional_node_files = [], additional_edge_files = []):\n '''\n raw_dir: path to the raw directory\n add_inverse_edge (bool): whether to add inverse edge or not\n\n return: graph_list, which is a list of graphs.\n Each graph is a dictionary, containing edge_index, edge_feat, node_feat, and num_nodes\n edge_feat and node_feat are optional: if a graph does not contain it, we will have None.\n\n additional_node_files and additional_edge_files must be in the raw directory.\n \n the name should be {additional_node_file, additional_edge_file}.csv.gz\n the length should be num_nodes or num_edges\n\n \n '''\n\n print('Loading necessary files...')\n # loading necessary files\n try:\n edge = pd.read_csv(osp.join(raw_dir, \"edge.csv.gz\"), compression=\"gzip\", header = None).values.T.astype(np.int64) # (2, num_edge) numpy array\n num_node_list = pd.read_csv(osp.join(raw_dir, \"num-node-list.csv.gz\"), compression=\"gzip\", header = None).values.T[0].astype(np.int64).tolist() # (num_graph, ) python list\n num_edge_list = pd.read_csv(osp.join(raw_dir, \"num-edge-list.csv.gz\"), compression=\"gzip\", header = None).values.T[0].astype(np.int64).tolist() # (num_edge, ) python list\n except:\n raise RuntimeError(\"No necessary file\")\n\n try:\n node_feat = pd.read_csv(osp.join(raw_dir, \"node-feat.csv.gz\"), compression=\"gzip\", header = None).values\n if 'int' in str(node_feat.dtype):\n node_feat = node_feat.astype(np.int64)\n else:\n # float\n node_feat = node_feat.astype(np.float32)\n except:\n node_feat = None\n\n try:\n edge_feat = pd.read_csv(osp.join(raw_dir, \"edge-feat.csv.gz\"), compression=\"gzip\", header = None).values\n if 'int' in str(edge_feat.dtype):\n edge_feat = edge_feat.astype(np.int64)\n else:\n #float\n edge_feat = edge_feat.astype(np.float32)\n\n except:\n edge_feat = None\n\n\n additional_node_info = {} \n for additional_file in additional_node_files:\n temp = pd.read_csv(osp.join(raw_dir, additional_file + \".csv.gz\"), compression=\"gzip\", header = None).values\n if 'int' in str(temp.dtype):\n additional_node_info[additional_file] = temp.astype(np.int64)\n else:\n # float\n additional_node_info[additional_file] = temp.astype(np.float32)\n\n additional_edge_info = {} \n for additional_file in additional_edge_files:\n temp = pd.read_csv(osp.join(raw_dir, additional_file + \".csv.gz\"), compression=\"gzip\", header = None).values\n if 'int' in str(temp.dtype):\n additional_edge_info[additional_file] = temp.astype(np.int64)\n else:\n # float\n additional_edge_info[additional_file] = temp.astype(np.float32)\n\n\n graph_list = []\n num_node_accum = 0\n num_edge_accum = 0\n\n print('Processing graphs...')\n for num_node, num_edge in tqdm(zip(num_node_list, num_edge_list), total=len(num_node_list)):\n\n graph = dict()\n\n ### handling edge\n if add_inverse_edge:\n ### duplicate edge\n duplicated_edge = np.repeat(edge[:, num_edge_accum:num_edge_accum+num_edge], 2, axis = 1)\n duplicated_edge[0, 1::2] = duplicated_edge[1,0::2]\n duplicated_edge[1, 1::2] = duplicated_edge[0,0::2]\n\n graph[\"edge_index\"] = duplicated_edge\n\n if edge_feat is not None:\n graph[\"edge_feat\"] = np.repeat(edge_feat[num_edge_accum:num_edge_accum+num_edge], 2, axis = 0)\n else:\n graph[\"edge_feat\"] = None\n\n for key, value in 
additional_edge_info.items():\n graph[key] = np.repeat(value[num_edge_accum:num_edge_accum+num_edge], 2, axis = 0)\n\n else:\n graph[\"edge_index\"] = edge[:, num_edge_accum:num_edge_accum+num_edge]\n\n if edge_feat is not None:\n graph[\"edge_feat\"] = edge_feat[num_edge_accum:num_edge_accum+num_edge]\n else:\n graph[\"edge_feat\"] = None\n\n for key, value in additional_edge_info.items():\n graph[key] = value[num_edge_accum:num_edge_accum+num_edge]\n\n num_edge_accum += num_edge\n\n ### handling node\n if node_feat is not None:\n graph[\"node_feat\"] = node_feat[num_node_accum:num_node_accum+num_node]\n else:\n graph[\"node_feat\"] = None\n\n for key, value in additional_node_info.items():\n graph[key] = value[num_node_accum:num_node_accum+num_node]\n\n\n graph[\"num_nodes\"] = num_node\n num_node_accum += num_node\n\n graph_list.append(graph)\n\n return graph_list\n\n\n\nif __name__ == \"__main__\":\n pass\n\n\n" ]
[ [ "numpy.repeat" ] ]
savannahwild/colony-com
[ "55164b51b8ec3fc2023f29dc14ab387de5cd3159" ]
[ "examples/example_simple.py" ]
[ "from plate import Plate\nfrom species import Species\nimport numpy as np\nimport helper_functions\n\n\ndef main():\n ## experimental parameters\n D = 3E-3 # nutrient diffusion coeff (#mm2/min)\n rho_n = 0.3 # consumption rate of nutrients by X\n rc = 6E-3 # growth rate of X on N\n Dc = 1E-5 # cell diffusion coefficient\n w = 1\n Da = 0.03\n rho_A = 0.1 # production rate of AHL\n\n environment_size = (20, 20)\n plate = Plate(environment_size)\n\n ## add nutrient to the plate\n U_N = np.ones(environment_size)\n N = Species(\"N\", U_N)\n def N_behaviour(species, params):\n ## unpack params\n D, rho_n, Dc, rc, w = params\n n = D * helper_functions.ficks(species['N'], w) - rho_n * species['N'] * species['X']\n return n\n N.set_behaviour(N_behaviour)\n plate.add_species(N)\n\n ## add one strain to the plate\n U_X = np.zeros(environment_size)\n X_pos = [[10., 10.]]\n X_radius = 0.75\n X_coordinates = helper_functions.get_node_coordinates(X_pos,\n X_radius,\n environment_size[0],\n environment_size[1],\n w)\n rows = X_coordinates[:, 0]\n cols = X_coordinates[:, 1]\n U_X[rows, cols] = 0.001\n strain = Species(\"X\", U_X)\n def X_behaviour(species, params):\n ## unpack params\n D, rho_n, Dc, rc, w = params\n x = Dc * helper_functions.ficks(species['X'], w) + rc * species['N'] * species['X']\n return x\n strain.set_behaviour(X_behaviour)\n plate.add_species(strain)\n\n ## run the experiment\n params = (D, rho_n, Dc, rc, w)\n sim = plate.run(t_final = 1000,\n dt = .1,\n params = params)\n\n ## plotting\n tp = np.arange(0, 18, 2)\n plate.plot_simulation(sim, tp)\n\nmain()" ]
[ [ "numpy.ones", "numpy.arange", "numpy.zeros" ] ]
gwaygenomics/2018_05_30_ResistanceMechanisms_Kapoor
[ "62d31c133fbf02954c28cade32319b37ccc0f359" ]
[ "0.generate-profiles/scripts/nbconverted/normalize-flawed-plate-design.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # Correct for flawed plate design\n# \n# We collected many plates in batches 9 and 10, but unfortunately, we applied only one perturbation per plate. This prevents us from using our standard per-plate normalization strategy.\n# \n# We perform a different normalization strategy here.\n\n# In[1]:\n\n\nimport pathlib\nimport numpy as np\nimport pandas as pd\n\nimport statsmodels.api as sm\nfrom sklearn import decomposition\nfrom statsmodels.formula.api import ols\n\nimport plotnine as gg\n\nfrom pycytominer import normalize\nfrom pycytominer.cyto_utils import infer_cp_features\n\n\n# In[2]:\n\n\ndef test_batch_effect_contribution(df, n_components, pca_columns, model_formula):\n features = infer_cp_features(df)\n meta_features = infer_cp_features(df, metadata=True)\n\n feature_df = df.loc[:, features]\n\n pca = decomposition.PCA(n_components=n_components).fit(feature_df)\n pca_batch_df = pca.transform(feature_df)\n\n pca_batch_df = pd.concat(\n [\n df.loc[:, meta_features],\n pd.DataFrame(pca_batch_df, columns=pca_columns),\n ], axis=\"columns\"\n )\n \n melt_df = pd.melt(\n pca_batch_df,\n id_vars=meta_features,\n value_vars=pca_columns,\n var_name=\"pca_component\",\n value_name=\"pca_value\"\n )\n \n anova_results = []\n for pca_component in pca_columns:\n subset_melt_df = melt_df.query(\"pca_component == @pca_component\")\n\n # Setup model\n model = ols(model_formula, data=subset_melt_df).fit()\n\n # Generate ANOVA table\n anova_table = (\n sm.stats.anova_lm(model, typ=2)\n .reset_index()\n .rename({\"index\": \"factor\"}, axis=\"columns\")\n .assign(pca=pca_component)\n )\n anova_results.append(anova_table)\n\n anova_results = pd.concat(anova_results).reset_index(drop=True).dropna()\n anova_results = anova_results.assign(neg_log_p=-np.log10(anova_results.loc[:, \"PR(>F)\"]), batch=batch)\n\n anova_results.pca = pd.Categorical(anova_results.pca, categories=pca_columns)\n anova_results = anova_results.assign(component_number=[int(x.split(\"_\")[1]) for x in anova_results.pca])\n return anova_results\n\n\n# In[3]:\n\n\n# Load profiles\nprofile_dir = pathlib.Path(\"profiles/\")\nstandard_file_suffix = \"_normalized.csv.gz\"\n\nprofile_data = {\n \"2019_11_22_Batch7\": {\n \"plates\": [],\n \"file_suffix\": standard_file_suffix\n },\n \"2020_07_02_Batch8\": {\n \"plates\": [],\n \"file_suffix\": standard_file_suffix\n },\n \"2020_08_24_Batch9\": {\n \"plates\": [],\n \"file_suffix\": standard_file_suffix\n },\n \"2020_09_08_Batch10\": {\n \"plates\": [],\n \"file_suffix\": standard_file_suffix\n },\n}\n\nfor batch in profile_data:\n batch_dir = pathlib.Path(f\"{profile_dir}/{batch}\")\n \n for plate_file in batch_dir.iterdir():\n plate_name = plate_file.name\n \n if \".DS_Store\" in plate_name:\n continue\n \n file_suffix = profile_data[batch][\"file_suffix\"]\n plate_file = pathlib.Path(f\"{plate_file}/{plate_name}{file_suffix}\")\n plate_df = pd.read_csv(plate_file)\n\n plate_df = (\n plate_df\n .assign(\n Metadata_batch=batch,\n Metadata_clone_type=\"Resistant\",\n Metadata_profile_number=[\n f\"profile_{plate_name}_{batch}_{x}\" for x in range(0, plate_df.shape[0])\n ]\n )\n )\n \n plate_df.loc[plate_df.Metadata_clone_number.str.contains(\"WT\"), \"Metadata_clone_type\"] = \"Sensitive\"\n\n profile_data[batch][\"plates\"].append(plate_df)\n \n# Combine profiles\nfor batch in profile_data:\n profile_data[batch][\"plates\"] = pd.concat(profile_data[batch][\"plates\"]).reset_index(drop=True)\n\n\n# In[4]:\n\n\n# Detect the impact of batch - is it 
necessary to adjust?\nn_components = 20\npca_columns = [f\"pca_{x}\" for x in range(0, n_components)]\nmodel_formula = \"pca_value ~ Metadata_clone_number + Metadata_clone_type + Metadata_treatment + Metadata_Plate + Metadata_treatment * Metadata_Plate\"\n\nanova_results_full = []\nfor batch in profile_data:\n df = profile_data[batch][\"plates\"]\n anova_results = test_batch_effect_contribution(\n df, n_components, pca_columns, model_formula\n )\n anova_results_full.append(anova_results)\n \nanova_results_full_df = pd.concat(anova_results_full).reset_index(drop=True)\n\nprint(anova_results_full_df.shape)\nanova_results_full_df.head()\n\n\n# In[5]:\n\n\nnormalized_gg = (\n gg.ggplot(anova_results_full_df, gg.aes(x=\"component_number\", y=\"F\", color=\"factor\")) +\n gg.geom_point(gg.aes(size=\"neg_log_p\")) +\n gg.geom_line() +\n gg.theme_bw() +\n gg.xlab(\"PCA Component\") +\n gg.ylab(\"ANOVA F Statistic\") +\n gg.facet_wrap(\"~batch\") +\n gg.theme(strip_background=gg.element_rect(colour=\"black\", fill=\"#fdfff4\")) +\n gg.ggtitle(\"Existing Normalized Profiles\")\n)\n\noutput_file = pathlib.Path(\"figures_batch_effect/normalized_pca_anova_batch_effects.png\")\nnormalized_gg.save(output_file, height=4, width=6)\n\nnormalized_gg\n\n\n# ## Try combining \"matched\" plates before normalization\n\n# In[6]:\n\n\nfile_suffix = \"_augmented.csv.gz\"\n\nmatched_plates = {\n \"2020_08_24_Batch9\": {\n \"CB5083\": {\n \"plates\": [\"218775\", \"218774\", \"218697\", \"218696\"],\n \"data\": []\n },\n \"ixazomib\": {\n \"plates\": [\"218699\", \"218698\"],\n \"data\":[]\n },\n },\n \"2020_09_08_Batch10\": {\n \"CB5083\": {\n \"plates\": [\"218852\", \"218853\", \"218856\", \"218857\"],\n \"data\": []\n },\n \"ixazomib\": {\n \"plates\": [\"218854\", \"218855\", \"218858\", \"218859\"],\n \"data\": []\n }\n }\n}\n\nfor batch in matched_plates:\n compound_matches = matched_plates[batch]\n for compound in compound_matches:\n batch_dir = pathlib.Path(f\"{profile_dir}/{batch}\")\n \n for plate_name in compound_matches[compound][\"plates\"]:\n if \".DS_Store\" in plate_name:\n continue\n\n plate_file = pathlib.Path(f\"{batch_dir}/{plate_name}/{plate_name}{file_suffix}\")\n matched_plates[batch][compound][\"data\"].append(pd.read_csv(plate_file).assign(Metadata_batch=batch))\n\n# Combine profiles\nfor batch in matched_plates:\n compound_matches = matched_plates[batch]\n for compound in compound_matches:\n matched_plates[batch][compound][\"data\"] = (\n pd.concat(matched_plates[batch][compound][\"data\"])\n .reset_index(drop=True)\n )\n\n\n# In[7]:\n\n\n# Normalize profiles\nfor batch in matched_plates:\n compound_matches = matched_plates[batch]\n for compound in compound_matches:\n df = matched_plates[batch][compound][\"data\"]\n normalized_data = normalize(\n profiles=df,\n features=\"infer\",\n meta_features=\"infer\",\n samples=\"all\",\n method=\"standardize\"\n )\n \n matched_plates[batch][compound][\"normalized_data\"] = normalized_data\n\n\n# In[8]:\n\n\n# Detect the impact of batch - is it necessary to adjust?\nn_components = 20\npca_columns = [f\"pca_{x}\" for x in range(0, n_components)]\nmodel_formula = \"pca_value ~ Metadata_clone_number + Metadata_treatment + Metadata_Plate + Metadata_treatment * Metadata_Plate\"\n\nanova_results_full_new_normalized = []\nfor batch in matched_plates:\n compound_matches = matched_plates[batch]\n for compound in compound_matches:\n df = matched_plates[batch][compound][\"normalized_data\"]\n \n anova_results = test_batch_effect_contribution(\n df, n_components, 
pca_columns, model_formula\n )\n \n anova_results_full_new_normalized.append(anova_results.assign(compound=compound))\n \nanova_results_full_new_normalized_df = pd.concat(anova_results_full_new_normalized).reset_index(drop=True)\n\nprint(anova_results_full_new_normalized_df.shape)\nanova_results_full_new_normalized_df.head()\n\n\n# In[9]:\n\n\nnaive_attempt_gg = (\n gg.ggplot(\n anova_results_full_new_normalized_df,\n gg.aes(x=\"component_number\", y=\"F\", color=\"factor\")\n ) +\n gg.geom_point(gg.aes(size=\"neg_log_p\")) +\n gg.geom_line() +\n gg.theme_bw() +\n gg.xlab(\"PCA Component\") +\n gg.ylab(\"ANOVA F Statistic\") +\n gg.facet_grid(\"compound~batch\") +\n gg.theme(strip_background=gg.element_rect(colour=\"black\", fill=\"#fdfff4\")) +\n gg.ggtitle(\"Concatenate Augmented and Standardize\")\n)\n\noutput_file = pathlib.Path(\"figures_batch_effect/naive_correction_pca_anova_batch_effects.png\")\nnaive_attempt_gg.save(output_file, height=4, width=6)\n\nnaive_attempt_gg\n\n" ]
[ [ "pandas.DataFrame", "pandas.Categorical", "pandas.concat", "numpy.log10", "pandas.melt", "sklearn.decomposition.PCA", "pandas.read_csv" ] ]
OpenGridMap/power-grid-detection
[ "221fcf0461dc869c8c64b11fa48596f83c20e1c8" ]
[ "utils/img/helpers.py" ]
[ "import os\n\nimport numpy as np\nfrom PIL import Image\nfrom shapely.geometry import Polygon, MultiPolygon\n\nimport config\nfrom utils.dataset.annotations import annotations_iter, get_rect_from_annotation\nfrom utils.geo.coordinate import Coordinate\n\n\ndef crop_rect(im_src, x, y, width, height, dest_path=None):\n try:\n box = get_coord_from_rect_box(x, y, width, height)\n im = Image.new(im_src.mode, (width, height))\n cropped_region = im_src.crop(box)\n im.paste(cropped_region, (0, 0))\n\n if dest_path is not None:\n if not os.path.exists(dest_path):\n im.save(dest_path, 'JPEG')\n else:\n print('%s already exists' % dest_path)\n return im\n except Exception as e:\n print(e)\n raise e\n\n\ndef get_coord_from_rect_box(x, y, width, height):\n return map(int, [x, y, x + width, y + height])\n\n\ndef get_polygon_from_rect_box(x, y, width, height):\n return Polygon([\n (x, y),\n (x + width, y),\n (x + width, y + height),\n (x, y + height)\n ])\n\n\ndef get_polygon_from_coord(x1, y1, x2, y2):\n return get_polygon_from_rect_box(x1, y1, x2 - x1, y2 - y1)\n\n\ndef crop_annotated_region(im_src, annotation, path):\n x, y, width, height = get_rect_from_annotation(annotation)\n crop_rect(im_src, x, y, width, height, path)\n\n\ndef crop_negative_samples(im_src, annotations, samples_per_image, basename, samples_dir):\n boxes = [get_rect_from_annotation(annotation) for annotation in annotations_iter(annotations)]\n annotated_regions = get_multipolygon_from_boxes(boxes)\n\n z = int(basename.split('_')[-1])\n tiles_count = Coordinate.get_tiles_count(z)\n n_samples = 0\n\n # width, height = boxes[0][-2:]\n width, height = 140, 140\n\n while True:\n x, y = np.random.randint(0, im_src.size[0], (2,))\n rect = get_polygon_from_rect_box(x, y, width, height)\n\n if not rect.intersects(annotated_regions):\n if 0 <= x <= tiles_count[0] * 256 - width and 0 <= y <= tiles_count[1] * 256 - height:\n path = os.path.join(samples_dir, '%s_%d.jpg' % (basename, n_samples))\n crop_rect(im_src, x, y, width, height, path)\n n_samples += 1\n\n boxes.append([x, y, width, height])\n annotated_regions = get_multipolygon_from_boxes(boxes)\n\n if n_samples >= samples_per_image:\n break\n\n\ndef get_multipolygon_from_boxes(boxes):\n return MultiPolygon([get_polygon_from_rect_box(*box) for box in boxes])\n\n\ndef crop_positive_sample_windows(im_src, annotation, basename, window_res=(48, 48), step_size=12):\n positive_samples_dir = os.path.join(config.positive_samples_dir)\n # positive_samples_dir = os.path.join(config.positive_samples_dir, str(window_res[0]))\n x, y, width, height = get_rect_from_annotation(annotation)\n\n if not os.path.exists(positive_samples_dir):\n os.makedirs(positive_samples_dir)\n\n if width < window_res[0]:\n diff = window_res[0] - width\n\n width_offset = diff\n x_offset = - diff / 2\n\n width += width_offset\n x += x_offset\n\n if x + width > im_src.size[0]:\n diff = x + width - im_src.size[0]\n x -= diff\n\n if x < 0:\n diff = x\n x -= diff\n\n if height < window_res[1]:\n diff = window_res[1] - height\n\n height_offset = diff\n y_offset = - diff / 2\n\n height += height_offset\n y += y_offset\n\n if y + height > im_src.size[1]:\n diff = y + height - im_src.size[1]\n y -= diff\n\n if y < 0:\n diff = y\n y -= diff\n\n im = crop_rect(im_src, x, y, width, height)\n\n for x_w, y_w, img in sliding_window(np.asarray(im), window_res, step_size):\n if img.shape[0] == window_res[0] and img.shape[1] == window_res[1]:\n filename = '%s_%d_%d.jpg' % (basename, x_w, y_w)\n path = os.path.join(positive_samples_dir, 
filename)\n\n im_window = Image.fromarray(img)\n im_window.save(path, 'JPEG')\n\n\ndef crop_negative_sample_windows(im_src, annotations, basename, samples_per_image, window_res=(48, 48)):\n negative_samples_dir = os.path.join(config.negative_samples_dir)\n # negative_samples_dir = os.path.join(config.negative_samples_dir, str(window_res[0]))\n boxes = [get_rect_from_annotation(annotation) for annotation in annotations_iter(annotations)]\n annotated_regions = MultiPolygon([get_polygon_from_rect_box(*box) for box in boxes])\n n_samples = 0\n\n if not os.path.exists(negative_samples_dir):\n os.makedirs(negative_samples_dir)\n\n while True:\n x, y = np.random.randint(0, im_src.size[0], (2,))\n rect = get_polygon_from_rect_box(x, y, *window_res)\n\n if not rect.intersects(annotated_regions) and 0 <= x <= 768 - window_res[0] and 0 <= y <= 768 - window_res[0]:\n path = os.path.join(negative_samples_dir, '%s_%d.jpg' % (basename, n_samples))\n crop_rect(im_src, x, y, window_res[0], window_res[1], path)\n n_samples += 1\n\n if n_samples >= samples_per_image:\n break\n\n\ndef sliding_window(image, window_res, step_size):\n # slide a window across the image\n for y in range(0, image.shape[0], step_size):\n for x in range(0, image.shape[1], step_size):\n # yield the current window\n yield (x, y, image[y:y + window_res[1], x:x + window_res[0]])\n" ]
[ [ "numpy.random.randint", "numpy.asarray" ] ]
virajkanwade/spyder
[ "fee3df9d922f4ff66103218f54bc41d99e2fae53" ]
[ "spyder/plugins/plots/widgets/tests/test_plots_widgets.py" ]
[ "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright © Spyder Project Contributors\n#\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n# -----------------------------------------------------------------------------\n\n\"\"\"\nTests for the widgets used in the Plots plugin.\n\"\"\"\n\n# Standard library imports\nimport os.path as osp\nfrom unittest.mock import Mock\n\n# Third party imports\nimport pytest\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nimport numpy as np\nfrom qtpy.QtWidgets import QApplication, QStyle\nfrom qtpy.QtGui import QPixmap\nfrom qtpy.QtCore import Qt\n\n# Local imports\nfrom spyder.plugins.plots.widgets.figurebrowser import (FigureBrowser,\n FigureThumbnail)\n\n\n# =============================================================================\n# ---- Fixtures\n# =============================================================================\[email protected]\ndef figbrowser(qtbot):\n \"\"\"An empty figure browser widget fixture.\"\"\"\n figbrowser = FigureBrowser()\n figbrowser.set_shellwidget(Mock())\n figbrowser.setup(mute_inline_plotting=True, show_plot_outline=False,\n auto_fit_plotting=False)\n qtbot.addWidget(figbrowser)\n figbrowser.show()\n figbrowser.setMinimumSize(700, 500)\n return figbrowser\n\n\n# =============================================================================\n# ---- Helper functions\n# =============================================================================\ndef create_figure(figname):\n \"\"\"Create a matplotlib figure, save it to disk and return its data.\"\"\"\n # Create and save to disk a figure with matplotlib.\n fig = Figure()\n canvas = FigureCanvasAgg(fig)\n ax = fig.add_axes([0.15, 0.15, 0.7, 0.7])\n fig.set_size_inches(6, 4)\n ax.plot(np.random.rand(10), '.', color='red')\n fig.savefig(figname)\n\n # Read back and return the binary data from the file.\n with open(figname, \"rb\") as img:\n fig = img.read()\n return fig\n\n\ndef add_figures_to_browser(figbrowser, nfig, tmpdir, fmt='image/png'):\n \"\"\"\n Create and add bitmap figures to the figure browser. 
Also return a list\n of the created figures data.\n \"\"\"\n fext = '.svg' if fmt == 'image/svg+xml' else '.png'\n figs = []\n for i in range(nfig):\n figname = osp.join(str(tmpdir), 'mplfig' + str(i) + fext)\n figs.append(create_figure(figname))\n figbrowser._handle_new_figure(figs[-1], fmt)\n\n assert len(figbrowser.thumbnails_sb._thumbnails) == nfig\n assert figbrowser.thumbnails_sb.get_current_index() == nfig - 1\n assert figbrowser.thumbnails_sb.current_thumbnail.canvas.fig == figs[-1]\n assert figbrowser.figviewer.figcanvas.fig == figs[-1]\n\n return figs\n\n\ndef png_to_qimage(png):\n \"\"\"Return a QImage from the raw data of a png image.\"\"\"\n qpix = QPixmap()\n qpix.loadFromData(png, 'image/png'.upper())\n return qpix.toImage()\n\n\n# =============================================================================\n# ---- Tests\n# =============================================================================\[email protected]\[email protected](\"fmt, fext\",\n [('image/png', '.png'), ('image/svg+xml', '.svg')])\ndef test_handle_new_figures(figbrowser, tmpdir, fmt, fext):\n \"\"\"\n Test that the figure browser widget display correctly new figures in\n its viewer and thumbnails scrollbar.\n \"\"\"\n assert len(figbrowser.thumbnails_sb._thumbnails) == 0\n assert figbrowser.thumbnails_sb.current_thumbnail is None\n assert figbrowser.figviewer.figcanvas.fig is None\n\n for i in range(3):\n figname = osp.join(str(tmpdir), 'mplfig' + str(i) + fext)\n fig = create_figure(figname)\n figbrowser._handle_new_figure(fig, fmt)\n assert len(figbrowser.thumbnails_sb._thumbnails) == i + 1\n assert figbrowser.thumbnails_sb.get_current_index() == i\n assert figbrowser.thumbnails_sb.current_thumbnail.canvas.fig == fig\n assert figbrowser.figviewer.figcanvas.fig == fig\n\n\[email protected](\"fmt, fext\",\n [('image/png', '.png'),\n ('image/svg+xml', '.svg'),\n ('image/svg+xml', '.png')])\ndef test_save_figure_to_file(figbrowser, tmpdir, mocker, fmt, fext):\n \"\"\"\n Test saving png and svg figures to file with the figure browser.\n \"\"\"\n fig = add_figures_to_browser(figbrowser, 1, tmpdir, fmt)[0]\n expected_qpix = QPixmap()\n expected_qpix.loadFromData(fig, fmt.upper())\n\n # Save the figure to disk with the figure browser.\n saved_figname = osp.join(str(tmpdir), 'spyfig' + fext)\n mocker.patch('spyder.plugins.plots.widgets.figurebrowser.getsavefilename',\n return_value=(saved_figname, fext))\n\n figbrowser.save_figure()\n saved_qpix = QPixmap()\n saved_qpix.load(saved_figname)\n\n assert osp.exists(saved_figname)\n assert expected_qpix.toImage() == saved_qpix.toImage()\n\n\[email protected](\"fmt\", ['image/png', 'image/svg+xml'])\ndef test_save_all_figures(figbrowser, tmpdir, mocker, fmt):\n \"\"\"\n Test saving all figures contained in the thumbnail scrollbar in batch\n into a single directory.\n \"\"\"\n figs = add_figures_to_browser(figbrowser, 3, tmpdir, fmt)\n\n # Save all figures, but cancel the dialog to get a directory.\n mocker.patch(\n 'spyder.plugins.plots.widgets.figurebrowser.getexistingdirectory',\n return_value=None)\n fignames = figbrowser.save_all_figures()\n assert fignames is None\n\n # Save all figures.\n mocker.patch(\n 'spyder.plugins.plots.widgets.figurebrowser.getexistingdirectory',\n return_value=str(tmpdir.mkdir('all_saved_figures')))\n fignames = figbrowser.save_all_figures()\n assert len(fignames) == len(figs)\n for fig, figname in zip(figs, fignames):\n expected_qpix = QPixmap()\n expected_qpix.loadFromData(fig, fmt.upper())\n saved_qpix = QPixmap()\n 
saved_qpix.load(figname)\n\n assert osp.exists(figname)\n assert expected_qpix.toImage() == saved_qpix.toImage()\n\n\[email protected](\"fmt\", ['image/png', 'image/svg+xml'])\ndef test_close_current_figure(figbrowser, tmpdir, fmt):\n \"\"\"\n Test that clearing the current figure works as expected.\n \"\"\"\n figs = add_figures_to_browser(figbrowser, 2, tmpdir, fmt)\n\n # Remove the first figure.\n figbrowser.close_figure()\n assert len(figbrowser.thumbnails_sb._thumbnails) == 1\n assert figbrowser.thumbnails_sb.get_current_index() == 0\n assert figbrowser.thumbnails_sb.current_thumbnail.canvas.fig == figs[0]\n assert figbrowser.figviewer.figcanvas.fig == figs[0]\n\n # Remove the last figure.\n figbrowser.close_figure()\n assert len(figbrowser.thumbnails_sb._thumbnails) == 0\n assert figbrowser.thumbnails_sb.get_current_index() == -1\n assert figbrowser.thumbnails_sb.current_thumbnail is None\n assert figbrowser.figviewer.figcanvas.fig is None\n\n\[email protected](\"fmt\", ['image/png', 'image/svg+xml'])\ndef test_close_all_figures(figbrowser, tmpdir, fmt):\n \"\"\"\n Test that clearing all figures displayed in the thumbnails scrollbar\n works as expected.\n \"\"\"\n add_figures_to_browser(figbrowser, 3, tmpdir, fmt)\n\n # Close all previously opened figures.\n figbrowser.close_all_figures()\n assert len(figbrowser.thumbnails_sb._thumbnails) == 0\n assert figbrowser.thumbnails_sb.get_current_index() == -1\n assert figbrowser.thumbnails_sb.current_thumbnail is None\n assert figbrowser.figviewer.figcanvas.fig is None\n assert len(figbrowser.thumbnails_sb.findChildren(FigureThumbnail)) == 0\n\n\[email protected](\"fmt\", ['image/png', 'image/svg+xml'])\ndef test_close_one_thumbnail(figbrowser, tmpdir, fmt):\n \"\"\"\n Test the thumbnail is removed from the GUI.\n \"\"\"\n # Add two figures to the browser\n add_figures_to_browser(figbrowser, 2, tmpdir, fmt)\n assert len(figbrowser.thumbnails_sb.findChildren(FigureThumbnail)) == 2\n\n # Remove the first figure\n figures = figbrowser.thumbnails_sb.findChildren(FigureThumbnail)\n figbrowser.thumbnails_sb.remove_thumbnail(figures[0])\n\n assert len(figbrowser.thumbnails_sb.findChildren(FigureThumbnail)) == 1\n\n\[email protected](\"fmt\", ['image/png', 'image/svg+xml'])\ndef test_go_prev_next_thumbnail(figbrowser, tmpdir, fmt):\n \"\"\"\n Test go to previous and next thumbnail actions.\n \"\"\"\n figs = add_figures_to_browser(figbrowser, 3, tmpdir, fmt)\n\n # Circle through the open figures with go_next_thumbnail and\n # go_previous_thumbnail.\n figbrowser.go_next_thumbnail()\n assert figbrowser.thumbnails_sb.get_current_index() == 0\n assert figbrowser.thumbnails_sb.current_thumbnail.canvas.fig == figs[0]\n assert figbrowser.figviewer.figcanvas.fig == figs[0]\n\n figbrowser.go_previous_thumbnail()\n assert figbrowser.thumbnails_sb.get_current_index() == 2\n assert figbrowser.thumbnails_sb.current_thumbnail.canvas.fig == figs[2]\n assert figbrowser.figviewer.figcanvas.fig == figs[2]\n\n figbrowser.go_previous_thumbnail()\n assert figbrowser.thumbnails_sb.get_current_index() == 1\n assert figbrowser.thumbnails_sb.current_thumbnail.canvas.fig == figs[1]\n assert figbrowser.figviewer.figcanvas.fig == figs[1]\n\n\ndef test_scroll_to_item(figbrowser, tmpdir, qtbot):\n \"\"\"Test scroll to the item of ThumbnailScrollBar.\"\"\"\n nfig = 10\n add_figures_to_browser(figbrowser, nfig, tmpdir, 'image/png')\n figbrowser.setFixedSize(500, 500)\n\n for __ in range(nfig // 2):\n figbrowser.go_next_thumbnail()\n qtbot.wait(500)\n\n scene = 
figbrowser.thumbnails_sb.scene\n\n spacing = scene.verticalSpacing()\n height = scene.itemAt(0).sizeHint().height()\n height_view = figbrowser.thumbnails_sb.scrollarea.viewport().height()\n\n expected = (spacing * (nfig // 2)) + (height * (nfig // 2 - 1)) - \\\n ((height_view - height) // 2)\n\n vsb = figbrowser.thumbnails_sb.scrollarea.verticalScrollBar()\n assert vsb.value() == expected\n\n\ndef test_scroll_down_to_newest_plot(figbrowser, tmpdir, qtbot):\n \"\"\"\n Test that the ThumbnailScrollBar is scrolled to the newest plot after\n it is added to it.\n\n Test that covers spyder-ide/spyder#10914.\n \"\"\"\n figbrowser.setFixedSize(500, 500)\n\n nfig = 8\n for i in range(8):\n newfig = create_figure(\n osp.join(str(tmpdir), 'new_mplfig{}.png'.format(i)))\n figbrowser._handle_new_figure(newfig, 'image/png')\n qtbot.wait(500)\n\n # Assert that the scrollbar range was updated correctly and that it's\n # value was set to its maximum.\n height_view = figbrowser.thumbnails_sb.scrollarea.viewport().height()\n scene = figbrowser.thumbnails_sb.scene\n spacing = scene.verticalSpacing()\n height = scene.itemAt(0).sizeHint().height()\n\n expected = (spacing * (nfig - 1)) + (height * nfig) - height_view\n vsb = figbrowser.thumbnails_sb.scrollarea.verticalScrollBar()\n assert vsb.value() == expected\n\n\[email protected](\"fmt\", ['image/png', 'image/svg+xml'])\ndef test_mouse_clicking_thumbnails(figbrowser, tmpdir, qtbot, fmt):\n \"\"\"\n Test mouse clicking on thumbnails.\n \"\"\"\n figs = add_figures_to_browser(figbrowser, 3, tmpdir, fmt)\n for i in [1, 0, 2]:\n qtbot.mouseClick(\n figbrowser.thumbnails_sb._thumbnails[i].canvas, Qt.LeftButton)\n assert figbrowser.thumbnails_sb.get_current_index() == i\n assert figbrowser.thumbnails_sb.current_thumbnail.canvas.fig == figs[i]\n assert figbrowser.figviewer.figcanvas.fig == figs[i]\n\n\[email protected](\"fmt\", ['image/png', 'image/svg+xml'])\ndef test_save_thumbnails(figbrowser, tmpdir, qtbot, mocker, fmt):\n \"\"\"\n Test saving figures by clicking on the thumbnail icon.\n \"\"\"\n figs = add_figures_to_browser(figbrowser, 3, tmpdir, fmt)\n fext = '.svg' if fmt == 'image/svg+xml' else '.png'\n\n # Select and save the second thumbnail of the scrollbar.\n figname = osp.join(str(tmpdir), 'figname' + fext)\n mocker.patch('spyder.plugins.plots.widgets.figurebrowser.getsavefilename',\n return_value=(figname, fext))\n figbrowser.thumbnails_sb.set_current_index(1)\n qtbot.mouseClick(figbrowser.savefig_btn, Qt.LeftButton)\n\n expected_qpix = QPixmap()\n expected_qpix.loadFromData(figs[1], fmt.upper())\n saved_qpix = QPixmap()\n saved_qpix.load(figname)\n\n assert osp.exists(figname)\n assert expected_qpix.toImage() == saved_qpix.toImage()\n\n\[email protected](\"fmt\", ['image/png', 'image/svg+xml'])\ndef test_close_thumbnails(figbrowser, tmpdir, qtbot, mocker, fmt):\n \"\"\"\n Test closing figures by clicking on the thumbnail icon.\n \"\"\"\n figs = add_figures_to_browser(figbrowser, 3, tmpdir, fmt)\n\n # Select and close the second thumbnail of the scrollbar.\n figbrowser.thumbnails_sb.set_current_index(1)\n qtbot.mouseClick(figbrowser.closefig_btn, Qt.LeftButton)\n del figs[1]\n\n assert len(figbrowser.thumbnails_sb._thumbnails) == len(figs)\n assert figbrowser.thumbnails_sb._thumbnails[0].canvas.fig == figs[0]\n assert figbrowser.thumbnails_sb._thumbnails[1].canvas.fig == figs[1]\n\n\ndef test_copy_png_to_clipboard(figbrowser, tmpdir):\n \"\"\"\n Test copying png figures to the clipboard.\n \"\"\"\n figs = add_figures_to_browser(figbrowser, 3, tmpdir, 
'image/png')\n clipboard = QApplication.clipboard()\n\n # Copy the current figure (last thumbnail) to the clipboard.\n figbrowser.copy_figure()\n assert clipboard.image() == png_to_qimage(figs[-1])\n\n # Copy the first thumbnail to the clipboard.\n figbrowser.go_next_thumbnail()\n figbrowser.copy_figure()\n assert clipboard.image() == png_to_qimage(figs[0])\n\n\ndef test_copy_svg_to_clipboard(figbrowser, tmpdir):\n \"\"\"\n Test copying svg figures to the clipboard.\n \"\"\"\n figs = add_figures_to_browser(figbrowser, 3, tmpdir, 'image/svg+xml')\n clipboard = QApplication.clipboard()\n\n # Copy the current figure (last thumbnail) to the clipboard.\n figbrowser.copy_figure()\n assert clipboard.mimeData().data('image/svg+xml') == figs[-1]\n\n # Copy the first thumbnail to the clipboard.\n figbrowser.go_next_thumbnail()\n figbrowser.copy_figure()\n assert clipboard.mimeData().data('image/svg+xml') == figs[0]\n\n\[email protected](\"fmt\", ['image/png', 'image/svg+xml'])\ndef test_zoom_figure_viewer(figbrowser, tmpdir, fmt):\n \"\"\"\n Test zooming in and out the figure diplayed in the figure viewer.\n \"\"\"\n fig = add_figures_to_browser(figbrowser, 1, tmpdir, fmt)[0]\n figcanvas = figbrowser.figviewer.figcanvas\n\n # Set `Fit plots to windows` to False before the test.\n figbrowser.change_auto_fit_plotting(False)\n\n # Calculate original figure size in pixels.\n qpix = QPixmap()\n qpix.loadFromData(fig, fmt.upper())\n fwidth, fheight = qpix.width(), qpix.height()\n\n assert figbrowser.zoom_disp.value() == 100\n assert figcanvas.width() == fwidth\n assert figcanvas.height() == fheight\n\n # Zoom in and out the figure in the figure viewer.\n scaling_factor = 0\n scaling_step = figbrowser.figviewer._scalestep\n for zoom_step in [1, 1, -1, -1, -1]:\n if zoom_step == 1:\n figbrowser.zoom_in()\n elif zoom_step == -1:\n figbrowser.zoom_out()\n scaling_factor += zoom_step\n scale = scaling_step**scaling_factor\n\n assert (figbrowser.zoom_disp.value() ==\n np.round(int(fwidth * scale) / fwidth * 100))\n assert figcanvas.width() == int(fwidth * scale)\n assert figcanvas.height() == int(fheight * scale)\n\n\[email protected](\"fmt\", ['image/png', 'image/svg+xml'])\ndef test_autofit_figure_viewer(figbrowser, tmpdir, fmt):\n \"\"\"\n Test figure diplayed when `Fit plots to window` is True.\n \"\"\"\n fig = add_figures_to_browser(figbrowser, 1, tmpdir, fmt)[0]\n figviewer = figbrowser.figviewer\n figcanvas = figviewer.figcanvas\n\n # Calculate original figure size in pixels.\n qpix = QPixmap()\n qpix.loadFromData(fig, fmt.upper())\n fwidth, fheight = qpix.width(), qpix.height()\n\n # Test when `Fit plots to window` is set to True.\n # Otherwise, test should fall into `test_zoom_figure_viewer`\n figbrowser.change_auto_fit_plotting(True)\n\n size = figviewer.size()\n style = figviewer.style()\n width = (size.width() -\n style.pixelMetric(QStyle.PM_LayoutLeftMargin) -\n style.pixelMetric(QStyle.PM_LayoutRightMargin))\n height = (size.height() -\n style.pixelMetric(QStyle.PM_LayoutTopMargin) -\n style.pixelMetric(QStyle.PM_LayoutBottomMargin))\n if (fwidth / fheight) > (width / height):\n new_width = int(width)\n new_height = int(width / fwidth * fheight)\n else:\n new_height = int(height)\n new_width = int(height / fheight * fwidth)\n\n assert figcanvas.width() == new_width\n assert figcanvas.height() == new_height\n assert (figbrowser.zoom_disp.value() ==\n round(figcanvas.width() / fwidth * 100))\n\n\nif __name__ == \"__main__\":\n pytest.main()\n" ]
[ [ "matplotlib.figure.Figure", "matplotlib.backends.backend_agg.FigureCanvasAgg", "numpy.random.rand" ] ]
xingzix/Membership_Inference
[ "35f064e8fc584e3ff1e06ee5659874f14576cc30" ]
[ "code/svm.py" ]
[ "from sklearn.svm import LinearSVR\n\ndef sklearn_train_svm(X, y, C):\n return LinearSVR(C=C).fit(X, y)" ]
[ [ "sklearn.svm.LinearSVR" ] ]
michelle-aubin/pytorch-YOLOv4
[ "35472dee8d519e00bc369c2c221b08fc279a508f" ]
[ "save_darknet_as_pytorch.py" ]
[ "from tool import darknet2pytorch\nimport torch\nfrom tool.torch_utils import *\n\n# load weights from darknet format\nmodel = darknet2pytorch.Darknet('yolov4-obj.cfg', inference=True)\nmodel.load_weights('yolov4-obj_best.weights')\n\n# save weights to pytorch format\ntorch.save(model.state_dict(), 'yolov4-obj.pth')\n\n# reload weights from pytorch format\nmodel_pt = darknet2pytorch.Darknet('yolov4-obj.cfg', inference=True)\nmodel_pt.load_state_dict(torch.load('yolov4-obj.pth'))" ]
[ [ "torch.load" ] ]
shadowleaves/deep_learning
[ "4cb9cc79c706c611b35cb420bf87a5e245325494" ]
[ "theano/imdb.py" ]
[ "from __future__ import print_function\n# from six.moves import xrange\nimport six.moves.cPickle as pickle\n\nimport gzip\nimport os\n\nimport numpy\nimport theano\n\n\ndef prepare_data(seqs, labels, maxlen=None):\n \"\"\"Create the matrices from the datasets.\n\n This pad each sequence to the same lenght: the lenght of the\n longuest sequence or maxlen.\n\n if maxlen is set, we will cut all sequence to this maximum\n lenght.\n\n This swap the axis!\n \"\"\"\n # x: a list of sentences\n lengths = [len(s) for s in seqs]\n\n if maxlen is not None:\n new_seqs = []\n new_labels = []\n new_lengths = []\n for l, s, y in zip(lengths, seqs, labels):\n if l < maxlen:\n new_seqs.append(s)\n new_labels.append(y)\n new_lengths.append(l)\n lengths = new_lengths\n labels = new_labels\n seqs = new_seqs\n\n if len(lengths) < 1:\n return None, None, None\n\n n_samples = len(seqs)\n maxlen = numpy.max(lengths)\n\n x = numpy.zeros((maxlen, n_samples)).astype('int64')\n x_mask = numpy.zeros((maxlen, n_samples)).astype(theano.config.floatX)\n for idx, s in enumerate(seqs):\n x[:lengths[idx], idx] = s\n x_mask[:lengths[idx], idx] = 1.\n\n return x, x_mask, labels\n\n\ndef get_dataset_file(dataset, default_dataset, origin):\n '''Look for it as if it was a full path, if not, try local file,\n if not try in the data directory.\n\n Download dataset if it is not present\n\n '''\n data_dir, data_file = os.path.split(dataset)\n if data_dir == \"\" and not os.path.isfile(dataset):\n # Check if dataset is in the data directory.\n new_path = os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"data\",\n dataset\n )\n if os.path.isfile(new_path) or data_file == default_dataset:\n dataset = new_path\n\n if (not os.path.isfile(dataset)) and data_file == default_dataset:\n from six.moves import urllib\n print('Downloading data from %s' % origin)\n urllib.request.urlretrieve(origin, dataset)\n\n return dataset\n\n\ndef load_data(path=\"imdb.pkl\", n_words=100000, valid_portion=0.1, maxlen=None,\n sort_by_len=True):\n '''Loads the dataset\n\n :type path: String\n :param path: The path to the dataset (here IMDB)\n :type n_words: int\n :param n_words: The number of word to keep in the vocabulary.\n All extra words are set to unknow (1).\n :type valid_portion: float\n :param valid_portion: The proportion of the full train set used for\n the validation set.\n :type maxlen: None or positive int\n :param maxlen: the max sequence length we use in the train/valid set.\n :type sort_by_len: bool\n :name sort_by_len: Sort by the sequence lenght for the train,\n valid and test set. This allow faster execution as it cause\n less padding per minibatch. 
Another mechanism must be used to\n shuffle the train set at each epoch.\n\n '''\n\n #############\n # LOAD DATA #\n #############\n\n # Load the dataset\n path = get_dataset_file(\n path, \"imdb.pkl\",\n \"http://www.iro.umontreal.ca/~lisa/deep/data/imdb.pkl\")\n\n if path.endswith(\".gz\"):\n f = gzip.open(path, 'rb')\n else:\n f = open(path, 'rb')\n\n train_set = pickle.load(f)\n test_set = pickle.load(f)\n f.close()\n if maxlen:\n new_train_set_x = []\n new_train_set_y = []\n for x, y in zip(train_set[0], train_set[1]):\n if len(x) < maxlen:\n new_train_set_x.append(x)\n new_train_set_y.append(y)\n train_set = (new_train_set_x, new_train_set_y)\n del new_train_set_x, new_train_set_y\n\n # split training set into validation set\n train_set_x, train_set_y = train_set\n n_samples = len(train_set_x)\n sidx = numpy.random.permutation(n_samples)\n n_train = int(numpy.round(n_samples * (1. - valid_portion)))\n valid_set_x = [train_set_x[s] for s in sidx[n_train:]]\n valid_set_y = [train_set_y[s] for s in sidx[n_train:]]\n train_set_x = [train_set_x[s] for s in sidx[:n_train]]\n train_set_y = [train_set_y[s] for s in sidx[:n_train]]\n\n train_set = (train_set_x, train_set_y)\n valid_set = (valid_set_x, valid_set_y)\n\n def remove_unk(x):\n return [[1 if w >= n_words else w for w in sen] for sen in x]\n\n test_set_x, test_set_y = test_set\n valid_set_x, valid_set_y = valid_set\n train_set_x, train_set_y = train_set\n\n train_set_x = remove_unk(train_set_x)\n valid_set_x = remove_unk(valid_set_x)\n test_set_x = remove_unk(test_set_x)\n\n def len_argsort(seq):\n return sorted(range(len(seq)), key=lambda x: len(seq[x]))\n\n if sort_by_len:\n sorted_index = len_argsort(test_set_x)\n test_set_x = [test_set_x[i] for i in sorted_index]\n test_set_y = [test_set_y[i] for i in sorted_index]\n\n sorted_index = len_argsort(valid_set_x)\n valid_set_x = [valid_set_x[i] for i in sorted_index]\n valid_set_y = [valid_set_y[i] for i in sorted_index]\n\n sorted_index = len_argsort(train_set_x)\n train_set_x = [train_set_x[i] for i in sorted_index]\n train_set_y = [train_set_y[i] for i in sorted_index]\n\n train = (train_set_x, train_set_y)\n valid = (valid_set_x, valid_set_y)\n test = (test_set_x, test_set_y)\n\n return train, valid, test\n" ]
[ [ "numpy.max", "numpy.random.permutation", "numpy.zeros", "numpy.round" ] ]
Unathi-Skosana/qiskit-aqua
[ "e13f66eda6d8b819a6f132319a2bac819941f6b1" ]
[ "qiskit/aqua/algorithms/minimum_eigen_solvers/main.py" ]
[ "if __name__ == '__main__':\n import numpy as np\n from qiskit import Aer\n from qiskit.aqua.algorithms.minimum_eigen_solvers import VQSD\n from qiskit.quantum_info.states import Statevector\n from qiskit.aqua.components.initial_states import Custom\n from qiskit.aqua.components.optimizers import COBYLA\n from qiskit.aqua.operators import MatrixOperator\n from qiskit.aqua.algorithms.eigen_solvers import NumPyEigensolver\n from qiskit.aqua.components.variational_forms import RY\n from qiskit.quantum_info import partial_trace\n\n num_ancillae = 1\n state_vector = np.sqrt(6/10) * Statevector.from_label('+1+') \\\n + np.sqrt(4/10) * Statevector.from_label('-0-')\n initial_state = Custom(state_vector.num_qubits,\n state_vector=state_vector.data)\n vqsd_obj = VQSD(initial_state, q=.25, num_ancillae=num_ancillae,\n quantum_instance=Aer.get_backend(\"qasm_simulator\"),\n optimizer=COBYLA(), var_form=RY(initial_state._num_qubits -\n num_ancillae,\n depth=2))\n result = vqsd_obj.run(shots=1000)\n\n print(\"=== VQSD ===\")\n print(result.eigenvalue)\n print(result.eigenstate)\n\n print(\"== Exact ===\")\n density_mat = state_vector.to_operator()\n subsystem_density_mat = \\\n partial_trace(MatrixOperator(density_mat.data).dense_matrix, [2])\n exact_sys = NumPyEigensolver(MatrixOperator(subsystem_density_mat.data), k=4).run()\n eigvals = exact_sys['eigenvalues']\n eigvecs = exact_sys['eigenstates']\n\n print(eigvals)\n print(eigvecs)\n\n ss = 0\n for i in range(0, 4):\n projector = np.eye(4) - \\\n np.outer(np.transpose(np.conj(result.eigenstate[i])),\n result.eigenstate[i])\n s = projector @ subsystem_density_mat.data @ result.eigenstate[i]\n ss += np.inner(s,s)\n print(\"=== Error ===\")\n print(ss)\n" ]
[ [ "numpy.inner", "numpy.conj", "numpy.sqrt", "numpy.eye" ] ]
akleb/multipoint
[ "e7a25006b143b427874aef49f5579c1e798793f1" ]
[ "tests/reg_tests/test_MPSparse.py" ]
[ "import unittest\nimport numpy as np\nimport copy\nfrom mpi4py import MPI\nfrom multipoint import multiPointSparse\nfrom pyoptsparse import Optimization\n\ngcomm = MPI.COMM_WORLD\n\n\ndef set1_obj(x):\n rank = gcomm.rank\n g1_drag = x[\"v1\"] ** 2 * (rank + 1)\n g1_lift = x[\"v1\"] * 2 * 3.14159 * (rank + 1)\n g1_thick = np.ones((5, 1))\n funcs = {\"set1_lift\": g1_lift, \"set1_drag\": g1_drag, \"set1_thickness\": g1_thick, \"fail\": False}\n return funcs\n\n\ndef set1_sens(x, funcs):\n rank = gcomm.rank\n g1_drag_deriv = {\"v1\": 2 * x[\"v1\"] * (rank + 1), \"v2\": 0}\n g1_lift_deriv = {\"v1\": 2 * 3.14159 * (rank + 1), \"v2\": 0}\n g1_thick_deriv = {\"v1\": np.zeros((5, 1)), \"v2\": np.zeros((5, 1))}\n funcsSens = {\"set1_lift\": g1_lift_deriv, \"set1_drag\": g1_drag_deriv, \"set1_thickness\": g1_thick_deriv}\n return funcsSens\n\n\ndef set2_obj(x):\n funcs = {}\n g2_drag = x[\"v2\"] ** 3\n funcs = {\"set2_drag\": g2_drag}\n return funcs\n\n\ndef set2_sens(x, funcs):\n g2_drag_deriv = {\"v1\": 0, \"v2\": 3 * x[\"v2\"] ** 2}\n funcsSens = {\"set2_drag\": g2_drag_deriv}\n return funcsSens\n\n\ndef objCon(funcs, printOK):\n tmp = np.average(funcs[\"set1_drag\"])\n funcs[\"total_drag\"] = tmp + funcs[\"set2_drag\"]\n # if printOK:\n # print(funcs)\n return funcs\n\n\n# we create a fake optimization problem to test\nSET_NAMES = [\"set1\", \"set2\"]\nCOMM_SIZES = {\"set1\": [1, 1], \"set2\": [1]}\nSET_FUNC_HANDLES = {\"set1\": [set1_obj, set1_sens], \"set2\": [set2_obj, set2_sens]}\nDVS = [\"v1\", \"v2\"]\nSET_FUNCS = {\"set1\": [\"set1_lift\", \"set1_drag\", \"set1_thickness\"], \"set2\": [\"set2_drag\"]}\nALL_FUNCS = [i for s in sorted(SET_FUNCS.keys()) for i in SET_FUNCS[s]]\nOBJECTIVE = \"total_drag\"\nCONS = []\nALL_OBJCONS = [OBJECTIVE] + CONS\n\n\nclass TestMPSparse(unittest.TestCase):\n N_PROCS = 3\n\n def setUp(self):\n # construct MP\n self.MP = multiPointSparse(gcomm)\n for setName in SET_NAMES:\n comm_size = COMM_SIZES[setName]\n self.MP.addProcessorSet(setName, nMembers=len(comm_size), memberSizes=comm_size)\n\n self.comm, self.setComm, self.setFlags, self.groupFlags, self.ptID = self.MP.createCommunicators()\n\n for setName in SET_NAMES:\n self.MP.addProcSetObjFunc(setName, SET_FUNC_HANDLES[setName][0])\n self.MP.addProcSetSensFunc(setName, SET_FUNC_HANDLES[setName][1])\n\n # construct optProb\n optProb = Optimization(\"multipoint test\", self.MP.obj)\n for dv in DVS:\n optProb.addVar(dv)\n optProb.addObj(\"total_drag\")\n self.MP.setObjCon(objCon)\n self.MP.setOptProb(optProb)\n\n def test_createCommunicators(self):\n # check that setFlags have the right keys\n self.assertEqual(set(SET_NAMES), set(self.setFlags.keys()))\n\n # test that setName and groupFlags are correct\n setName = self.MP.getSetName()\n counter = {}\n for name in self.setFlags.keys():\n counter[name] = 0\n if name == setName:\n counter[name] += 1\n self.assertTrue(self.setFlags[name])\n else:\n self.assertFalse(self.setFlags[name])\n counter[name] = gcomm.allreduce(counter[name], MPI.SUM)\n self.assertEqual(counter[\"set1\"], 2)\n self.assertEqual(counter[\"set2\"], 1)\n\n # test groupFlags, ptID, and comm sizes\n # groupFlags should be all false except for one entry, whose index matches ptID\n self.assertEqual(self.setComm.size, len(self.groupFlags))\n self.assertEqual(self.comm.size, 1)\n self.assertTrue(self.groupFlags[self.ptID])\n # if we set the true entry to false, then the whole thing should be all false\n tmpGroupFlags = copy.copy(self.groupFlags)\n tmpGroupFlags[self.ptID] = False\n 
self.assertFalse(np.any(tmpGroupFlags))\n\n def test_obj_sens(self):\n x = {}\n x[\"v1\"] = 5\n x[\"v2\"] = 2\n\n funcs, fail = self.MP.obj(x)\n self.assertFalse(fail)\n funcsSens, fail = self.MP.sens(x, funcs)\n self.assertFalse(fail)\n\n # check that funcs contains all the funcs, objective, and constraints\n self.assertTrue(set(ALL_FUNCS).union(ALL_OBJCONS).issubset(funcs.keys()))\n # check that funcSens contains all the objective and constraints derivs\n self.assertEquals(set(ALL_OBJCONS), set(funcsSens.keys()))\n # check that the derivs are wrt all DVs\n for val in funcsSens.values():\n self.assertEquals(set(DVS), set(val.keys()))\n" ]
[ [ "numpy.average", "numpy.any", "numpy.ones", "numpy.zeros" ] ]
pseudoyim/conda_build_dojo
[ "8ddb4f958909461d551ca202c61a76c5508eac54" ]
[ "dojo/utils.py" ]
[ "'''\nUtilities for dojo commands.\n'''\nimport io\nimport json\nimport os\nimport pandas as pd\nimport requests\nimport sys\nimport yaml\nfrom collections import Counter\nfrom colorama import Fore, Back, Style\nfrom datetime import datetime\nfrom dojo import ROOT_DIR, LESSONS_DIR\nfrom pathlib import Path\nfrom tabulate import tabulate\n\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n try:\n from StringIO import StringIO\n except ImportError:\n from io import StringIO\n\n\nPROGRESS_COLUMNS = ['lesson_name', 'start_timestamp', 'lesson_index', 'note']\n\n\ndef add_lesson_yaml(new_lesson_path):\n # Add lesson yaml in new lesson dir.\n save_path = os.path.join(new_lesson_path, 'lesson.yaml')\n with open(save_path, 'w') as lesson_yaml:\n lesson_yaml.write(LESSON_YAML_TEMPLATE)\n print('Created new lesson.yaml template.')\n\n\ndef download_package(url, destination_path):\n '''\n Adapted from jpmds/workflow/download.py\n '''\n # Make sure the directory path exists for each channel and subdir.\n pkg_path_parent_dir_parts = destination_path.split('/')[:-1]\n pkg_path_parent_dir = '/' + os.path.join(*pkg_path_parent_dir_parts)\n Path(pkg_path_parent_dir).mkdir(parents=True, exist_ok=True)\n\n path_pieces = url.split('/')\n basename = path_pieces[-1]\n channel = path_pieces[-3]\n\n print(f' Downloading: {basename} from: {channel}')\n\n r = requests.get(url, stream=True)\n\n # The easiest way to notify the user that something went wrong is to \n # terminate, loudly. this will raise an HTTPError if \n # 400 <= status_code < 600, otherwise, no-op.\n r.raise_for_status()\n\n with open(destination_path, 'wb') as f:\n # Updated to follow: \n # https://requests.readthedocs.io/en/master/user/quickstart/#raw-response-content\n # shutil.copyfileobj(r.raw, f)\n for chunk in r.iter_content(chunk_size=128):\n f.write(chunk)\n\n\ndef get_latest():\n '''\n Looks at the last row in history.csv to see if there's an active lesson.\n If there is, return that lesson's name and current step index.\n Else, tell the user they need to start a lesson.\n '''\n df_history = pd.read_csv(f'{ROOT_DIR}/history.csv', index_col=False)\n\n if len(df_history.index) == 0:\n # i.e. there are no rows in the history.csv\n print('You have no lesson history. Please start a lesson to begin one.')\n sys.exit(1)\n \n else:\n # Get latest active lesson from history.csv\n last_row = list(df_history.tail(1).values)[0]\n\n # Check if the last record has \"active = True\".\n active_status = last_row[-2]\n if active_status is True:\n latest_lesson_name = last_row[1]\n else:\n print('No active lesson. 
Please start one.')\n sys.exit(1)\n\n # Get latest row from <lesson>/progress.csv\n # Looks like: [lesson_name, start_timestamp, lesson_index, note]\n latest_row = get_lesson_progress(latest_lesson_name)\n\n return latest_row[0], latest_row[2]\n\n\ndef get_timestamp_for_file():\n ts_format = '%Y%m%d_%H%M%S'\n now = datetime.utcnow()\n return now.strftime(ts_format)\n\n\ndef get_timestamp_for_action():\n ts_format = '%Y-%m-%d %H:%M:%S'\n now = datetime.utcnow()\n return now.strftime(ts_format) + ' UTC'\n\n\n##############\n# TAGS #\n##############\n\ndef search_tag(search_tag):\n '''\n Searches each lesson's lesson.yaml to see if they match\n for the given tag.\n '''\n # Final list of lists that looks like:\n # Title LessonName Objectives MatchingTag \n results = []\n\n # Load every lesson.yaml\n from glob import glob\n all_lesson_paths = glob(os.path.join(LESSONS_DIR,'*'))\n for lesson_path in all_lesson_paths:\n lesson_name = lesson_path.split('/')[-1]\n lesson_specs = load_lesson_specs(lesson_name)\n tags = lesson_specs['tags']\n\n # Search for tag.\n for tag in tags:\n if search_tag.lower() in tag.lower():\n title = lesson_specs['title']\n objectives = ' * '.join(str(obj) for obj in lesson_specs['objectives'])\n match = [title, lesson_name, objectives, tag]\n results.append(match)\n\n if not results:\n print(f'No results for: \"{search_tag}\"')\n sys.exit()\n\n print(Fore.CYAN + f'\\nSearch results for: \"{search_tag}\"')\n print(tabulate(sorted(results), headers=['Title', 'Lesson Name', 'Objectives', 'Matching Tag'], maxcolwidths=[30, 30, 30, 30], tablefmt=\"grid\"))\n print(Style.RESET_ALL) \n\n\n#################\n# HISTORY #\n#################\n\ndef load_history():\n '''\n Returns the history.csv as a df.\n If it doesn't exist (e.g. following a `dojo clean`),\n then a new one shall be created and returned.\n '''\n history_path = os.path.join(ROOT_DIR, 'history.csv')\n if not os.path.exists(history_path):\n columns = ['timestamp', 'lesson_name', 'action', 'active', 'completed']\n df = pd.DataFrame(columns=columns)\n return df\n return pd.read_csv(os.path.join(ROOT_DIR, 'history.csv'), index_col=False)\n\n\ndef update_history(lesson_name, action):\n '''\n Adds a row to history.csv, recording the lesson_name\n and whether it was started, stopped, or completed \n (and boolean of whether that lesson is active or not).\n '''\n df_history = load_history()\n ts = get_timestamp_for_action()\n\n if action == 'completed':\n active = False\n completed = True\n elif action == 'stop':\n active = False\n completed = False\n else:\n active = True\n completed = False\n\n new_row = {'timestamp': ts,\n 'lesson_name': lesson_name,\n 'action': action,\n 'active': active,\n 'completed': completed\n }\n df_history = df_history.append(new_row, ignore_index=True)\n df_history.to_csv(os.path.join(ROOT_DIR, 'history.csv'), index=False)\n\n\n#################\n# LESSONS #\n#################\n\ndef load_curriculum_specs():\n curriculum_yaml_path = os.path.join(ROOT_DIR, 'curriculum.yaml')\n\n try:\n with open(curriculum_yaml_path, mode='r') as curriculum_specs:\n import yaml # i.e. pyyaml\n return yaml.safe_load(curriculum_specs)\n except FileNotFoundError:\n print(f'ERROR: curriculum.yaml not found.')\n sys.exit(1)\n\n\ndef load_lesson_specs(lesson_name):\n '''\n Gets specs from the lesson.yaml\n '''\n lesson_yaml_path = os.path.join(LESSONS_DIR, lesson_name, 'lesson.yaml')\n\n try:\n with open(lesson_yaml_path, mode='r') as lesson_specs:\n import yaml # i.e. 
pyyaml\n return yaml.safe_load(lesson_specs)\n except FileNotFoundError:\n print(f'ERROR: lesson.yaml for {lesson_name} not found.')\n sys.exit(1)\n\n\ndef show_lessons(status=None):\n # Columns:\n # topic, title, lesson_name, objectives, author(s), tags\n curriculum_specs = load_curriculum_specs()\n\n results = []\n for topic, lessons in curriculum_specs['topics'].items():\n for lesson_name in lessons:\n lesson_specs = load_lesson_specs(lesson_name)\n title = lesson_specs['title']\n objectives = ' * '.join(str(obj) for obj in lesson_specs['objectives'])\n authors = ', '.join(str(author) for author in lesson_specs['authors'])\n tags = '; '.join(str(tag) for tag in lesson_specs['tags'])\n\n df_history = load_history()\n df_completed = df_history[(df_history.lesson_name == lesson_name) & (df_history.completed == True)]\n if df_completed.empty:\n completed = False\n else:\n completed = True\n\n result_row = [topic, title, lesson_name, objectives, authors, tags, completed]\n results.append(result_row)\n\n columns = ['Topic', 'Title', 'Lesson name', 'Objectives', 'Author(s)', 'Tags', 'Completed']\n df_results = pd.DataFrame(results, columns=columns)\n\n if status == 'authors':\n authors_column = df_results['Author(s)'].tolist()\n authors_for_lesson = [i.split(', ') for i in authors_column]\n author_instances = [name for lesson in authors_for_lesson for name in lesson]\n author_count = Counter(author_instances).most_common()\n print(Fore.YELLOW + '\\nAuthors and the number of lessons they\\'ve written')\n print('=================================================')\n for tally in author_count:\n print(f'{tally[0]}: {tally[1]}')\n print(Style.RESET_ALL)\n sys.exit(0)\n\n elif status == 'done':\n df_results_done = df_results[df_results['Completed'] == True]\n final = df_results_done.values.tolist()\n\n elif status == 'not_done':\n df_results_not_done = df_results[df_results['Completed'] == False]\n final = df_results_not_done.values.tolist()\n\n else:\n final = df_results.values.tolist()\n\n if len(final) == 0:\n if status == 'done':\n print('You have not completed any lessons. Begin your journey today!')\n sys.exit(0)\n elif status == 'not_done':\n print('You have completed all of the available lesssons. How about you create one of your own now? 
;D')\n sys.exit(0)\n\n print(Fore.CYAN + tabulate(sorted(final), headers=columns, maxcolwidths=[30, 30, 30, 30, 30, 30, 30], tablefmt=\"grid\"))\n print(' Start a lesson by running: dojo start <Lesson name>')\n print(Style.RESET_ALL)\n\ndef create_lesson_progress(lesson_name):\n ts = get_timestamp_for_action()\n row = [lesson_name, ts, 0, '']\n df = pd.DataFrame([row], columns=PROGRESS_COLUMNS)\n df.to_csv(f'{LESSONS_DIR}/{lesson_name}/progress.csv', index=False)\n\n\ndef get_all_lesson_progress(lesson_name):\n '''\n Returns all progress.csv as a df.\n '''\n return pd.read_csv(f'{LESSONS_DIR}/{lesson_name}/progress.csv', index_col=False) \n\n\ndef get_lesson_progress(lesson_name):\n '''\n Returns only the last row of progress.csv as a list.\n '''\n df = pd.read_csv(f'{LESSONS_DIR}/{lesson_name}/progress.csv', index_col=False)\n # Return the last row.\n return df.values[-1].tolist()\n\n\ndef update_lesson_progress(lesson_name, step_index, note=''):\n ts = get_timestamp_for_action()\n # By 'update', we're just adding a row.\n df = pd.read_csv(f'{LESSONS_DIR}/{lesson_name}/progress.csv', index_col=False)\n new_row = [lesson_name, ts, step_index, note]\n df_new_row = pd.DataFrame([new_row], columns=PROGRESS_COLUMNS)\n df = df.append(df_new_row, ignore_index=True)\n df.to_csv(f'{LESSONS_DIR}/{lesson_name}/progress.csv', index=False) \n\n\n###################\n# TEMPLATES #\n###################\n\nLESSON_YAML_TEMPLATE = '''# PLEASE ADD VALUES FOR ALL KEYS BELOW.\n\n# IMPORTANT: If your lesson requires a snapshot of a channel(s) in a certain \n# state (e.g. missing some dependencies for python-3.9), then please make\n# sure to add the URLs for the necessary packages in a `dojo_channels_pkgs.txt`\n# file under the lesson directory. (See README for more details.) For example:\n#\n# dojo/\n# |---- lessons/\n# |---- 001_version_bump/\n# |---- dojo_channels_pkgs.txt\n# |---- lesson.yaml\n\n# The lesson title.\n# Example: \"How to do a version bump\"\ntitle: \n\n# The person(s) who wrote this lesson.\nauthors: \n - AUTHOR NAME\n\n# Learning objectives. \n# Each objective should help complete this sentence: \n# \"By the end of this lesson, the learner will be able to...\"\nobjectives: \n - \"EXAMPLE OBJECTIVE\"\n\n# Tags.\n# Learners can search for this lesson using tags entered here.\ntags: []\n\n# Package name and version the learner will be attempting to build.\n# Example: numpy-1.16.0\ntarget_package:\n\n# Target platform the package will be built for.\n# Examples: linux-64, osx-64, win-64, noarch\ntarget_platform: \n\n# URL to the feedstock (use HTTPS, not SSH, to avoid the need for a key).\n# Example: https://github.com/AnacondaRecipes/tqdm-feedstock.git\nfeedstock_url: \n\n# The specific commit hash from the feedstock repo that should be initially \n# checked out. This acts as the starting point for the learner, a \"snapshot\"\n# in time from which they will complete their lesson objectives.\ncommit: \n\n# Lesson prompts (or steps).\n# List the propmpts/steps the learner should go through.\n# You can also pose questions and answers (for example, one prompt is \n# a question, and the subsequent prompt is the answer to that question).\n# BONUS: Provide hints to the learner (just tell them the hint will be \n# revealed in the next step if they want it).\nprompts:\n - EXAMPLE - Open the meta.yaml...\n - EXAMPLE - Increment the build number...\n - |\n EXAMPLE step with multiline.\n\n Additional lines.\n\n'''\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
qcwthu/Lifelong-Fewshot-Language-Learning
[ "cf7d17ce7de6a707d929d0542b3d5e639569855f" ]
[ "convertmodel.py" ]
[ "import os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"7\"\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config, load_tf_weights_in_t5\nfrom transformers.utils import logging\nimport tensorflow as tf\nlogging.set_verbosity_info()\n\ndef convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, pytorch_dump_path, model_name):\n config = T5Config.from_pretrained(model_name)\n print(f\"Building PyTorch model from configuration: {config}\")\n model = T5ForConditionalGeneration(config)\n load_tf_weights_in_t5(model, config, tf_checkpoint_path)\n\n # Save pytorch-model\n print(f\"Save PyTorch model to {pytorch_dump_path}\")\n model.save_pretrained(pytorch_dump_path)\n\nif __name__ == \"__main__\":\n # savepath_prefix = [\"/data/qin/lm_adapted_t5model/torch_ckpt/small\",\"/data/qin/lm_adapted_t5model/torch_ckpt/base\",\n # \"/data/qin/lm_adapted_t5model/torch_ckpt/large\",\"/data/qin/lm_adapted_t5model/torch_ckpt/xl\",\n # \"/data/qin/lm_adapted_t5model/torch_ckpt/xxl\"]\n savepath_prefix = [\"./lm_adapted_t5model/torch_ckpt/large\"]\n for path in savepath_prefix:\n if not os.path.exists(path):\n os.mkdir(path)\n #modeltype = [\"google/t5-v1_1-small\", \"google/t5-v1_1-base\", \"google/t5-v1_1-large\", \"google/t5-v1_1-xl\", \"google/t5-v1_1-xxl\"]\n modeltype = [\"google/t5-v1_1-large\"]\n loadpath_prefix = \"./lm_adapted_t5model/\"\n # ckptpath = [loadpath_prefix+\"t5.1.1.lm100k.small/\",loadpath_prefix+\"t5.1.1.lm100k.base/\",loadpath_prefix+\"t5.1.1.lm100k.large/\",\n # loadpath_prefix+\"t5.1.1.lm100k.xl/\",loadpath_prefix+\"t5.1.1.lm100k.xxl/\"]\n ckptpath = [loadpath_prefix + \"t5.1.1.lm100k.large/\"]\n for i in range(len(modeltype)):\n print(i)\n tf_vars = tf.train.list_variables(ckptpath[i])\n convert_tf_checkpoint_to_pytorch(ckptpath[i], savepath_prefix[i], modeltype[i])\n" ]
[ [ "tensorflow.train.list_variables" ] ]
turing-usp/Turing-Talks
[ "cb9b85b70e6a53d2eafd9593759ef828d7dddf3d" ]
[ "Redes Neurais/Autoencoder/testing.py" ]
[ "import Autoencoder\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torchvision.datasets as datasets\nimport matplotlib.pyplot as plt\n\n# Getting random sample from testing set\nto_tensor = torchvision.transforms.ToTensor()\ntest_data = datasets.CIFAR10(root='./dataset', train=False, download=True, transform=to_tensor)\ntest_dataloader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=True)\nsample = next(iter(test_dataloader))[0]\n\n# Displaying original sample image\nimg1 = sample.numpy()[0].transpose(1, 2, 0)\nfig, axes = plt.subplots(3, 1)\naxes[0].imshow(img1)\n\n# Loading Autoencoder\ndevice = torch.device('gpu' if torch.cuda.is_available() else 'cpu')\nnet = Autoencoder.Autoencoder()\nloaded = torch.load('neuralnet', map_location=device)\nnet.load_state_dict(loaded)\nnet.eval()\n\n# Encoding image and displaying it\nencoded = net.encode(sample)\nimg2 = encoded.detach().numpy()[0].transpose(1, 2, 0)\naxes[1].imshow(img2)\n\n# Decoding image and displaying it\ndecoded = net.decode(encoded)\nimg3 = decoded.detach().numpy()[0].transpose(1, 2, 0)\naxes[2].imshow(img3)\n\n# Calculating and printing loss\ncriterion = nn.MSELoss()\nprint(\"Calculated loss: {:3.6f}\".format(float(criterion(decoded, sample))))\n\naxes[0].title.set_text('3 Channel Original image (32x32)')\naxes[1].title.set_text('3 Channel Encoded image (15x15)')\naxes[2].title.set_text('3 Channel Recovered image (32x32)')\n\naxes[0].set_yticks([])\naxes[0].set_xticks([])\naxes[1].set_yticks([])\naxes[1].set_xticks([])\naxes[2].set_yticks([])\naxes[2].set_xticks([])\n\nplt.show()\n" ]
[ [ "torch.nn.MSELoss", "matplotlib.pyplot.subplots", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.load", "matplotlib.pyplot.show" ] ]
ion-g-ion/paper-cme-tt
[ "274e1c32d8d58a33e57e3ce47f662e512e59161a", "274e1c32d8d58a33e57e3ce47f662e512e59161a" ]
[ "code/simplegene_tt_convergence.py", "code/3stage_parameters2.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 9 10:01:49 2020\n\n@author: ion\n\"\"\"\n\nimport tensorflow as tf\nimport t3f\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom CME import CME\nimport timeit\nimport scipy.integrate\nimport numba\nimport scipy.sparse\nfrom tt_extra import mat_to_tt\nimport tt\nimport tt.amen\nfrom ttInt import ttInt\n\n# define reaction \nrates = np.array([0.015,0.002,0.1,0.01])\nPre =np.array( [[1,0],[1,0],[0,0],[0,1]])\nPost = np.array([[1,1],[0,0],[1,0],[0,0]])\nProps = [ lambda x: x[:,0], lambda x: x[:,0] , lambda x: x[:,0]*0+1 , lambda x: x[:,1] ]\n\n\n\n# construct the model and the CME operator\nN = [80,120] # state truncation\nmdl = CME(N, Pre,Post,rates,Props)\n\nmdl.construct_generator2(to_tf=False)\n\nA_tt = mdl.construct_generator_tt()\n\nInitial = [2,4]\nP0 = np.zeros(N)\nP0[Initial[0],Initial[1]] = 1.0\nP0_tt = tt.tensor(P0)\n\ndT = 128\nNt = 8\ntime = np.arange(Nt+1) * dT\n\n# Reference solution\nprint('Reference solution...')\ntme_ode45 = timeit.time.time()\nmdl.construct_generator2(to_tf=False)\nGen = mdl.gen\ndef func(t,y):\n return Gen.dot(y)\n\n# solve CME\nres = scipy.integrate.solve_ivp(func,[0,time[-1]],P0.flatten(),t_eval=time,max_step=dT/10000)\nPt = res.y.reshape(N+[-1])\nP_ref = Pt[:,:,-1]\ntme_ode45 = timeit.time.time() - tme_ode45\n\n\n# convergence test\nprint('Implicit Euler...')\nerr_implicit = []\nrefinements_implicit = [16,32,64,128,256,512]\nfor nt in refinements_implicit:\n \n fwd_int = ttInt(A_tt, epsilon = 1e-9, N_max = nt, dt_max = 100.0)\n\n P_tt = P0_tt\n for i in range(Nt):\n P_tt = fwd_int.solve(P_tt, dT, intervals = 1)\n\n P = P_tt.full().reshape(N)\n \n err = np.max(np.abs(P-P_ref)) / np.max(np.abs(P_ref))\n err_implicit.append(err)\n print('nt ',nt,' error inf ',err)\n \n \n# convergence test\nprint('Crank Nicolson...')\nerr_cn = []\nrefinements_cn = [16,32,64,128,256,512]\nfor nt in refinements_cn:\n \n fwd_int = ttInt(A_tt, epsilon = 1e-11, N_max = nt, dt_max = 100.0,method='crank–nicolson')\n\n P_tt = P0_tt\n for i in range(Nt):\n P_tt = fwd_int.solve(P_tt, dT, intervals = 1)\n\n P = P_tt.full().reshape(N)\n \n err = np.max(np.abs(P-P_ref)) / np.max(np.abs(P_ref))\n err_cn.append(err)\n print('nt ',nt,' error inf ',err)\n \n# convergence test\nprint('Cheby...')\nerr_ch = []\nrefinements_ch = [2,4,6,8,10,12,14,16,18,20,22,24,28,32]\nfor nt in refinements_ch:\n \n fwd_int = ttInt(A_tt, epsilon = 1e-14, N_max = nt, dt_max = 1000.0,method='cheby')\n\n P_tt = P0_tt\n for i in range(Nt):\n P_tt = fwd_int.solve(P_tt, dT, intervals = 1)\n P_tt = P_tt.round(1e-14)\n\n P = P_tt.full().reshape(N)\n \n err = np.max(np.abs(P-P_ref)) / np.max(np.abs(P_ref))\n err_ch.append(err)\n print('nt ',nt,' error inf ',err)\n \n# convergence test\nprint('Legendre...')\nerr_le = []\nrefinements_le = [2,4,6,8,10,12,14,16,18,20,22,24,28,32]\nfor nt in refinements_le:\n \n fwd_int = ttInt(A_tt, epsilon = 1e-14, N_max = nt, dt_max = 1000.0,method='legendre')\n\n P_tt = P0_tt\n for i in range(Nt):\n P_tt = fwd_int.solve(P_tt, dT, intervals = 1)\n P_tt = P_tt.round(1e-14)\n P = P_tt.full().reshape(N)\n \n err = np.max(np.abs(P-P_ref)) / np.max(np.abs(P_ref))\n err_le.append(err)\n print('nt ',nt,' error inf ',err)\n \n# convergence test\nprint('Epsilon of the solver...')\nerr_eps = []\nrefinements_epsilon = 10.0 ** (-np.arange(1,11))\nfor eps in refinements_epsilon:\n \n fwd_int = ttInt(A_tt, epsilon = eps, N_max = 16, dt_max = 1000.0,method='cheby')\n\n P_tt = P0_tt\n for i in range(Nt):\n P_tt = 
fwd_int.solve(P_tt, dT, intervals = 1)\n\n P = P_tt.full().reshape(N)\n \n err = np.max(np.abs(P-P_ref)) / np.max(np.abs(P_ref))\n err_eps.append(err)\n print('epsilon ',eps,' error inf ',err)\n \n\nprint('Epsilon vs Nt ...')\nrefinements_epsilon_2 = 10.0 ** (-np.arange(1,13))\nrefinements_ch2 = [2,3,4,5,6,7,8]\nerr_eps_ch = []\nfor eps in refinements_epsilon_2:\n err_temp = []\n for nt in refinements_ch2:\n fwd_int = ttInt(A_tt, epsilon = eps, N_max = nt, dt_max = 1000.0,method='cheby')\n \n P_tt = P0_tt\n for i in range(Nt):\n P_tt = fwd_int.solve(P_tt, dT, intervals = 1)\n \n P = P_tt.full().reshape(N)\n \n err = np.max(np.abs(P-P_ref)) / np.max(np.abs(P_ref))\n err_temp.append(err)\n print('epsilon ',eps,' nt ',nt,' error inf ',err)\n err_eps_ch.append(err_temp)\n \n \n \n#%% plots\nimport tikzplotlib\nplt.figure()\nplt.loglog(refinements_implicit,err_implicit)\nplt.loglog(refinements_cn[:-1],err_cn[:-1])\nplt.loglog(refinements_ch[:],err_ch[:])\n# plt.loglog(refinements_le[:],err_le[:])\nplt.xlabel(r'$N_t$')\nplt.ylabel(r'max relative error')\nplt.grid()\nplt.legend(['Implicit Euler','Crank-Nicolson','Chebyshev'])\ntikzplotlib.save('convergence_Nt.tex')\n\n# plt.figure()\n# plt.loglog(dT/np.array(refinements_implicit),np.array(err_implicit))\n# plt.loglog(dT/np.array(refinements_cn)[:-1],np.array(err_cn)[:-1])\n# plt.xlabel(r'$\\Delta t$ [s]')\n# plt.ylabel(r'max relative error')\n# plt.grid()\n# plt.legend(['Implicit Euler','Crank-Nicolson'])\n# tikzplotlib.save('convergence_dt.tex')\n\nplt.figure()\nplt.loglog(refinements_epsilon,err_eps)\nplt.xlabel(r'$\\epsilon$')\nplt.ylabel(r'max relative error')\nplt.grid()\ntikzplotlib.save('convergence_eps.tex')\n\nplt.figure()\nplt.loglog(dT/np.array(refinements_ch2),np.array(err_eps_ch).transpose())\nplt.xlabel(r'$\\Delta t$ [s]')\nplt.ylabel(r'max relative error')\nplt.legend([r'$\\epsilon=$'+str(eps) for eps in refinements_epsilon_2])\nplt.grid()\ntikzplotlib.save('convergence_eps_multiple.tex')\n\nplt.figure()\nplt.loglog(np.array(refinements_epsilon_2),np.array(err_eps_ch))\nplt.xlabel(r'$\\epsilon$')\nplt.ylabel(r'max relative error')\nplt.legend([r'$T=$'+str(tmp)+'' for tmp in np.array(refinements_ch2)])\nplt.grid()\ntikzplotlib.save('convergence_Nt_multiple.tex')", "\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 9 20:08:32 2020\n\n@author: ion\n\"\"\"\n\n\n\nimport tt\nimport scipy.io\nimport numpy as np\nfrom CME import CME,Gillespie,CompleteObservations,Observations_grid\nimport matplotlib.pyplot as plt\nimport scipy.integrate\nimport tt.amen\nimport timeit\nimport sys\nimport scipy.interpolate\nimport scipy.stats\nfrom mpl_toolkits import mplot3d\nfrom ttInt import ttInt\nimport datetime\nfrom tt_aux import *\nimport pickle\nimport tikzplotlib\n\ndef lagrange (x ,i , xm ):\n \"\"\"\n Evaluates the i-th Lagrange polynomial at x\n based on grid data xm\n \"\"\"\n n=len( xm )-1\n y=1 \n for j in range ( n+1 ):\n if i!=j:\n y*=( x-xm[j])/( xm[i]-xm[j])\n return y\n\ndef points_weights(a,b,nl):\n pts,ws = np.polynomial.legendre.leggauss(Nl)\n pts = 0.5 * (b-a) * (pts+1) + a\n ws = (b-a) / 2 *ws\n return pts, ws\n\ndef gamma_params(mode,var):\n beta = (mode+np.sqrt(mode**2+4*var))/(2*var)\n alpha = mode * beta + 1\n return alpha,beta\n\n\n\n# species are G,M,P,G*\nrates = np.array([4.0,10.0,1.0,0.2,0.6,1.0])\nPre =np.array( [[1,0,0,0],[0,1,0,0],[0,1,0,0],[1,0,1,0],[0,0,0,1],[0,0,1,0]])\nPost = np.array([[1,1,0,0],[0,1,1,0],[0,0,0,0],[0,0,0,1],[1,0,1,0],[0,0,0,0]])\nProps = [ lambda x: x[:,0], lambda x: 
x[:,1] , lambda x: x[:,1] , lambda x: x[:,0]*x[:,2] , lambda x: x[:,3], lambda x: x[:,2] ]\n\n\n# construct the model and the CME operator\nN = [4, 32, 128 ,4] # state truncation\n# N = [2, 16, 64 ,2] # state truncation\nInitial = [1,0,0,0]\nmdl_true = CME(N, Pre,Post,rates,Props)\nx0 = np.zeros(N)\nx0[tuple(Initial)] = 1.0\n\nqtt = True\n\n\n# Set up model\nmdl = CME(N, Pre,Post,rates*0+1,Props)\nAtts = mdl.construct_generator_tt(as_list = True)\n\nNl = 64\nmult = 4\nparam_range = [(0,r*5) for r in rates[:-1]]\npts1, ws1 = points_weights(param_range[0][0],param_range[0][1],Nl)\npts2, ws2 = points_weights(param_range[1][0],param_range[1][1],Nl)\npts3, ws3 = points_weights(param_range[2][0],param_range[2][1],Nl)\npts4, ws4 = points_weights(param_range[3][0],param_range[3][1],Nl)\npts5, ws5 = points_weights(param_range[4][0],param_range[4][1],Nl)\n\nA_tt = tt.kron(Atts[0] , tt.kron(tt.matrix(np.diag(pts1)),tt.eye([Nl]*4)) ) \\\n + tt.kron(Atts[1] , tt.kron(tt.kron(tt.eye([Nl]),tt.matrix(np.diag(pts2))),tt.eye([Nl]*3)) ) \\\n + tt.kron(Atts[2] , tt.kron(tt.kron(tt.eye([Nl]*2),tt.matrix(np.diag(pts3))),tt.eye([Nl]*2)) ) \\\n + tt.kron(Atts[3] , tt.kron(tt.kron(tt.eye([Nl]*3),tt.matrix(np.diag(pts4))),tt.eye([Nl]*1)) ) \\\n + tt.kron(Atts[4] , tt.kron(tt.eye([Nl]*4),tt.matrix(np.diag(pts5))) ) \\\n + tt.kron(Atts[5], tt.eye([Nl]*5) )*rates[5]\n\nA_tt = A_tt.round(1e-10,20)\n\nNo = 64\n# Nt = 64\ndT = 0.2\nNbs = 8\ntime_observation = np.arange(No)*dT\n\n\n#%% Get observation\nnp.random.seed(34548)\n\ntime_observation = np.arange(No)*dT\n\nsigma = 0.3\n\n\n# reaction_time,reaction_jumps,reaction_indices = Gillespie(np.array(Initial),time_observation[-1],Pre,Post-Pre,rates)\n# observations = Observations_grid(time_observation, reaction_time, reaction_jumps)\n# observations_noise = observations+np.random.normal(0,sigma,observations.shape)\n\nwith open(r\"3stage2_64.pickle\", \"rb\") as input_file:\n dct = pickle.load(input_file) \n\nNo = dct['time_observation'].size\ntime_observation = dct['time_observation']\nreaction_time = dct['reaction_time']\nreaction_jumps = dct['reaction_jumps']\nreaction_indices = dct['reaction_indices']\nobservations = dct['observations']\nobservations_noise = dct['observations_noise']\ndT = time_observation[1]-time_observation[0]\nsigma = dct['sigma']\nsample_posterior_mcmc = dct['sample']\n\nplt.figure()\nplt.plot(np.repeat(reaction_time,2)[1:],np.repeat(reaction_jumps[:,0],2)[:-1],'b')\nplt.plot(np.repeat(reaction_time,2)[1:],np.repeat(reaction_jumps[:,1],2)[:-1],'r') \nplt.plot(np.repeat(reaction_time,2)[1:],np.repeat(reaction_jumps[:,2],2)[:-1],'g')\nplt.plot(np.repeat(reaction_time,2)[1:],np.repeat(reaction_jumps[:,3],2)[:-1],'c') \nplt.scatter(time_observation,observations_noise[:,0],c='k',marker='x',s=20)\nplt.scatter(time_observation,observations_noise[:,1],c='k',marker='x',s=20)\nplt.scatter(time_observation,observations_noise[:,2],c='k',marker='x',s=20)\nplt.scatter(time_observation,observations_noise[:,3],c='k',marker='x',s=20)\nplt.xlabel('t [s]')\nplt.ylabel('#individuals')\nplt.legend(['G','M','P','G*','observations'])\ntikzplotlib.save('./../results/3stage_45_sample.tex')\nplt.pause(0.05)\n\n\ngamma_pdf = lambda x,a,b : x**(a-1) * np.exp(-b*x)\n\n\n#%% Loops\n# IC\nP = tt.kron(tt.tensor(x0),tt.ones([Nl]*5))\n# Prior \n# alpha_prior, beta_prior = gamma_params(rates,rates / np.array([1000,250,25,900]))\nmu = rates[:-1]*np.array([1.5,1.5,1.5,1.0,1.0])\nvar = rates[:-1] * np.array([4/3, 5, 0.25, 0.04, 0.2])\nalpha_prior = mu**2/var\nbeta_prior = mu/var\nPt = 
tt.tensor( gamma_pdf(pts1,alpha_prior[0],beta_prior[0]) )\nPt = tt.kron(Pt, tt.tensor( gamma_pdf(pts2,alpha_prior[1],beta_prior[1]) ) )\nPt = tt.kron(Pt, tt.tensor( gamma_pdf(pts3,alpha_prior[2],beta_prior[2]) ) )\nPt = tt.kron(Pt, tt.tensor( gamma_pdf(pts4,alpha_prior[3],beta_prior[3]) ) )\nPt = tt.kron(Pt, tt.tensor( gamma_pdf(pts5,alpha_prior[4],beta_prior[4]) ) )\n\n# Pt = tt.tensor(np.ones([Nl,Nl]))\nWS = tt.kron(tt.kron(tt.kron(tt.tensor(ws1),tt.tensor(ws2)),tt.kron(tt.tensor(ws3),tt.tensor(ws4))) , tt.tensor(ws5) )\nZ = tt.sum(Pt*WS)\nPt = Pt * (1/Z)\nPt_prior = Pt \nP = tt.kron(tt.tensor(x0),Pt)\nP = P * (1/tt.sum(P*tt.kron(tt.ones(N),WS)))\nplt.figure()\nplt.plot(pts1,gamma_pdf(pts1,alpha_prior[0],beta_prior[0]))\nplt.figure()\nplt.plot(pts2,gamma_pdf(pts2,alpha_prior[1],beta_prior[1]))\nplt.figure()\nplt.plot(pts3,gamma_pdf(pts3,alpha_prior[2],beta_prior[2]))\nplt.figure()\nplt.plot(pts4,gamma_pdf(pts4,alpha_prior[3],beta_prior[3]))\nplt.figure()\nplt.plot(pts5,gamma_pdf(pts5,alpha_prior[4],beta_prior[4]))\nplt.pause(0.05)\n\n\n#%% integrator \n\nif qtt:\n A_qtt = ttm2qttm(A_tt)\n fwd_int = ttInt(A_qtt, epsilon = 1e-5, N_max = 8, dt_max = 1.0,method='cheby')\n ws_qtt = tt2qtt(WS)\n P = tt2qtt(P)\nelse:\n fwd_int = ttInt(A_tt, epsilon = 1e-6, N_max = 64, dt_max = 1.0,method='crank–nicolson')\n \nPts = []\nPjs_fwd = []\nprint('Starting...')\ntme_total = datetime.datetime.now()\ntensor_size = 0\nfor i in range(1,No):\n \n y = observations_noise[i,:]\n\n \n PO = tt.tensor(np.exp(-0.5*(y[0]-np.arange(N[0]))**2/sigma**2))\n PO = tt.kron(PO, tt.tensor(np.exp(-0.5*(y[1]-np.arange(N[1]))**2/sigma**2)))\n PO = tt.kron(PO, tt.tensor(np.exp(-0.5*(y[2]-np.arange(N[2]))**2/sigma**2)))\n PO = tt.kron(PO, tt.tensor(np.exp(-0.5*(y[3]-np.arange(N[3]))**2/sigma**2)))\n\n PO = PO * (1/tt.sum(PO))\n PO = tt.kron(PO,tt.ones([Nl]*5))\n \n if qtt: PO = tt2qtt(PO)\n \n print('new observation ',i,'/',No,' at time ',time_observation[i],' ',y)\n \n tme = datetime.datetime.now()\n P = fwd_int.solve(P, dT, intervals = Nbs,qtt = qtt,verb = False)\n tme = datetime.datetime.now() - tme\n \n print('\\tmax rank ',max(P.r))\n Ppred = P\n Ppost = PO * Ppred\n Ppost = Ppost.round(1e-8)\n print('\\tmax rank (after observation) ',max(Ppost.r))\n \n if tensor_size<tt_size(Ppost): tensor_size = tt_size(Ppost)\n \n if not qtt:\n Ppost = Ppost * (1/tt.sum(Ppost * tt.kron(tt.ones(N),WS)))\n Pt = tt.sum(tt.sum(tt.sum(tt.sum(Ppost,0),0),0),0) \n Z = tt.sum(Pt*WS)\n Pt = Pt * (1/Z)\n Pt = Pt.round(1e-10)\n \n else:\n Ppost = Ppost * (1/tt.sum(Ppost * tt.kron(tt.ones(int(np.sum(np.log2(N)))*[2]),ws_qtt)))\n Pt = Ppost\n for i in range(int(np.sum(np.log2(N)))): Pt = tt.sum(Pt,0) \n Z = tt.sum(Pt*ws_qtt)\n Pt = Pt * (1/Z)\n Pt = Pt.round(1e-10)\n \n# print(Pt.r)\n Pts.append(Pt.copy())\n \n if qtt: Pt = qtt2tt(Pt,[Nl]*5)\n \n E1 = tt.sum(Pt * tt.kron(tt.tensor(pts1),tt.ones([Nl]*4)) * WS)\n E2 = tt.sum(Pt * tt.kron(tt.kron(tt.ones([Nl]),tt.tensor(pts2)),tt.ones([Nl]*3)) * WS)\n E3 = tt.sum(Pt * tt.kron(tt.kron(tt.ones([Nl]*2),tt.tensor(pts3)),tt.ones([Nl]*2)) * WS)\n E4 = tt.sum(Pt * tt.kron(tt.kron(tt.ones([Nl]*3),tt.tensor(pts4)),tt.ones([Nl])) * WS)\n E5 = tt.sum(Pt * tt.kron(tt.ones([Nl]*4),tt.tensor(pts5)) * WS)\n E = np.array([E1,E2,E3,E4,E5])\n \n V1 = tt.sum(Pt * tt.kron(tt.tensor(pts1**2),tt.ones([Nl]*4)) * WS) - E1**2\n V2 = tt.sum(Pt * tt.kron(tt.kron(tt.ones([Nl]),tt.tensor(pts2**2)),tt.ones([Nl]*3)) * WS) - E2**2\n V3 = tt.sum(Pt * tt.kron(tt.kron(tt.ones([Nl]*2),tt.tensor(pts3**2)),tt.ones([Nl]*2)) * WS) - E3**2\n 
V4 = tt.sum(Pt * tt.kron(tt.kron(tt.ones([Nl]*3),tt.tensor(pts4**2)),tt.ones([Nl])) * WS) - E4**2\n V5 = tt.sum(Pt * tt.kron(tt.ones([Nl]*4),tt.tensor(pts5**2)) * WS) - E5**2\n V = np.array([V1,V2,V3,V4,V5])\n\n print('\\tExpected value computed posterior ' ,E)\n print('\\tVariance computed posterior ' ,V)\n\n P = Ppost\n print('\\tposterior size ',sum([elem.size for elem in P.to_list(P)])*8 / 1000000,' MB')\n print('\\telapsed ',tme)\n \n # P12 = \n\n \n # plt.pause(0.05)\n\nPt_fwd = Pt\n\ntme_total = datetime.datetime.now() - tme_total\n\n\n\n#%% show \nprint()\nprint('Total time ',tme_total)\nprint('Maximum size ',tensor_size*8/1e6,' MB')\nPost = Pt_fwd\nPrior = Pt_prior\n\nnburn = 20000\n\nxs_tt = tt_meshgrid([pts1,pts2,pts3,pts4,pts5])\n\nPt = Pt_fwd\n\nE = np.array([tt.dot(x_tt*Pt,WS) for x_tt in xs_tt])\n\n\nV = np.array([tt.dot(Pt*xs_tt[i],xs_tt[i]*WS)-E[i]**2 for i in range(5)])\n\nimport pyswarm\n\ndef goal_function(thetuta):\n\n L1 = np.array([ lagrange(thetuta[0],i,pts1) for i in range(pts1.size) ] )\n L2 = np.array([ lagrange(thetuta[1],i,pts2) for i in range(pts2.size) ] )\n L3 = np.array([ lagrange(thetuta[2],i,pts3) for i in range(pts3.size) ] )\n L4 = np.array([ lagrange(thetuta[3],i,pts4) for i in range(pts4.size) ] )\n L5 = np.array([ lagrange(thetuta[4],i,pts5) for i in range(pts5.size) ] )\n\n val = tt.dot(Post,tt.mkron(tt.tensor(L1.flatten()), tt.tensor(L2.flatten()), tt.tensor(L3.flatten()), tt.tensor(L4.flatten()), tt.tensor(L5.flatten())))\n return -val\n\ntheta_mode, _ = pyswarm.pso(goal_function, np.array(param_range)[:,0], np.array(param_range)[:,1])\n\nprint('Exact rates: ',rates)\nprint('')\nprint('Expected value computed posterior ' ,E)\nprint('Variance computed posterior ' ,V)\nprint('Computed modes: ',theta_mode)\nprint('')\nprint('Expected MCMC posterior ' ,np.mean(sample_posterior_mcmc[nburn:,:],0))\nprint('Variance MCMC posterior ' ,np.std(sample_posterior_mcmc[nburn:,:],0))\nprint('')\nprint('Relative absolute error exp ',np.abs(np.mean(sample_posterior_mcmc[nburn:,:],0)-E)/E * 100,' %')\nprint('Relative absolute error var ',np.abs(np.std(sample_posterior_mcmc[nburn:,:],0)**2-V)/V * 100,' %')\nprint('')\nprint('Expected value prior ' ,alpha_prior/beta_prior)\nprint('Variance computed prior ' ,alpha_prior/beta_prior/beta_prior)\nprint('')\n\n\nthetas = [np.linspace(param_range[i][0],param_range[i][1]/1.5,256) for i in range(5)]\n\nL1 = np.array([ lagrange(thetas[0],i,pts1) for i in range(pts1.size) ] )\nL2 = np.array([ lagrange(thetas[1],i,pts2) for i in range(pts2.size) ] )\nL3 = np.array([ lagrange(thetas[2],i,pts3) for i in range(pts3.size) ] )\nL4 = np.array([ lagrange(thetas[3],i,pts4) for i in range(pts4.size) ] )\nL5 = np.array([ lagrange(thetas[4],i,pts5) for i in range(pts5.size) ] )\n\n\n\ndef mode_prod(t,Ms):\n cores = t.to_list(t)\n for i in range(len(cores)):\n cores[i] = np.einsum('ijk,jl->ilk',cores[i],Ms[i])\n return tt.tensor().from_list(cores) \n \nPost1 = mode_prod(Post,[L1,ws2.reshape([-1,1]),ws3.reshape([-1,1]),ws4.reshape([-1,1]),ws5.reshape([-1,1])]).full().flatten()\nPost2 = mode_prod(Post,[ws1.reshape([-1,1]),L2,ws3.reshape([-1,1]),ws4.reshape([-1,1]),ws5.reshape([-1,1])]).full().flatten()\nPost3 = mode_prod(Post,[ws1.reshape([-1,1]),ws2.reshape([-1,1]),L3,ws4.reshape([-1,1]),ws5.reshape([-1,1])]).full().flatten()\nPost4 = mode_prod(Post,[ws1.reshape([-1,1]),ws2.reshape([-1,1]),ws3.reshape([-1,1]),L4,ws5.reshape([-1,1])]).full().flatten()\nPost5 = 
mode_prod(Post,[ws1.reshape([-1,1]),ws2.reshape([-1,1]),ws3.reshape([-1,1]),ws4.reshape([-1,1]),L5]).full().flatten()\n\n\nPrior1 = mode_prod(Prior,[L1,ws2.reshape([-1,1]),ws3.reshape([-1,1]),ws4.reshape([-1,1]),ws5.reshape([-1,1])]).full().flatten()\nPrior2 = mode_prod(Prior,[ws1.reshape([-1,1]),L2,ws3.reshape([-1,1]),ws4.reshape([-1,1]),ws5.reshape([-1,1])]).full().flatten()\nPrior3 = mode_prod(Prior,[ws1.reshape([-1,1]),ws2.reshape([-1,1]),L3,ws4.reshape([-1,1]),ws5.reshape([-1,1])]).full().flatten()\nPrior4 = mode_prod(Prior,[ws1.reshape([-1,1]),ws2.reshape([-1,1]),ws3.reshape([-1,1]),L4,ws5.reshape([-1,1])]).full().flatten()\nPrior5 = mode_prod(Prior,[ws1.reshape([-1,1]),ws2.reshape([-1,1]),ws3.reshape([-1,1]),ws4.reshape([-1,1]),L5]).full().flatten()\n\nPriors = [Prior1, Prior2, Prior3, Prior4, Prior5]\n\n\nplt.figure()\nplt.plot(thetas[0],Post1)\nplt.plot(thetas[0],Prior1,'g--')\nplt.hist(sample_posterior_mcmc[nburn:,0],bins=128,density=True,color='c',alpha=0.4)\nplt.axvline(rates[0],c='r',linestyle=':')\nplt.xlabel(r'$\\theta_1$')\nplt.ylabel(r'probability density')\nplt.legend(['Posterior','Prior','True parameter'])\n\nplt.figure()\nplt.plot(thetas[1],Post2)\nplt.plot(thetas[1],Prior2,'g--')\nplt.hist(sample_posterior_mcmc[nburn:,1],bins=128,density=True,color='c',alpha=0.4)\n# plt.scatter(pts2,pts2*0)\nplt.axvline(rates[1],c='r',linestyle=':')\nplt.xlabel(r'$\\theta_2$')\nplt.ylabel(r'probability density')\nplt.legend(['Posterior','Prior','True parameter'])\n\nplt.figure()\nplt.plot(thetas[2],Post3)\nplt.plot(thetas[2],Prior3,'g--')\nplt.hist(sample_posterior_mcmc[nburn:,2],bins=128,density=True,color='c',alpha=0.4)\nplt.axvline(rates[2],c='r',linestyle=':')\nplt.xlabel(r'$\\theta_3$')\nplt.ylabel(r'probability density')\nplt.legend(['Posterior','Prior','True parameter'])\n\nplt.figure()\nplt.plot(thetas[3],Post4)\nplt.plot(thetas[3],Prior4,'g--')\nplt.hist(sample_posterior_mcmc[nburn:,3],bins=128,density=True,color='c',alpha=0.4)\nplt.axvline(rates[3],c='r',linestyle=':')\nplt.xlabel(r'$\\theta_4$') \nplt.ylabel(r'probability density')\nplt.legend(['Posterior','Prior','True parameter'])\n\nplt.figure()\nplt.plot(thetas[4],Post5)\nplt.plot(thetas[4],Prior5,'g--')\nplt.hist(sample_posterior_mcmc[nburn:,4],bins=128,density=True,color='c',alpha=0.4)\nplt.axvline(rates[4],c='r',linestyle=':')\nplt.xlabel(r'$\\theta_4$')\nplt.ylabel(r'probability density')\nplt.legend(['Posterior','Prior','True parameter'])\n\n\n# 2d plots\n\nPost12 = np.squeeze(mode_prod(Post,[L1,L2,ws3.reshape([-1,1]),ws4.reshape([-1,1]),ws5.reshape([-1,1])]).full())\nPost13 = np.squeeze(mode_prod(Post,[L1,ws2.reshape([-1,1]),L3,ws4.reshape([-1,1]),ws5.reshape([-1,1])]).full())\nPost14 = np.squeeze(mode_prod(Post,[L1,ws2.reshape([-1,1]),ws3.reshape([-1,1]),L4,ws5.reshape([-1,1])]).full())\nPost15 = np.squeeze(mode_prod(Post,[L1,ws2.reshape([-1,1]),ws3.reshape([-1,1]),ws4.reshape([-1,1]),L5]).full())\nPost23 = np.squeeze(mode_prod(Post,[ws1.reshape([-1,1]),L2,L3,ws4.reshape([-1,1]),ws5.reshape([-1,1])]).full())\nPost24 = np.squeeze(mode_prod(Post,[ws1.reshape([-1,1]),L2,ws3.reshape([-1,1]),L4,ws5.reshape([-1,1])]).full())\nPost25 = np.squeeze(mode_prod(Post,[ws1.reshape([-1,1]),L2,ws3.reshape([-1,1]),ws4.reshape([-1,1]),L5]).full())\nPost34 = np.squeeze(mode_prod(Post,[ws1.reshape([-1,1]),ws2.reshape([-1,1]),L3,L4,ws5.reshape([-1,1])]).full())\nPost35 = np.squeeze(mode_prod(Post,[ws1.reshape([-1,1]),ws2.reshape([-1,1]),L3,ws4.reshape([-1,1]),L5]).full())\nPost45 = 
np.squeeze(mode_prod(Post,[ws1.reshape([-1,1]),ws2.reshape([-1,1]),ws3.reshape([-1,1]),L4,L5]).full())\n\nPosteriors = [[Post1, Post12.transpose(), Post13.transpose(), Post14.transpose(), Post15.transpose()],\n [Post12, Post2, Post23.transpose(), Post24.transpose(), Post25.transpose()],\n [Post13, Post23, Post3, Post34.transpose(), Post35.transpose()],\n [Post14, Post24, Post34, Post4, Post45.transpose()],\n [Post15, Post25, Post35, Post45, Post5]]\nplt.figure()\n\nk = 0\nfor i in range(5):\n for j in range(5):\n k += 1\n if i==j:\n plt.subplot(5, 5, k)\n plt.plot(thetas[i], Posteriors[i][i]/np.max(Posteriors[i][i])*np.max(thetas[i]))\n plt.plot(thetas[i],Priors[i]/np.max(Posteriors[i][i])*np.max(thetas[i]) ,'g--')\n count, bins = np.histogram(sample_posterior_mcmc[nburn:,i],bins=128,density=True)\n count = count/np.max(Posteriors[i][i])*np.max(thetas[i])\n plt.hist(bins[:-1], bins, weights=count,color='c',alpha=0.4)\n plt.axvline(rates[i],c='r',linestyle=':',linewidth=1)\n \n else:\n plt.subplot(5, 5, k)\n plt.contourf(np.meshgrid(thetas[i],thetas[j])[1],np.meshgrid(thetas[i],thetas[j])[0], Posteriors[i][j],cmap='gray_r',levels =32)\n plt.axvline(rates[j],c='r',linestyle=':',linewidth=1)\n plt.axhline(rates[i],c='r',linestyle=':',linewidth=1)\n \n \n if i==4: plt.xlabel(r'$\\theta_'+str(j+1)+'$')\n if j==0: plt.ylabel(r'$\\theta_'+str(i+1)+'$')\n \n if j>0: plt.yticks([])\n if i<4: plt.xticks([])\n " ]
[ [ "numpy.array", "numpy.zeros", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "numpy.arange", "matplotlib.pyplot.ylabel", "numpy.abs", "matplotlib.pyplot.loglog" ], [ "numpy.meshgrid", "numpy.exp", "numpy.mean", "matplotlib.pyplot.xticks", "numpy.max", "numpy.histogram", "numpy.arange", "numpy.sqrt", "matplotlib.pyplot.subplot", "numpy.array", "numpy.zeros", "matplotlib.pyplot.axhline", "matplotlib.pyplot.yticks", "matplotlib.pyplot.figure", "numpy.std", "matplotlib.pyplot.hist", "numpy.einsum", "matplotlib.pyplot.axvline", "numpy.log2", "numpy.random.seed", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.pause", "matplotlib.pyplot.ylabel", "numpy.repeat", "matplotlib.pyplot.scatter", "numpy.linspace", "numpy.polynomial.legendre.leggauss", "numpy.diag" ] ]
KaihuaTang/ResNet-Pytorch-Face-Recognition
[ "73b77ef86ec7e6c0589c60f74e5c10a69b0366a2" ]
[ "ResNet.py" ]
[ "import torch.nn as nn\r\nimport torch\r\nimport math\r\n\r\ndef resnet50(pretrained=False, **kwargs):\r\n \"\"\"Constructs a ResNet-50 model.\r\n Args:\r\n pretrained (bool): If True, returns a model pre-trained on ImageNet\r\n \"\"\"\r\n model = ResNet([3, 4, 6, 3], **kwargs)\r\n if pretrained:\r\n model.load_state_dict(torch.load(model.modelPath))\r\n return model\r\n\r\ndef resnet101(pretrained=False, **kwargs):\r\n \"\"\"Constructs a ResNet-101 model.\r\n Args:\r\n pretrained (bool): If True, returns a model pre-trained on ImageNet\r\n \"\"\"\r\n model = ResNet([3, 4, 23, 3], **kwargs)\r\n if pretrained:\r\n model.load_state_dict(torch.load(model.modelPath))\r\n return model\r\n\r\nclass ResNet(nn.Module):\r\n \"\"\"\r\n block: A sub module\r\n \"\"\"\r\n def __init__(self, layers, num_classes=1000, model_path=\"model.pkl\"):\r\n super(ResNet, self).__init__()\r\n self.inplanes = 64\r\n self.modelPath = model_path\r\n self.conv1 = nn.Conv2d(3, 64, kernel_size = 7, stride = 2, padding = 3,\r\n bias = False)\r\n self.bn1 = nn.BatchNorm2d(64)\r\n self.relu = nn.ReLU(inplace = True)\r\n self.maxpool = nn.MaxPool2d(kernel_size = 3, stride = 2, padding = 1)\r\n self.stack1 = self.make_stack(64, layers[0])\r\n self.stack2 = self.make_stack(128, layers[1], stride=2)\r\n self.stack3 = self.make_stack(256, layers[2], stride=2)\r\n self.stack4 = self.make_stack(512, layers[3], stride=2)\r\n self.avgpool = nn.AvgPool2d(7, stride = 1)\r\n self.fc = nn.Linear(512 * Bottleneck.expansion, num_classes)\r\n # initialize parameters\r\n self.init_param()\r\n\r\n def init_param(self):\r\n # The following is initialization\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\r\n m.weight.data.normal_(0, math.sqrt(2./n))\r\n elif isinstance(m, nn.BatchNorm2d):\r\n m.weight.data.fill_(1)\r\n m.bias.data.zero_()\r\n elif isinstance(m, nn.Linear):\r\n n = m.weight.shape[0] * m.weight.shape[1]\r\n m.weight.data.normal_(0, math.sqrt(2./n))\r\n m.bias.data.zero_()\r\n\r\n def make_stack(self, planes, blocks, stride = 1):\r\n downsample = None\r\n layers = []\r\n \r\n if stride != 1 or self.inplanes != planes * Bottleneck.expansion:\r\n downsample = nn.Sequential(\r\n nn.Conv2d(self.inplanes, planes * Bottleneck.expansion,\r\n kernel_size=1, stride=stride, bias=False),\r\n nn.BatchNorm2d(planes * Bottleneck.expansion),\r\n )\r\n\r\n layers.append(Bottleneck(self.inplanes, planes, stride, downsample))\r\n self.inplanes = planes * Bottleneck.expansion\r\n for i in range(1, blocks):\r\n layers.append(Bottleneck(self.inplanes, planes))\r\n\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.bn1(x)\r\n x = self.relu(x)\r\n x = self.maxpool(x)\r\n\r\n x = self.stack1(x)\r\n x = self.stack2(x)\r\n x = self.stack3(x)\r\n x = self.stack4(x)\r\n\r\n x = self.avgpool(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.fc(x)\r\n\r\n return x\r\n\r\nclass Bottleneck(nn.Module):\r\n expansion = 4\r\n\r\n def __init__(self, inplanes, planes, stride=1, downsample=None):\r\n super(Bottleneck, self).__init__()\r\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\r\n self.bn1 = nn.BatchNorm2d(planes)\r\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\r\n self.bn2 = nn.BatchNorm2d(planes)\r\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\r\n self.bn3 = nn.BatchNorm2d(planes * 4)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.downsample 
= downsample\r\n self.stride = stride\r\n\r\n def forward(self, x):\r\n residual = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv3(out)\r\n out = self.bn3(out)\r\n\r\n if self.downsample is not None:\r\n residual = self.downsample(x)\r\n\r\n out += residual\r\n out = self.relu(out)\r\n\r\n return out\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n" ]
[ [ "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.load" ] ]
teslakit/teslak
[ "1495bfa2364ddbacb802d145b456a35213abfb7c" ]
[ "teslakit/numerical_models/swan/geo.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom math import radians, degrees, sin, cos, asin, acos, sqrt, atan2, pi\nimport numpy as np\n\ndef gc_distance(lat1, lon1, lat2, lon2):\n 'Calculate great circle distance and azimuth (exact. parsed ml)'\n\n # distance\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n a = sin((lat2-lat1)/2)**2 + cos(lat1) * cos(lat2) * sin((lon2-lon1)/2)**2;\n if a < 0: a = 0\n if a > 1: a = 1\n\n r = 1\n rng = r * 2 * atan2(sqrt(a), sqrt(1-a))\n rng = degrees(rng)\n\n # azimuth\n az = atan2(\n cos(lat2) * sin(lon2-lon1),\n cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(lon2-lon1)\n )\n if lat1 <= -pi/2: az = 0\n if lat2 >= pi/2: az = 0\n if lat2 <= -pi/2: az = pi\n if lat1 >= pi/2: az = pi\n\n az = az % (2*pi)\n az = degrees(az)\n\n return rng, az\n\ndef shoot(lon, lat, azimuth, maxdist=None):\n \"\"\"Shooter Function\n Original javascript on http://williams.best.vwh.net/gccalc.htm\n Translated to python by Thomas Lecocq\n \"\"\"\n glat1 = lat * np.pi / 180.\n glon1 = lon * np.pi / 180.\n s = maxdist / 1.852\n faz = azimuth * np.pi / 180.\n\n EPS= 0.00000000005\n if ((np.abs(np.cos(glat1))<EPS) and not (np.abs(np.sin(faz))<EPS)):\n print(\"Only N-S courses are meaningful, starting at a pole!\")\n\n a=6378.13/1.852\n f=1/298.257223563\n r = 1 - f\n tu = r * np.tan(glat1)\n sf = np.sin(faz)\n cf = np.cos(faz)\n if (cf==0):\n b=0.\n else:\n b=2. * np.arctan2 (tu, cf)\n\n cu = 1. / np.sqrt(1 + tu * tu)\n su = tu * cu\n sa = cu * sf\n c2a = 1 - sa * sa\n x = 1. + np.sqrt(1. + c2a * (1. / (r * r) - 1.))\n x = (x - 2.) / x\n c = 1. - x\n c = (x * x / 4. + 1.) / c\n d = (0.375 * x * x - 1.) * x\n tu = s / (r * a * c)\n y = tu\n c = y + 1\n while (np.abs (y - c) > EPS):\n\n sy = np.sin(y)\n cy = np.cos(y)\n cz = np.cos(b + y)\n e = 2. * cz * cz - 1.\n c = y\n x = e * cy\n y = e + e - 1.\n y = (((sy * sy * 4. - 3.) * y * cz * d / 6. + x) *\n d / 4. - cz) * sy * d + tu\n\n b = cu * cy * cf - su * sy\n c = r * np.sqrt(sa * sa + b * b)\n d = su * cy + cu * sy * cf\n glat2 = (np.arctan2(d, c) + np.pi) % (2*np.pi) - np.pi\n c = cu * cy - su * sy * cf\n x = np.arctan2(sy * sf, c)\n c = ((-3. * c2a + 4.) * f + 4.) * c2a * f / 16.\n d = ((e * cy * c + cz) * sy * c + y) * sa\n glon2 = ((glon1 + x - (1. - c) * d * f + np.pi) % (2*np.pi)) - np.pi\n\n baz = (np.arctan2(sa, b) + np.pi) % (2 * np.pi)\n\n glon2 *= 180./np.pi\n glat2 *= 180./np.pi\n baz *= 180./np.pi\n\n return (glon2, glat2, baz)\n" ]
[ [ "numpy.sin", "numpy.tan", "numpy.arctan2", "numpy.sqrt", "numpy.cos", "numpy.abs" ] ]
Axel13fr/kmall
[ "c48fce9d891e186a99d1fc76255db449b1bdbe48" ]
[ "KMALL/kmall.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nA python class to read Kongsberg KMALL data format for swath mapping\nbathymetric echosounders.\n\"\"\"\nimport pandas as pd\nimport sys\nimport numpy as np\nimport struct\nimport datetime\nimport argparse\nimport os\nimport re\nimport bz2\nimport copy\nimport math\nfrom scipy import stats\n\n\nclass kmall():\n \"\"\" A class for reading a Kongsberg KMALL data file. \"\"\"\n\n def __init__(self, filename=None):\n self.verbose = 0\n self.filename = filename\n self.FID = None\n self.file_size = None\n self.header_size = None\n self.Index = None\n\n self.pingDataCheck = None\n self.navDataCheck = None\n \n self.datagram_ident_search = self._build_startbytesearch()\n self.read_methods = [method_name for method_name in dir(self) if method_name[0:4] == 'read']\n \n self.datagram_ident = None\n self.datagram_data = None\n self.read_method = None\n self.eof = False\n\n # According to KMALL documentation:\n self.MAX_DATAGRAM_SIZE = 64000\n self.HEADER_STRUCT_FORMAT = '1I4s2B1H2I'\n self.HEADER_STRUCT_SIZE = struct.calcsize(self.HEADER_STRUCT_FORMAT)\n self.PART_STRUCT_SIZE = struct.calcsize(\"2H\")\n self.HEADER_AND_PART_SIZE =self.HEADER_STRUCT_SIZE + self.PART_STRUCT_SIZE\n # A datagram is made of a header, a partition structure and the data, ended with a 4bytes\n # integer which repeats the message size. The data part to split shall have a max length of:\n self.MAX_DATA_SIZE = self.MAX_DATAGRAM_SIZE - self.HEADER_AND_PART_SIZE - 4\n\n def __del__(self):\n if self.FID:\n self.FID.close()\n\n def decode_datagram(self):\n \"\"\"\n Assumes the file pointer is at the correct position to read the size of the dgram and the identifier\n \n Stores the datagram identifier and the read method as attributes. 
read method is the name of the class\n method that we would use to read the datagram\n \"\"\"\n self.datagram_ident = None\n self.read_method = None\n if self.FID is None:\n self.OpenFiletoRead()\n if self.file_size is None: # need file size to determine end of file, init if not done already\n filelen = self._initialize_sequential_read(0, 0)\n\n num_bytes = self.FID.read(4)\n dgram = self.FID.read(4)\n if not self.FID.tell() == self.file_size: # end of file\n self.FID.seek(-8, 1)\n is_valid_identifier = self.datagram_ident_search.search(dgram, 0)\n # dgram passes first check, starts with # and is 3 capital letters after\n if is_valid_identifier:\n # now compare dgram identifier with the last three letters of each read method to find the right one\n self.datagram_ident = dgram[-3:].decode()\n read_method = [rm for rm in self.read_methods if rm[-3:] == self.datagram_ident]\n if not len(read_method) > 1:\n self.read_method = read_method[0]\n else:\n raise ValueError('Found multiple valid read methods for {}: {}'.format(dgram, read_method))\n else:\n raise ValueError('Did not find valid datagram identifier: {}'.format(dgram))\n else:\n self.eof = True\n \n def read_datagram(self):\n \"\"\"\n Reads the datagram data and stores the data in self.datagram_data\n Will always translate the installation parameters record (translate=True)\n \n To get the first record:\n \n km = kmall.kmall(r\"C:\\\\Users\\\\zzzz\\\\Downloads\\\\0007_20190513_154724_ASVBEN.kmall\")\n km.decode_datagram()\n km.read_datagram()\n \n Or to get the first MRZ record:\n \n km = kmall.kmall(r\"C:\\\\Users\\\\zzzz\\\\Downloads\\\\0007_20190513_154724_ASVBEN.kmall\")\n while not km.eof:\n km.decode_datagram()\n if km.datagram_ident != 'MRZ':\n km.skip_datagram()\n else:\n km.read_datagram()\n break\n \n \"\"\"\n if self.read_method is not None: # is None when decode fails or is at the end of file\n if self.read_method in ['read_EMdgmIIP', 'read_EMdgmIOP']:\n self.datagram_data = getattr(self, self.read_method)(translate=True)\n else:\n self.datagram_data = getattr(self, self.read_method)()\n\n def skip_datagram(self):\n \"\"\"\n After decoding, use this to skip to the next datagram if you don't want to read this one\n \"\"\"\n if self.read_method is not None:\n format_to_unpack = \"1I\"\n numbytes = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))[0]\n self.FID.seek(numbytes - struct.Struct(format_to_unpack).size, 1)\n\n def read_first_datagram(self, datagram_identifier):\n \"\"\"\n Uses read_datagram to quickly read the first instance of a datagram in a file\n\n datagram_identifier is a 3 letter string identifier, ex: 'IIP' or 'MRZ'\n \"\"\"\n self.datagram_data = None\n self.eof = False\n \n if self.FID is None:\n self.OpenFiletoRead()\n else:\n self.FID.seek(0)\n\n while not self.eof:\n self.decode_datagram()\n if self.datagram_ident != datagram_identifier:\n self.skip_datagram()\n else:\n self.read_datagram()\n break\n if self.datagram_data is None:\n print('Unable to find {} in file'.format(datagram_identifier))\n return self.datagram_data\n\n def read_EMdgmHeader(self):\n \"\"\"\n Read general datagram header.\n :return: A dictionary containing EMdgmHeader ('header').\n \"\"\"\n # LMD tested.\n\n dg = {}\n format_to_unpack = self.HEADER_STRUCT_FORMAT\n self.header_size = struct.Struct(format_to_unpack).size\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Datagram length in bytes. 
The length field at the start (4 bytes) and end\n # of the datagram (4 bytes) are included in the length count.\n dg['numBytesDgm'] = fields[0]\n # Array of length 4. Multibeam datagram type definition, e.g. #AAA\n dg['dgmType'] = fields[1]\n # Datagram version.\n dg['dgmVersion'] = fields[2]\n # System ID. Parameter used for separating datagrams from different echosounders\n # if more than one system is connected to SIS/K-Controller.\n dg['systemID'] = fields[3]\n # Echo sounder identity, e.g. 122, 302, 710, 712, 2040, 2045, 850.\n dg['echoSounderID'] = fields[4]\n # UTC time in seconds + Nano seconds remainder. Epoch 1970-01-01.\n dg['dgtime'] = fields[5] + fields[6] / 1.0E9\n dg['dgdatetime'] = datetime.datetime.utcfromtimestamp(dg['dgtime'])\n\n if self.verbose > 2:\n self.print_datagram(dg)\n\n return dg\n\n def read_EMdgmIIP(self, translate=False):\n \"\"\"\n Read #IIP - installation parameters and sensor format settings.\n\n If translate is True, the returned install_txt will be a dict with human readable key: value pairs.\n self.read_datagram will always use translate=True\n\n :return: A dictionary containging EMdgmIIP.\n \"\"\"\n # LMD tested.\n\n dg = {}\n dg['header'] = self.read_EMdgmHeader()\n\n format_to_unpack = \"3H1B\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Size in bytes of body part struct. Used for denoting size of rest of the datagram.\n dg['numBytesCmnPart'] = fields[0]\n # Information. For future use.\n dg['info'] = fields[1]\n # Status. For future use.\n dg['status'] = fields[2]\n\n # Installation settings as text format. Parameters separated by ; and lines separated by , delimiter.\n tmp = self.FID.read(dg['numBytesCmnPart'] - struct.Struct(format_to_unpack).size)\n i_text = tmp.decode('UTF-8')\n\n if translate:\n i_text = self.translate_installation_parameters_todict(i_text)\n dg['install_txt'] = i_text\n\n # remainder = total bytes - (header bytes + data bytes)\n expected_unknown_size = dg['header']['numBytesDgm'] - (self.header_size + dg['numBytesCmnPart'])\n\n # Skip unknown fields.\n self.FID.seek(expected_unknown_size, 1)\n\n return dg\n\n def read_EMdgmIOP(self, translate=False):\n \"\"\"\n Read #IOP - runtime parameters, exactly as chosen by operator in K-Controller/SIS menus.\n\n If translate is True, the returned runtime_txt will be a dict with human readable key: value pairs.\n self.read_datagram will always use translate=True\n\n :return: A dictionary containing EMdgmIOP.\n \"\"\"\n # LMD tested.\n\n dg = {}\n dg['header'] = self.read_EMdgmHeader()\n\n format_to_unpack = \"3H\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Size in bytes of body part struct. Used for denoting size of rest of the datagram.\n dg['numBytesCmnPart'] = fields[0]\n # Information. For future use.\n dg['info'] = fields[1]\n # Status. For future use.\n dg['status'] = fields[2]\n\n # Runtime parameters as text format. 
Parameters separated by ; and lines separated by , delimiter.\n # Text strings refer to names in menus of the K-Controller/SIS.\n tmp = self.FID.read(dg['numBytesCmnPart'] - struct.Struct(format_to_unpack).size)\n rt_text = tmp.decode('UTF-8')\n # print(rt_text)\n if translate:\n rt_text = self.translate_runtime_parameters_todict(rt_text)\n dg['runtime_txt'] = rt_text\n \n # remainder = total bytes - (header bytes + data bytes)\n expected_unknown_size = dg['header']['numBytesDgm'] - (self.header_size + dg['numBytesCmnPart'])\n\n # Skip unknown fields.\n self.FID.seek(expected_unknown_size, 1)\n\n return dg\n\n def read_EMdgmIB(self):\n \"\"\"\n Read #IB - results from online built-in test (BIST). Definition used for three different BIST datagrams,\n i.e. #IBE (BIST Error report), #IBR (BIST reply) or #IBS (BIST short reply).\n :return: A dictionary containing EMdgmIB.\n \"\"\"\n # LMD added, untested.\n # TODO: Test with file containing BIST.\n print(\"WARNING: You are using an incomplete, untested function: read_EMdgmIB.\")\n\n dg = {}\n dg['header'] = self.read_EMdgmHeader()\n\n format_to_unpack = \"1H3B1b1B\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Size in bytes of body part struct. Used for denoting size of rest of the datagram.\n dg['numBytesCmnPart'] = fields[0]\n # 0 = last subset of the message; 1 = more messages to come\n dg['BISTInfo'] = fields[1]\n # 0 = plain text; 1 = use style sheet\n dg['BISTStyle'] = fields[2]\n # The BIST number executed.\n dg['BISTNumber'] = fields[3]\n # 0 = BIST executed with no errors; positive number = warning; negative number = error\n dg['BISTStatus'] = fields[4]\n\n # Result of the BIST. Starts with a synopsis of the result, followed by detailed descriptions.\n tmp = self.FID.read(dg['numBytesCmnPart'] - struct.Struct(format_to_unpack).size)\n bist_text = tmp.decode('UTF-8')\n # print(bist_text)\n dg['BISTText'] = bist_text\n\n # Skip unknown fields.\n self.FID.seek(dg['numBytesCmnPart'] - struct.Struct(format_to_unpack).size, 1)\n\n '''\n if self.verbose > 2:\n self.print_datagram(dg)\n '''\n\n return dg\n\n def read_EMdgmMpartition(self):\n \"\"\"\n Read multibeam (M) datagrams - data partition info. General for all M datagrams.\n Kongsberg documentation: \"If a multibeam depth datagram (or any other large datagram) exceeds the limit of a\n UDP package (64 kB), the datagram is split into several datagrams =< 64 kB before sending from the PU.\n The parameters in this struct will give information of the partitioning of datagrams. K-Controller/SIS merges\n all UDP packets/datagram parts to one datagram, and store it as one datagram in the .kmall files. Datagrams\n stored in .kmall files will therefore always have numOfDgm = 1 and dgmNum = 1, and may have size > 64 kB.\n The maximum number of partitions from PU is given by MAX_NUM_MWC_DGMS and MAX_NUM_MRZ_DGMS.\"\n :return: A dictionary containing EMdgmMpartition ('partition').\n \"\"\"\n # LMD tested.\n\n dg = {}\n format_to_unpack = \"2H\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Number of datagram parts to re-join to get one Multibeam datagram. E.g. 3.\n dg['numOfDgms'] = fields[0]\n # Datagram part number, e.g. 2 (of 3).\n dg['dgmNum'] = fields[1]\n\n if self.verbose > 2:\n self.print_datagram(dg)\n\n return dg\n\n def read_EMdgmMbody(self):\n \"\"\"\n Read multibeam (M) datagrams - body part. 
Start of body of all M datagrams.\n Contains information of transmitter and receiver used to find data in datagram.\n :return: A dictionary containing EMdgmMbody ('cmnPart').\n \"\"\"\n # LMD tested.\n\n dg = {}\n format_to_unpack = \"2H8B\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Used for denoting size of current struct.\n dg['numBytesCmnPart'] = fields[0]\n # A ping is made of one or more RX fans and one or more TX pulses transmitted at approximately the same time.\n # Ping counter is incremented at every set of TX pulses\n # (one or more pulses transmitted at approximately the same time).\n dg['pingCnt'] = fields[1]\n # Number of rx fans per ping gives information of how many #MRZ datagrams are generated per ping.\n # Combined with swathsPerPing, number of datagrams to join for a complete swath can be found.\n dg['rxFansPerPing'] = fields[2]\n # Index 0 is the aft swath, port side.\n dg['rxFanIndex'] = fields[3]\n # Number of swaths per ping. A swath is a complete set of across track data.\n # A swath may contain several transmit sectors and RX fans.\n dg['swathsPerPing'] = fields[4]\n # Alongship index for the location of the swath in multi swath mode. Index 0 is the aftmost swath.\n dg['swathAlongPosition'] = fields[5]\n # Transducer used in this tx fan. Index: 0 = TRAI_TX1; 1 = TRAI_TX2 etc.\n dg['txTransducerInd'] = fields[6]\n # Transducer used in this rx fan. Index: 0 = TRAI_RX1; 1 = TRAI_RX2 etc.\n dg['rxTransducerInd'] = fields[7]\n # Total number of receiving units.\n dg['numRxTransducers'] = fields[8]\n # For future use. 0 - current algorithm, >0 - future algorithms.\n dg['algorithmType'] = fields[9]\n\n # Skip unknown fields.\n self.FID.seek(dg['numBytesCmnPart'] - struct.Struct(format_to_unpack).size, 1)\n\n if self.verbose > 2:\n self.print_datagram(dg)\n\n return dg\n\n def read_EMdgmMRZ_pingInfo(self, dgmVersion=0):\n \"\"\"\n Read #MRZ - ping info. Information on vessel/system level,\n i.e. information common to all beams in the current ping.\n :return: A dictionary containing EMdgmMRZ_pingInfo ('pingInfo').\n \"\"\"\n # LMD tested.\n\n dg = {}\n format_to_unpack_a = \"2H1f6B1H11f2h2B1H1I3f2H1f2H6f4B\"\n fields = struct.unpack(format_to_unpack_a, self.FID.read(struct.Struct(format_to_unpack_a).size))\n\n # Number of bytes in current struct.\n dg['numBytesInfoData'] = fields[0]\n # Byte alignment.\n dg['padding0'] = fields[1]\n\n # # # # # Ping Info # # # # #\n # Ping rate. Filtered/averaged.\n dg['pingRate_Hz'] = fields[2]\n # 0 = Eqidistance; 1 = Equiangle; 2 = High density\n dg['beamSpacing'] = fields[3]\n # Depth mode. Describes setting of depth in K-Controller. Depth mode influences the PUs choice of pulse length\n # and pulse type. If operator has manually chosen the depth mode to use, this is flagged by adding 100 to the\n # mode index. 0 = Very Shallow; 1 = Shallow; 2 = Medium; 3 = Deep; 4 = Deeper; 5 = Very Deep; 6 = Extra Deep;\n # 7 = Extreme Deep\n dg['depthMode'] = fields[4]\n # For advanced use when depth mode is set manually. 0 = Sub depth mode is not used (when depth mode is auto).\n dg['subDepthMode'] = fields[5]\n # Achieved distance between swaths, in percent relative to required swath distance.\n # 0 = function is not used; 100 = achieved swath distance equals required swath distance.\n dg['distanceBtwSwath'] = fields[6]\n # Detection mode. Bottom detection algorithm used. 
0 = normal; 1 = waterway; 2 = tracking;\n # 3 = minimum depth; If system running in simulation mode: detectionmode + 100 = simulator.\n dg['detectionMode'] = fields[7]\n # Pulse forms used for current swath. 0 = CW; 1 = mix; 2 = FM\n dg['pulseForm'] = fields[8]\n # TODO: Kongsberg documentation lists padding1 as \"Ping rate. Filtered/averaged.\" This appears to be incorrect.\n # In testing, padding1 prints all zeros. I'm assuming this is for byte alignment, as with other 'padding' cases.\n # Byte alignment.\n dg['padding1'] = fields[9]\n # Ping frequency in hertz. E.g. for EM 2040: 200 000 Hz, 300 000 Hz or 400 000 Hz.\n # If values is less than 100, it refers to a code defined below:\n # -1 = Not used; 0 = 40 - 100 kHz, EM 710, EM 712; 1 = 50 - 100 kHz, EM 710, EM 712;\n # 2 = 70 - 100 kHz, EM 710, EM 712; 3 = 50 kHz, EM 710, EM 712; 4 = 40 kHz, EM 710, EM 712;\n # 180 000 - 400 000 = 180-400 kHz, EM 2040C (10 kHz steps)\n # 200 000 = 200 kHz, EM 2040; 300 000 = 300 kHz, EM 2040; 400 000 = 400 kHz, EM 2040\n dg['frequencyMode_Hz'] = fields[10]\n # Lowest centre frequency of all sectors in this swath. Unit hertz. E.g. for EM 2040: 260 000 Hz.\n dg['freqRangeLowLim_Hz'] = fields[11]\n # Highest centre frequency of all sectors in this swath. Unit hertz. E.g. for EM 2040: 320 000 Hz.\n dg['freqRangeHighLim_Hz'] = fields[12]\n # Total signal length of the sector with longest tx pulse. Unit second.\n dg['maxTotalTxPulseLength_sec'] = fields[13]\n # Effective signal length (-3dB envelope) of the sector with longest effective tx pulse. Unit second.\n dg['maxEffTxPulseLength_sec'] = fields[14]\n # Effective bandwidth (-3dB envelope) of the sector with highest bandwidth.\n dg['maxEffTxBandWidth_Hz'] = fields[15]\n # Average absorption coefficient, in dB/km, for vertical beam at current depth. Not currently in use.\n dg['absCoeff_dBPerkm'] = fields[16]\n # Port sector edge, used by beamformer, Coverage is refered to z of SCS.. Unit degree.\n dg['portSectorEdge_deg'] = fields[17]\n # Starboard sector edge, used by beamformer. Coverage is referred to z of SCS. Unit degree.\n dg['starbSectorEdge_deg'] = fields[18]\n # Coverage achieved, corrected for raybending. Coverage is referred to z of SCS. Unit degree.\n dg['portMeanCov_deg'] = fields[19]\n # Coverage achieved, corrected for raybending. Coverage is referred to z of SCS. Unit degree.\n dg['stbdMeanCov_deg'] = fields[20]\n # Coverage achieved, corrected for raybending. Coverage is referred to z of SCS. Unit meter.\n dg['portMeanCov_m'] = fields[21]\n # Coverage achieved, corrected for raybending. Unit meter.\n dg['starbMeanCov_m'] = fields[22]\n # Modes and stabilisation settings as chosen by operator. Each bit refers to one setting in K-Controller.\n # Unless otherwise stated, default: 0 = off, 1 = on/auto.\n # Bit: 1 = Pitch stabilisation; 2 = Yaw stabilisation; 3 = Sonar mode; 4 = Angular coverage mode;\n # 5 = Sector mode; 6 = Swath along position (0 = fixed, 1 = dynamic); 7-8 = Future use\n dg['modeAndStabilisation'] = fields[23]\n # Filter settings as chosen by operator. Refers to settings in runtime display of K-Controller.\n # Each bit refers to one filter setting. 0 = off, 1 = on/auto.\n # Bit: 1 = Slope filter; 2 = Aeration filter; 3 = Sector filter;\n # 4 = Interference filter; 5 = Special amplitude detect; 6-8 = Future use\n dg['runtimeFilter1'] = fields[24]\n # Filter settings as chosen by operator. Refers to settings in runtime display of K-Controller. 
4 bits used per filter.\n # Bits: 1-4 = Range gate size: 0 = small, 1 = normal, 2 = large\n # 5-8 = Spike filter strength: 0 = off, 1= weak, 2 = medium, 3 = strong\n # 9-12 = Penetration filter: 0 = off, 1 = weak, 2 = medium, 3 = strong\n # 13-16 = Phase ramp: 0 = short, 1 = normal, 2 = long\n dg['runtimeFilter2'] = fields[25]\n # Pipe tracking status. Describes how angle and range of top of pipe is determined.\n # 0 = for future use; 1 = PU uses guidance from SIS.\n dg['pipeTrackingStatus'] = fields[26]\n # Transmit array size used. Direction along ship. Unit degree.\n dg['transmitArraySizeUsed_deg'] = fields[27]\n # Receiver array size used. Direction across ship. Unit degree.\n dg['receiveArraySizeUsed_deg'] = fields[28]\n # Operator selected tx power level re maximum. Unit dB. E.g. 0 dB, -10 dB, -20 dB.\n dg['transmitPower_dB'] = fields[29]\n # For marine mammal protection. The parameters describes time remaining until max source level (SL) is achieved.\n # Unit %.\n dg['SLrampUpTimeRemaining'] = fields[30]\n # Byte alignment.\n dg['padding2'] = fields[31]\n # Yaw correction angle applied. Unit degree.\n dg['yawAngle_deg'] = fields[32]\n\n # # # # # Info of Tx Sector Data Block # # # # #\n # Number of transmit sectors. Also called Ntx in documentation. Denotes how\n # many times the struct EMdgmMRZ_txSectorInfo is repeated in the datagram.\n dg['numTxSectors'] = fields[33]\n # Number of bytes in the struct EMdgmMRZ_txSectorInfo, containing tx sector\n # specific information. The struct is repeated numTxSectors times.\n dg['numBytesPerTxSector'] = fields[34]\n\n # # # # # Info at Time of Midpoint of First Tx Pulse # # # # #\n # Heading of vessel at time of midpoint of first tx pulse. From active heading sensor.\n dg['headingVessel_deg'] = fields[35]\n # At time of midpoint of first tx pulse. Value as used in depth calculations.\n # Source of sound speed defined by user in K-Controller.\n dg['soundSpeedAtTxDepth_mPerSec'] = fields[36]\n # Tx transducer depth in meters below waterline, at time of midpoint of first tx pulse.\n # For the tx array (head) used by this RX-fan. Use depth of TX1 to move depth point (XYZ)\n # from water line to transducer (reference point of old datagram format).\n dg['txTransducerDepth_m'] = fields[37]\n # Distance between water line and vessel reference point in meters. At time of midpoint of first tx pulse.\n # Measured in the surface coordinate system (SCS).See Coordinate systems 'Coordinate systems' for definition.\n # Used this to move depth point (XYZ) from vessel reference point to waterline.\n dg['z_waterLevelReRefPoint_m'] = fields[38]\n # Distance between *.all reference point and *.kmall reference point (vessel referenece point) in meters,\n # in the surface coordinate system, at time of midpoint of first tx pulse. Used this to move depth point (XYZ)\n # from vessel reference point to the horisontal location (X,Y) of the active position sensor's reference point\n # (old datagram format).\n dg['x_kmallToall_m'] = fields[39]\n # Distance between *.all reference point and *.kmall reference point (vessel referenece point) in meters,\n # in the surface coordinate system, at time of midpoint of first tx pulse. 
Used this to move depth point (XYZ)\n # from vessel reference point to the horisontal location (X,Y) of the active position sensor's reference point\n # (old datagram format).\n dg['y_kmallToall_m'] = fields[40]\n # Method of position determination from position sensor data:\n # 0 = last position received; 1 = interpolated; 2 = processed.\n dg['latLongInfo'] = fields[41]\n # Status/quality for data from active position sensor. 0 = valid data, 1 = invalid data, 2 = reduced performance\n dg['posSensorStatus'] = fields[42]\n # Status/quality for data from active attitude sensor. 0 = valid data, 1 = invalid data, 2 = reduced performance\n dg['attitudeSensorStatus'] = fields[43]\n # Padding for byte alignment.\n dg['padding3'] = fields[44]\n\n # For some reason, it doesn't work to do this all in one step, but it works broken up into two steps. *shrug*\n if dgmVersion == 0:\n format_to_unpack_b = \"2d1f\"\n elif dgmVersion == 2:\n format_to_unpack_b = \"2d2f2B1H\"\n else:\n print(\"Unsupported dgmVersion for MRZ : %s\",str(dgmVersion))\n\n fields = struct.unpack(format_to_unpack_b, self.FID.read(struct.Struct(format_to_unpack_b).size))\n\n # Latitude (decimal degrees) of vessel reference point at time of midpoint of first tx pulse.\n # Negative on southern hemisphere. Parameter is set to define UNAVAILABLE_LATITUDE if not available.\n dg['latitude_deg'] = fields[0]\n # Longitude (decimal degrees) of vessel reference point at time of midpoint of first tx pulse.\n # Negative on western hemisphere. Parameter is set to define UNAVAILABLE_LONGITUDE if not available.\n dg['longitude_deg'] = fields[1]\n # Height of vessel reference point above the ellipsoid, derived from active GGA sensor.\n # ellipsoidHeightReRefPoint_m is GGA height corrected for motion and installation offsets\n # of the position sensor.\n dg['ellipsoidHeightReRefPoint_m'] = fields[2]\n\n if dgmVersion == 2:\n # Backscatter offset set in the installation menu\n dg['bsCorrectionOffset_dB'] = fields[3]\n # Beam intensity data corrected as seabed image data (Lambert and normal incidence corrections)\n dg['lambertsLawApplied'] = fields[4]\n # Ice window installed\n dg['iceWindow'] = fields[5]\n # Sets status for active modes.\n # Bit | Modes | Setting\n # 1 | EM MultiFrequency Mode | 0 = not active, 1 = active\n # 2-16 | Not in use | NA \n dg['activeModes'] = fields[6]\n\n # Skip unknown fields.\n self.FID.seek(dg['numBytesInfoData'] - struct.Struct(format_to_unpack_a).size\n - struct.Struct(format_to_unpack_b).size, 1)\n\n if self.verbose > 2:\n self.print_datagram(dg)\n\n return dg\n\n def read_EMdgmMRZ_txSectorInfo(self, dgmVersion=0):\n \"\"\"\n Read #MRZ - sector info. Information specific to each transmitting sector.\n sectorInfo is repeated numTxSectors (Ntx)- times in datagram.\n :return: A dictionary containing EMdgmMRZ_txSectorInfo ('sectorInfo').\n \"\"\"\n # NOTE: There's no fields for the number of bytes in this record. Odd.\n # LMD tested.\n\n dg = {}\n if dgmVersion == 0:\n format_to_unpack = \"4B7f2B1H\"\n elif dgmVersion == 2:\n format_to_unpack = \"4B7f2B1H3f\"\n\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # TX sector index number, used in the sounding section. Starts at 0.\n dg['txSectorNumb'] = fields[0]\n # TX array number. Single TX, txArrNumber = 0.\n dg['txArrNumber'] = fields[1]\n # Default = 0. E.g. for EM2040, the transmitted pulse consists of three sectors, each transmitted from separate\n # txSubArrays. 
Orientation and numbers are relative the array coordinate system. Sub array installation offsets\n # can be found in the installation datagram, #IIP. 0 = Port subarray; 1 = middle subarray; 2 = starboard subarray\n dg['txSubArray'] = fields[2]\n # Byte alignment.\n dg['padding0'] = fields[3]\n # Transmit delay of the current sector/subarray. Delay is the time from the midpoint of the current transmission\n # to midpoint of the first transmitted pulse of the ping, i.e. relative to the time used in the datagram header.\n dg['sectorTransmitDelay_sec'] = fields[4]\n # Along ship steering angle of the TX beam (main lobe of transmitted pulse),\n # angle referred to transducer array coordinate system. Unit degree.\n dg['tiltAngleReTx_deg'] = fields[5]\n # Unit dB re 1 microPascal.\n dg['txNominalSourceLevel_dB'] = fields[6]\n # 0 = no focusing applied.\n dg['txFocusRange_m'] = fields[7]\n # Centre frequency. Unit hertz.\n dg['centreFreq_Hz'] = fields[8]\n # FM mode: effective bandwidth; CW mode: 1/(effective TX pulse length)\n dg['signalBandWidth_Hz'] = fields[9]\n # Also called pulse length. Unit second.\n dg['totalSignalLength_sec'] = fields[10]\n # Transmit pulse is shaded in time (tapering). Amplitude shading in %.\n # cos2- function used for shading the TX pulse in time.\n dg['pulseShading'] = fields[11]\n # Transmit signal wave form. 0 = CW; 1 = FM upsweep; 2 = FM downsweep.\n dg['signalWaveForm'] = fields[12]\n # Byte alignment.\n dg['padding1'] = fields[13]\n\n if dgmVersion == 2:\n # 20 log(Measured high voltage power level at TX pulse / Nominal high voltage power level). \n # This parameter will also include the effect of user selected transmit power reduction (transmitPower_dB) and mammal protection. \n # Actual SL = txNominalSourceLevel_dB + highVoltageLevel_dB. Unit dB.\n dg['highVoltageLevel_dB'] = fields[14]\n # Backscatter correction added in sector tracking mode. Unit dB.\n dg['sectorTrackingCorr_dB'] = fields[15]\n # Signal length used for backscatter footprint calculation. This compensates for the TX pulse tapering and the RX filter bandwidths. \n # Unit second.\n dg['effectiveSignalLength_sec'] = fields[16]\n\n if self.verbose > 2:\n self.print_datagram(dg)\n\n return dg\n\n def read_EMdgmMRZ_rxInfo(self):\n \"\"\"\n Read #MRZ - receiver specific information. Information specific to the receiver unit used in this swath.\n :return: A dictionary containing EMdgmMRZ_rxInfo ('rxInfo').\n \"\"\"\n # LMD tested.\n\n dg = {}\n format_to_unpack = \"4H4f4H\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Bytes in current struct.\n dg['numBytesRxInfo'] = fields[0]\n # Maximum number of main soundings (bottom soundings) in this datagram, extra detections\n # (soundings in water column) excluded. Also referred to as Nrx. Denotes how many bottom points\n # (or loops) given in the struct EMdgmMRZ_sounding_def.\n dg['numSoundingsMaxMain'] = fields[1]\n # Number of main soundings of valid quality. Extra detections not included.\n dg['numSoundingsValidMain'] = fields[2]\n # Bytes per loop of sounding (per depth point), i.e. bytes per loops of the struct EMdgmMRZ_sounding_def.\n dg['numBytesPerSounding'] = fields[3]\n # Sample frequency divided by water column decimation factor. Unit hertz.\n dg['WCSampleRate'] = fields[4]\n # Sample frequency divided by seabed image decimation factor. Unit hertz.\n dg['seabedImageSampleRate'] = fields[5]\n # Backscatter level, normal incidence. 
Unit dB.\n dg['BSnormal_dB'] = fields[6]\n # Backscatter level, oblique incidence. Unit dB.\n dg['BSoblique_dB'] = fields[7]\n # extraDetectionAlarmFlag = sum of alarm flags. Range 0-10.\n dg['extraDetectionAlarmFlag'] = fields[8]\n # Sum of extradetection from all classes. Also refered to as Nd.\n dg['numExtraDetections'] = fields[9]\n # Range 0-10.\n dg['numExtraDetectionClasses'] = fields[10]\n # Number of bytes in the struct EMdgmMRZ_extraDetClassInfo_def.\n dg['numBytesPerClass'] = fields[11]\n\n # Skip unknown fields.\n self.FID.seek(dg['numBytesRxInfo'] - struct.Struct(format_to_unpack).size, 1)\n\n if self.verbose > 2:\n self.print_datagram(dg)\n\n return dg\n\n def read_EMdgmMRZ_extraDetClassInfo(self):\n \"\"\"\n Read #MRZ - extra detection class information. To be entered in loop numExtraDetectionClasses times.\n :return: A dictionary containing EMdgmMRZ_extra DetClassInfo ('extraDetClassInfo').\n \"\"\"\n # NOTE: There's no fields for the number of bytes in this record. Odd.\n # TODO: Need to test with file containing extra detections.\n\n dg = {}\n format_to_unpack = \"1H1b1B\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Number of extra detection in this class.\n dg['numExtraDetInClass'] = fields[0]\n # Byte alignment.\n dg['padding'] = fields[1]\n # 0 = no alarm; 1 = alarm.\n dg['alarmFlag'] = fields[2]\n\n if self.verbose > 2:\n self.print_datagram(dg)\n\n return dg\n\n def read_EMdgmMRZ_sounding(self):\n \"\"\"\n Read #MRZ - data for each sounding, e.g. XYZ, reflectivity, two way travel time etc. Also contains\n information necessary to read seabed image following this datablock (number of samples in SI etc.).\n To be entered in loop (numSoundingsMaxMain + numExtraDetections) times.\n :return: A dictionary containing EMdgmMRZ_sounding ('sounding').\n \"\"\"\n # NOTE: There's no fields for the number of bytes in this record. Odd.\n # LMD tested.\n\n dg = {}\n format_to_unpack = \"1H8B1H6f2H18f4H\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Sounding index. Cross reference for seabed image.\n # Valid range: 0 to (numSoundingsMaxMain+numExtraDetections)-1, i.e. 0 - (Nrx+Nd)-1.\n dg['soundingIndex'] = fields[0]\n # Transmitting sector number. Valid range: 0-(Ntx-1), where Ntx is numTxSectors.\n dg['txSectorNumb'] = fields[1]\n\n # # # # # D E T E C T I O N I N F O # # # # #\n # Bottom detection type. Normal bottom detection, extra detection, or rejected.\n # 0 = normal detection; 1 = extra detection; 2 = rejected detection\n # In case 2, the estimated range has been used to fill in amplitude samples in the seabed image datagram.\n dg['detectionType'] = fields[2]\n # Method for determining bottom detection, e.g. amplitude or phase.\n # 0 = no valid detection; 1 = amplitude detection; 2 = phase detection; 3-15 for future use.\n dg['detectionMethod'] = fields[3]\n # For Kongsberg use.\n dg['rejectionInfo1'] = fields[4]\n # For Kongsberg use.\n dg['rejectionInfo2'] = fields[5]\n # For Kongsberg use.\n dg['postProcessingInfo'] = fields[6]\n # Only used by extra detections. Detection class based on detected range.\n # Detection class 1 to 7 corresponds to value 0 to 6. If the value is between 100 and 106,\n # the class is disabled by the operator. 
If the value is 107, the detections are outside the treshhold limits.\n dg['detectionClass'] = fields[7]\n # Detection confidence level.\n dg['detectionConfidenceLevel'] = fields[8]\n # Byte alignment.\n dg['padding'] = fields[9]\n # Unit %. rangeFactor = 100 if main detection.\n dg['rangeFactor'] = fields[10]\n # Estimated standard deviation as % of the detected depth. Quality Factor (QF) is\n # calculated from IFREMER Quality Factor (IFQ): QF=Est(dz)/z=100*10^-IQF\n dg['qualityFactor'] = fields[11]\n # Vertical uncertainty, based on quality factor (QF, qualityFactor).\n dg['detectionUncertaintyVer_m'] = fields[12]\n # Horizontal uncertainty, based on quality factor (QF, qualityFactor).\n dg['detectionUncertaintyHor_m'] = fields[13]\n # Detection window length. Unit second. Sample data range used in final detection.\n dg['detectionWindowLength_sec'] = fields[14]\n # Measured echo length. Unit second.\n dg['echoLength_sec'] = fields[15]\n\n # # # # # W A T E R C O L U M N P A R A M E T E R S # # # # #\n # Water column beam number. Info for plotting soundings together with water column data.\n dg['WCBeamNumb'] = fields[16]\n # Water column range. Range of bottom detection, in samples.\n dg['WCrange_samples'] = fields[17]\n # Water column nominal beam angle across. Re vertical.\n dg['WCNomBeamAngleAcross_deg'] = fields[18]\n\n # # # # # REFLECTIVITY DATA (BACKSCATTER (BS) DATA) # # # # #\n # Mean absorption coefficient, alfa. Used for TVG calculations. Value as used. Unit dB/km.\n dg['meanAbsCoeff_dbPerkm'] = fields[19]\n # Beam intensity, using the traditional KM special TVG.\n dg['reflectivity1_dB'] = fields[20]\n # Beam intensity (BS), using TVG = X log(R) + 2 alpha R. X (operator selected) is common to all beams in\n # datagram. Alpha (variabel meanAbsCoeff_dBPerkm) is given for each beam (current struct).\n # BS = EL - SL - M + TVG + BScorr, where EL= detected echo level (not recorded in datagram),\n # and the rest of the parameters are found below.\n dg['reflectivity2_dB'] = fields[21]\n # Receiver sensitivity (M), in dB, compensated for RX beampattern\n # at actual transmit frequency at current vessel attitude.\n dg['receiverSensitivityApplied_dB'] = fields[22]\n # Source level (SL) applied (dB): SL = SLnom + SLcorr, where SLnom = Nominal maximum SL,\n # recorded per TX sector (variable txNominalSourceLevel_dB in struct EMdgmMRZ_txSectorInfo_def) and\n # SLcorr = SL correction relative to nominal TX power based on measured high voltage power level and\n # any use of digital power control. SL is corrected for TX beampattern along and across at actual transmit\n # frequency at current vessel attitude.\n dg['sourceLevelApplied_dB'] = fields[23]\n # Backscatter (BScorr) calibration offset applied (default = 0 dB).\n dg['BScalibration_dB'] = fields[24]\n # Time Varying Gain (TVG) used when correcting reflectivity.\n dg['TVG_dB'] = fields[25]\n\n # # # # # R A N G E A N D A N G L E D A T A # # # # #\n # Angle relative to the RX transducer array, except for ME70,\n # where the angles are relative to the horizontal plane.\n dg['beamAngleReRx_deg'] = fields[26]\n # Applied beam pointing angle correction.\n dg['beamAngleCorrection_deg'] = fields[27]\n # Two way travel time (also called range). Unit second.\n dg['twoWayTravelTime_sec'] = fields[28]\n # Applied two way travel time correction. 
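        # NOTE (illustrative, standard acoustics rather than anything specific to
        # this datagram): a slant range in metres can be approximated from the two
        # way travel time as soundSpeed * twoWayTravelTime_sec / 2, e.g.
        # 1500 m/s * 0.2 s / 2 = 150 m.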
Unit second.\n dg['twoWayTravelTimeCorrection_sec'] = fields[29]\n\n # # # # # G E O R E F E R E N C E D D E P T H P O I N T S # # # # #\n # Distance from vessel reference point at time of first tx pulse in ping, to depth point.\n # Measured in the surface coordinate system (SCS), see Coordinate systems for definition. Unit decimal degrees.\n dg['deltaLatitude_deg'] = fields[30]\n # Distance from vessel reference point at time of first tx pulse in ping, to depth point.\n # Measured in the surface coordinate system (SCS), see Coordinate systems for definition. Unit decimal degrees.\n dg['deltaLongitude_deg'] = fields[31]\n # Vertical distance z. Distance from vessel reference point at time of first tx pulse in ping, to depth point.\n # Measured in the surface coordinate system (SCS), see Coordinate systems for definition.\n dg['z_reRefPoint_m'] = fields[32]\n # Horizontal distance y. Distance from vessel reference point at time of first tx pulse in ping, to depth point.\n # Measured in the surface coordinate system (SCS), see Coordinate systems for definition.\n dg['y_reRefPoint_m'] = fields[33]\n # Horizontal distance x. Distance from vessel reference point at time of first tx pulse in ping, to depth point.\n # Measured in the surface coordinate system (SCS), see Coordinate systems for definition.\n dg['x_reRefPoint_m'] = fields[34]\n # Beam incidence angle adjustment (IBA) unit degree.\n dg['beamIncAngleAdj_deg'] = fields[35]\n # For future use.\n dg['realTimeCleanInfo'] = fields[36]\n\n # # # # # S E A B E D I M A G E # # # # #\n # Seabed image start range, in sample number from transducer. Valid only for the current beam.\n dg['SIstartRange_samples'] = fields[37]\n # Seabed image. Number of the centre seabed image sample for the current beam.\n dg['SIcentreSample'] = fields[38]\n # Seabed image. Number of range samples from the current beam, used to form the seabed image.\n dg['SInumSamples'] = fields[39]\n\n if self.verbose > 2:\n self.print_datagram(dg)\n\n return dg\n\n def read_EMdgmMRZ(self):\n \"\"\"\n A method to read a full #MRZ datagram.\n Kongsberg documentation: \"The datagram also contains seabed image data. Depths points (x,y,z) are calculated\n in meters, georeferred to the position of the vessel reference point at the time of the first transmitted pulse\n of the ping. The depth point coordinates x and y are in the surface coordinate system (SCS), and are also given\n as delta latitude and delta longitude, referred to origo of the VCS/SCS, at the time of the midpoint of the\n first transmitted pulse of the ping (equals time used in the datagram header timestamp). See Coordinate systems\n for introduction to spatial reference points and coordinate systems. 
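        A minimal usage sketch (illustrative only; assumes `reader` is an instance
        of this class with its file open as self.FID and positioned at the start of
        an #MRZ datagram):

            dg = reader.read_EMdgmMRZ()
            depths = dg['sounding']['z_reRefPoint_m']    # per-sounding depths, metres
            dlat = dg['sounding']['deltaLatitude_deg']   # offsets from the ping's reference position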
Reference points are also described in\n Reference points and offsets.\"\n :return: A dictionary including full MRZ datagram information including EMdgmHeader ('header'), EMdgmMpartition\n ('Mpart'), EMdgmbody ('Mbody'), EMdgmMRZ_pingInfo ('pingInfo'), EMdgmMRZ_txSectorInfo ('txSectorInfo'),\n EMdgmMRZ_rxInfo ('rxinfo'), EMdgmMRZ_sounding ('soundings'), and ('SIsample_desidB').\n \"\"\"\n # LMD tested.\n\n start = self.FID.tell()\n\n dg = {}\n dg['header'] = self.read_EMdgmHeader()\n dg['partition'] = self.read_EMdgmMpartition()\n dg['cmnPart'] = self.read_EMdgmMbody()\n\n # The dgmVersion is an integer that specifies the KMall format version per message.\n # Here, the format version for the MRZ message is read.\n dgmVersion = dg['dgmVersion']\n dg['pingInfo'] = self.read_EMdgmMRZ_pingInfo(dgmVersion)\n\n # Read TX sector info for each sector\n txSectorInfo = []\n for sector in range(dg['pingInfo']['numTxSectors']):\n txSectorInfo.append(self.read_EMdgmMRZ_txSectorInfo(dgmVersion))\n dg['txSectorInfo'] = self.listofdicts2dictoflists(txSectorInfo)\n\n # Read reInfo\n dg['rxInfo'] = self.read_EMdgmMRZ_rxInfo()\n\n # Read extra detect metadata if they exist.\n extraDetClassInfo = []\n for detclass in range(dg['rxInfo']['numExtraDetectionClasses']):\n extraDetClassInfo.append(self.read_EMdgmMRZ_extraDetClassInfo())\n dg['extraDetClassInfo'] = self.listofdicts2dictoflists(extraDetClassInfo)\n\n # Read the sounding data.\n soundings = []\n Nseabedimage_samples = 0\n for record in range(dg['rxInfo']['numExtraDetections'] +\n dg['rxInfo']['numSoundingsMaxMain']):\n soundings.append(self.read_EMdgmMRZ_sounding())\n Nseabedimage_samples += soundings[record]['SInumSamples']\n dg['sounding'] = self.listofdicts2dictoflists(soundings)\n\n # Read the seabed imagery.\n # Seabed image sample amplitude, in 0.1 dB. Actual number of seabed image samples (SIsample_desidB) to be found\n # by summing parameter SInumSamples in struct EMdgmMRZ_sounding_def for all beams. Seabed image data are raw\n # beam sample data taken from the RX beams. The data samples are selected based on the bottom detection ranges.\n # First sample for each beam is the one with the lowest range. The centre sample from each beam is geo\n # referenced (x, y, z data from the detections). The BS corrections applied at the centre sample are the same\n # as used for reflectivity2_dB (struct EMdgmMRZ_sounding_def).\n format_to_unpack = str(Nseabedimage_samples) + \"h\"\n\n dg['SIsample_desidB'] = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Seek to end of the packet.\n self.FID.seek(start + dg['header']['numBytesDgm'], 0)\n\n return dg\n\n def read_EMdgmMWCtxInfo(self):\n \"\"\"\n Read #MWC - data block 1: transmit sectors, general info for all sectors.\n :return: A dictionary containing EMdgmMWCtxInfo.\n \"\"\"\n # LMD added, tested.\n\n dg = {}\n format_to_unpack = \"3H1h1f\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Number of bytes in current struct.\n dg['numBytesTxInfo'] = fields[0]\n # Number of transmitting sectors (Ntx). Denotes the number of times\n # the struct EMdgmMWCtxSectorData is repeated in the datagram.\n dg['numTxSectors'] = fields[1]\n # Number of bytes in EMdgmMWCtxSectorData.\n dg['numBytesPerTxSector'] = fields[2]\n # Byte alignment.\n dg['padding'] = fields[3]\n # Heave at vessel reference point, at time of ping, i.e. 
at midpoint of first tx pulse in rxfan.\n dg['heave_m'] = fields[4]\n\n # Skip unknown fields.\n self.FID.seek(dg['numBytesTxInfo'] - struct.Struct(format_to_unpack).size, 1)\n\n '''\n if self.verbose > 2:\n self.print_datagram(dg)\n '''\n\n return dg\n\n def read_EMdgmMWCtxSectorData(self):\n \"\"\"\n Read #MWC - data block 1: transmit sector data, loop for all i = numTxSectors.\n :return: A dictionary containing EMdgmMWCtxSectorData\n \"\"\"\n # NOTE: There's no fields for the number of bytes in this record. Odd.\n # LMD added, tested.\n\n dg = {}\n format_to_unpack = \"3f1H1h\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Along ship steering angle of the TX beam (main lobe of transmitted pulse), angle referred to transducer face.\n # Angle as used by beamformer (includes stabilisation). Unit degree.\n dg['tiltAngleReTx_deg'] = fields[0]\n # Centre frequency of current sector. Unit hertz.\n dg['centreFreq_Hz'] = fields[1]\n # Corrected for frequency, sound velocity and tilt angle. Unit degree.\n dg['txBeamWidthAlong_deg'] = fields[2]\n # Transmitting sector number.\n dg['txSectorNum'] = fields[3]\n # Byte alignment.\n dg['padding'] = fields[4]\n\n '''\n if self.verbose > 2:\n self.print_datagram(dg)\n '''\n\n return dg\n\n def read_EMdgmMWCrxInfo(self):\n \"\"\"\n Read #MWC - data block 2: receiver, general info.\n :return: A dictionary containing EMdgmMWCrxInfo.\n \"\"\"\n # LMD added, tested.\n\n dg = {}\n format_to_unpack = \"2H3B1b2f\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Number of bytes in current struct.\n dg['numBytesRxInfo'] = fields[0]\n # Number of beams in this datagram (Nrx).\n dg['numBeams'] = fields[1]\n # Bytes in EMdgmMWCrxBeamData struct, excluding sample amplitudes (which have varying lengths).\n dg['numBytesPerBeamEntry'] = fields[2]\n # 0 = off; 1 = low resolution; 2 = high resolution.\n dg['phaseFlag'] = fields[3]\n # Time Varying Gain function applied (X). X log R + 2 Alpha R + OFS + C, where X and C is documented\n # in #MWC datagram. OFS is gain offset to compensate for TX source level, receiver sensitivity etc.\n dg['TVGfunctionApplied'] = fields[4]\n # Time Varying Gain offset used (OFS), unit dB. X log R + 2 Alpha R + OFS + C, where X and C is documented\n # in #MWC datagram. OFS is gain offset to compensate for TX source level, receiver sensitivity etc.\n dg['TVGoffset_dB'] = fields[5]\n # The sample rate is normally decimated to be approximately the same as the bandwidth of the transmitted pulse.\n # Unit hertz.\n dg['sampleFreq_Hz'] = fields[6]\n # Sound speed at transducer, unit m/s.\n dg['soundVelocity_mPerSec'] = fields[7]\n\n # Skip unknown fields.\n self.FID.seek(dg['numBytesRxInfo'] - struct.Struct(format_to_unpack).size, 1)\n '''\n if self.verbose > 2:\n self.print_datagram(dg)\n '''\n\n return dg\n\n def read_EMdgmMWCrxBeamData(self, dgmVersion=0):\n \"\"\"\n Read #MWC - data block 2: receiver, specific info for each beam.\n :return: A dictionary containing EMdgmMWCrxBeamData.\n \"\"\"\n # NOTE: There's no fields for the number of bytes in this record. 
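        # NOTE (illustrative, derived from the field descriptions below): the water
        # column amplitude samples are int8 values in 0.5 dB steps, so a stored
        # sample of -40 corresponds to -20 dB, and the bottom detection range in
        # metres is approximately
        #     soundVelocity_mPerSec * detectedRangeInSamples / (sampleFreq_Hz * 2),
        # e.g. 1500 m/s * 1000 samples / (15000 Hz * 2) = 50 m.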
Odd.\n # LMD added, partially tested.\n # TODO: Test with water column data, phaseFlag = 1 and phaseFlag = 2 to ensure this continues to function properly.\n\n dg = {}\n\n # Rev F definitions\n if dgmVersion == 0:\n format_to_unpack = \"1f4H\"\n # Rev H definitions\n elif dgmVersion == 1:\n format_to_unpack = \"1f4H1f\"\n\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n dg['beamPointAngReVertical_deg'] = fields[0]\n dg['startRangeSampleNum'] = fields[1]\n # Two way range in samples. Approximation to calculated distance from tx to bottom detection\n # [meters] = soundVelocity_mPerSec * detectedRangeInSamples / (sampleFreq_Hz * 2).\n # The detected range is set to zero when the beam has no bottom detection.\n dg['detectedRangeInSamples'] = fields[2]\n dg['beamTxSectorNum'] = fields[3]\n # Number of sample data for current beam. Also denoted Ns.\n dg['numSampleData'] = fields[4]\n\n if dgmVersion == 1:\n # The same information as in detectedRangeInSamples with higher resolution.Two way range in samples.\n # Approximation to calculated distance from tx to bottom detection[meters] =\n # soundVelocity_mPerSec * detectedRangeInSamples / (sampleFreq_Hz * 2).\n # The detected range is set to zero when the beam has no bottom detection.\n dg['detectedRangeInSamplesHighResolution'] = fields[5]\n\n # Pointer to start of array with Water Column data. Length of array = numSampleData.\n # Sample amplitudes in 0.5 dB resolution. Size of array is numSampleData * int8_t.\n # Amplitude array is followed by phase information if phaseFlag >0.\n # Use (numSampleData * int8_t) to jump to next beam, or to start of phase info for this beam, if phase flag > 0.\n format_to_unpack = str(dg['numSampleData']) + \"b\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n dg['sampleAmplitude05dB_p'] = fields\n\n '''\n if self.verbose > 2:\n self.print_datagram(dg)\n '''\n\n return dg\n\n def read_EMdgmMWCrxBeamPhase1(self, numSampleData):\n \"\"\"\n Read #MWC - Beam sample phase info, specific for each beam and water column sample.\n numBeams * numSampleData = (Nrx * Ns) entries. Only added to datagram if phaseFlag = 1.\n Total size of phase block is numSampleData * int8_t.\n :return: A dictionary containing EMdgmCrxBeamPhase1.\n \"\"\"\n # LMD added, untested.\n # TODO: Test with water column data, phaseFlag = 1 to complete/test this function.\n # print(\"WARNING: You are using an incomplete, untested function: read_EMdgmMWCrxBeamPhase1.\")\n\n dg = {}\n format_to_unpack = str(numSampleData) + \"b\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Rx beam phase in 180/128 degree resolution.\n dg['rxBeamPhase'] = fields\n\n return dg\n\n def read_EMdgmMWCrxBeamPhase2(self, numSampleData):\n \"\"\"\n Read #MWC - Beam sample phase info, specific for each beam and water column sample.\n numBeams * numSampleData = (Nrx * Ns) entries. 
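        (Illustrative: each phase value is an int16 in 0.01 degree resolution, so a
        stored value of 4500 corresponds to a phase of 45 degrees.)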
Only added to datagram if phaseFlag = 2.\n Total size of phase block is numSampleData * int16_t.\n :return: A dictionary containing EMdgmCrxBeamPhase2.\n \"\"\"\n # LMD added, untested.\n # TODO: Test with water column data, phaseFlag = 2 to complete/test this function.\n # print(\"WARNING: You are using an incomplete, untested function: read_EMdgmMWCrxBeamPhase2.\")\n\n dg = {}\n format_to_unpack = str(numSampleData) + \"h\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Rx beam phase in 0.01 degree resolution.\n dg['rxBeamPhase'] = fields\n\n return dg\n\n def read_EMdgmMWC(self):\n \"\"\"\n Read #MWC - Multibeam Water Column Datagram. Entire datagram containing several sub structs.\n :return: A dictionary containing EMdgmMWC.\n \"\"\"\n # LMD added, partially tested.\n # NOTE: Tested with phaseFlag = 0.\n # TODO: Test with water column data, phaseFlag = 1 and phaseFlag = 2 to fully complete/test this function.\n\n start = self.FID.tell()\n\n dg = {}\n dg['header'] = self.read_EMdgmHeader()\n dg['partition'] = self.read_EMdgmMpartition()\n dg['cmnPart'] = self.read_EMdgmMbody()\n\n dgmVersion = dg['dgmVersion']\n dg['txInfo'] = self.read_EMdgmMWCtxInfo()\n # Read TX sector info for each sector\n txSectorData = []\n for sector in range(dg['txInfo']['numTxSectors']):\n txSectorData.append(self.read_EMdgmMWCtxSectorData())\n dg['sectorData'] = self.listofdicts2dictoflists(txSectorData)\n\n dg['rxInfo'] = self.read_EMdgmMWCrxInfo()\n\n # Pointer to beam related information. Struct defines information about data for a beam. Beam information is\n # followed by sample amplitudes in 0.5 dB resolution . Amplitude array is followed by phase information if\n # phaseFlag >0. These data defined by struct EMdgmMWCrxBeamPhase1_def (int8_t) or struct\n # EMdgmMWCrxBeamPhase2_def (int16_t) if indicated in the field phaseFlag in struct EMdgmMWCrxInfo_def.\n # Length of data block for each beam depends on the operators choice of phase information (see table):\n '''\n phaseFlag: Beam Block Size: \n 0 numBytesPerBeamEntry + numSampleData * size(sampleAmplitude05dB_p)\n 1 numBytesPerBeamEntry + numSampleData * size(sampleAmplitude05dB_p)\n + numSampleData * size(EMdgmMWCrxBeamPhase1_def)\n 2 numBytesPerBeamEntry + numSampleData * size(sampleAmplitude05dB_p)\n + numSampleData * size(EMdgmMWCrxBeamPhase2_def)\n '''\n\n rxBeamData = []\n rxPhaseInfo = []\n for idx in range(dg['rxInfo']['numBeams']):\n rxBeamData.append(self.read_EMdgmMWCrxBeamData(dgmVersion))\n\n if dg['rxInfo']['phaseFlag'] == 0:\n pass\n\n elif dg['rxInfo']['phaseFlag'] == 1:\n # TODO: Test with water column data, phaseFlag = 1 to complete/test this function.\n rxPhaseInfo.append(self.read_EMdgmMWCrxBeamPhase1(rxBeamData[idx]['numSampleData']))\n\n elif dg['rxInfo']['phaseFlag'] == 2:\n # TODO: Test with water column data, phaseFlag = 2 to complete/test this function.\n rxPhaseInfo.append(self.read_EMdgmMWCrxBeamPhase2(rxBeamData[idx]['numSampleData']))\n\n else:\n print(\"ERROR: phaseFlag error in read_EMdgmMWC function.\")\n\n dg['beamData'] = self.listofdicts2dictoflists(rxBeamData)\n\n # TODO: Should this be handled in a different way? 
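        # (Illustrative reading of the table above, with hypothetical values: for
        # numBytesPerBeamEntry = 12, numSampleData = 1000 and phaseFlag = 2, one
        # beam block occupies 12 + 1000 * 1 + 1000 * 2 = 3012 bytes, because the
        # int8 amplitude array is followed by one int16 phase value per sample.)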
By this method, number of fields in dg is variable.\n if dg['rxInfo']['phaseFlag'] == 1 or dg['rxInfo']['phaseFlag'] == 2:\n dg['phaseInfo'] = self.listofdicts2dictoflists(rxPhaseInfo)\n\n # Seek to end of the packet.\n self.FID.seek(start + dg['header']['numBytesDgm'], 0)\n\n return dg\n\n def read_EMdgmScommon(self):\n \"\"\"\n Read sensor (S) output datagram - common part for all external sensors.\n :return: A dictionary containing EMdgmScommon ('cmnPart').\n \"\"\"\n # LMD added, tested.\n\n dg = {}\n format_to_unpack = \"4H\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Size in bytes of current struct. Used for denoting size of rest of\n # datagram in cases where only one datablock is attached.\n dg['numBytesCmnPart'] = fields[0]\n # Sensor system number, as indicated when setting up the system in K-Controller installation menu. E.g.\n # position system 0 refers to system POSI_1 in installation datagram #IIP. Check if this sensor system is\n # active by using #IIP datagram. #SCL - clock datagram:\n '''\n Bit: Sensor system: \n 0 Time syncronisation from clock data\n 1 Time syncronisation from active position data\n 2 1 PPS is used\n '''\n dg['sensorSystem'] = fields[1]\n # Sensor status. To indicate quality of sensor data is valid or invalid. Quality may be invalid even if sensor\n # is active and the PU receives data. Bit code vary according to type of sensor.\n # Bits 0 -7 common to all sensors and #MRZ sensor status:\n '''\n Bit: Sensor data: \n 0 0 = Data OK; 1 = Data OK and sensor is chosen as active; \n #SCL only: 1 = Valid data and 1PPS OK\n 1 0\n 2 0 = Data OK; 1 = Reduced performance; \n #SCL only: 1 = Reduced performance, no time synchronisation of PU\n 3 0\n 4 0 = Data OK; 1 = Invalid data\n 5 0\n 6 0 = Velocity from sensor; 1 = Velocity calculated by PU\n 7 0\n '''\n # For #SPO (position) and CPO (position compatibility) datagrams, bit 8 - 15:\n '''\n Bit: Sensor data: \n 8 0\n 9 0 = Time from PU used (system); 1 = Time from datagram used (e.g. from GGA telegram)\n 10 0 = No motion correction; 1 = With motion correction\n 11 0 = Normal quality check; 1 = Operator quality check. Data always valid.\n 12 0\n 13 0\n 14 0\n 15 0\n '''\n dg['sensorStatus'] = fields[2]\n dg['padding'] = fields[3]\n\n # Skip unknown fields.\n self.FID.seek(dg['numBytesCmnPart'] - struct.Struct(format_to_unpack).size, 1)\n\n return dg\n\n def read_EMdgmSPOdataBlock(self):\n \"\"\"\n Read #SPO - Sensor position data block. Data from active sensor is corrected data for position system\n installation parameters. Data is also corrected for motion (roll and pitch only) if enabled by K-Controller\n operator. Data given both decoded and corrected (active sensors), and raw as received from sensor in text\n string.\n :return: A dictionary containing EMdgmSPOdataBlock ('sensorData').\n \"\"\"\n # NOTE: There's no fields for the number of bytes in this record. Odd.\n # LMD added, tested.\n\n dg = {}\n format_to_unpack = \"2I1f\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # UTC time from position sensor. Unit seconds. Epoch 1970-01-01. Nanosec part to be added for more exact time.\n dg['timeFromSensor_sec'] = fields[0]\n # UTC time from position sensor. Unit nano seconds remainder.\n dg['timeFromSensor_nanosec'] = fields[1]\n dg['datetime'] = datetime.datetime.utcfromtimestamp(dg['timeFromSensor_sec']\n + dg['timeFromSensor_nanosec'] / 1.0E9)\n # Only if available as input from sensor. 
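        # NOTE (illustrative): the 'sensorStatus' word decoded in read_EMdgmScommon()
        # above is a bit field; for example (sensorStatus & 0x01) tests bit 0
        # (data OK and sensor active) and (sensorStatus >> 4) & 1 tests bit 4
        # (invalid data), per the table in that method.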
Calculation according to format.\n dg['posFixQuality_m'] = fields[2]\n\n # For some reason, it doesn't work to do this all in one step, but it works broken up into two steps. *shrug*\n format_to_unpack = \"2d3f250s\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Motion corrected (if enabled in K-Controller) data as used in depth calculations. Referred to vessel\n # reference point. Unit decimal degree. Parameter is set to define UNAVAILABLE_LATITUDE if sensor inactive.\n dg['correctedLat_deg'] = fields[0]\n # Motion corrected (if enabled in K-Controller) data as used in depth calculations. Referred to vessel\n # reference point. Unit decimal degree. Parameter is set to define UNAVAILABLE_LONGITUDE if sensor inactive.\n dg['correctedLong_deg'] = fields[1]\n # Speed over ground. Unit m/s. Motion corrected (if enabled in K-Controller) data as used in depth calculations.\n # If unavailable or from inactive sensor, value set to define UNAVAILABLE_SPEED.\n dg['speedOverGround_mPerSec'] = fields[2]\n # Course over ground. Unit degree. Motion corrected (if enabled in K-Controller) data as used in depth\n # calculations. If unavailable or from inactive sensor, value set to define UNAVAILABLE_COURSE.\n dg['courseOverGround_deg'] = fields[3]\n # Height of vessel reference point above the ellipsoid. Unit meter.\n # Motion corrected (if enabled in K-Controller) data as used in depth calculations.\n # If unavailable or from inactive sensor, value set to define UNAVAILABLE_ELLIPSOIDHEIGHT.\n dg['ellipsoidHeightReRefPoint_m'] = fields[4]\n\n # TODO: This is an array of (max?) length MAX_SPO_DATALENGTH; do something else here?\n # TODO: Get MAX_SPO_DATALENGTH from datagram instead of hard-coding in format_to_unpack.\n # TODO: This works for now, but maybe there is a smarter way?\n # Position data as received from sensor, i.e. uncorrected for motion etc.\n tmp = fields[5]\n dg['posDataFromSensor'] = tmp[0:tmp.find(b'\\r\\n')]\n\n if self.verbose > 2:\n self.print_datagram(dg)\n\n return dg\n\n def read_EMdgmSPO(self):\n \"\"\"\n Read #SPO - Struct of position sensor datagram. From Data from active sensor will be motion corrected if\n indicated by operator. Motion correction is applied to latitude, longitude, speed, course and ellipsoidal\n height. If the sensor is inactive, the fields will be marked as unavailable, defined by the parameters define\n UNAVAILABLE_LATITUDE etc.\n :return: A dictionary of dictionaries, including EMdgmHeader ('header'), EMdgmScommon ('cmnPart'), and\n EMdgmSPOdataBlock ('sensorData').\n \"\"\"\n # LMD added, tested.\n\n start = self.FID.tell()\n\n dg = {}\n dg['header'] = self.read_EMdgmHeader()\n dg['cmnPart'] = self.read_EMdgmScommon()\n dg['sensorData'] = self.read_EMdgmSPOdataBlock()\n\n # Seek to end of the packet.\n self.FID.seek(start + dg['header']['numBytesDgm'], 0)\n\n return dg\n\n def read_EMdgmSKMinfo(self):\n \"\"\"\n Read sensor (S) output datagram - info of KMB datagrams.\n :return: A dictionary containing EMdgmSKMinfo ('infoPart').\n \"\"\"\n # LMD tested.\n\n dg = {}\n format_to_unpack = \"1H2B4H\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Size in bytes of current struct. Used for denoting size of rest of datagram\n # in cases where only one datablock is attached.\n dg['numBytesInfoPart'] = fields[0]\n # Attitude system number, as numbered in installation parameters.\n # E.g. 
system 0 referes to system ATTI_1 in installation datagram #IIP.\n dg['sensorSystem'] = fields[1]\n # Sensor status. Summarise the status fields of all KM binary samples added in this datagram (status in struct\n # KMbinary_def). Only available data from input sensor format is summarised. Available data found in\n # sensorDataContents. Bits 0 -7 common to all sensors and #MRZ sensor status:\n '''\n Sensor Status:\n Bit: 0 0 Data OK, 1 Data OK and Sensor is active\n Bit: 1 0\n Bit: 2 0 Data OK, 1 Data Reduced Performance\n Bit: 3 0\n Bit: 4 0 Data OK, 1 Invalid Data\n Bit: 5 0\n Bit: 6 0 Velocity from Sensor, 1 Velocity from PU\n '''\n dg['sensorStatus'] = fields[2]\n # Format of raw data from input sensor, given in numerical code according to table below.\n '''\n Code: Sensor Format: \n 1: KM Binary Sensor Format\n 2: EM 3000 data\n 3: Sagem\n 4: Seapath binary 11\n 5: Seapath binary 23\n 6: Seapath binary 26\n 7: POS/MV Group 102/103\n 8: Coda Octopus MCOM\n '''\n dg['sensorInputFormat'] = fields[3]\n # Number of KM binary sensor samples added in this datagram.\n dg['numSamplesArray'] = fields[4]\n # Length in bytes of one whole KM binary sensor sample.\n dg['numBytesPerSample'] = fields[5]\n # Field to indicate which information is available from the input sensor, at the given sensor format.\n # 0 = not available; 1 = data is available\n # The bit pattern is used to determine sensorStatus from status field in #KMB samples. Only data available from\n # sensor is check up against invalid/reduced performance in status, and summaries in sensorStatus.\n # E.g. the binary 23 format does not contain delayed heave. This is indicated by setting bit 6 in\n # sensorDataContents to 0. In each sample in #KMB output from PU, the status field (struct KMbinary_def) for\n # INVALID delayed heave (bit 6) is set to 1. The summaries sensorStatus in struct EMdgmSKMinfo_def will then\n # be sets to 0 if all available data is ok. Expected data field in sensor input:\n '''\n Indicates what data is available in the given sensor format\n Bit: Sensor Data:\n 0 Horizontal posistion and velocity\n 1 Roll and pitch\n 2 Heading\n 3 Heave and vertical velocity\n 4 Acceleration\n 5 Error fields\n 6 Delayed Heave\n '''\n dg['sensorDataContents'] = fields[6]\n\n # Skip unknown fields.\n self.FID.seek(dg['numBytesInfoPart'] - struct.Struct(format_to_unpack).size, 1)\n\n if self.verbose > 2:\n self.print_datagram(dg)\n\n return dg\n\n def read_KMdelayedHeave(self):\n \"\"\"\n Read #SKM - delayed heave. Included if available from sensor.\n :return: A dictionary containing KMdelayedHeave.\n \"\"\"\n # NOTE: There's no fields for the number of bytes in this record. Odd.\n # LMD tested with 'empty' delayed heave fields.\n # TODO: Test with data containing delayed heave.\n\n dg = {}\n format_to_unpack = \"2I1f\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n dg['time_sec'] = fields[0]\n dg['time_nanosec'] = fields[1]\n dg['datetime'] = datetime.datetime.utcfromtimestamp(dg['time_sec'] + dg['time_nanosec'] / 1.0E9)\n # Delayed heave. Unit meter.\n dg['delayedHeave_m'] = fields[2]\n\n if self.verbose > 2:\n self.print_datagram(dg)\n\n return dg\n\n def read_KMbinary(self):\n \"\"\"\n Read #SKM - sensor attitude data block. 
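        The 32-bit 'status' word decoded below can be tested with simple bit masks;
        for example (status & 0x02) set marks roll/pitch data as invalid, while
        (status & 0x20000) set marks roll/pitch as having reduced performance
        (illustrative reading of the bit table in the method body).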
Data given timestamped, not corrected.\n See Coordinate Systems for definition of positive angles and axis.\n :return: A dictionary containing KMbinary.\n \"\"\"\n # LMD tested.\n\n dg = {}\n format_to_unpack = \"4B\"\n fields = self.FID.read(struct.Struct(format_to_unpack).size)\n\n # KMB\n dg['dgmType'] = fields.decode('utf-8')\n\n format_to_unpack = \"2H3I\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Datagram length in bytes. The length field at the start (4 bytes)\n # and end of the datagram (4 bytes) are included in the length count.\n dg['numBytesDgm'] = fields[0]\n # Datagram version.\n dg['dgmVersion'] = fields[1]\n # UTC time from inside KM sensor data. Unit second. Epoch 1970-01-01 time.\n # Nanosec part to be added for more exact time.\n dg['time_sec'] = fields[2]\n # Nano seconds remainder. Nanosec part to be added to time_sec for more exact time.\n # If time is unavailable from attitude sensor input, time of reception on serial port is added to this field.\n dg['time_nanosec'] = fields[3]\n dg['dgtime'] = dg['time_sec'] + dg['time_nanosec'] / 1.0E9\n dg['datetime'] = datetime.datetime.utcfromtimestamp(dg['dgtime'])\n # Bit pattern for indicating validity of sensor data, and reduced performance.\n # The status word consists of 32 single bit flags numbered from 0 to 31, where 0 is the least significant bit.\n # Bit number 0-7 indicate if from a sensor data is invalid: 0 = valid data, 1 = invalid data.\n # Bit number 16-> indicate if data from sensor has reduced performance: 0 = valid data, 1 = reduced performance.\n '''\n Invalid data: | Reduced performance: \n Bit: Sensor data: | Bit: Sensor data: \n 0 Horizontal position and velocity | 16 Horizontal position and velocity\n 1 Roll and pitch | 17 Roll and pitch \n 2 Heading | 18 Heading\n 3 Heave and vertical velocity | 19 Heave and vertical velocity\n 4 Acceleration | 20 Acceleration\n 5 Error fields | 21 Error fields\n 6 Delayed heave | 22 Delayed heave\n '''\n dg['status'] = fields[4]\n\n format_to_unpack = \"2d\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # # # # # P O S I T I O N # # # # #\n # Position in decimal degrees.\n dg['latitude_deg'] = fields[0]\n # Position in decimal degrees.\n dg['longitude_deg'] = fields[1]\n\n format_to_unpack = \"21f\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n dg['ellipsoidHeight_m'] = fields[0]\n\n # # # # # A T T I T U D E # # # # #\n dg['roll_deg'] = fields[1]\n dg['pitch_deg'] = fields[2]\n dg['heading_deg'] = fields[3]\n dg['heave_m'] = fields[4]\n\n # # # # # R A T E S # # # # #\n dg['rollRate'] = fields[5]\n dg['pitchRate'] = fields[6]\n dg['yawRate'] = fields[7]\n\n # # # # # V E L O C I T I E S # # # # #\n dg['velNorth'] = fields[8]\n dg['velEast'] = fields[9]\n dg['velDown'] = fields[10]\n\n # # # # # ERRORS IN DATA. 
SENSOR DATA QUALITY, AS STANDARD DEVIATIONS # # # # #\n dg['latitudeError_m'] = fields[11]\n dg['longitudeError_m'] = fields[12]\n dg['ellipsoidalHeightError_m'] = fields[13]\n dg['rollError_deg'] = fields[14]\n dg['pitchError_deg'] = fields[15]\n dg['headingError_deg'] = fields[16]\n dg['heaveError_m'] = fields[17]\n\n # # # # # A C C E L E R A T I O N # # # # #\n dg['northAcceleration'] = fields[18]\n dg['eastAcceleration'] = fields[19]\n dg['downAcceleration'] = fields[20]\n\n # In testing, it appears 'numBytesDgm' = KMbinary + KMdelayedHeave.\n # We will run into errors here if we use this method to skip unknown fields.\n # Skip unknown fields\n # self.FID.seek(dg['numBytesDgm'] - struct.Struct(format_to_unpack).size, 1)\n\n if self.verbose > 2:\n self.print_datagram(dg)\n\n return dg\n\n def read_EMdgmSKMsample(self, dgInfo):\n \"\"\"\n Read #SKM - all available data. An implementation of the KM Binary sensor input format.\n :param dgInfo: A dictionary containing EMdgmSKMinfo (output of function read_EMdgmSKMinfo).\n :return: A dictionary of lists, containing EMdgmSKMsample ('sample').\n This includes keys 'KMdefault' and 'delayedHeave'.\n \"\"\"\n # LMD tested.\n # TODO: Can add code to omit delayed heave if it is not included.\n\n dg = {}\n\n km_binary_data = []\n km_heave_data = []\n\n for idx in range(dgInfo['numSamplesArray']):\n km_binary_data.append(self.read_KMbinary())\n km_heave_data.append(self.read_KMdelayedHeave())\n\n # Convert list of dictionaries to dictionary of lists.\n dg['KMdefault'] = self.listofdicts2dictoflists(km_binary_data)\n dg['delayedHeave'] = self.listofdicts2dictoflists(km_heave_data)\n\n return dg\n\n def read_EMdgmSKM(self):\n \"\"\"\n Read #SKM - data from attitude and attitude velocity sensors. Datagram may contain several sensor measurements.\n The number of samples in datagram is listed in numSamplesArray in the struct EMdgmSKMinfo_def. Time given in\n datagram header, is time of arrival of data on serial line or on network. Time inside #KMB sample is time from\n the sensors data. If input is other than KM binary sensor input format, the data are converted to the KM binary\n format by the PU. All parameters are uncorrected. For processing of data, installation offsets, installation\n angles and attitude values are needed to correct the data for motion.\n :return: A dictionary containing EMdgmSKM.\n \"\"\"\n # LMD tested.\n\n start = self.FID.tell()\n\n # LMD implementation:\n dg = {}\n\n dg['header'] = self.read_EMdgmHeader()\n dg['infoPart'] = self.read_EMdgmSKMinfo()\n dg['sample'] = self.read_EMdgmSKMsample(dg['infoPart'])\n\n # VES implementation:\n '''\n dgH = self.read_EMdgmHeader()\n dgInfo = self.read_EMdgmSKMinfo()\n dgSamples = self.read_EMdgmSKMsample(dgInfo)\n dg = {**dgH, **dgInfo, **dgSamples}\n '''\n\n # Seek to end of the packet.\n self.FID.seek(start + dg['header']['numBytesDgm'], 0)\n\n return dg\n\n def read_EMdgmSVPpoint(self):\n \"\"\"\n Read #SVP - Sound Velocity Profile. Data from one depth point contains information specified in this struct.\n :return: A dictionary containing EMdgmSVPpoint.\n \"\"\"\n # NOTE: There's no fields for the number of bytes in this record. Odd.\n # LMD added, tested.\n\n dg = {}\n format_to_unpack = \"2f1I2f\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Depth at which measurement is taken. Unit m. Valid range from 0.00 m to 12000 m.\n dg['depth_m'] = fields[0]\n # Measured sound velocity from profile. Unit m/s. 
For a CTD profile, this will be the calculated sound velocity.\n dg['soundVelocity_mPerSec'] = fields[1]\n # Former absorption coefficient. Voided.\n dg['padding'] = fields[2]\n # Water temperature at given depth. Unit Celsius. For a Sound velocity profile (S00), this will be set to 0.00.\n dg['temp_C'] = fields[3]\n # Salinity of water at given depth. For a Sound velocity profile (S00), this will be set to 0.00.\n dg['salinity'] = fields[4]\n\n return dg\n\n def read_EMdgmSVP(self):\n \"\"\"\n Read #SVP - Sound Velocity Profile. Data from sound velocity profile or from CTD profile.\n Sound velocity is measured directly or estimated, respectively.\n :return: A dictionary containing EMdgmSVP.\n \"\"\"\n # LMD added, tested.\n\n start = self.FID.tell()\n\n dg = {}\n dg['header'] = self.read_EMdgmHeader()\n\n format_to_unpack = \"2H4s1I\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Size in bytes of body part struct. Used for denoting size of rest of datagram.\n dg['numBytesCmnPart'] = fields[0]\n # Number of sound velocity samples.\n dg['numSamples'] = fields[1]\n # Sound velocity profile format:\n '''\n 'S00' = sound velocity profile\n 'S01' = CTD profile\n '''\n dg['sensorFormat'] = fields[2]\n # Time extracted from the Sound Velocity Profile. Parameter is set to zero if not found.\n dg['time_sec'] = fields[3]\n dg['datetime'] = datetime.datetime.utcfromtimestamp(dg['time_sec'])\n\n format_to_unpack = \"2d\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Latitude in degrees. Negative if southern hemisphere. Position extracted from the Sound Velocity Profile.\n # Parameter is set to define UNAVAILABLE_LATITUDE if not available.\n dg['latitude_deg'] = fields[0]\n # Longitude in degrees. Negative if western hemisphere. Position extracted from the Sound Velocity Profile.\n # Parameter is set to define UNAVAILABLE_LONGITUDE if not available.\n dg['longitude_deg'] = fields[1]\n\n # SVP point samples, repeated numSamples times.\n sensorData = []\n for record in range(dg['numSamples']):\n sensorData.append(self.read_EMdgmSVPpoint())\n dg['sensorData'] = self.listofdicts2dictoflists(sensorData)\n\n # Seek to end of the packet.\n self.FID.seek(start + dg['header']['numBytesDgm'], 0)\n\n return dg\n\n def read_EMdgmSVTinfo(self):\n \"\"\"\n Read part of Sound Velocity at Transducer datagram.\n :return: A dictionary containing EMdgmSVTinfo.\n \"\"\"\n # LMD added, tested.\n\n dg = {}\n format_to_unpack = \"6H2f\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Size in bytes of current struct. Used for denoting size of rest of datagram in cases where only one\n # datablock is attached.\n dg['numBytesInfoPart'] = fields[0]\n # Sensor status. To indicate quality of sensor data is valid or invalid. Quality may be invalid even if sensor\n # is active and the PU receives data. 
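        # NOTE (illustrative usage of read_EMdgmSVP() above, assuming `dg` is its
        # return value and matplotlib is imported as plt): the profile can be
        # plotted directly from the dictionary-of-lists it returns, e.g.
        #     plt.plot(dg['sensorData']['soundVelocity_mPerSec'],
        #              dg['sensorData']['depth_m'])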
Bit code vary according to type of sensor.\n # Bits 0-7 common to all sensors and #MRZ sensor status:\n '''\n Bit: Sensor data: \n 0 0 Data OK; 1 Data OK and sensor chosen is active\n 1 0\n 2 0 Data OK; 1 Reduced Performance\n 3 0\n 4 0 Data OK; 1 Invalid Data\n 5 0\n 6 0 \n '''\n dg['sensorStatus'] = fields[1]\n # Format of raw data from input sensor, given in numerical code according to table below.\n '''\n Code: Sensor format: \n 1 AML NMEA\n 2 AML SV\n 3 AML SVT\n 4 AML SVP\n 5 Micro SV\n 6 Micro SVT\n 7 Micro SVP\n 8 Valeport MiniSVS\n 9 KSSIS 80\n 10 KSSIS 43\n '''\n dg['sensorInputFormat'] = fields[2]\n # Number of sensor samples added in this datagram.\n dg['numSamplesArray'] = fields[3]\n # Length in bytes of one whole SVT sensor sample.\n dg['numBytesPerSample'] = fields[4]\n # Field to indicate which information is available from the input sensor, at the given sensor format.\n # 0 = not available; 1 = data is available\n # Expected data field in sensor input:\n '''\n Bit: Sensor data: \n 0 Sound Velocity\n 1 Temperature\n 2 Pressure\n 3 Salinity\n '''\n dg['sensorDataContents'] = fields[5]\n # Time parameter for moving median filter. Unit seconds.\n dg['filterTime_sec'] = fields[6]\n # Offset for measured sound velocity set in K-Controller. Unit m/s.\n dg['soundVelocity_mPerSec_offset'] = fields[7]\n\n # Skip unknown fields.\n self.FID.seek(dg['numBytesInfoPart'] - struct.Struct(format_to_unpack).size, 1)\n\n return dg\n\n def read_EMdgmSVTsample(self):\n \"\"\"\n Read #SVT - Sound Velocity at Transducer. Data sample.\n :return: A dictionary containing EMdgmSVTsample.\n \"\"\"\n # NOTE: There's no fields for the number of bytes in this record. Odd.\n # LMD added, tested.\n\n dg = {}\n format_to_unpack = \"2I4f\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Time in second. Epoch 1970-01-01. time_nanosec part to be added for more exact time.\n dg['time_sec'] = fields[0]\n # Nano seconds remainder. time_nanosec part to be added to time_sec for more exact time.\n dg['time_nanosec'] = fields[1]\n dg['datetime'] = datetime.datetime.utcfromtimestamp(dg['time_sec'] + dg['time_nanosec'] / 1.0E9)\n # Measured sound velocity from sound velocity probe. Unit m/s.\n dg['soundVelocity_mPerSec'] = fields[2]\n # Water temperature from sound velocity probe. Unit Celsius.\n dg['temp_C'] = fields[3]\n # Pressure. Unit Pascal.\n dg['pressure_Pa'] = fields[4]\n # Salinity of water. Measured in g salt/kg sea water.\n dg['salinity'] = fields[5]\n\n return dg\n\n def read_EMdgmSVT(self):\n \"\"\"\n Read #SVT - Sound Velocity at Transducer. Data for sound velocity and temperature are measured directly\n on the sound velocity probe.\n :return: A dictionary containing EMdgmSVT.\n \"\"\"\n # LMD added, tested.\n\n start = self.FID.tell()\n\n dg = {}\n dg['header'] = self.read_EMdgmHeader()\n dg['infoPart'] = self.read_EMdgmSVTinfo()\n\n sensorData = []\n for record in range(dg['infoPart']['numSamplesArray']):\n sensorData.append(self.read_EMdgmSVTsample())\n dg['sensorData'] = self.listofdicts2dictoflists(sensorData)\n\n # Seek to end of the packet.\n self.FID.seek(start + dg['header']['numBytesDgm'], 0)\n\n return dg\n\n def read_EMdgmSCLdataFromSensor(self):\n \"\"\"\n Read part of clock datagram giving offsets and the raw input in text format.\n :return: A dictionary containing EMdgmSCLdataFromSensor.\n \"\"\"\n # NOTE: There's no fields for the number of bytes in this record. 
Odd.\n # LMD tested.\n\n dg = {}\n format_to_unpack = \"1f1i64s\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # Offset in seconds from K-Controller operator input.\n dg['offset_sec'] = fields[0]\n # Clock deviation from PU. Difference between time stamp at receive of sensor data and time in the clock\n # source. Unit nanoseconds. Difference smaller than +/- 1 second if 1PPS is active and sync from ZDA.\n dg['clockDevPU_nanosec'] = fields[1]\n\n # TODO: This is an array of (max?) length MAX_SCL_DATALENGTH; do something else here?\n # TODO: Get MAX_SCL_DATALENGTH from datagram instead of hard-coding in format_to_unpack.\n # TODO: This works for now, but maybe there is a smarter way?\n # Position data as received from sensor, i.e. uncorrected for motion etc.\n tmp = fields[2]\n dg['dataFromSensor'] = tmp[0:tmp.find(b'\\x00\\x00L')]\n\n return dg\n\n def read_EMdgmSCL(self):\n \"\"\"\n Read #SCL - Clock datagram.\n :return: A dictionary containing EMdgmSCL.\n \"\"\"\n # LMD tested.\n\n start = self.FID.tell()\n\n dg = {}\n dg['header'] = self.read_EMdgmHeader()\n dg['cmnPart'] = self.read_EMdgmScommon()\n dg['sensData'] = self.read_EMdgmSCLdataFromSensor()\n\n # Seek to end of the packet.\n self.FID.seek(start + dg['header']['numBytesDgm'], 0)\n\n return dg\n\n def read_EMdgmSDEdataFromSensor(self):\n \"\"\"\n # WARNING: INCOMPLETE\n Read part of depth datagram giving depth as used, offsets,\n scale factor and data as received from sensor (uncorrected).\n :return: A dictionary containing EMdgmSDEdataFromSensor\n \"\"\"\n # NOTE: There's no fields for the number of bytes in this record. Odd.\n # LMD added, untested.\n # TODO: Test with depth data to complete this function!\n print(\"WARNING: You are using an incomplete, untested function: read_EMdgmSDEdataFromSensor.\")\n\n dg = {}\n format_to_unpack = \"3f2d32s\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n dg['depthUsed_m'] = fields[0]\n dg['offset'] = fields[1]\n dg['scale'] = fields[2]\n dg['latitude_deg'] = fields[3]\n dg['longitude_deg'] = fields[4]\n\n # TODO: This is an array of (max?) length MAX_SDE_DATALENGTH; do something else here?\n # TODO: Get MAX_SDE_DATALENGTH from datagram instead of hard-coding in format_to_unpack.\n # TODO: Test with depth data to complete this function!\n tmp = fields[5]\n # dg['dataFromSensor'] = ...\n\n return dg\n\n def read_EMdgmSDE(self):\n \"\"\"\n Read #SDE - Depth datagram.\n :return: A dictionary containing EMdgmSDE.\n \"\"\"\n # LMD added, untested.\n # TODO: Test with depth data!\n print(\"WARNING: You are using an incomplete, untested function: read_EMdgmSDE.\")\n\n start = self.FID.tell()\n\n dg = {}\n dg['header'] = self.read_EMdgmHeader()\n dg['cmnPart'] = self.read_EMdgmScommon()\n dg['sensorData'] = self.read_EMdgmSDEdataFromSensor()\n\n # Seek to end of the packet.\n self.FID.seek(start + dg['header']['numBytesDgm'], 0)\n\n return dg\n\n def read_EMdgmSHIdataFromSensor(self):\n \"\"\"\n # WARNING: INCOMPLETE\n Read part of Height datagram, giving corrected and uncorrected data as received from sensor.\n :return: A dictionary containing EMdgmSHIdataFromSensor.\n \"\"\"\n # NOTE: There's no fields for the number of bytes in this record. 
Odd.\n # LMD added, untested.\n # TODO: Test with height data to complete this function!\n print(\"WARNING: You are using an incomplete, untested function: read_EMdgmSHIdataFromSensor.\")\n\n dg = {}\n format_to_unpack = \"1H1f32s\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n dg['sensorType'] = fields[0]\n dg['heightUsed_m'] = fields[1]\n\n # TODO: This is an array of (max?) length MAX_SHI_DATALENGTH; do something else here?\n # TODO: Get MAX_SHI_DATALENGTH from datagram instead of hard-coding in format_to_unpack.\n # TODO: Test with height data to complete this function!\n tmp = fields[2]\n # dg['dataFromSensor'] = ...\n\n print(\"DG: \", dg)\n return dg\n\n def read_EMdgmSHI(self):\n \"\"\"\n Read #SHI - Height datagram.\n :return: A dictionary containing EMdgmSHI.\n \"\"\"\n # LMD added, untested.\n # TODO: Test with height data!\n print(\"WARNING: You are using an incomplete, untested function: read_EMdgmSHI.\")\n\n start = self.FID.tell()\n\n dg = {}\n dg['header'] = self.read_EMdgmHeader()\n dg['cmnPart'] = self.read_EMdgmScommon()\n dg['sensData'] = self.read_EMdgmSHIdataFromSensor()\n\n # Seek to end of the packet.\n self.FID.seek(start + dg['header']['numBytesDgm'], 0)\n\n return dg\n\n def read_EMdgmCPOdataBlock(self, length):\n \"\"\"\n Read #CPO - Compatibility sensor position compatibility data block. Data from active sensor is referenced to\n position at antenna footprint at water level. Data is corrected for motion ( roll and pitch only) if enabled\n by K-Controller operator. Data given both decoded and corrected (active sensors), and raw as received from\n sensor in text string.\n :return: A dictionary containing EMdgmCPOdataBlock.\n \"\"\"\n # NOTE: There's no fields for the number of bytes in this record. Odd.\n # LMD tested.\n\n dg = {}\n format_to_unpack = \"2I1f2d3f\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n dg['timeFromSensor_sec'] = fields[0]\n dg['timeFromSensor_nanosec'] = fields[1]\n dg['datetime'] = datetime.datetime.utcfromtimestamp(dg['timeFromSensor_sec']\n + dg['timeFromSensor_nanosec'] / 1.0E9)\n dg['posFixQuality'] = fields[2]\n dg['correctedLat_deg'] = fields[3]\n dg['correctedLong_deg'] = fields[4]\n dg['speedOverGround_mPerSec'] = fields[5]\n dg['courseOverGround_deg'] = fields[6]\n dg['ellipsoidHeightReRefPoint_m'] = fields[7]\n\n # For some reason, it doesn't work to do this all in one step, but it works broken up into two steps. *shrug*\n pos_data_len = length - struct.Struct(format_to_unpack).size\n format_to_unpack = \"%ds\" % pos_data_len\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n # TODO: This is an array of(max?) length MAX_CPO_DATALENGTH; do something else here?\n # TODO: Get MAX_CPO_DATALENGTH from datagram instead of hard-coding in format_to_unpack.\n # TODO: This works for now, but maybe there is a smarter way?\n dg['posDataFromSensor'] = fields[0]\n\n return dg\n\n def read_EMdgmCPO(self):\n \"\"\"\n Read #CPO - Struct of compatibility position sensor datagram. Data from active sensor will be motion corrected\n if indicated by operator. Motion correction is applied to latitude, longitude, speed, course and ellipsoidal\n height. 
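        (Note on the length calculation below: the variable-length sensor text block
        is sized as numBytesDgm minus the 4-byte datagram length field repeated at
        the end of the packet minus the bytes already consumed by the header and
        common part.)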
If the sensor is inactive, the fields will be marked as unavailable, defined by the parameters\n define UNAVAILABLE_LATITUDE etc.\n :return: A dictionary containing EMdgmCPO.\n \"\"\"\n # LMD tested.\n\n start = self.FID.tell()\n\n dg = {}\n dg['header'] = self.read_EMdgmHeader()\n dg['cmnPart'] = self.read_EMdgmScommon()\n\n ## Data block length is balance of datagram minus 4 for the confirmation packet length at end\n data_block_len = dg['header']['numBytesDgm'] - 4 -(self.FID.tell()-start)\n dg['sensorData'] = self.read_EMdgmCPOdataBlock( data_block_len )\n\n # Seek to end of the packet.\n self.FID.seek(start + dg['header']['numBytesDgm'], 0)\n\n return dg\n\n def read_EMdgmCHEdata(self):\n \"\"\"\n Read #CHE - Heave compatibility data part. Heave reference point is at transducer instead of at vessel\n reference point.\n :return: A dictionary containing EMdgmCHEdata.\n \"\"\"\n # NOTE: There's no fields for the number of bytes in this record. Odd.\n # LMD added, tested.\n\n dg = {}\n format_to_unpack = \"1f\"\n fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))\n\n dg['heave_m'] = fields[0]\n\n return dg\n\n def read_EMdgmCHE(self):\n \"\"\"\n Read #CHE - Struct of compatibility heave sensor datagram. Used for backward compatibility with .all datagram\n format. Sent before #MWC (water column datagram) datagram if compatibility mode is enabled. The multibeam\n datagram body is common with the #MWC datagram.\n :return: A dictionary containing EMdgmCHE.\n \"\"\"\n # LMD added, tested.\n\n start = self.FID.tell()\n\n dg = {}\n dg['header'] = self.read_EMdgmHeader()\n dg['cmnPart'] = self.read_EMdgmMbody()\n dg['data'] = self.read_EMdgmCHEdata()\n\n # Seek to end of the packet.\n self.FID.seek(start + dg['header']['numBytesDgm'], 0)\n\n return dg\n\n ###########################################################\n # Writing datagrams\n ###########################################################\n def write_EMdgmMRZ(self, dg):\n ''' A method to write an MRZ datagram back to disk.'''\n\n # Force the header type to be MRZ, just in case\n # the datagram is converted from another type and\n # the old type is still set.\n dg['header']['dgmType'] = b'#MRZ'\n\n self.write_EMdgmHeader(dg['header'])\n self.write_EMdgmMpartition(dg['partition'])\n self.write_EMdgmMbody(dg['cmnPart'])\n self.write_EMdgmMRZ_pingInfo(dg['pingInfo'])\n\n for sector in range(dg['pingInfo']['numTxSectors']):\n self.write_EMdgmMRZ_txSectorInfo(dg['txSectorInfo'], sector)\n\n self.write_EMdgmMRZ_rxInfo(dg['rxInfo'])\n\n for detclass in range(dg['rxInfo']['numExtraDetectionClasses']):\n self.write_EMdgmMRZ_extraDetClassInfo(self.FID, dg['extraDetClassInfo'], detclass)\n\n Nseabedimage_samples = 0\n for record in range(dg['rxInfo']['numExtraDetections'] +\n dg['rxInfo']['numSoundingsMaxMain']):\n self.write_EMdgmMRZ_sounding(dg['sounding'], record)\n Nseabedimage_samples += dg['sounding']['SInumSamples'][record]\n\n if Nseabedimage_samples > 0:\n if 'SIsample_desidB' not in dg:\n print(\n \"Warning, no Imagery data to write, although the field SInumSamples in the sounding datagram is non-zero.\")\n print(\"This will produce an unreadable file.\")\n # FIX: Should throw an error here.\n else:\n self.write_EMdgmMRZ_seabedImagery(dg, Nseabedimage_samples)\n\n self.FID.write(struct.pack(\"I\", dg['header']['numBytesDgm']))\n\n def write_EMdgmMRZ_woImagery(self, dg):\n ''' A method to write an MRZ datagram back to disk, but omitting the imagery data.'''\n\n # First we need to see how much space 
the imagery data will take.\n Nseabedimage_samples = 0\n for record in range(dg['rxInfo']['numExtraDetections'] +\n dg['rxInfo']['numSoundingsMaxMain']):\n Nseabedimage_samples += dg['sounding']['SInumSamples'][record]\n imageryBytes = Nseabedimage_samples * 2\n\n # Now we need to reset the total packet size.\n dg['header']['numBytesDgm'] -= imageryBytes\n\n # Now write the packet, just leave out the imagery\n # data and set Nsamples to 0.\n self.write_EMdgmHeader(dg['header'])\n self.write_EMdgmMpartition(dg['partition'])\n self.write_EMdgmMbody(dg['cmnPart'])\n self.write_EMdgmMRZ_pingInfo(dg['pingInfo'])\n\n for sector in range(dg['pingInfo']['numTxSectors']):\n self.write_EMdgmMRZ_txSectorInfo(dg['txSectorInfo'], sector)\n\n self.write_EMdgmMRZ_rxInfo(dg['rxInfo'])\n\n for detclass in range(dg['rxInfo']['numExtraDetectionClasses']):\n self.write_EMdgmMRZ_extraDetClassInfo(dg['extraDetClassInfo'], detclass)\n\n Nseabedimage_samples = 0\n for record in range(dg['rxInfo']['numExtraDetections'] +\n dg['rxInfo']['numSoundingsMaxMain']):\n # Zero out the number of imagery samples for each sounding.\n dg['sounding']['SInumSamples'][record] = 0\n self.write_EMdgmMRZ_sounding(dg['sounding'], record)\n Nseabedimage_samples += dg['sounding']['SInumSamples'][record]\n\n # Don't write the imagery data.\n # write_EMdgmMRZ_seabedImagery(FID, dg, Nseabedimage_samples)\n\n self.FID.write(struct.pack(\"I\", dg['header']['numBytesDgm']))\n\n def write_EMdgmHeader(self, dg):\n ''' Method to write the datagram header.\n\n write_EMdgmHeader(FID, dg['header'])\n\n '''\n\n format_to_pack = \"<1I4s2B1H2I\"\n\n dg_seconds = int(dg['dgtime'])\n dg_nanoseconds = int((dg['dgtime'] - dg_seconds) * 1e9)\n\n self.FID.write(struct.pack(format_to_pack,\n dg['numBytesDgm'],\n dg['dgmType'],\n dg['dgmVersion'],\n dg['systemID'],\n dg['echoSounderID'],\n dg_seconds,\n dg_nanoseconds))\n\n def write_EMdgmMpartition(self, dg):\n ''' A method to write the Partition Information\n\n write_EMdgmMpartition(FID, dg['partition'])\n\n '''\n\n format_to_pack = \"<2H\"\n self.FID.write(struct.pack(format_to_pack,\n dg['numOfDgms'],\n dg['dgmNum']))\n\n def write_EMdgmMbody(self, dg):\n ''' A method to write the datagram body information\n\n write_EMdgmMbody(FID, dg['cmnPart'])\n\n '''\n\n format_to_pack = \"<2H8B\"\n self.FID.write(struct.pack(format_to_pack,\n dg['numBytesCmnPart'],\n dg['pingCnt'],\n dg['rxFansPerPing'],\n dg['rxFanIndex'],\n dg['swathsPerPing'],\n dg['swathAlongPosition'],\n dg['txTransducerInd'],\n dg['rxTransducerInd'],\n dg['numRxTransducers'],\n dg['algorithmType']))\n\n def write_EMdgmMRZ_pingInfo(self, dg):\n '''A method to write MRZ ping info.\n\n write_EMdgmMRZ_pingInfo(FID, dg['pinginfo'])\n\n '''\n\n format_to_pack_a = \"<2H1f6B1H11f2h2B1H1I3f2H1f2H6f4B\"\n self.FID.write(struct.pack(format_to_pack_a,\n dg['numBytesInfoData'],\n dg['padding0'],\n dg['pingRate_Hz'],\n dg['beamSpacing'],\n dg['depthMode'],\n dg['subDepthMode'],\n dg['distanceBtwSwath'],\n dg['detectionMode'],\n dg['pulseForm'],\n dg['padding1'],\n dg['frequencyMode_Hz'],\n dg['freqRangeLowLim_Hz'],\n dg['freqRangeHighLim_Hz'],\n dg['maxTotalTxPulseLength_sec'],\n dg['maxEffTxPulseLength_sec'],\n dg['maxEffTxBandWidth_Hz'],\n dg['absCoeff_dBPerkm'],\n dg['portSectorEdge_deg'],\n dg['starbSectorEdge_deg'],\n dg['portMeanCov_deg'],\n dg['stbdMeanCov_deg'],\n dg['portMeanCov_m'],\n dg['starbMeanCov_m'],\n dg['modeAndStabilisation'],\n dg['runtimeFilter1'],\n dg['runtimeFilter2'],\n dg['pipeTrackingStatus'],\n dg['transmitArraySizeUsed_deg'],\n 
dg['receiveArraySizeUsed_deg'],\n dg['transmitPower_dB'],\n dg['SLrampUpTimeRemaining'],\n dg['padding2'],\n dg['yawAngle_deg'],\n dg['numTxSectors'],\n dg['numBytesPerTxSector'],\n dg['headingVessel_deg'],\n dg['soundSpeedAtTxDepth_mPerSec'],\n dg['txTransducerDepth_m'],\n dg['z_waterLevelReRefPoint_m'],\n dg['x_kmallToall_m'],\n dg['y_kmallToall_m'],\n dg['latLongInfo'],\n dg['posSensorStatus'],\n dg['attitudeSensorStatus'],\n dg['padding3']))\n\n # For some reason, it doesn't work to do this all in one step, but it works broken up into two steps. *shrug*\n format_to_pack_b = \"<2d1f\"\n self.FID.write(struct.pack(format_to_pack_b,\n dg['latitude_deg'],\n dg['longitude_deg'],\n dg['ellipsoidHeightReRefPoint_m']))\n\n def write_EMdgmMRZ_txSectorInfo(self, dg, sector):\n ''' Write MRZ txSectorInfo for single index \"sector\".\n\n write_EMdgmMRZ_txSectorInfo(FID, dg['txSectorInfo'], sector)\n\n '''\n\n format_to_pack = \"4B7f2B1H\"\n self.FID.write(struct.pack(format_to_pack,\n dg['txSectorNumb'][sector],\n dg['txArrNumber'][sector],\n dg['txSubArray'][sector],\n dg['padding0'][sector],\n dg['sectorTransmitDelay_sec'][sector],\n dg['tiltAngleReTx_deg'][sector],\n dg['txNominalSourceLevel_dB'][sector],\n dg['txFocusRange_m'][sector],\n dg['centreFreq_Hz'][sector],\n dg['signalBandWidth_Hz'][sector],\n dg['totalSignalLength_sec'][sector],\n dg['pulseShading'][sector],\n dg['signalWaveForm'][sector],\n dg['padding1'][sector]))\n\n def write_EMdgmMRZ_rxInfo(self, dg):\n ''' Write MRZ rxInfo datagram.\n\n write_EMdgmMRZ_rxInfo(FID, dg['rxInfo'])\n\n '''\n\n format_to_pack = \"4H4f4H\"\n self.FID.write(struct.pack(format_to_pack,\n dg['numBytesRxInfo'],\n dg['numSoundingsMaxMain'],\n dg['numSoundingsValidMain'],\n dg['numBytesPerSounding'],\n dg['WCSampleRate'],\n dg['seabedImageSampleRate'],\n dg['BSnormal_dB'],\n dg['BSoblique_dB'],\n dg['extraDetectionAlarmFlag'],\n dg['numExtraDetections'],\n dg['numExtraDetectionClasses'],\n dg['numBytesPerClass']))\n\n def write_EMdgmMRZ_extraDetClassInfo(self, dg, detclass):\n ''' Write the MRZ sounding extra Detection Class information.\n\n write_EMdgmMRZ_extraDetClassInfo(FID,dg['extraDetClassInfo'],detclass)\n\n '''\n\n format_to_pack = \"1H1b1B\"\n self.FID.write(struct.pack(format_to_pack,\n dg['numExtraDetInClass'][detclass],\n dg['padding'][detclass],\n dg['alarmFlag'][detclass]))\n\n def write_EMdgmMRZ_sounding(self, dg, record):\n ''' Write MRZ soundings records.\n\n write_EMdgmMRZ_sounding(FID, dg['sounding'], record)\n\n '''\n\n format_to_pack = \"1H8B1H6f2H18f4H\"\n\n self.FID.write(struct.pack(format_to_pack,\n dg['soundingIndex'][record],\n dg['txSectorNumb'][record],\n dg['detectionType'][record],\n dg['detectionMethod'][record],\n dg['rejectionInfo1'][record],\n dg['rejectionInfo2'][record],\n dg['postProcessingInfo'][record],\n dg['detectionClass'][record],\n dg['detectionConfidenceLevel'][record],\n dg['padding'][record],\n dg['rangeFactor'][record],\n dg['qualityFactor'][record],\n dg['detectionUncertaintyVer_m'][record],\n dg['detectionUncertaintyHor_m'][record],\n dg['detectionWindowLength_sec'][record],\n dg['echoLength_sec'][record],\n dg['WCBeamNumb'][record],\n dg['WCrange_samples'][record],\n dg['WCNomBeamAngleAcross_deg'][record],\n dg['meanAbsCoeff_dbPerkm'][record],\n dg['reflectivity1_dB'][record],\n dg['reflectivity2_dB'][record],\n dg['receiverSensitivityApplied_dB'][record],\n dg['sourceLevelApplied_dB'][record],\n dg['BScalibration_dB'][record],\n dg['TVG_dB'][record],\n dg['beamAngleReRx_deg'][record],\n 
dg['beamAngleCorrection_deg'][record],\n dg['twoWayTravelTime_sec'][record],\n dg['twoWayTravelTimeCorrection_sec'][record],\n dg['deltaLatitude_deg'][record],\n dg['deltaLongitude_deg'][record],\n dg['z_reRefPoint_m'][record],\n dg['y_reRefPoint_m'][record],\n dg['x_reRefPoint_m'][record],\n dg['beamIncAngleAdj_deg'][record],\n dg['realTimeCleanInfo'][record],\n dg['SIstartRange_samples'][record],\n dg['SIcentreSample'][record],\n dg['SInumSamples'][record]))\n\n def write_EMdgmMRZ_seabedImagery(self, dg, Nseabedimage_samples):\n ''' Write the MRZ seabedImagery datagram\n\n write_EMdgmMRZ_seabedImagery(FID, dg['SIsample_desidB'])\n\n '''\n format_to_pack = str(Nseabedimage_samples) + \"h\"\n\n self.FID.write(struct.pack(format_to_pack,\n *dg['SIsample_desidB']))\n\n ###############################################################\n # Routines for writing and reading custom compressed packets\n ###############################################################\n\n def compressSoundings(self, dg):\n ''' A method to compress the soundings table by column rather than by row.'''\n record = len(dg['soundingIndex'])\n format_to_pack = \"1H8B1H6f2H18f4H\"\n\n buffer = struct.pack(str(record) + \"H\", *dg['soundingIndex'])\n\n buffer += struct.pack(str(record) + \"B\", *dg['txSectorNumb'])\n buffer += struct.pack(str(record) + \"B\", *dg['detectionType'])\n buffer += struct.pack(str(record) + \"B\", *dg['detectionMethod'])\n buffer += struct.pack(str(record) + \"B\", *dg['rejectionInfo1'])\n buffer += struct.pack(str(record) + \"B\", *dg['rejectionInfo2'])\n buffer += struct.pack(str(record) + \"B\", *dg['postProcessingInfo'])\n buffer += struct.pack(str(record) + \"B\", *dg['detectionClass'])\n buffer += struct.pack(str(record) + \"B\", *dg['detectionConfidenceLevel'])\n\n buffer += struct.pack(str(record) + \"H\", *dg['padding'])\n\n buffer += struct.pack(str(record) + \"f\", *dg['rangeFactor'])\n buffer += struct.pack(str(record) + \"f\", *dg['qualityFactor'])\n buffer += struct.pack(str(record) + \"f\", *dg['detectionUncertaintyVer_m'])\n buffer += struct.pack(str(record) + \"f\", *dg['detectionUncertaintyHor_m'])\n buffer += struct.pack(str(record) + \"f\", *dg['detectionWindowLength_sec'])\n buffer += struct.pack(str(record) + \"f\", *dg['echoLength_sec'])\n\n buffer += struct.pack(str(record) + \"H\", *dg['WCBeamNumb'])\n buffer += struct.pack(str(record) + \"H\", *dg['WCrange_samples'])\n\n buffer += struct.pack(str(record) + \"f\", *dg['WCNomBeamAngleAcross_deg'])\n buffer += struct.pack(str(record) + \"f\", *dg['meanAbsCoeff_dbPerkm'])\n buffer += struct.pack(str(record) + \"f\", *dg['reflectivity1_dB'])\n buffer += struct.pack(str(record) + \"f\", *dg['reflectivity2_dB'])\n buffer += struct.pack(str(record) + \"f\", *dg['receiverSensitivityApplied_dB'])\n buffer += struct.pack(str(record) + \"f\", *dg['sourceLevelApplied_dB'])\n buffer += struct.pack(str(record) + \"f\", *dg['BScalibration_dB'])\n buffer += struct.pack(str(record) + \"f\", *dg['TVG_dB'])\n buffer += struct.pack(str(record) + \"f\", *dg['beamAngleReRx_deg'])\n buffer += struct.pack(str(record) + \"f\", *dg['beamAngleCorrection_deg'])\n buffer += struct.pack(str(record) + \"f\", *dg['twoWayTravelTime_sec'])\n buffer += struct.pack(str(record) + \"f\", *dg['twoWayTravelTimeCorrection_sec'])\n buffer += struct.pack(str(record) + \"f\", *dg['deltaLatitude_deg'])\n buffer += struct.pack(str(record) + \"f\", *dg['deltaLongitude_deg'])\n buffer += struct.pack(str(record) + \"f\", *dg['z_reRefPoint_m'])\n buffer += 
struct.pack(str(record) + \"f\", *dg['y_reRefPoint_m'])\n buffer += struct.pack(str(record) + \"f\", *dg['x_reRefPoint_m'])\n buffer += struct.pack(str(record) + \"f\", *dg['beamIncAngleAdj_deg'])\n\n buffer += struct.pack(str(record) + \"H\", *dg['realTimeCleanInfo'])\n buffer += struct.pack(str(record) + \"H\", *dg['SIstartRange_samples'])\n buffer += struct.pack(str(record) + \"H\", *dg['SIcentreSample'])\n buffer += struct.pack(str(record) + \"H\", *dg['SInumSamples'])\n\n return bz2.compress(buffer)\n\n def encodeArrayIntoUintX(self, A, res):\n ''' Differential encoding of an array of values into a byte array\n A: An array of values\n res: Desired resolution. This determines whether the encoding is\n in an 8-bit or 16-bit array. Details provided below.\n returns: bytes buffer containing packed values and metadata to unpack it.\n The data is differentially encoded, meaning that the difference\n in sequential values is calculated, then the minimum differential value\n is subtracted off the array before scaling each value by max_bits / (max-min).\n max_bits is 255 for uint8 encoding and 65535 for uint16 encoding. To\n determine the encoding, (max-min) / max_bits is compared to the desired\n resolution to ensure the minimum increment falls below it. uint8 is checked\n first, if it fails, uint16 is checked. If it also fails, uint32 is\n used and no actual compression is achieved.\n A buffer is created from the result containing everything needed to\n decipher it. Specifically:\n The first value of the original array as a 4-byte float\n Min difference values as 4-byte float.\n Max difference value as a 4-byte float.\n The number of bits used in the encoding (8 or 16) as a uint8.\n The number of difference values (len(A)-1) as an 4-byte unsigned int\n The array of scaled difference values cast to unsigned \"max_bits\" integers\n '''\n if isinstance(A, list):\n A = np.array(A)\n\n # There are two strategies taken here. Sometimes the\n # data varies smoothly but over a large range, and it\n # is more efficient to encode the data's sequential\n # differences, since they are small in amplitude.\n # But sometimes the data is very stochastic and the\n # first range of differences are large relative to\n # the maximum and minimum values in the data. For\n # example consider the sequence [0 2 0]. The range\n # of the values is 2, but the range of the first\n # differences is 4 (+2 - -2). 
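        # A small worked example (made-up values) of the decision described in this
        # comment: when the spread of the first differences exceeds the spread of the
        # values themselves, the raw values are encoded instead.
        # import numpy as np
        # A = np.array([0.0, 2.0, 0.0])
        # d = np.diff(A)                        # [ 2., -2.]
        # span_values = A.max() - A.min()       # 2.0
        # span_diffs = d.max() - d.min()        # 4.0
        # # span_values < span_diffs, so A[1:] is encoded directly and the element
        # # count is later written as +N rather than -N to mark the non-differential case.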
In this case, it is\n # more efficient to encode the values themselves.\n\n valuesToEncode = np.diff(A.flatten())\n\n maxv = np.max(valuesToEncode)\n minv = np.min(valuesToEncode)\n\n maxA = np.max(A)\n minA = np.min(A)\n\n # print(\"maxvaluesToEncode:%f, minvaluesToEncode:%f\" % (maxv,minv))\n # print(\"maxA:%f, minA:%f\" % (maxA,minA))\n\n differentialEncode = True\n if (maxA - minA) < (maxv - minv):\n differentialEncode = False\n maxv = maxA\n minv = minA\n valuesToEncode = A[1:]\n\n # print(\"Encoding: %s\" % differentialEncode)\n\n if ((maxv - minv) / 255.0) < res:\n bits = 8\n elif ((maxv - minv) / 65535.0) < res:\n bits = 16\n else:\n bits = 32\n\n # print(\"CANNOT Maintain Resolution - Loss of Data!\")\n # print(\"max diff: %f, min diff: %f, res: %f\" % (maxv, minv, res))\n # bits = 16\n # return None\n # print(bits)\n if maxv == minv:\n # Value is constant.\n scaleFactor = 1.0\n else:\n if bits == 8:\n scaleFactor = 255.0 / (maxv - minv)\n elif bits == 16:\n scaleFactor = 65535.0 / (maxv - minv)\n else:\n scaleFactor = 4294967295.0 / (maxv - minv)\n\n tmp = (((valuesToEncode - minv) * scaleFactor)).astype(int)\n\n # This bullshit gets around an apparant bug in the struct module.\n if isinstance(A[0], np.ndarray):\n tmp2 = A[0].tolist()\n else:\n tmp2 = A[0]\n\n if isinstance(tmp2, np.int64) or isinstance(tmp2, np.float64):\n buffer = struct.pack('f', tmp2)\n else:\n buffer = struct.pack('f', tmp2[0])\n # buffer = struct.pack('f',float(A[0][0]))\n\n N = len(tmp)\n buffer += struct.pack('f', minv)\n buffer += struct.pack('f', maxv)\n # Set a marker by recording the number of points\n # to encode as a negative number to indicate that\n # the fields have been differentially encoded.\n if differentialEncode:\n buffer += struct.pack('i', -N)\n else:\n buffer += struct.pack('i', N)\n buffer += struct.pack('B', bits)\n\n if bits == 8:\n buffer += struct.pack(str(N) + 'B', *tmp)\n if bits == 16:\n buffer += struct.pack(str(N) + 'H', *tmp)\n if bits == 32:\n buffer += struct.pack(str(N) + 'I', *tmp)\n\n return buffer\n\n def decodeUintXintoArray(self, buffer):\n ''' Decodes differential-encoded data from X-bit unsigned integers into a float array.\n See encodeArrayIntoUintX().\n\n '''\n\n fields = struct.unpack('fffiB', buffer[0:17])\n A0 = fields[0]\n minv = fields[1]\n maxv = fields[2]\n N = fields[3]\n differentialDecode = False\n if N < 0:\n differentialDecode = True\n N = -N\n\n bits = fields[4]\n\n if bits == 8:\n dA = struct.unpack(str(N) + 'B', buffer[17:(17 + N)])\n bytesDecoded = 17 + N\n elif bits == 16:\n dA = struct.unpack(str(N) + 'H', buffer[17:(17 + N * 2)])\n bytesDecoded = 17 + (N * 2)\n elif bits == 32:\n dA = struct.unpack(str(N) + 'I', buffer[17:(17 + N * 4)])\n bytesDecoded = 17 + (N * 4)\n\n if differentialDecode:\n if bits == 8:\n orig = np.cumsum(\n [A0] + list((np.array([float(x) for x in dA]) * (maxv - minv) / 255.0) + minv)).tolist()\n elif bits == 16:\n orig = np.cumsum(\n [A0] + list((np.array([float(x) for x in dA]) * (maxv - minv) / 65535.0) + minv)).tolist()\n else:\n orig = np.cumsum(\n [A0] + list((np.array([float(x) for x in dA]) * (maxv - minv) / 4294967295.0) + minv)).tolist()\n else:\n if bits == 8:\n orig = [A0] + list((np.array([float(x) for x in dA]) * (maxv - minv) / 255.0) + minv)\n elif bits == 16:\n orig = [A0] + list((np.array([float(x) for x in dA]) * (maxv - minv) / 65535.0) + minv)\n else:\n orig = [A0] + list((np.array([float(x) for x in dA]) * (maxv - minv) / 4294967295.0) + minv)\n\n # print(A0)\n # print(minv)\n # print(maxv)\n # 
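        # Sketch of the block layout this decoder expects (values hypothetical):
        # a fixed 17-byte metadata header followed by the N packed values.
        # import struct
        # assert struct.calcsize('fffiB') == 17   # A[0], min diff, max diff, +/-N, bits
        # # then N values packed as 'B' (8-bit), 'H' (16-bit) or 'I' (32-bit), so a
        # # whole block is 17 + N, 17 + 2*N or 17 + 4*N bytes -- the bytesDecoded
        # # value returned from this method.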
print(N)\n # print(bits)\n\n return (orig, bytesDecoded)\n\n def encodeAndCompressSoundings(self, dg):\n ''' A method to differential-encode and compress the soundings table.\n\n Float values are encoded in this way\n See encodeArrayIntoUintX() for details on how.\n Some attempt is made to minimize the impact of\n non-float fields in the original datagram too.\n\n A note about the \"res\" or resolution argument to\n encodeArrayIntoUintX(): This field attempts to be\n the maximum error one can expect between the original\n value and the final decoded value after encoding.\n But because it is the first difference in values that\n are actually encoded, errors accumulate in the\n decoding process as the decoded differences are cumulateively\n summed and the errors that result can be larger than the\n \"res\" value. Some experimentation is required to ensure\n sufficient bits are used to reduce the desired error.\n '''\n\n record = len(dg['soundingIndex'])\n\n buffer = struct.pack(str(record) + \"H\", *dg['soundingIndex'])\n\n ## The following optimization has almost no effect\n ## because of the compressoin applied to the\n ## sounding buffer:\n\n # Valid values for txSectorNumber are 0-7 (probably)\n # Valid values for detectionType are 0-2\n # Valid values for detectionMethod are 0-15.\n\n # But detectionMethod > 2 have been reserved for\n # future use as long as any one can remember. Under\n # the assumption that Kongsberg won't record more\n # than 9 detection methods or have more than 9\n # transmit sectors, these values can be packed\n # into a single 8-bit value.\n\n tmp = (np.array(dg['detectionType']) * 100. +\n np.array(dg['detectionMethod']) * 10. +\n np.array(dg['txSectorNumb'])).astype(int)\n buffer += struct.pack(str(record) + \"B\", *tmp)\n # I don't think there's any way to tell with no ambiguity\n # when decoding if they were packed or not. For example,\n # if there were just one tx sector, and only normal type\n # detections of using amplitude method, the values would\n # all be 1, which is a valid tx sector value. So I'll leave\n # these commented out.\n # else:\n # buffer += struct.pack(str(record)+\"B\", *dg['txSectorNumb'])\n # buffer += struct.pack(str(record)+\"B\", *dg['detectionType'])\n # buffer += struct.pack(str(record)+\"B\", *dg['detectionMethod'])\n\n buffer += struct.pack(str(record) + \"B\", *dg['rejectionInfo1'])\n buffer += struct.pack(str(record) + \"B\", *dg['rejectionInfo2'])\n buffer += struct.pack(str(record) + \"B\", *dg['postProcessingInfo'])\n buffer += struct.pack(str(record) + \"B\", *dg['detectionClass'])\n buffer += struct.pack(str(record) + \"B\", *dg['detectionConfidenceLevel'])\n\n # No point in carrying along the padding field. It's for byte alignment\n # but we've already reorganized the data. so we can omit it\n # and recreate it on the other side.\n\n buffer += self.encodeArrayIntoUintX(dg['rangeFactor'], 1)\n buffer += self.encodeArrayIntoUintX(dg['qualityFactor'], .01)\n buffer += self.encodeArrayIntoUintX(dg['detectionUncertaintyVer_m'], .01)\n buffer += self.encodeArrayIntoUintX(dg['detectionUncertaintyHor_m'], .1)\n buffer += self.encodeArrayIntoUintX(dg['detectionWindowLength_sec'], .001)\n buffer += self.encodeArrayIntoUintX(dg['echoLength_sec'], .001)\n\n buffer += struct.pack(str(record) + \"H\", *dg['WCBeamNumb'])\n buffer += struct.pack(str(record) + \"H\", *dg['WCrange_samples'])\n buffer += self.encodeArrayIntoUintX(dg['WCNomBeamAngleAcross_deg'], .001)\n\n # meanAbsCoeff_dbPerkm is a single value per transmit sector. 
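        # Worked example (made-up values) of the digit packing used above for
        # detectionType / detectionMethod / txSectorNumb, and how it is recovered
        # in expandAndDecodeSoundings:
        # detectionType, detectionMethod, txSectorNumb = 2, 1, 3
        # packed = detectionType * 100 + detectionMethod * 10 + txSectorNumb   # 213
        # #   round(213 / 100.)          -> 2
        # #   round((213 - 200) / 10.)   -> 1
        # #   213 - 200 - 10             -> 3
        # # Floor/modulo arithmetic appears to be an equivalent, rounding-free way to
        # # decode (and avoids a sector digit of 5-7 rounding up into the method digit):
        # assert (packed // 100, packed % 100 // 10, packed % 10) == (2, 1, 3)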
No point in\n # encoding them all. This method first line gets a unique index for\n # each sector. These are used to capture a dbPkm for each.\n _, idx = np.unique(dg['txSectorNumb'], return_index=True)\n # Encoding as ushort's in .01's of a dB.\n vals = np.round(np.array(dg['meanAbsCoeff_dbPerkm'])[np.sort(idx)] * 100).astype(int)\n buffer += struct.pack(str(len(idx)) + \"H\", *vals)\n\n # Reflectivity1_dB values get -100 when the detect is invalid\n # and reflectivity2_dB get any of several values thare are\n # also non-sensical. Because they are never near the mean of\n # the valid data, the differential encoding scheme used\n # here becomes very inefficient. So we will set them to\n # the mode of the data to optimize the encoding and set them\n # back to their original values on decoding.\n\n # The values are rounded to 2 decimal places first because\n # they are floats and the chances that any two floats are\n # the same is quite small.\n dg['reflectivity1_dB'] = np.round(dg['reflectivity1_dB'], decimals=2)\n\n # This wizardry calculates the mode (most frequent value)\n # of the reflectivity values associated with valid detects.\n reflectivity_mode = stats.mode([y for x, y in\n zip(dg['detectionMethod'], dg['reflectivity1_dB'])\n if x != 0])[0][0]\n # Replace all the non-detects with the mode.\n dg['reflectivity1_dB'] = [y if x != 0 else reflectivity_mode\n for x, y in\n zip(dg['detectionMethod'], dg['reflectivity1_dB'])]\n\n # Do the same with reflectiivty2.\n dg['reflectivity2_dB'] = np.round(dg['reflectivity2_dB'], decimals=2)\n reflectivity_mode = stats.mode([y for x, y in\n zip(dg['detectionMethod'], dg['reflectivity2_dB'])\n if x != 0])[0][0]\n # Replace all the non-detects with the mode.\n dg['reflectivity2_dB'] = [y if x != 0 else reflectivity_mode\n for x, y in\n zip(dg['detectionMethod'], dg['reflectivity2_dB'])]\n\n buffer += self.encodeArrayIntoUintX(dg['reflectivity1_dB'], .1)\n buffer += self.encodeArrayIntoUintX(dg['reflectivity2_dB'], .001)\n buffer += self.encodeArrayIntoUintX(dg['receiverSensitivityApplied_dB'], .001)\n buffer += self.encodeArrayIntoUintX(dg['sourceLevelApplied_dB'], .001)\n buffer += self.encodeArrayIntoUintX(dg['BScalibration_dB'], .001)\n buffer += self.encodeArrayIntoUintX(dg['TVG_dB'], .001)\n buffer += self.encodeArrayIntoUintX(dg['beamAngleReRx_deg'], .001)\n buffer += self.encodeArrayIntoUintX(dg['beamAngleCorrection_deg'], .001)\n buffer += self.encodeArrayIntoUintX(dg['twoWayTravelTime_sec'], .000001)\n buffer += self.encodeArrayIntoUintX(dg['twoWayTravelTimeCorrection_sec'], .0000001)\n buffer += self.encodeArrayIntoUintX(dg['deltaLatitude_deg'], .0000001)\n buffer += self.encodeArrayIntoUintX(dg['deltaLongitude_deg'], .0000001)\n buffer += self.encodeArrayIntoUintX(dg['z_reRefPoint_m'], .001)\n buffer += self.encodeArrayIntoUintX(dg['y_reRefPoint_m'], .001)\n buffer += self.encodeArrayIntoUintX(dg['x_reRefPoint_m'], .001)\n buffer += self.encodeArrayIntoUintX(dg['beamIncAngleAdj_deg'], .001)\n\n # realTimeCleanInfo is for future use. 
So we can omit it for now.\n # buffer += struct.pack(str(record)+\"H\", *dg['realTimeCleanInfo'])\n\n buffer += struct.pack(str(record) + \"H\", *dg['SIstartRange_samples'])\n buffer += struct.pack(str(record) + \"H\", *dg['SIcentreSample'])\n buffer += struct.pack(str(record) + \"H\", *dg['SInumSamples'])\n\n return bz2.compress(buffer)\n\n def expandAndDecodeSoundings(self, buffer, records):\n ''' When the soundings datagram is differential-encoded and compressed, this method reverses it on reading.\n buffer: bytes object containing the compressed data.\n records: Number of soundings encoded in the block.\n returns: dg['sounding'] containing dictionary of lists of sounding record fields.\n '''\n\n buffer = bz2.decompress(buffer)\n dg = {}\n ptr = 0\n dg['soundingIndex'] = struct.unpack(str(records) + \"H\", buffer[0:(records * 2)])\n ptr += (records * 2)\n\n tmp = np.array(struct.unpack(str(records) + \"B\", buffer[ptr:(ptr + records)]))\n ptr += records\n dg['detectionType'] = np.round(tmp / 100.).astype(int)\n dg['detectionMethod'] = np.round((tmp - dg['detectionType'] * 100) / 10.).astype(int)\n dg['txSectorNumb'] = np.round((tmp - dg['detectionType'] * 100 - dg['detectionMethod'] * 10)).astype(int)\n dg['detectionType'] = dg['detectionType'].tolist()\n dg['detectionMethod'] = dg['detectionMethod'].tolist()\n dg['txSectorNumb'] = dg['txSectorNumb'].tolist()\n # dg['txSectorNumb'] = struct.unpack(str(records) + \"B\", buffer[ptr:(ptr + records )])\n # ptr += records\n # dg['detectionType'] = struct.unpack(str(records) + \"B\", buffer[ptr:(ptr + records)])\n # ptr += records\n # dg['detectionMethod'] = struct.unpack(str(records) + \"B\", buffer[ptr:(ptr + records)])\n # ptr += records\n dg['rejectionInfo1'] = struct.unpack(str(records) + \"B\", buffer[ptr:(ptr + records)])\n ptr += records\n dg['rejectionInfo2'] = struct.unpack(str(records) + \"B\", buffer[ptr:(ptr + records)])\n ptr += records\n dg['postProcessingInfo'] = struct.unpack(str(records) + \"B\", buffer[ptr:(ptr + records)])\n ptr += records\n dg['detectionClass'] = struct.unpack(str(records) + \"B\", buffer[ptr:(ptr + records)])\n ptr += records\n dg['detectionConfidenceLevel'] = struct.unpack(str(records) + \"B\", buffer[ptr:(ptr + records)])\n ptr += records\n\n # The padding data is not encoded, so we just generate 0's for it here.\n dg['padding'] = list(np.zeros(shape=len(dg['soundingIndex'])).astype(int))\n\n dg['rangeFactor'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['qualityFactor'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['detectionUncertaintyVer_m'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['detectionUncertaintyHor_m'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['detectionWindowLength_sec'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['echoLength_sec'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n\n dg['WCBeamNumb'] = struct.unpack(str(records) + \"H\", buffer[ptr:(ptr + (records * 2))])\n ptr += (records * 2)\n dg['WCrange_samples'] = struct.unpack(str(records) + \"H\", buffer[ptr:(ptr + (records * 2))])\n ptr += (records * 2)\n\n dg['WCNomBeamAngleAcross_deg'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n\n # meanAbsCoeff_dbPerkm is a single value for each transmit sector.\n # And we've only encodeied one for each as ushorts in 0.01 dB.\n # So we 
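        # Illustrative sketch (made-up numbers) of the per-sector expansion performed
        # just below: one absorption value per tx sector is broadcast back onto every
        # sounding, assuming sector numbers run 0..Nsectors-1.
        # import numpy as np
        # per_sector = np.array([38.21, 38.05])        # one value per tx sector
        # tx = np.asarray([0, 0, 1, 1, 0])             # sector number of each sounding
        # per_sounding = per_sector[tx]                # [38.21, 38.21, 38.05, 38.05, 38.21]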
extract these.\n Nsectors = len(np.unique(dg['txSectorNumb']))\n values = np.array(struct.unpack(str(Nsectors) + \"H\", buffer[ptr:(ptr + (Nsectors * 2))])) / 100.0\n ptr += (Nsectors * 2)\n # Then assign them to each sector.\n tmp = np.zeros(shape=len(dg['soundingIndex']))\n for sectoridx in np.unique(dg['txSectorNumb']):\n tmp[dg['txSectorNumb'] == sectoridx] = values[sectoridx]\n dg['meanAbsCoeff_dbPerkm'] = tmp.tolist()\n\n dg['reflectivity1_dB'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n # Reset values for no-detect values that were modified to\n # improve compression.\n dg['reflectivity1_dB'] = [-100. if x == 0 else y\n for x, y in\n zip(dg['detectionMethod'], dg['reflectivity1_dB'])]\n\n dg['reflectivity2_dB'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n # Reset values for no-detect values that were modified to\n # improve compression. Note this makes a suble if inconsequential\n # change to the file, as the values in reflectivity2_dB for\n # failed detections are not -100. They are not uniform in value\n # and so cannot be replaced exactly here. But since these\n # are for non-detects it should not matter to anyone. (I hope)\n dg['reflectivity2_dB'] = [-100. if x == 0 else y\n for x, y in\n zip(dg['detectionMethod'], dg['reflectivity2_dB'])]\n\n dg['receiverSensitivityApplied_dB'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['sourceLevelApplied_dB'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['BScalibration_dB'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['TVG_dB'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['beamAngleReRx_deg'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['beamAngleCorrection_deg'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['twoWayTravelTime_sec'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['twoWayTravelTimeCorrection_sec'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['deltaLatitude_deg'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['deltaLongitude_deg'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['z_reRefPoint_m'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['y_reRefPoint_m'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['x_reRefPoint_m'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n dg['beamIncAngleAdj_deg'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])\n ptr += bytesDecoded\n\n # dg['realTimeCleanInfo'] = struct.unpack(str(records) + \"H\", buffer[ptr:(ptr + (records * 2))])\n # ptr += (records * 2)\n dg['realTimeCleanInfo'] = list(np.zeros(shape=len(dg['soundingIndex'])).astype(int))\n dg['SIstartRange_samples'] = struct.unpack(str(records) + \"H\", buffer[ptr:(ptr + (records * 2))])\n ptr += (records * 2)\n dg['SIcentreSample'] = struct.unpack(str(records) + \"H\", buffer[ptr:(ptr + (records * 2))])\n ptr += (records * 2)\n dg['SInumSamples'] = struct.unpack(str(records) + \"H\", buffer[ptr:(ptr + (records * 2))])\n ptr += (records * 2)\n\n return dg\n\n def write_EncodedCompressedSoundings(self, buffer):\n ''' Write MRZ soundings records.\n write_EMdgmMRZ_sounding(FID, dg['sounding'])\n 
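        # A self-contained sketch of the length-prefixed framing used here and undone
        # by read_EncodedCompressedSoundingsBlock / read_EncodedCompressedImageryBlock;
        # the payload below is a stand-in, not real bz2 output.
        # import io, struct
        # payload = b'example compressed bytes'
        # fid = io.BytesIO()
        # fid.write(struct.pack('I', len(payload)))    # 4-byte length prefix
        # fid.write(payload)
        # fid.seek(0)
        # nbytes = struct.unpack('I', fid.read(4))[0]
        # assert fid.read(nbytes) == payload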
'''\n self.FID.write(struct.pack('I', len(buffer)))\n self.FID.write(buffer)\n return\n\n def encodeAndCompressImagery(self, dg):\n ''' A method to encode and compress the imagery data.'''\n buffer = self.encodeArrayIntoUintX(np.array(dg['SIsample_desidB']), .1)\n return bz2.compress(buffer)\n\n def decodeAndDecompresssImagery(self, buffer, Nseabedimage_samples):\n format_to_unpack = str(Nseabedimage_samples) + \"h\"\n return self.decodeUintXintoArray(bz2.decompress(buffer))\n\n def write_EncodedCompressedImagery(self, buffer):\n ''' A method to write the encoded compressed imagery'''\n self.FID.write(struct.pack(\"I\", len(buffer)))\n self.FID.write(buffer)\n\n def write_EMdgmCZ0(self, dg):\n ''' A method to write an MRZ datagram back to disk, but omitting the imagery data.'''\n\n # First we need to see how much space the imagery data will take.\n # And set the number of imagery samples per sounding field to zero.\n Nseabedimage_samples = 0\n for record in range(dg['rxInfo']['numExtraDetections'] +\n dg['rxInfo']['numSoundingsMaxMain']):\n Nseabedimage_samples += dg['sounding']['SInumSamples'][record]\n # dg['sounding']['SInumSamples'][record] = 0\n imageryBytes = Nseabedimage_samples * 2\n\n # Now we need to reset the total packet size.\n # dg['header']['numBytesDgm'] -= imageryBytes\n\n # And we need to create a new MRZ packet type to hold compressed data.\n dg['header']['dgmType'] = b'#CZ0'\n\n imageryBuffer = self.encodeAndCompressImagery(dg)\n\n soundingsBuffer = self.encodeAndCompressSoundings(dg['sounding'])\n\n # Reduce the datagram size by the difference in size of the\n # original and compressed sounding data, including the size\n # of teh soundings buffer which is written as a 4-type int.\n Nsoundings = (dg['rxInfo']['numExtraDetections'] +\n dg['rxInfo']['numSoundingsMaxMain'])\n dg['header']['numBytesDgm'] -= (Nsoundings * 120\n - (len(soundingsBuffer) + 4))\n\n # Reduce the datagram size by the difference in size of the\n # original and encoded, compressed imagery data.\n dg['header']['numBytesDgm'] -= (imageryBytes - (len(imageryBuffer) + 4))\n\n # Now write the packet, just leave out the imagery\n # data and set Nsamples to 0.\n self.write_EMdgmHeader(dg['header'])\n self.write_EMdgmMpartition(dg['partition'])\n self.write_EMdgmMbody(dg['cmnPart'])\n self.write_EMdgmMRZ_pingInfo(dg['pingInfo'])\n\n for sector in range(dg['pingInfo']['numTxSectors']):\n self.write_EMdgmMRZ_txSectorInfo(dg['txSectorInfo'], sector)\n\n self.write_EMdgmMRZ_rxInfo(dg['rxInfo'])\n\n for detclass in range(dg['rxInfo']['numExtraDetectionClasses']):\n self.write_EMdgmMRZ_extraDetClassInfo(dg['extraDetClassInfo'], detclass)\n\n self.write_EncodedCompressedSoundings(soundingsBuffer)\n self.write_EncodedCompressedImagery(imageryBuffer)\n\n self.FID.write(struct.pack(\"I\", dg['header']['numBytesDgm']))\n\n def write_EMdgmCZ1(self, dg):\n ''' A method to write a new datagram compressing teh soundings and\n omitting the imagery data.'''\n\n # First we need to see how much space the imagery data will take.\n # And set the number of imagery samples per sounding field to zero.\n Nseabedimage_samples = 0\n for record in range(dg['rxInfo']['numExtraDetections'] +\n dg['rxInfo']['numSoundingsMaxMain']):\n Nseabedimage_samples += dg['sounding']['SInumSamples'][record]\n dg['sounding']['SInumSamples'][record] = 0\n imageryBytes = Nseabedimage_samples * 2\n\n # Now we need to reset the total packet size.\n dg['header']['numBytesDgm'] -= imageryBytes\n\n # And we need to create a new MRZ packet type to hold 
compressed data.\n dg['header']['dgmType'] = b'#CZ1'\n\n soundingsBuffer = self.encodeAndCompressSoundings(dg['sounding'])\n\n # Reduce the datagram size by the difference in size of the\n # original and compressed sounding data, including the size\n # of the soundings buffer which is also written, as a 4-type int.\n Nsoundings = (dg['rxInfo']['numExtraDetections'] +\n dg['rxInfo']['numSoundingsMaxMain'])\n dg['header']['numBytesDgm'] -= (Nsoundings * 120\n - (len(soundingsBuffer) + 4))\n\n # Now write the packet, just leave out the imagery\n # data and set Nsamples to 0.\n self.write_EMdgmHeader(dg['header'])\n self.write_EMdgmMpartition(dg['partition'])\n self.write_EMdgmMbody(dg['cmnPart'])\n self.write_EMdgmMRZ_pingInfo(dg['pingInfo'])\n\n for sector in range(dg['pingInfo']['numTxSectors']):\n self.write_EMdgmMRZ_txSectorInfo(dg['txSectorInfo'], sector)\n\n self.write_EMdgmMRZ_rxInfo(dg['rxInfo'])\n\n for detclass in range(dg['rxInfo']['numExtraDetectionClasses']):\n self.write_EMdgmMRZ_extraDetClassInfo(dg['extraDetClassInfo'], detclass)\n\n self.write_EncodedCompressedSoundings(soundingsBuffer)\n # write_EncodedCompressedImagery(FID,imageryBuffer)\n # Don't write the imagery data.\n # write_EMdgmMRZ_seabedImagery(FID, dg, Nseabedimage_samples)\n\n self.FID.write(struct.pack(\"I\", dg['header']['numBytesDgm']))\n\n def read_EMdgmCZ0(self):\n \"\"\"\n The #CR0 datagram is a custom datagram in which the sounding data\n and imagery data are encoded and compressed.\n\n The format of this datagram will evolve as better methods are devised.\n Therefore, files compressed in this way should only be used in a\n temporary way for passing data over telemetry links. Files left\n compressed are in danger of being unreadable in future releases.\n\n \"\"\"\n\n start = self.FID.tell()\n\n dg = {}\n dg['header'] = self.read_EMdgmHeader()\n dg['partition'] = self.read_EMdgmMpartition()\n dg['cmnPart'] = self.read_EMdgmMbody()\n\n dgmVersion = dg['dgmVersion']\n dg['pingInfo'] = self.read_EMdgmMRZ_pingInfo(dgmVersion)\n\n # Read TX sector info for each sector\n txSectorInfo = []\n for sector in range(dg['pingInfo']['numTxSectors']):\n txSectorInfo.append(self.read_EMdgmMRZ_txSectorInfo(dgmVersion))\n dg['txSectorInfo'] = self.listofdicts2dictoflists(txSectorInfo)\n\n # Read reInfo\n dg['rxInfo'] = self.read_EMdgmMRZ_rxInfo()\n\n # Read extra detect metadata if they exist.\n extraDetClassInfo = []\n for detclass in range(dg['rxInfo']['numExtraDetectionClasses']):\n extraDetClassInfo.append(self.read_EMdgmMRZ_extraDetClassInfo())\n dg['extraDetClassInfo'] = self.listofdicts2dictoflists(extraDetClassInfo)\n\n # Read the sounding data.\n Nseabedimage_samples = 0\n\n soundingsBuffer = self.read_EncodedCompressedSoundingsBlock()\n\n Nsoundings = (dg['rxInfo']['numExtraDetections'] +\n dg['rxInfo']['numSoundingsMaxMain'])\n dg['sounding'] = self.expandAndDecodeSoundings(soundingsBuffer,\n Nsoundings)\n\n for record in range(Nsoundings):\n Nseabedimage_samples += dg['sounding']['SInumSamples'][record]\n\n # Read the seabed imagery.\n # Seabed image sample amplitude, in 0.1 dB. Actual number of\n # seabed image samples (SIsample_desidB) to be found\n # by summing parameter SInumSamples in struct EMdgmMRZ_sounding_def\n # for all beams. Seabed image data are raw beam sample data\n # taken from the RX beams. The data samples are selected\n # based on the bottom detection ranges. First sample for\n # each beam is the one with the lowest range. 
The centre\n # sample from each beam is georeferenced (x, y, z data from\n # the detections). The BS corrections applied at the centre\n # sample are the same as used for reflectivity2_dB\n # (struct EMdgmMRZ_sounding_def).\n imageryBuffer = self.read_EncodedCompressedImageryBlock()\n dg['SIsample_desidB'], bytesDecoded = self.decodeAndDecompresssImagery(imageryBuffer,\n Nseabedimage_samples)\n dg['SIsample_desidB'] = np.array(dg['SIsample_desidB'], dtype=int)\n\n # Increase the reported size of the packet by the increase\n # in the size of the decoded soundings block. There are 120\n # bytes per sounding. And the size of the soundings buffer\n # is also recorded, as a 4-byte int.\n dg['header']['numBytesDgm'] += (Nsoundings * 120 -\n (len(soundingsBuffer) + 4))\n # Same for compressed imagery.\n dg['header']['numBytesDgm'] += (Nseabedimage_samples * 2 -\n (len(imageryBuffer) + 4))\n\n # Seek to end of the packet.\n self.FID.seek(start + dg['header']['numBytesDgm'], 0)\n\n return dg\n\n def read_EMdgmCZ1(self):\n \"\"\"\n The #CR1 datagram is a custom datagram in which the sounding data\n are encoded and compressed and imagery is omitted.\n\n The format of this datagram will evolve as better methods are devised.\n Therefore, files compressed in this way should only be used in a\n temporary way for passing data over telemetry links. Files left\n compressed are in danger of being unreadable in future releases.\n\n \"\"\"\n\n start = self.FID.tell()\n\n dg = {}\n dg['header'] = self.read_EMdgmHeader()\n dg['partition'] = self.read_EMdgmMpartition()\n dg['cmnPart'] = self.read_EMdgmMbody()\n\n\n dgmVersion = dg['dgmVersion']\n dg['pingInfo'] = self.read_EMdgmMRZ_pingInfo(dgmVersion)\n\n # Read TX sector info for each sector\n txSectorInfo = []\n for sector in range(dg['pingInfo']['numTxSectors']):\n txSectorInfo.append(self.read_EMdgmMRZ_txSectorInfo(dgmVersion))\n dg['txSectorInfo'] = self.listofdicts2dictoflists(txSectorInfo)\n\n # Read reInfo\n dg['rxInfo'] = self.read_EMdgmMRZ_rxInfo()\n\n # Read extra detect metadata if they exist.\n extraDetClassInfo = []\n for detclass in range(dg['rxInfo']['numExtraDetectionClasses']):\n extraDetClassInfo.append(self.read_EMdgmMRZ_extraDetClassInfo())\n dg['extraDetClassInfo'] = self.listofdicts2dictoflists(extraDetClassInfo)\n\n # Read the sounding data.\n Nseabedimage_samples = 0\n\n soundingsBuffer = self.read_EncodedCompressedSoundingsBlock()\n Nsoundings = (dg['rxInfo']['numExtraDetections'] +\n dg['rxInfo']['numSoundingsMaxMain'])\n dg['sounding'] = self.expandAndDecodeSoundings(soundingsBuffer, Nsoundings)\n\n # Increase the reported size of the packet by the increase\n # in the size of the decoded soundings block. There are 120\n # bytes per sounding. 
And the size of the soundings buffer\n # is also recorded, as a 4-byte int.\n dg['header']['numBytesDgm'] += (Nsoundings * 120 -\n (len(soundingsBuffer) + 4))\n # Skip the imagery data...\n\n # Seek to end of the packet.\n self.FID.seek(start + dg['header']['numBytesDgm'], 0)\n\n return dg\n\n def read_EncodedCompressedSoundingsBlock(self):\n ''' Read the compressed soundings block'''\n bytestoread = struct.unpack('I', self.FID.read(4))\n buffer = self.FID.read(bytestoread[0])\n return buffer\n\n def read_EncodedCompressedImageryBlock(self):\n ''' Read the compressed imagery block.'''\n bytestoread = struct.unpack('I', self.FID.read(4))\n buffer = self.FID.read(bytestoread[0])\n return buffer\n\n ###########################################################\n # Utilities\n ###########################################################\n\n def OpenFiletoRead(self, inputfilename=None):\n \"\"\" Open a KMALL data file for reading.\"\"\"\n if self.filename is None:\n if inputfilename is None:\n print(\"No file name specified\")\n sys.exit(1)\n else:\n filetoopen = inputfilename\n else:\n filetoopen = self.filename\n\n if self.verbose >= 1:\n print(\"Opening: %s to read\" % filetoopen)\n\n self.FID = open(filetoopen, \"rb\")\n\n def OpenFiletoWrite(self, inputfilename=None):\n \"\"\" Open a KMALL data file for reading.\"\"\"\n if self.filename is None:\n if inputfilename is None:\n print(\"No file name specified\")\n sys.exit(1)\n else:\n filetoopen = inputfilename\n else:\n filetoopen = self.filename\n\n if self.verbose >= 1:\n print(\"Opening: %s to write\" % filetoopen)\n\n self.FID = open(filetoopen, \"wb\")\n\n def closeFile(self):\n \"\"\" Close a file.\"\"\"\n if self.FID is not None:\n self.FID.close()\n\n def print_datagram(self, dg):\n \"\"\" A utility function to print the fields of a parsed datagram. \"\"\"\n print(\"\\n\")\n for k, v in dg.items():\n print(\"%s:\\t\\t\\t%s\\n\" % (k, str(v)))\n\n def index_file(self):\n \"\"\" Index a KMALL file - message type, time, size, byte offset. 
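        # Hypothetical usage sketch, assuming this reader class is instantiated as
        # `k` with the file name passed to its constructor (as OpenFiletoRead implies);
        # the file name is made up.
        # k = kmall('data.kmall')
        # k.index_file()
        # print(k.Index['MessageType'].value_counts())
        # mrz_offsets = k.Index.loc[k.Index['MessageType'] == "b'#MRZ'", 'ByteOffset'].values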
\"\"\"\n\n if self.FID is None:\n self.OpenFiletoRead()\n else:\n self.closeFile() # forces flushing.\n self.OpenFiletoRead()\n\n # Get size of the file.\n self.FID.seek(0, 2)\n self.file_size = self.FID.tell()\n self.FID.seek(0, 0)\n\n if (self.verbose == 1):\n print(\"Filesize: %d\" % self.file_size)\n\n self.msgoffset = []\n self.msgsize = []\n self.msgtime = []\n self.msgtype = []\n self.pktcnt = 0\n\n while self.FID.tell() < self.file_size:\n\n try:\n # Get the byte offset.\n self.msgoffset.append(self.FID.tell())\n\n # Read the first four bytes to get the datagram size.\n msgsize = struct.unpack(\"I\", self.FID.read(4))\n self.msgsize.append(msgsize[0])\n\n # Read the datagram.\n msg_buffer = self.FID.read(int(self.msgsize[self.pktcnt]) - 4)\n except:\n print(\"Error indexing file: %s\" % self.filename)\n self.msgoffset = self.msgoffset[:-1]\n self.msgsize = self.msgsize[:-1]\n continue\n\n # Interpret the header.\n header_without_length = struct.Struct('ccccBBHII')\n\n (dgm_type0, dgm_type1, dgm_type2, dgm_type3, dgm_version,\n sysid, emid,\n sec,\n nsec) = header_without_length.unpack_from(msg_buffer, 0)\n\n dgm_type = dgm_type0 + dgm_type1 + dgm_type2 + dgm_type3\n\n self.msgtype.append(str(dgm_type))\n # Decode time\n # osec = sec\n # osec *= 1E9\n # osec += nsec\n # lisec = nanosec\n # lisec /= 1E6\n\n # Captue the datagram header timestamp.\n self.msgtime.append(sec + nsec / 1.0E9)\n\n if self.verbose:\n print(\"MSG_TYPE: %s,\\tOFFSET:%0.0f,\\tSIZE: %0.0f,\\tTIME: %0.3f\" %\n (dgm_type,\n self.msgoffset[self.pktcnt],\n self.msgsize[self.pktcnt],\n self.msgtime[self.pktcnt]))\n\n self.pktcnt += 1\n\n self.msgoffset = np.array(self.msgoffset)\n self.msgsize = np.array(self.msgsize)\n self.msgtime = np.array(self.msgtime)\n\n self.Index = pd.DataFrame({'Time': self.msgtime,\n 'ByteOffset': self.msgoffset,\n 'MessageSize': self.msgsize,\n 'MessageType': self.msgtype})\n self.Index.set_index('Time', inplace=True)\n self.Index['MessageType'] = self.Index.MessageType.astype('category')\n if self.verbose >= 2:\n print(self.Index)\n\n def extract_nav(self):\n ''' Extract navigation data.\n Only works when data is interpreted into the KMbinary record at the\n moment.'''\n self.extract_attitude()\n\n def extract_attitude(self):\n ''' Extract all raw attitude data from data file into self.att\n FIX: This method needs to be much more robust. It currently only\n handles our situation in which we are providing POS/MV Group 102\n messages, and these, it appears, are being interpreted into the\n KMbinary datagram. But it does not handle 1) multiple navigation\n inputs, 2) multiple navigation input types, 3) there are no checks to\n see that the data is valid. etc.\n '''\n\n if self.Index is None:\n self.index_file()\n\n if self.FID is None:\n self.OpenFiletoRead()\n\n # Get offsets for 'SKM' attitude datagrams.\n SKMOffsets = [x for x, y in zip(self.msgoffset, self.msgtype)\n if y == \"b'#SKM'\"]\n\n attitudeDatagrams = list()\n for offset in SKMOffsets:\n self.FID.seek(offset, 0)\n dg = self.read_EMdgmSKM()\n attitudeDatagrams.append(dg['sample']['KMdefault'])\n\n # Convert list of dictionaries to dictionary of lists.\n self.att = self.listofdicts2dictoflists(attitudeDatagrams)\n\n self.FID.seek(0, 0)\n return\n\n def listofdicts2dictoflists(self, listofdicts):\n \"\"\" A utility to convert a list of dicts to a dict of lists.\"\"\"\n # dg = {}\n #\n # # This is done in two steps, handling both dictionary items that are\n # # lists and scalars separately. 
As long as no item combines both lists\n # # and scalars the method works.\n # #\n # # There is some mechanism to handle this in a single list\n # # comprehension statement, checking for types on the fly, but I cannot\n # # find any syntax that returns the proper result.\n # if len(listofdicts) == 0:\n # return None\n #\n # for k, v in listofdicts[0].items():\n # dg[k] = [item for dictitem in listofdicts\n # if isinstance(dictitem[k], list)\n # for item in dictitem[k]]\n # scalartmp = [dictitem[k] for dictitem in listofdicts\n # if not isinstance(dictitem[k], list)]\n # if len(dg[k]) == 0:\n # dg[k] = scalartmp\n #\n # return dg\n if listofdicts:\n needs_flattening = [k for (k,v) in listofdicts[0].items() if isinstance(v, list)]\n d_of_l = {k: [dic[k] for dic in listofdicts] for k in listofdicts[0]}\n if needs_flattening:\n # print('flattening {}'.format(needs_flattening))\n for nf in needs_flattening:\n d_of_l[nf] = [item for sublist in d_of_l[nf] for item in sublist]\n return d_of_l\n else:\n return None\n\n def extract_xyz(self):\n pass\n\n def check_ping_count(self):\n \"\"\" A method to check to see that all required MRZ datagrams exist \"\"\"\n\n if self.Index is None:\n self.index_file()\n\n if self.FID is None:\n self.OpenFiletoRead()\n\n # M = map( lambda x: x==\"b'#MRZ'\", self.msgtype)\n # MRZOffsets = self.msgoffset[list(M)]\n\n # Get the file byte count offset for each MRZ datagram.\n MRZOffsets = [x for x, y in zip(self.msgoffset, self.msgtype) if y == \"b'#MRZ'\"]\n self.pingcnt = []\n self.rxFans = []\n self.rxFanIndex = []\n\n # Skip through the file capturing the ping count information:\n # The ping count values\n # The number of receive fans specified for each ping\n # The receive fan index for each received MRZ record.\n #\n # Notes: A ping can span more than 1 MRZ datagrams. 
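        # Made-up example of the bookkeeping described here: a complete dual-swath
        # sequence and what a missing second fan would look like.
        # pingcnt    = [812, 812, 813, 813]
        # rxFans     = [2, 2, 2, 2]
        # rxFanIndex = [0, 1, 0, 1]
        # # If the second fan of ping 813 were lost, rxFanIndex would read [0, 1, 0]
        # # while rxFansPerPing still reports 2, which the loop below flags.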
This happens when\n # 1 MRZ datagram exists for each receive \"fan\"\n # In dual swath mode, at least two receive fans are generated.\n # The ping counter will not change for the second MRZ packet.\n\n for offset in MRZOffsets:\n self.FID.seek(offset, 0)\n header = self.read_EMdgmHeader()\n part = self.read_EMdgmMpartition()\n if part['numOfDgms'] > 1:\n raise ValueError(\"KMALL file contains partitionned messages, \"\n \"reconstruction not handled: analysis cancelled!\")\n\n dg = self.read_EMdgmMbody()\n self.pingcnt.append(dg['pingCnt'])\n self.rxFans.append(dg['rxFansPerPing'])\n self.rxFanIndex.append(dg['rxFanIndex'])\n\n self.pingcnt = np.array(self.pingcnt)\n self.rxFans = np.array(self.rxFans)\n self.rxFanIndex = np.array(self.rxFanIndex)\n\n # Things to check:\n # Is the total sum of rxFans equal to the number of MRZ packets?\n # Are the unique ping counter values sequential?\n # The number of multiple ping counter values has to be larger than the\n # number of rx fans and packets.\n\n # Sorting by ping count and then calculating the difference in\n # successive values allows one to check to see that at least one\n # packet exists for each ping (which may have more than one).\n if len(self.pingcnt) > 0:\n PingCounterRange = max(self.pingcnt) - min(self.pingcnt)\n dpu = np.diff(np.sort(np.unique(self.pingcnt)))\n NpingsMissed = sum((dpu[dpu > 1] - 1))\n NpingsSeen = len(np.unique(self.pingcnt))\n # MaxDiscontinuity = max(abs(dpu))\n\n if self.verbose > 1:\n print(\"File: %s\\n\\tPing Counter Range: %d:%d N=%d\" %\n (self.filename, min(self.pingcnt), max(self.pingcnt), PingCounterRange))\n print(\"\\tNumbr of pings missing: %d of %d\" % (NpingsMissed, NpingsMissed + NpingsSeen))\n\n else:\n PingCounterRange = 0\n NpingsSeen = 0\n NpingsMissed = 0\n if self.verbose > 1:\n print(\"No pings in file.\")\n\n # print(\"\\tNumbr of pings seen: %d\" % NpingsSeen)\n # print('File: %s\\n\\tNumber of missed full pings: %d of %d' %\n # (self.filename, PingCounterRange - NpingsSeen, PingCounterRange ))\n\n # dp = np.diff(self.pingcnt)\n # FirstPingInSeries = np.array([x==0 for x in dp])\n HaveAllMRZ = True\n MissingMRZCount = 0\n # Go through every \"ping\" these may span multiple packets...\n for idx in range(len(self.pingcnt)):\n # Side note: This method is going to produce a warning multiple\n # times for each ping series that fails the test. 
Sloppy.\n\n # Capture how many rx fans there should be for this ping.\n N_RxFansforSeries = self.rxFans[idx]\n # Get the rxFan indices associated with this ping record.\n PingsInThisSeriesMask = np.array([x == self.pingcnt[idx] for x in self.pingcnt])\n rxFanIndicesforThisSeries = self.rxFanIndex[PingsInThisSeriesMask]\n\n # Check to see that number of records equals the total.\n if len(rxFanIndicesforThisSeries) != N_RxFansforSeries:\n if HaveAllMRZ:\n if self.verbose > 1:\n print(\"\\tDetected missing MRZ records!\")\n\n if self.verbose > 1:\n print('\\tNot enough rxFan (MRZ) records for ping: %d: Indices %s of [0:%d] found' %\n (self.pingcnt[idx],\n \",\".join(str(x) for x in rxFanIndicesforThisSeries),\n N_RxFansforSeries - 1))\n HaveAllMRZ = False\n MissingMRZCount = MissingMRZCount + 1\n\n # Shamelessly creating a data frame just to get a pretty table.\n res = pd.DataFrame([[\"File\", \"NpingsTotal\", \"Pings Missed\", \"MissingMRZRecords\"],\n [self.filename, NpingsMissed + NpingsSeen, NpingsMissed, MissingMRZCount]])\n print(res.to_string(index=False, header=False))\n\n if HaveAllMRZ:\n if self.verbose > 1:\n print(\"\\tNumber of MRZ records equals number required for each ping.\")\n\n return (self.filename, NpingsMissed + NpingsSeen, NpingsMissed, MissingMRZCount)\n\n def report_packet_types(self):\n \"\"\" A method to report datagram packet count and size in a file. \"\"\"\n\n if self.Index is None:\n self.index_file()\n\n # Get a list of packet types seen.\n types = list(set(self.msgtype))\n\n pktcount = {}\n pktSize = {}\n pktMinSize = {}\n pktMaxSize = {}\n pkTotalCount = 0\n # Calculate some stats.\n for type in types:\n M = np.array(list(map(lambda x: x == type, self.msgtype)))\n pktcount[type] = sum(M)\n pkTotalCount += pktcount[type]\n pktSize[type] = sum(self.msgsize[M])\n pktMinSize[type] = min(self.msgsize[M])\n pktMaxSize[type] = max(self.msgsize[M])\n\n # print(self.Index.groupby(\"MessageType\").describe().reset_index())\n msg_type_group = self.Index.groupby(\"MessageType\")\n summary = {\"Count\": msg_type_group[\"MessageType\"].count(),\n \"Size:\": msg_type_group[\"MessageSize\"].sum(),\n \"Min Size\": msg_type_group[\"MessageSize\"].min(),\n \"Max Size\": msg_type_group[\"MessageSize\"].max()}\n IndexSummary = pd.DataFrame(summary)\n\n print(IndexSummary)\n print(\"Total packets number: \", pkTotalCount)\n\n def _initialize_sequential_read(self, start_ptr, end_ptr):\n \"\"\"\n sequential_read_records gives you the ability to just read a chunk of a file, starting at start_ptr, ending\n at end_ptr. 
This method sets up this functionality by figuring out the length of the chunk and the max length\n of the file.\n \"\"\"\n self.eof = False\n if end_ptr:\n filelen = int(end_ptr - start_ptr)\n else:\n self.FID.seek(-start_ptr, 2)\n filelen = self.FID.tell()\n self.FID.seek(0, 2)\n self.file_size = self.FID.tell()\n self.FID.seek(start_ptr, 0)\n return filelen\n\n def _build_startbytesearch(self):\n \"\"\"\n Build the regular expression we are going to use to find the next startbyte, if necessary.\n \"\"\"\n # we search for the pound sign as a first step, use this compiled expression for the second tier, ensuring\n # the pound sign actually indicates the record identifier\n\n # went through and found the possible letters for all the records we care about\n # have to be explicit, as there are datagrams within datagrams, see read_EMdgmSKMinfo\n search_exp = b'#[CIMS][CDHIKOPRVWZ][CEILMOPTZ01]'\n compiled_expr = re.compile(search_exp)\n return compiled_expr\n\n def seek_next_startbyte(self, file_length, start_ptr=0):\n \"\"\"\n Determines if current pointer is at the start of a record. If not, finds the next valid one.\n \"\"\"\n # check is to continue on until you find the pound sign, which might indicate the record identifier,\n # can't just search for # though, have to use regex to ensure the 3 capital letter identifier comes after.\n at_the_right_byte = False\n while not at_the_right_byte:\n cur_ptr = self.FID.tell()\n if cur_ptr >= start_ptr + file_length:\n # at the end of file, return False to stop searching\n return False\n # consider start bytes right at the end of the given filelength as valid, even if they extend\n # over to the next chunk\n srchdat = self.FID.read(min(20, (start_ptr + file_length) - cur_ptr))\n stx_idx = srchdat.find(b'#')\n if stx_idx >= 0:\n possible_start = cur_ptr + stx_idx\n self.FID.seek(possible_start)\n datchk = self.FID.read(4)\n m = self.datagram_ident_search.search(datchk, 0)\n if m:\n self.FID.seek(possible_start - 4)\n return True\n\n def _divide_rec(self, rec):\n \"\"\"\n MRZ comes in from sequential read by time/ping. Each ping may have multiple sectors to it which we want\n to treat as separate pings. Do this by generating a new record for each sector in the ping. When rec is MRZ,\n the return is a list of rec split by sector. Otherwise returns the original rec as the only element in a list\n returns: totalrecs, list of split rec\n \"\"\"\n if self.datagram_ident != 'MRZ':\n return [rec]\n elif rec['pingInfo']['numTxSectors'] == 1:\n return [rec]\n else:\n totalrecs = []\n pingtime = rec['header']['dgtime']\n for sec in rec['txSectorInfo']['txSectorNumb']:\n split_rec = copy.copy(rec)\n split_rec['txSectorInfo'] = {k: v[sec] for (k,v) in rec['txSectorInfo'].items()}\n rx_index = np.where(np.array(rec['sounding']['txSectorNumb']) == sec)\n split_rec['sounding'] = {k: np.array(v)[rx_index] for (k,v) in rec['sounding'].items()}\n\n # ping time equals datagram time plus sector transmit delay\n split_rec['header']['dgtime'] = pingtime + split_rec['txSectorInfo']['sectorTransmitDelay_sec']\n\n totalrecs.append(split_rec)\n return totalrecs\n\n def _pad_to_dense(self, arr, padval=999.0, maxlen=500, override_type=None, detectioninfo=False):\n \"\"\"\n Appends the minimal required amount of zeroes at the end of each array in the jagged array `M`, such that `M`\n loses its jaggedness.\n\n A required operation for our sector-wise read. 
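        # Worked sketch (made-up beams, maxlen of 4 chosen only for illustration) of
        # the padding this method performs on ragged per-sector arrays.
        # import numpy as np
        # ragged = [np.array([1.0, 2.0, 3.0]), np.array([4.0, 5.0])]
        # dense = np.full((2, 4), 999.0)
        # for i, row in enumerate(ragged):
        #     dense[i, :len(row)] = row
        # # dense -> [[  1.,   2.,   3., 999.],
        # #           [  4.,   5., 999., 999.]]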
Each sector has a varying amount of beams over time, so the\n resulting number of values per ping (beam pointing angle for example) will differ between pings. Here we make\n these ragged arrays square, by using the padval to fill in the holes.\n\n A padval of 999 is arbitrary, but we use that nodatavalue in kluster to reform pings and do processing, so\n leave at 999 for Kluster. maxlen is the max number of expected beams per sector.\n returns: Z, square array padded with padval where arr is ragged\n \"\"\"\n\n # override the dynamic length of beams across records by applying static length limit.\n # ideally this should cover all cases\n if override_type is not None:\n typ = override_type\n else:\n typ = arr[0].dtype\n\n Z = np.full((len(arr), maxlen), padval, dtype=typ)\n for enu, row in enumerate(arr):\n # some records being read have NaNs in them unexpectedly, like part of the record isn't being read\n row[np.isnan(row)] = 0\n if detectioninfo:\n Z[enu, :len(row)] = self.translate_detectioninfo(row)\n else:\n Z[enu, :len(row)] = row\n return Z\n\n def _build_sequential_read_categories(self):\n \"\"\"\n sequential_read_records will go through the file and build a dictionary of the desired records. Specify those\n records that you want here, in recs_categories. I use a dot notation to access the correct attribute, see\n below.\n \"\"\"\n recs_categories = {'SKM': ['sample.KMdefault.dgtime', 'sample.KMdefault.roll_deg', 'sample.KMdefault.pitch_deg',\n 'sample.KMdefault.heave_m', 'sample.KMdefault.heading_deg',\n 'sample.KMdefault.latitude_deg', 'sample.KMdefault.longitude_deg',\n 'sample.KMdefault.ellipsoidHeight_m'],\n 'IIP': ['header.dgtime', 'install_txt'],\n 'MRZ': ['header.dgtime', 'cmnPart.pingCnt', 'cmnPart.rxTransducerInd',\n 'pingInfo.soundSpeedAtTxDepth_mPerSec', 'pingInfo.numTxSectors', 'header.systemID',\n 'txSectorInfo.txSectorNumb', 'txSectorInfo.tiltAngleReTx_deg',\n 'txSectorInfo.sectorTransmitDelay_sec', 'txSectorInfo.centreFreq_Hz',\n 'sounding.beamAngleReRx_deg', 'sounding.txSectorNumb', 'sounding.detectionType',\n 'sounding.qualityFactor', 'sounding.twoWayTravelTime_sec',\n 'pingInfo.modeAndStabilisation', 'pingInfo.pulseForm', 'pingInfo.depthMode'],\n 'IOP': ['header.dgtime', 'runtime_txt'],\n 'SVP': ['time_sec', 'sensorData.depth_m', 'sensorData.soundVelocity_mPerSec']}\n\n recs_categories_translator = {'SKM': {'sample.KMdefault.dgtime': [['attitude', 'time'], ['navigation', 'time']],\n 'sample.KMdefault.roll_deg': [['attitude', 'roll']],\n 'sample.KMdefault.pitch_deg': [['attitude', 'pitch']],\n 'sample.KMdefault.heave_m': [['attitude', 'heave']],\n 'sample.KMdefault.heading_deg': [['attitude', 'heading']],\n 'sample.KMdefault.latitude_deg': [['navigation', 'latitude']],\n 'sample.KMdefault.longitude_deg': [['navigation', 'longitude']],\n 'sample.KMdefault.ellipsoidHeight_m': [['navigation', 'altitude']]},\n 'MRZ': {'header.dgtime': [['ping', 'time']],\n 'cmnPart.pingCnt': [['ping', 'counter']],\n 'cmnPart.rxTransducerInd': [['ping', 'rxid']],\n 'pingInfo.soundSpeedAtTxDepth_mPerSec': [['ping', 'soundspeed']],\n 'pingInfo.numTxSectors': [['ping', 'ntx']],\n 'header.systemID': [['ping', 'serial_num']],\n 'txSectorInfo.txSectorNumb': [['ping', 'txsectorid']],\n 'txSectorInfo.tiltAngleReTx_deg': [['ping', 'tiltangle']],\n 'txSectorInfo.sectorTransmitDelay_sec': [['ping', 'delay']],\n 'txSectorInfo.centreFreq_Hz': [['ping', 'frequency']],\n 'sounding.beamAngleReRx_deg': [['ping', 'beampointingangle']],\n 'sounding.txSectorNumb': [['ping', 'txsector_beam']],\n 
'sounding.detectionType': [['ping', 'detectioninfo']],\n 'sounding.qualityFactor': [['ping', 'qualityfactor_percent']],\n 'sounding.twoWayTravelTime_sec': [['ping', 'traveltime']],\n 'pingInfo.modeAndStabilisation': [['ping', 'yawpitchstab']],\n 'pingInfo.pulseForm': [['ping', 'mode']],\n 'pingInfo.depthMode': [['ping', 'modetwo']]},\n 'IIP': {'header.dgtime': [['installation_params', 'time']],\n 'install_txt': [['installation_params', 'installation_settings']]},\n 'IOP': {'header.dgtime': [['runtime_params', 'time']],\n 'runtime_txt': [['runtime_params', 'runtime_settings']]},\n 'SVP': {'time_sec': [['profile', 'time']],\n 'sensorData.depth_m': [['profile', 'depth']],\n 'sensorData.soundVelocity_mPerSec': [['profile', 'soundspeed']]}}\n\n recs_categories_result = {\n 'attitude': {'time': None, 'roll': None, 'pitch': None, 'heave': None, 'heading': None},\n 'installation_params': {'time': None, 'serial_one': None, 'serial_two': None,\n 'installation_settings': None},\n 'ping': {'time': None, 'counter': None, 'rxid': None, 'soundspeed': None, 'ntx': None,\n 'serial_num': None, 'txsectorid': None, 'tiltangle': None, 'delay': None,\n 'frequency': None, 'beampointingangle': None, 'txsector_beam': None,\n 'detectioninfo': None, 'qualityfactor_percent': None, 'traveltime': None, 'mode': None,\n 'modetwo': None, 'yawpitchstab': None},\n 'runtime_params': {'time': None, 'runtime_settings': None},\n 'profile': {'time': None, 'depth': None, 'soundspeed': None},\n 'navigation': {'time': None, 'latitude': None, 'longitude': None, 'altitude': None}}\n\n return recs_categories, recs_categories_translator, recs_categories_result\n\n def _finalize_records(self, recs_to_read, recs_count):\n \"\"\"\n Take output from sequential_read_records and alter the type/size/translate as needed for Kluster to read and\n convert to xarray. 
Major steps include\n - adding empty arrays so that concatenation later on will work\n - pad_to_dense to convert the ragged sector-wise arrays into square numpy arrays\n - translate the runtime parameters from integer/binary codes to string identifiers for easy reading (and to\n allow comparing results between different file types)\n returns: recs_to_read, dict of dicts finalized\n \"\"\"\n # drop the delay array and txsector_beam array since we've already used it for adjusting ping time and building\n # sector masks\n recs_to_read['ping'].pop('delay')\n recs_to_read['ping'].pop('txsector_beam')\n\n # need to force in the serial number, its not in the header anymore with these kmall files...\n if recs_to_read['installation_params']['installation_settings'] is not None:\n inst_params = recs_to_read['installation_params']['installation_settings'][0]\n if inst_params is not None:\n recs_to_read['installation_params']['serial_one'] = np.array([int(inst_params['pu_serial_number'])])\n # currently nothing in the record for identifying the second system in a dual head\n recs_to_read['installation_params']['serial_two'] = np.array([0])\n\n for rec in recs_to_read:\n for dgram in recs_to_read[rec]:\n if recs_count[rec] == 0:\n if rec != 'runtime_params' or dgram == 'time':\n # found no records, empty array\n recs_to_read[rec][dgram] = np.zeros(0)\n else:\n # found no records, empty array of strings for the mode/stab records\n recs_to_read[rec][dgram] = np.zeros(0, 'U2')\n elif rec == 'ping':\n if dgram in ['beampointingangle', 'traveltime', 'qualityfactor_percent']:\n # these datagrams can vary in number of beams, have to pad with 999 for 'jaggedness'\n recs_to_read[rec][dgram] = self._pad_to_dense(recs_to_read[rec][dgram])\n elif dgram in ['detectioninfo', 'qualityfactor']:\n # same for detection info, but it also needs to be converted to something other than int8\n recs_to_read[rec][dgram] = self._pad_to_dense(recs_to_read[rec][dgram], override_type=np.int)\n elif dgram == 'yawandpitchstabilization':\n recs_to_read[rec][dgram] = self.translate_yawpitch_tostring(np.array(recs_to_read[rec][dgram]))\n elif dgram == 'mode':\n recs_to_read[rec][dgram] = self.translate_mode_tostring(np.array(recs_to_read[rec][dgram]))\n elif dgram == 'modetwo':\n recs_to_read[rec][dgram] = self.translate_mode_two_tostring(np.array(recs_to_read[rec][dgram]))\n else:\n recs_to_read[rec][dgram] = np.array(recs_to_read[rec][dgram])\n elif rec in ['navigation', 'attitude']: # these recs have time blocks of data in them, need to be concatenated\n recs_to_read[rec][dgram] = np.concatenate(recs_to_read[rec][dgram])\n else:\n recs_to_read[rec][dgram] = np.array(recs_to_read[rec][dgram])\n return recs_to_read\n\n def sequential_read_records(self, start_ptr=0, end_ptr=0, first_installation_rec=False):\n \"\"\"\n Read the file and return a dict of the wanted records/fields according to recs_categories. 
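        # Hypothetical usage sketch, assuming this reader class is instantiated as
        # `k` with a file name (names below are assumptions, not part of this module):
        # k = kmall('data.kmall')
        # recs = k.sequential_read_records()
        # att_times = recs['attitude']['time']        # 1-d array after _finalize_records
        # angles = recs['ping']['beampointingangle']  # 2-d array, padded with 999 where beams are absent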
If start_ptr/end_ptr\n is provided, start and end at those byte offsets.\n\n returns: recs_to_read, dict of dicts for each desired record read sequentially, see recs_categories\n \"\"\"\n recs_categories, recs_categories_translator, recs_categories_result = self._build_sequential_read_categories()\n wanted_records = list(recs_categories.keys())\n recs_to_read = copy.deepcopy(recs_categories_result)\n recs_count = dict([(k, 0) for k in recs_to_read])\n\n if self.FID is None:\n self.OpenFiletoRead()\n\n filelen = self._initialize_sequential_read(start_ptr, end_ptr)\n if start_ptr:\n self.seek_next_startbyte(filelen, start_ptr=start_ptr)\n\n while not self.eof:\n if self.FID.tell() >= start_ptr + filelen:\n self.eof = True\n break\n self.decode_datagram()\n if self.datagram_ident not in wanted_records:\n self.skip_datagram()\n continue\n self.read_datagram()\n for rec_ident in list(recs_categories_translator[self.datagram_ident].values())[0]:\n recs_count[rec_ident[0]] += 1\n\n rec = self.datagram_data\n recs = self._divide_rec(rec) # split up the MRZ record for multiple sectors, otherwise just returns [rec]\n for rec in recs:\n for subrec in recs_categories[self.datagram_ident]:\n # override for nested recs, designated with periods in the recs_to_read dict\n if subrec.find('.') > 0:\n if len(subrec.split('.')) == 3:\n rec_key = subrec.split('.')[2]\n tmprec = rec[subrec.split('.')[0]][subrec.split('.')[1]][rec_key]\n else:\n rec_key = subrec.split('.')[1]\n tmprec = rec[subrec.split('.')[0]][rec_key]\n else:\n rec_key = subrec\n tmprec = rec[rec_key]\n\n if subrec in ['install_txt', 'runtime_txt']: # str, casting to list splits the string, dont want that\n val = [tmprec]\n else:\n try: # flow for array/list attribute\n val = [np.array(tmprec)]\n except TypeError: # flow for float/int attribute\n val = [tmprec]\n\n # generate new list or append to list for each rec of that dgram type found\n for translated in recs_categories_translator[self.datagram_ident][subrec]:\n if recs_to_read[translated[0]][translated[1]] is None:\n recs_to_read[translated[0]][translated[1]] = copy.copy(val)\n else:\n recs_to_read[translated[0]][translated[1]].extend(val)\n if self.datagram_ident == 'IIP' and first_installation_rec:\n self.eof = True\n recs_to_read = self._finalize_records(recs_to_read, recs_count)\n return recs_to_read\n\n def translate_yawpitch_tostring(self, arr):\n \"\"\"\n Translate the binary code to a string identifier. Allows user to understand the mode\n without translating the integer code in their head. Kluster will build plots using these string identifiers\n in the legend.\n\n 'yawpitchstabilization' = 'Y' for Yaw stab, 'P' for pitch stab, 'PY' for both, 'N' for neither\n # xxxxxxx0 no pitch stab, xxxxxxx1 pitch stab\n # xxxxxx0x no yaw stab, xxxxxx1x yaw stab\n\n returns: rslt, numpy array of strings containing the translated yawpitch values\n \"\"\"\n rslt = np.full(arr.shape, 'N', dtype='U2')\n first_bit_chk = np.bitwise_and(arr, (1 << 0)).astype(bool)\n sec_bit_chk = np.bitwise_and(arr, (1 << 1)).astype(bool)\n\n rslt[np.intersect1d(np.where(first_bit_chk), np.where(sec_bit_chk))] = 'PY'\n rslt[np.intersect1d(np.where(first_bit_chk), np.where(sec_bit_chk == False))] = 'P'\n rslt[np.intersect1d(np.where(first_bit_chk == False), np.where(sec_bit_chk))] = 'Y'\n return rslt\n\n def translate_mode_tostring(self, arr):\n \"\"\"\n Translate the binary code to a string identifier (for MRZ pulseForm). Allows user to understand the mode\n without translating the integer code in their head. 
Kluster will build plots using these string identifiers\n in the legend.\n\n 'mode' = 'CW' for continuous waveform, 'FM' for frequency modulated, 'MIX' for both\n 0 for CW, 1 for MIX, 2 for FM\n\n returns: rslt, numpy array of strings containing the translated mode values\n \"\"\"\n rslt = np.full(arr.shape, 'MIX', dtype='U3')\n\n rslt[np.where(arr == 0)] = 'CW'\n rslt[np.where(arr == 1)] = 'MIX'\n rslt[np.where(arr == 2)] = 'FM'\n\n return rslt\n\n def translate_mode_two_tostring(self, arr):\n \"\"\"\n Translate the binary code to a string identifier (for MRZ depthMode). Allows user to understand the mode\n without translating the integer code in their head. Kluster will build plots using these string identifiers\n in the legend.\n\n 0 = VS, 1 = SH, 2 = ME, 3 = DE, 4 = DR, 5 = VD, 6 = ED, 7 = XD\n\n if mode is manually selected, there will be an 'm' in front (ex: VSm)\n\n returns: rslt, numpy array of strings containing the translated mode_two values\n \"\"\"\n rslt = np.zeros(arr.shape, dtype='U3')\n\n rslt[np.where(arr == 7)] = 'XD'\n rslt[np.where(arr == 6)] = 'ED'\n rslt[np.where(arr == 5)] = 'VD'\n rslt[np.where(arr == 4)] = 'DR'\n rslt[np.where(arr == 3)] = 'DE'\n rslt[np.where(arr == 2)] = 'ME'\n rslt[np.where(arr == 1)] = 'SH'\n rslt[np.where(arr == 0)] = 'VS'\n\n rslt[np.where(arr == 107)] = 'XDm'\n rslt[np.where(arr == 106)] = 'EDm'\n rslt[np.where(arr == 105)] = 'VDm'\n rslt[np.where(arr == 104)] = 'DRm'\n rslt[np.where(arr == 103)] = 'DEm'\n rslt[np.where(arr == 102)] = 'MEm'\n rslt[np.where(arr == 101)] = 'SHm'\n rslt[np.where(arr == 100)] = 'VSm'\n\n return rslt\n\n def translate_runtime_parameters_todict(self, r_text):\n \"\"\"\n runtime parameters text comes from file as a string with carriage retuns between entries.\n\n ex: '\"\\\\nSector coverage\\\\nMax angle Port: 70.0\\\\nMax angle Starboard: 70.0\\\\nMax coverage Port: ...\"'\n\n we want a dictionary of key: value pairs so we can save them as an xarray attribute and read them as a dict\n whenever we need to access. Also, we translate the keys to something more human readable. The translated\n key names will match up with .all files read with par module as well, so there is some cross compatibility (useful\n for Kluster multibeam processing)\n\n ex:\n\n returns: translated, dict of translated runtime parameters and values\n \"\"\"\n translated = {}\n entries = r_text.split('\\n')\n for entry in entries:\n if entry and (entry.find(':') != -1): # valid entries look like 'key: value', the rest are headers or blank\n key, value = entry.split(':')\n translated[key] = value.lstrip().rstrip()\n return translated\n\n def translate_installation_parameters_todict(self, i_text):\n \"\"\"\n installation parameters text comes from file as a comma delimited string with mix of = and ; separating the\n key/value pairs\n\n ex: 'SCV:Empty,EMXV:EM2040P,\\nPU_0,\\nSN=53011,\\nIP=157.237.20.40:0xffff0000,\\nUDP=1997,...'\n\n we want a dictionary of key: value pairs so we can save them as an xarray attribute and read them as a dict\n whenever we need to access. Also, we translate the keys to something more human readable. 
The translated\n key names will match up with .all files read with par module as well, so there is some cross compatibility (useful\n for Kluster multibeam processing)\n\n ex: {\"operator_controller_version\": \"Empty\", \"multibeam_system\": \"EM2040P\", \"pu_id_type\": \"0\",\n \"pu_serial_number\": \"53011\", \"ip_address_subnet_mask\": \"157.237.20.40:0xffff0000\",\n \"command_tcpip_port\": \"1997\",...}\n\n returns: translated, dict of translated installation parameters and values\n \"\"\"\n translate_install = {'SCV:': 'operator_controller_version', 'EMXV:': 'sonar_model_number', 'PU_': 'pu_id_type',\n 'SN=': 'pu_serial_number', 'IP=': 'ip_address_subnet_mask', 'UDP=': 'command_tcpip_port',\n 'TYPE=': 'cpu_type', 'DCL:': 'dcl_version', 'KMALL:': 'kmall_version',\n 'SYSTEM:': 'system_description', 'EMXI:SWLZ=': 'waterline_vertical_location'}\n translate_versions = {'CPU:': 'cpu_software_version', 'VXW:': 'vxw_software_version',\n 'FILTER:': 'filter_software_version', 'CBMF:': 'cbmf_software_version',\n 'TX:': 'tx_software_version', 'RX:': 'rx_software_version'}\n translate_serial = {'TX:': 'tx_serial_number', 'RX:': 'rx_serial_number'}\n # device translator will use the device identifier plus the values here, ex: 'TRAI_HD1' + '_serial_number'\n translate_device_ident = {'ATTI_1': 'motion_sensor_1', 'ATTI_2': 'motion_sensor_2', 'ATTI_3': 'motion_sensor_3',\n 'POSI_1': 'position_1', 'POSI_2': 'position_2', 'POSI_3': 'position_3',\n 'CLCK': 'clock', 'SVPI': 'sound_velocity_1', 'TRAI_HD1': 'transducer_1'}\n translate_device = {'N=': '_serial_number', 'X=': '_along_location', 'Y=': '_athwart_location',\n 'Z=': '_vertical_location', 'R=': '_roll_angle', 'P=': '_pitch_angle',\n 'H=': '_heading_angle', 'S=': '_sounder_size_deg',\n 'V=': '_version', 'W=': '_system_description', 'IPX=': '_port_sector_forward',\n 'IPY=': '_port_sector_starboard', 'IPZ=': '_port_sector_down',\n 'ICX=': '_center_sector_forward', 'ICY=': '_center_sector_starboard',\n 'ICZ=': '_center_sector_down', 'ISX=': '_starboard_sector_forward',\n 'ISY=': '_starboard_sector_starboard', 'ISZ=': '_starboard_sector_down',\n 'ITX=': '_tx_forward', 'ITY=': '_tx_starboard', 'ITZ=': '_tx_down',\n 'IRX=': '_rx_forward', 'IRY=': '_rx_starboard', 'IRZ=': '_rx_down', 'D=': '_time_delay',\n 'G=': '_datum', 'T=': '_time_stamp', 'C=': '_motion_compensation', 'F=': '_data_format',\n 'Q=': '_quality_check', 'I=': '_input_source', 'U=': '_active_passive',\n 'M=': 'motion_reference', 'A=': '_1pps'}\n\n # split by comma delimited groups\n records = [i_text.split(',') for i_text in i_text.split('\\n')]\n # subgroups are semicolon delimited\n # ex: TRAI_HD1:N=218;X=-0.293;Y=0.000;Z=0.861;R=0.496...\n records_flatten = [r.split(';') for rec in records for r in rec if r]\n\n translated = {}\n translate = translate_install\n for rec in records_flatten:\n # subgroups are parsed here, first rec contains the prefix\n # ex: ['ATTI_1:X=0.000', 'Y=0.000', 'Z=0.000', 'R=0.000', 'P=0.000', 'H=0.000', 'D=0.000'...\n if len(rec) > 1:\n prefix, first_rec = rec[0].split(':')\n try:\n prefix = translate_device_ident[prefix] # if its a prefix we haven't seen before, just pass it through\n except:\n pass\n ky, data = first_rec.split('=')\n translated[prefix + translate_device[ky + '=']] = data\n for subrec in rec[1:]:\n ky, data = subrec.split('=')\n translated[prefix + translate_device[ky + '=']] = data\n # regular groups parsed here, use the headers to determine which translator to use\n # ex: ['CBMF:1.11 18.02.20 ']\n else:\n if rec[0] == 'VERSIONS:':\n 
translate = translate_versions\n continue\n elif rec[0] == 'SERIALno:':\n translate = translate_serial\n continue\n elif rec[0] in ['VERSIONS-END', 'SERIALno-END']:\n translate = translate_install\n continue\n elif rec[0][-7:] == 'NOT_SET':\n continue\n\n key = [trans_key for trans_key in translate if rec[0].find(trans_key) != -1]\n if len(key) == 0:\n print('Unable to parse {}'.format(rec))\n elif len(key) == 1:\n translated[translate[key[0]]] = rec[0][len(key[0]):].rstrip()\n else:\n raise ValueError('Found multiple entries valid for record {}:{}'.format(rec, key))\n\n # plug in new keys for active position/motion sensor needed for kluster to identify the right sensor\n for mot_sens in ['motion_sensor_1_active_passive', 'motion_sensor_2_active_passive',\n 'motion_sensor_3_active_passive']:\n if mot_sens in translated:\n if translated[mot_sens] == 'ACTIVE':\n translated['active_heading_sensor'] = 'motion_' + mot_sens[14] # 'motion_1' in most cases\n for pos_sens in ['position_1_active_passive', 'position_2_active_passive', 'position_3_active_passive']:\n if pos_sens in translated:\n if translated[pos_sens] == 'ACTIVE':\n translated['active_position_system_number'] = 'position_' + pos_sens[9] # 'position_1'\n return translated\n\n\n def fast_read_start_end_time(self):\n \"\"\"\n Get the start and end time for the file without mapping the file\n returns: list, [UTC start time in seconds, UTC end time in seconds]\n \"\"\"\n self.datagram_data = None\n self.eof = False\n\n if self.FID is None:\n self.OpenFiletoRead()\n else:\n self.FID.seek(0)\n\n start_time = None\n end_time = None\n\n while not self.eof:\n self.decode_datagram()\n self.read_datagram()\n try:\n start_time = self.datagram_data['header']['dgtime']\n break\n except:\n continue\n\n # pick 10k of reading just to make sure you get some valid records, or the filelength if it is less than that\n self.FID.seek(0)\n chunksize = min(10 * 1024, self.FID.tell())\n self.FID.seek(-chunksize, 2)\n self.seek_next_startbyte(chunksize, self.FID.tell())\n while not self.eof:\n self.decode_datagram()\n self.read_datagram()\n try:\n end_time = self.datagram_data['header']['dgtime']\n break\n except:\n continue\n return [start_time, end_time]\n\n @staticmethod\n def read_header_raw(data) -> dict:\n header = {}\n format_to_unpack = \"1I4s2B1H2I\"\n fields = struct.unpack(format_to_unpack, data[0:struct.calcsize(format_to_unpack)])\n # Datagram length in bytes. The length field at the start (4 bytes) and end\n # of the datagram (4 bytes) are included in the length count.\n header['numBytesDgm'] = fields[0]\n # Array of length 4. Multibeam datagram type definition, e.g. #AAA\n header['dgmType'] = fields[1]\n # Datagram version.\n header['dgmVersion'] = fields[2]\n # System ID. Parameter used for separating datagrams from different echosounders\n # if more than one system is connected to SIS/K-Controller.\n header['systemID'] = fields[3]\n # Echo sounder identity, e.g. 122, 302, 710, 712, 2040, 2045, 850.\n header['echoSounderID'] = fields[4]\n # UTC time in seconds + Nano seconds remainder. 
Epoch 1970-01-01.\n header['time_sec'] = fields[5]\n header['time_nanosec'] = fields[6]\n return header\n\n @staticmethod\n def update_header_with_dgm_size(header, new_size) -> bytes:\n header['numBytesDgm'] = new_size\n format_to_pack = \"1I4s2B1H2I\"\n header_in_bytes = struct.pack(format_to_pack, header['numBytesDgm'], header['dgmType'],\n header['dgmVersion'], header['systemID'],\n header['echoSounderID'], header['time_sec'], header['time_nanosec'])\n return header_in_bytes\n\n def partition_msg(self, msg_to_split: bytes) -> []:\n \"\"\"\n Takes a KMALL datagram in bytes and splits its specific data content into several messages with a guaranteed\n maximum size of MAX_DATAGRAM_SIZE.\n The resulting messages are regular constructed KMALL datagrams made of a Header structure,\n a partition structure, a chunck of the original data content, terminated with a repetition of the datagram size.\n\n Partitions originating from the same message share the same timestamp as done by Kongsberg hardware.\n\n :param msg_to_split: KMALL datagram bytes containing the message to split\n :return: an array of smaller messages\n \"\"\"\n\n message_size = len(msg_to_split)\n if message_size <= self.MAX_DATAGRAM_SIZE:\n # No partitionning needed\n return [msg_to_split]\n else:\n # Data to be split is only a subset of the datagram:\n data_size = message_size - self.HEADER_AND_PART_SIZE - 4\n numOfDgms = math.ceil(data_size / float(self.MAX_DATA_SIZE))\n # Header from original message\n header_dict = self.read_header_raw(msg_to_split[:self.HEADER_STRUCT_SIZE])\n # Get the data content in the datagram and split it into smaller packs\n data_to_split = msg_to_split[self.HEADER_AND_PART_SIZE:-4]\n\n messages = []\n # Partitions created in this loop will all have the max packet size of 64000\n for i in range(numOfDgms - 1):\n header = self.update_header_with_dgm_size(header_dict, self.MAX_DATAGRAM_SIZE)\n # Partition index changes\n part_struct = struct.pack(\"2H\", numOfDgms, i+1)\n split = data_to_split[i*self.MAX_DATA_SIZE:(i+1)*self.MAX_DATA_SIZE]\n # Header + partition + data + message size repeated\n m = bytearray(header) + bytearray(part_struct) + bytearray(split) \\\n + bytearray(struct.pack('I', self.MAX_DATA_SIZE))\n messages.append(m)\n\n # Last partition must contain the rest\n rest_size = data_size % self.MAX_DATA_SIZE\n header = self.update_header_with_dgm_size(header_dict, rest_size + self.HEADER_AND_PART_SIZE + 4)\n part_struct = struct.pack(\"2H\", numOfDgms, numOfDgms)\n split = data_to_split[(numOfDgms - 1) * self.MAX_DATA_SIZE:]\n m = header + part_struct + split + struct.pack('I', self.MAX_DATA_SIZE)\n messages.append(m)\n\n return messages\n\ndef main(args=None):\n ''' Commandline script code.'''\n if args == None:\n args = sys.argv[1:]\n\n # Handle input arguments\n parser = argparse.ArgumentParser(description=\"A python script (and class) \"\n \"for parsing Kongsberg KMALL \"\n \"data files.\")\n parser.add_argument('-f', action='store', dest='kmall_filename',\n help=\"The path and filename to parse.\")\n parser.add_argument('-d', action='store', dest='kmall_directory',\n help=\"A directory containing kmall data files to parse.\")\n parser.add_argument('-V', action='store_true', dest='verify',\n default=False, help=\"Perform series of checks to verify the kmall file.\")\n parser.add_argument('-z', action='store_true', dest='compress',\n default=False, help=\"Create a compressed (somewhat lossy) version of the file. 
See -l\")\n parser.add_argument('-l', action='store', type=int, dest='compressionLevel',\n default=0, help=(\"Set the compression level (Default: 0).\\n\" +\n \"\\t 0: Somewhat lossy compression of soundings and imagery data.(Default)\\n\" +\n \"\\t 1: Somewhat lossy compression of soundings with imagery omitted.\"))\n parser.add_argument('-Z', action='store_true', dest='decompress',\n default=False, help=(\"Decompress a file compressed with this library. \" +\n \"Files must end in .Lz, where L is an integer indicating \" +\n \"the compression level (set by -l when compresssing)\"))\n parser.add_argument('-s',action='store_true', dest='split', default=False,\n help=\"Rewrites the KMALL file with large datagrams split into partitions\")\n\n parser.add_argument('-v', action='count', dest='verbose', default=0,\n help=\"Increasingly verbose output (e.g. -v -vv -vvv),\"\n \"for debugging use -vvv\")\n args = parser.parse_args()\n\n verbose = args.verbose\n\n kmall_filename = args.kmall_filename\n kmall_directory = args.kmall_directory\n verify = args.verify\n compress = args.compress\n decompress = args.decompress\n compressionLevel = args.compressionLevel\n split = args.split\n\n validCompressionLevels = [0, 1]\n if compressionLevel not in validCompressionLevels:\n print(\"Error: Compression level may be one of \" + str(validCompressionLevels))\n sys.exit()\n\n suffix = \"kmall\"\n if decompress:\n suffix\n\n if kmall_directory:\n filestoprocess = []\n\n if verbose >= 3:\n print(\"directory: \" + kmall_directory)\n\n # Recursively work through the directory looking for kmall files.\n for root, subFolders, files in os.walk(kmall_directory):\n for fileval in files:\n if fileval[-suffix.__len__():] == suffix:\n filestoprocess.append(os.path.join(root, fileval))\n else:\n filestoprocess = [kmall_filename]\n\n if filestoprocess.__len__() == 0:\n print(\"No files found to process.\")\n sys.exit()\n\n for filename in filestoprocess:\n print(\"\")\n print(\"Processing: %s\" % filename)\n\n # Create the class instance.\n K = kmall(filename)\n K.verbose = args.verbose\n if (K.verbose >= 1):\n print(\"Processing file: %s\" % K.filename)\n\n # Index file (check for index)\n K.index_file()\n\n # Rewrite with packet splits if requested.\n if split:\n filename_split = filename + \".split.kmall\"\n print(\"KMALL packets split will be written to : \" + filename_split)\n split_cnt = 0\n with open(filename_split, 'wb') as file:\n for offset, size, mtype in zip(K.Index['ByteOffset'],\n K.Index['MessageSize'],\n K.Index['MessageType']):\n K.FID.seek(offset, 0)\n datagram = K.FID.read(size)\n if size > K.MAX_DATAGRAM_SIZE:\n # split into smaller partitionned datagrams\n messages = K.partition_msg(datagram)\n split_cnt += 1\n for m in messages:\n file.write(m)\n else:\n file.write(datagram)\n print(\"Split done on {} packets\".format(split_cnt))\n\n ## Do packet verification if requested.\n pingcheckdata = []\n navcheckdata = []\n if verify:\n K.report_packet_types()\n pingcheckdata.append([x for x in K.check_ping_count()])\n\n K.extract_attitude()\n # Report gaps in attitude data.\n dt_att = np.diff([x.timestamp() for x in K.att[\"datetime\"]])\n navcheckdata.append([np.min(np.abs(dt_att)),\n np.max(dt_att),\n np.mean(dt_att),\n 1.0 / np.mean(dt_att),\n sum(dt_att >= 1.0)])\n # print(\"Navigation Gaps min: %0.3f, max: %0.3f, mean: %0.3f (%0.3fHz)\" %\n # (np.min(np.abs(dt_att)),np.max(dt_att),np.mean(dt_att),1.0/np.mean(dt_att)))\n # print(\"Navigation Gaps >= 1s: %d\" % sum(dt_att >= 1.0))\n print(\"Packet 
statistics:\")\n\n # Print column headers\n # print('%s' % \"\\t\".join(['File','Npings','NpingsMissing','NMissingMRZ'] +\n # ['Nav Min Time Gap','Nav Max Time Gap', 'Nav Mean Time Gap','Nav Mean Freq','Nav N Gaps >1s']))\n\n # Print columns\n # for x,y in zip(pingcheckdata,navcheckdata):\n # row = x+y\n # #print(row)\n # print(\"\\t\".join([str(x) for x in row]))\n\n # Create DataFrame to make printing easier.\n DataCheck = pd.DataFrame([x + y for x, y in zip(pingcheckdata, navcheckdata)], columns=\n ['File', 'Npings', 'NpingsMissing', 'NMissingMRZ'] +\n ['NavMinTimeGap', 'NavMaxTimeGap', 'NavMeanTimeGap', 'NavMeanFreq', 'NavNGaps>1s'])\n # K.navDataCheck = pd.DataFrame(navcheckdata,columns=['Min Time Gap','Max Time Gap', 'Mean Time Gap','Mean Freq','N Gaps >1s'])\n pd.set_option('display.max_columns', 30)\n pd.set_option('display.expand_frame_repr', False)\n print(DataCheck)\n\n ## Do compression if desired, at the desired level.\n if compress:\n\n if compressionLevel == 0:\n\n print(\"Compressing soundings and imagery.\")\n compressedFilename = K.filename + \".0z\"\n\n # Modify filename if the file already exists\n idx = 1\n while os.path.exists(compressedFilename):\n compressedFilename = ((K.filename + \"_\" + \"%02d.0z\") % idx)\n idx += 1\n\n T = kmall(compressedFilename)\n K.index_file()\n T.OpenFiletoWrite()\n\n for offset, size, mtype in zip(K.Index['ByteOffset'],\n K.Index['MessageSize'],\n K.Index['MessageType']):\n K.FID.seek(offset, 0)\n if mtype == \"b'#MRZ'\":\n dg = K.read_EMdgmMRZ()\n T.write_EMdgmCZ0(dg)\n else:\n buffer = K.FID.read(size)\n T.FID.write(buffer)\n\n K.closeFile()\n T.closeFile()\n\n if compressionLevel == 1:\n\n print(\"Compressing soundings, omitting imagery.\")\n compressedFilename = K.filename + \".1z\"\n\n # Modify filename if the file already exists\n idx = 1\n while os.path.exists(compressedFilename):\n compressedFilename = compressedFilename + \"_\" + str(idx)\n\n T = kmall(compressedFilename)\n K.index_file()\n T.OpenFiletoWrite()\n\n for offset, size, mtype in zip(K.Index['ByteOffset'],\n K.Index['MessageSize'],\n K.Index['MessageType']):\n K.FID.seek(offset, 0)\n if mtype == \"b'#MRZ'\":\n dg = K.read_EMdgmMRZ()\n T.write_EMdgmCZ1(dg)\n else:\n buffer = K.FID.read(size)\n T.FID.write(buffer)\n\n K.closeFile()\n T.closeFile()\n\n # Decompress the file is requested.\n if decompress:\n\n # Discern the compression level and base filename.\n regexp = '(?P<basename>.*\\.kmall)\\.(?P<level>\\d+)z'\n tokens = re.search(regexp, K.filename)\n if tokens is None:\n print(\"Could not discern compression level.\")\n print(\"Expecting xxxxx.kmall.\\d+.z, where \\d+ is 1 or more\")\n print(\"integers indicating the compression level.\")\n sys.exit()\n\n fileBasename = tokens['basename']\n compressionLevel = tokens['level']\n\n # Give some status.\n if compressionLevel == \"0\":\n print(\"Decompressing soundings and imagery.(Level: 0)\")\n elif compressionLevel == \"1\":\n print(\"Decompessing soundings, imagery was omitted in this format. 
(Level: 1)\")\n\n decompressedFilename = fileBasename\n # Check to see if decompressed filename exists and modify if necessary.\n idx = 1\n while os.path.exists(decompressedFilename):\n decompressedFilename = ((fileBasename[:-6] +\n \"_\" + \"%02d\" + '.kmall') % idx)\n idx += 1\n\n if verbose >= 1:\n print(\"Decompressing to: %s\" % decompressedFilename)\n print(\"Decompressing from Level: %s\" % compressionLevel)\n\n # Create kmall object for decompressed file and open it.\n T = kmall(filename=decompressedFilename)\n T.OpenFiletoWrite()\n\n # Loop through the file, decompressing datagrams\n # when necessary and just writing them when not.\n for offset, size, mtype in zip(K.Index['ByteOffset'],\n K.Index['MessageSize'],\n K.Index['MessageType']):\n K.FID.seek(offset, 0)\n if compressionLevel == \"0\":\n\n if mtype == \"b'#CZ0'\":\n dg = K.read_EMdgmCZ0()\n T.write_EMdgmMRZ(dg)\n else:\n buffer = K.FID.read(size)\n T.FID.write(buffer)\n\n if compressionLevel == \"1\":\n\n if mtype == \"b'#CZ1'\":\n dg = K.read_EMdgmCZ1()\n T.write_EMdgmMRZ(dg)\n else:\n buffer = K.FID.read(size)\n T.FID.write(buffer)\n\n T.closeFile()\n K.closeFile()\n\nif __name__ == '__main__':\n sys.exit(main())" ]
[ [ "numpy.max", "numpy.full", "numpy.array", "numpy.isnan", "numpy.concatenate", "numpy.zeros", "pandas.set_option", "numpy.round", "pandas.DataFrame", "numpy.min", "numpy.mean", "numpy.where", "numpy.bitwise_and", "numpy.sort", "numpy.abs", "numpy.unique" ] ]
IBSHAMI/Bike_sharing_prediction
[ "702f0baacd4ae3e050c5b91f800ed54df85ac939" ]
[ "utils.py" ]
[ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\n\n\ndef one_hot_encode(df, columns, drop_columns):\n \"\"\"\n One hot encode a dataframe with categorical columns\n \"\"\"\n for col in columns:\n dummies = pd.get_dummies(df[col], prefix=col)\n df = pd.concat([df, dummies], axis=1)\n\n df.drop(drop_columns, axis=1, inplace=True)\n\n return df\n\n\ndef train_test_split_df(df, test_size=0.2):\n \"\"\"\n Split dataframe into train and test data\n \"\"\"\n df_train, df_test = df[:round(len(df) * (1 - test_size))], df[round(\n len(df) * (1 - test_size)):]\n print(f\"len train df: {len(df_train)}\")\n print(f\"len test df: {len(df_test)}\")\n\n return df_train, df_test\n\n\ndef standardize(df, columns):\n \"\"\"\n Standardize dataframe with columns\n \"\"\"\n scaler = StandardScaler()\n scaler.fit(df[columns])\n df[columns] = scaler.transform(df[columns])\n\n return df, scaler\n\n\ndef divide_train_target(df, data_columns, date_fields, target):\n \"\"\"\n Divide dataframe into train and target\n convert train and target to numpy arrays\n \"\"\"\n features_columns = data_columns\n columns_to_drop = date_fields\n # combine elements of date_fields and target\n columns_to_drop.extend(target)\n\n for col in columns_to_drop:\n features_columns.remove(col)\n\n x, y = df[features_columns].to_numpy(), df[target].to_numpy()\n\n return x, y\n\n\ndef train_validation_split(x, y, test_size=0.2, random_state=42):\n \"\"\"\n Split dataframe into train and validation data\n \"\"\"\n\n x_train, x_valid, y_train, y_valid = train_test_split(x, y, test_size=test_size, random_state=random_state)\n\n return x_train, x_valid, y_train, y_valid" ]
[ [ "sklearn.model_selection.train_test_split", "pandas.concat", "pandas.get_dummies", "sklearn.preprocessing.StandardScaler" ] ]
lapid92/model_optimization
[ "3fc6db67cde912a1e22399bd43bc345ba035b8b6" ]
[ "model_compression_toolkit/pytorch/mixed_precision/mixed_precision_wrapper.py" ]
[ "# Copyright 2022 Sony Semiconductors Israel, Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom typing import Any, List\n\nimport torch\nimport copy\n\nfrom model_compression_toolkit import FrameworkInfo\nfrom model_compression_toolkit.common import BaseNode\nfrom model_compression_toolkit.pytorch.utils import set_model\n\n\nclass PytorchMixedPrecisionWrapper(torch.nn.Module):\n \"\"\"\n Class that wraps a Pytorch layer (nn.Module) to be used for mixed precision quantization.\n Allows to maintain quantized weights tensors for each of the layer's attributes that we want to quantize,\n with each of the candidate bitwidth options specified for the mixed precision model.\n During MP search, it allows to activate the relevant quantized weights tensor according to a given configuration,\n and use it for inference.\n \"\"\"\n def __init__(self,\n n: BaseNode,\n fw_info: FrameworkInfo):\n \"\"\"\n Construct a Pytorch model that constitutes as a wrapper for a Pytorch layer, built from a given graph node.\n Args:\n n: Node to build its Pytorch layer.\n fw_info: Framework information (e.g., mapping from layers to their attributes to quantize).\n \"\"\"\n super(PytorchMixedPrecisionWrapper, self).__init__()\n\n assert n.candidates_quantization_cfg is not None\n\n framework_attr = copy.copy(n.framework_attr)\n self.layer = n.type(**framework_attr)\n # loading the weights from the graph node (weights of the trained model)\n self.layer.load_state_dict({k: torch.Tensor(v) for k, v in n.weights.items()}, strict=False)\n set_model(self.layer)\n\n self.weight_attrs = fw_info.get_kernel_op_attributes(n.type)\n # float_weights is a list of weights for each attribute that we want to quantize.\n self.float_weights = [n.get_weights_by_keys(attr) for attr in\n self.weight_attrs]\n\n assert len(self.weight_attrs) == len(self.float_weights)\n\n self.node_q_cfg = n.candidates_quantization_cfg\n self.quantizer_fn_list = [qc.weights_quantization_cfg.weights_quantization_fn for qc in self.node_q_cfg]\n self.quantized_weights = self._get_quantized_weights()\n\n def forward(self, x: Any) -> Any:\n \"\"\"\n Args:\n x: input tensors to model.\n Returns:\n torch Tensor which is the output of the wrapped layer on the given input.\n \"\"\"\n return self.layer(x)\n\n def _get_quantized_weights(self):\n \"\"\"\n Calculates the quantized weights' tensors for each of the bitwidth candidates for quantization,\n to be stored and used during MP search.\n Returns: a list of quantized weights - for each bitwidth and layer's attribute to be quantized.\n \"\"\"\n quantized_weights = []\n for index, qc in enumerate(self.node_q_cfg):\n # for each quantization configuration in mixed precision\n # get quantized weights for each attribute and for each filter\n quantized_per_attr = []\n for float_weight in self.float_weights:\n # for each attribute\n 
quantized_per_attr.append(self.quantizer_fn_list[index](tensor_data=float_weight,\n n_bits=qc.weights_quantization_cfg.weights_n_bits,\n signed=True,\n quantization_params=qc.weights_quantization_cfg.weights_quantization_params,\n per_channel=qc.weights_quantization_cfg.weights_per_channel_threshold,\n output_channels_axis=qc.weights_quantization_cfg.weights_channels_axis))\n quantized_weights.append(quantized_per_attr)\n\n return quantized_weights\n\n def set_active_weights(self,\n bitwidth_idx: int,\n attr: str = None):\n \"\"\"\n Set a weights' tensor to use by the layer wrapped by the module.\n Args:\n bitwidth_idx: Index of a candidate quantization configuration to use its quantized\n version of the float weight.\n attr: Attributes of the layer's weights to quantize\n \"\"\"\n if attr is None: # set bit width to all weights of the layer\n attr_idxs = [attr_idx for attr_idx in range(len(self.quantized_weights[bitwidth_idx]))]\n self._set_bit_width_index(bitwidth_idx, attr_idxs)\n else: # set bit width to a specific attribute\n attr_idx = self.weight_attrs.index(attr)\n self._set_bit_width_index(bitwidth_idx, [attr_idx])\n\n def _set_bit_width_index(self,\n bitwidth_idx: int,\n attr_idxs: List[int]):\n \"\"\"\n Sets the wrapped layer's weights state with quantized weights, according to the given configuration.\n Args:\n bitwidth_idx: Index of a candidate quantization configuration to use its quantized\n version of the float weight.\n attr_idxs: Indices list of attributes of the layer's weights to quantize\n Returns: None (sets the new state of the layer inplace).\n \"\"\"\n assert bitwidth_idx < len(self.quantized_weights), \\\n f\"Index {bitwidth_idx} does not exist in current quantization candidates list\"\n\n loaded_weights = {k: torch.as_tensor(v) for k, v in self.layer.state_dict().items()}\n with torch.no_grad():\n for attr_idx in attr_idxs:\n # need to prepare the weights' tensor - extract it from the maintained quantized_weights list\n # and move it to the relevant device as the wrapped layer's weights.\n weights_tensor = self.quantized_weights[bitwidth_idx][attr_idx]\n weights_device = loaded_weights[self.weight_attrs[attr_idx]].device\n active_weights = torch.nn.Parameter(torch.from_numpy(weights_tensor).to(weights_device))\n loaded_weights[self.weight_attrs[attr_idx]] = active_weights\n self.layer.load_state_dict(loaded_weights, strict=True)\n" ]
[ [ "torch.as_tensor", "torch.no_grad", "torch.Tensor", "torch.from_numpy" ] ]
kodexp/smt
[ "cc390b795ea21eed66aae95218d5dfb67ed87a88" ]
[ "smt/utils/line_search.py" ]
[ "\"\"\"\nAuthor: Dr. John T. Hwang <[email protected]>\n \nThis package is distributed under New BSD license.\n\"\"\"\n\nfrom __future__ import print_function\nimport numpy as np\nimport scipy.sparse\nimport six\nfrom six.moves import range\n\nVALID_LINE_SEARCHES = (\"backtracking\", \"bracketed\", \"quadratic\", \"cubic\", \"null\")\n\n\ndef get_line_search_class(line_search):\n if line_search == \"backtracking\":\n return BacktrackingLineSearch\n elif line_search == \"bracketed\":\n return BracketedLineSearch\n elif line_search == \"quadratic\":\n return QuadraticLineSearch\n elif line_search == \"cubic\":\n return CubicLineSearch\n elif line_search == \"null\":\n return NullLineSearch\n\n\nclass LineSearch(object):\n \"\"\"\n Base line search class.\n \"\"\"\n\n def __init__(self, x, dx, func, grad, u1=1.0e-4, u2=0.9):\n \"\"\"\n Initialize all attributes for the given problem.\n\n Arguments\n ---------\n x : ndarray[:]\n Vector representing the current location in the n-D space.\n dx : ndarray[:]\n Search direction.\n func : function\n scalar function of x.\n grad : function\n vector function that yields the gradient of func.\n u1 : float\n Parameter in the sufficient decrease criterion to ensure non-zero decrease.\n u2 : float\n Parameter in the curvature criterion to ensure gradient norm decreases.\n \"\"\"\n self.x = x\n self.dx = dx\n self.func = func\n self.grad = grad\n\n self.u1 = u1\n self.u2 = u2\n\n self.phi_0 = self._phi(0.0)\n self.dphi_0 = self._dphi(0.0)\n\n def _phi(self, a):\n \"\"\"\n Function in terms of alpha (a).\n\n phi(a) = func(x + a dx)\n \"\"\"\n return self.func(self.x + a * self.dx)\n\n def _dphi(self, a):\n \"\"\"\n Derivative of phi w.r.t. alpha (a).\n \"\"\"\n return np.dot(self.grad(self.x + a * self.dx), self.dx)\n\n def _func_decreased(self, a):\n \"\"\"\n Check sufficient decrease criterion.\n \"\"\"\n return self._phi(a) <= self.phi_0 + self.u1 * a * self.dphi_0\n\n def _grad_decreased(self, a):\n \"\"\"\n Check curvature criterion.\n \"\"\"\n return np.abs(self._dphi(a)) <= np.abs(self.u2 * self.dphi_0)\n\n\nclass NullLineSearch(object):\n \"\"\"\n Base line search class.\n \"\"\"\n\n def __init__(self, x, dx, func, grad, u1=1.0e-4, u2=0.9):\n \"\"\"\n Initialize all attributes for the given problem.\n\n Arguments\n ---------\n x : ndarray[:]\n Vector representing the current location in the n-D space.\n dx : ndarray[:]\n Search direction.\n func : function\n scalar function of x.\n grad : function\n vector function that yields the gradient of func.\n u1 : float\n Parameter in the sufficient decrease criterion to ensure non-zero decrease.\n u2 : float\n Parameter in the curvature criterion to ensure gradient norm decreases.\n \"\"\"\n self.x = x\n self.dx = dx\n\n def __call__(self, initial_a=1):\n return self.x + initial_a * self.dx\n\n\nclass BacktrackingLineSearch(LineSearch):\n \"\"\"\n Simple backtracking line search enforcing only sufficient decrease.\n \"\"\"\n\n def __call__(self, initial_a=1.0, rho=0.5):\n a = initial_a\n while not self._func_decreased(a):\n a *= rho\n return self.x + a * self.dx\n\n\nclass BracketedLineSearch(LineSearch):\n \"\"\"\n Base class for line search algorithms enforcing the Strong Wolfe conditions.\n \"\"\"\n\n def __call__(self, initial_a=1):\n a1 = 0\n a2 = initial_a\n p1 = self._phi(a1)\n p2 = self._phi(a2)\n dp1 = self._dphi(a1)\n dp2 = self._dphi(a2)\n\n for ind in range(20):\n if not self._func_decreased(a2) or p2 > p1:\n # We've successfully bracketed if\n # 1. 
The function value is greater than at a=0\n # 2. The function value has increased from the previous iteration\n return self._zoom(a1, p1, dp1, a2, p2, dp2)\n\n if self._grad_decreased(a2):\n # At this point, the func decrease condition is satisfied,\n # so if the grad decrease also is satisfied, we're done.\n return self.x + a2 * self.dx\n elif dp2 >= 0:\n # If only the func decrease is satisfied, but the phi' is positive\n # we've successfully bracketed.\n return self._zoom(a2, p2, dp2, a1, p1, dp1)\n else:\n # Otherwise, we're lower than initial f and previous f,\n # and the slope is still negative and steeper than initial.\n # We can get more aggressive and increase the step.\n a1 = a2\n p1 = p2\n dp1 = dp2\n a2 = a2 * 1.5\n p2 = self._phi(a2)\n dp2 = self._dphi(a2)\n\n def _zoom(self, a1, p1, dp1, a2, p2, dp2):\n \"\"\"\n Find a solution in the interval, [a1, a2], assuming that phi(a1) < phi(a2).\n \"\"\"\n while True:\n a, p, dp = self._compute_minimum(a1, p1, dp1, a2, p2, dp2)\n\n if not self._func_decreased(a) or p > p1:\n # If still lower than initial f or still higher than low\n # then make this the new high.\n a2 = a\n p2 = p\n dp2 = dp\n else:\n if self._grad_decreased(a):\n # Both conditions satisfied, so we're done.\n return self.x + a * self.dx\n elif dp * (a2 - a1) >= 0:\n # We have a new low and the slope has the right sign.\n a2 = a1\n p2 = p1\n dp2 = dp1\n a1 = a\n p1 = p\n dp1 = dp\n\n def _compute_minimum(self, a1, p1, dp1, a2, p2, dp2):\n \"\"\"\n Estimate the minimum as the midpoint.\n \"\"\"\n a = 0.5 * a1 + 0.5 * a2\n p = self._phi(a)\n dp = self._dphi(a)\n return a, p, dp\n\n\nclass QuadraticLineSearch(BracketedLineSearch):\n \"\"\"\n Use quadratic interpolation in the zoom method.\n \"\"\"\n\n def _compute_minimum(self, a1, p1, dp1, a2, p2, dp2):\n quadratic_mtx = np.zeros((3, 3))\n quadratic_mtx[0, :] = [1.0, a1, a1 ** 2]\n quadratic_mtx[1, :] = [1.0, a2, a2 ** 2]\n quadratic_mtx[2, :] = [0.0, 1.0, 2 * a1]\n c0, c1, c2 = np.linalg.solve(quadratic_mtx, [p1, p2, dp1])\n\n d0 = c1\n d1 = 2 * c2\n\n a = -d0 / d1\n p = self._phi(a)\n dp = self._dphi(a)\n return a, p, dp\n\n\nclass CubicLineSearch(BracketedLineSearch):\n \"\"\"\n Use cubic interpolation in the zoom method.\n \"\"\"\n\n def _compute_minimum(self, a1, p1, dp1, a2, p2, dp2):\n cubic_mtx = np.zeros((4, 4))\n cubic_mtx[0, :] = [1.0, a1, a1 ** 2, a1 ** 3]\n cubic_mtx[1, :] = [1.0, a2, a2 ** 2, a2 ** 3]\n cubic_mtx[2, :] = [0.0, 1.0, 2 * a1, 3 * a1 ** 2]\n cubic_mtx[3, :] = [0.0, 1.0, 2 * a2, 3 * a2 ** 2]\n c0, c1, c2, c3 = np.linalg.solve(cubic_mtx, [p1, p2, dp1, dp2])\n\n d0 = c1\n d1 = 2 * c2\n d2 = 3 * c3\n r1, r2 = np.roots([d2, d1, d0])\n\n a = None\n p = max(p1, p2)\n if (a1 <= r1 <= a2 or a2 <= r1 <= a1) and np.isreal(r1):\n px = self._phi(r1)\n if px < p:\n a = r1\n p = px\n dp = self._dphi(r1)\n if (a1 <= r2 <= a2 or a2 <= r2 <= a1) and np.isreal(r2):\n px = self._phi(r2)\n if px < p:\n a = r2\n p = px\n dp = self._dphi(r2)\n\n return a, p, dp\n" ]
[ [ "numpy.zeros", "numpy.roots", "numpy.isreal", "numpy.linalg.solve", "numpy.abs" ] ]
jkznst/maskrcnn-benchmark
[ "7d238568e6240397ddacdb1b87d08334e8c358b4" ]
[ "maskrcnn_benchmark/data/datasets/occludedlinemod.py" ]
[ "import os\n\nimport torch\nimport torch.utils.data\nfrom PIL import Image\nimport sys\n\nif sys.version_info[0] == 2:\n import xml.etree.cElementTree as ET\nelse:\n import xml.etree.ElementTree as ET\n\n\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\nfrom maskrcnn_benchmark.structures.keypoint import BB8Keypoints\n\n\nclass OccludedLINEMODDataset(torch.utils.data.Dataset):\n\n CLASSES = (\n \"__background__ \",\n \"obj_01\",\n \"obj_05\",\n \"obj_06\",\n \"obj_08\",\n \"obj_09\",\n \"obj_10\",\n \"obj_11\",\n \"obj_12\"\n )\n\n def __init__(self, data_dir, split, use_difficult=False, transforms=None):\n self.root = data_dir\n self.image_set = split\n self.keep_difficult = use_difficult\n self.transforms = transforms\n\n self._annopath = os.path.join(self.root, \"Annotations\", \"%s.xml\")\n self._imgpath = os.path.join(self.root, \"JPEGImages\", \"%s.jpg\")\n self._imgsetpath = os.path.join(self.root, \"ImageSets\", \"Main\", \"%s.txt\")\n\n with open(self._imgsetpath % self.image_set) as f:\n self.ids = f.readlines()\n self.ids = [x.strip(\"\\n\") for x in self.ids]\n self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}\n\n cls = OccludedLINEMODDataset.CLASSES\n self.class_to_ind = dict(zip(cls, range(len(cls))))\n\n def __getitem__(self, index):\n img_id = self.ids[index]\n img = Image.open(self._imgpath % img_id).convert(\"RGB\")\n\n target = self.get_groundtruth(index)\n target = target.clip_to_image(remove_empty=True)\n\n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target, index\n\n def __len__(self):\n return len(self.ids)\n\n def get_groundtruth(self, index):\n img_id = self.ids[index]\n anno = ET.parse(self._annopath % img_id).getroot()\n anno = self._preprocess_annotation(anno)\n\n height, width = anno[\"im_info\"]\n target = BoxList(anno[\"boxes\"], (width, height), mode=\"xyxy\")\n target.add_field(\"labels\", anno[\"labels\"])\n target.add_field(\"difficult\", anno[\"difficult\"])\n\n keypoints = anno[\"bb8keypoints\"]\n num_instance = keypoints.shape[0]\n if num_instance > 0:\n keypoints = keypoints.view(num_instance, -1, 2)\n keypoints[:, :, 0] *= width\n keypoints[:, :, 1] *= height\n keypoints = torch.cat((keypoints,\n 2 * torch.ones((keypoints.shape[0], keypoints.shape[1], 1)))\n , dim=-1) # set to all visible\n keypoints = BB8Keypoints(keypoints, (width, height))\n target.add_field(\"bb8keypoints\", keypoints)\n return target\n\n def _preprocess_annotation(self, target):\n boxes = []\n gt_classes = []\n difficult_boxes = []\n bb8_keypoints = []\n TO_REMOVE = 1\n \n for obj in target.iter(\"object\"):\n difficult = int(obj.find(\"difficult\").text) == 1\n if not self.keep_difficult and difficult:\n continue\n name = obj.find(\"name\").text.lower().strip()\n if name not in self.CLASSES:\n continue\n bb = obj.find(\"bndbox\")\n # Make pixel indexes 0-based\n # Refer to \"https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/pascal_voc.py#L208-L211\"\n box = [\n bb.find(\"xmin\").text, \n bb.find(\"ymin\").text, \n bb.find(\"xmax\").text, \n bb.find(\"ymax\").text,\n ]\n bndbox = tuple(\n map(lambda x: x - TO_REMOVE, list(map(int, box)))\n )\n # bb8 normalized coordinates in range [0,1], in \"xy\" mode\n bb8 = obj.find(\"BB8\").text.split(\",\")\n bb8 = [float(i) for i in bb8]\n\n boxes.append(bndbox)\n gt_classes.append(self.class_to_ind[name])\n difficult_boxes.append(difficult)\n bb8_keypoints.append(bb8)\n\n size = target.find(\"size\")\n im_info = tuple(map(int, 
(size.find(\"height\").text, size.find(\"width\").text)))\n\n res = {\n \"boxes\": torch.tensor(boxes, dtype=torch.float32),\n \"labels\": torch.tensor(gt_classes),\n \"difficult\": torch.tensor(difficult_boxes),\n \"bb8keypoints\": torch.tensor(bb8_keypoints),\n \"im_info\": im_info,\n }\n return res\n\n def get_img_info(self, index):\n img_id = self.ids[index]\n anno = ET.parse(self._annopath % img_id).getroot()\n size = anno.find(\"size\")\n im_info = tuple(map(int, (size.find(\"height\").text, size.find(\"width\").text)))\n return {\"height\": im_info[0], \"width\": im_info[1]}\n\n def map_class_id_to_class_name(self, class_id):\n return OccludedLINEMODDataset.CLASSES[class_id]\n" ]
[ [ "torch.tensor", "torch.ones" ] ]
arnon-weinberg/Upscale-interpolate-STARnet
[ "d898d38364a36f4633cfba8f914db20d9b900217" ]
[ "eval.py" ]
[ "from __future__ import print_function\nimport argparse\n\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom fbpn_sr_rbpn_v1 import Net as FBPNSR_RBPN_V1\nfrom fbpn_sr_rbpn_v2 import Net as FBPNSR_RBPN_V2\nfrom fbpn_sr_rbpn_v3 import Net as FBPNSR_RBPN_V3\nfrom fbpn_sr_rbpn_v4 import Net as FBPNSR_RBPN_V4\nfrom fbpn_sr_rbpn_v1_ref import Net as FBPNSR_RBPN_V1_REF\nfrom fbpn_sr_rbpn_v2_ref import Net as FBPNSR_RBPN_V2_REF\nfrom fbpn_sr_rbpn_v3_ref import Net as FBPNSR_RBPN_V3_REF\nfrom fbpn_sr_rbpn_v4_ref import Net as FBPNSR_RBPN_V4_REF, FeatureExtractor\nfrom data import get_test_set\nfrom functools import reduce\nimport numpy as np\nimport utils\nfrom math import ceil\nimport time\nimport cv2\nimport math\nimport pdb\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch Super Res Example')\nparser.add_argument('--upscale_factor', type=int, default=4, help=\"super resolution upscale factor\")\nparser.add_argument('--testBatchSize', type=int, default=1, help='testing batch size')\nparser.add_argument('--gpu_mode', type=bool, default=True) # Use GPU or CPU\nparser.add_argument('--chop_forward', type=bool, default=False)\nparser.add_argument('--threads', type=int, default=1, help='number of threads for data loader to use')\nparser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')\nparser.add_argument('--gpus', default=1, type=float, help='number of gpu')\nparser.add_argument('--data_dir', type=str, default='vimeo_triplet/sequences')\nparser.add_argument('--file_list', type=str, default='tri_testlist.txt')\nparser.add_argument('--model_type', type=str, default='FBPNSR_RBPN_V4_REF')\nparser.add_argument('--output', default='Results/', help='Location to save checkpoint models')\nparser.add_argument('--model', default='weights/FBPNSR_RBPN_V4_REF_Lr_STAR_ST.pth', help='sr pretrained base model')\n\nopt = parser.parse_args()\n\ngpus_list=range(opt.gpus)\nprint(opt)\n\ncuda = opt.gpu_mode\nif cuda and not torch.cuda.is_available():\n raise Exception(\"No GPU found, please run without --cuda\")\n\ntorch.manual_seed(opt.seed)\nif cuda:\n torch.cuda.manual_seed(opt.seed)\nelse:\n torch.manual_seed(opt.seed)\n\nprint('===> Loading datasets')\ntest_set = get_test_set(opt.data_dir, opt.upscale_factor, opt.file_list)\ntesting_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False)\n\nprint('===> Building model ', opt.model_type)\nif opt.model_type == 'FBPNSR_RBPN_V1_REF':\n model = FBPNSR_RBPN_V1_REF(base_filter=256, feat = 64, num_stages=3, n_resblock=5, scale_factor=opt.upscale_factor)\nelif opt.model_type == 'FBPNSR_RBPN_V2_REF':\n model = FBPNSR_RBPN_V2_REF(base_filter=256, feat = 64, num_stages=3, n_resblock=5, scale_factor=opt.upscale_factor)\nelif opt.model_type == 'FBPNSR_RBPN_V3_REF':\n model = FBPNSR_RBPN_V3_REF(base_filter=256, feat = 64, num_stages=3, n_resblock=5, scale_factor=opt.upscale_factor)\nelif opt.model_type == 'FBPNSR_RBPN_V4_REF':\n model = FBPNSR_RBPN_V4_REF(base_filter=256, feat = 64, num_stages=3, n_resblock=5, scale_factor=opt.upscale_factor)\nelif opt.model_type == 'FBPNSR_RBPN_V1':\n model = FBPNSR_RBPN_V1(base_filter=256, feat = 64, num_stages=3, n_resblock=5, scale_factor=opt.upscale_factor)\nelif opt.model_type == 'FBPNSR_RBPN_V2':\n model = FBPNSR_RBPN_V2(base_filter=256, feat = 64, num_stages=3, n_resblock=5, 
scale_factor=opt.upscale_factor)\nelif opt.model_type == 'FBPNSR_RBPN_V3':\n model = FBPNSR_RBPN_V3(base_filter=256, feat = 64, num_stages=3, n_resblock=5, scale_factor=opt.upscale_factor)\nelif opt.model_type == 'FBPNSR_RBPN_V4':\n model = FBPNSR_RBPN_V4(base_filter=256, feat = 64, num_stages=3, n_resblock=5, scale_factor=opt.upscale_factor)\n \nif cuda:\n model = torch.nn.DataParallel(model, device_ids=gpus_list)\nelse:\n model = torch.nn.DataParallel(model, device_ids=['cpu'])\n\ndef print_network(net):\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n print(net)\n print('Total number of parameters: %d' % num_params)\n\nprint('---------- Networks architecture -------------')\nprint_network(model)\nprint('----------------------------------------------')\n\nmodel.load_state_dict(torch.load(opt.model, map_location=lambda storage, loc: storage))\nprint('Pre-trained SR model is loaded.')\n\n\nif cuda:\n model = model.cuda(gpus_list[0])\n\ndef eval():\n model.eval()\n avg_psnr_predicted = 0.0\n for batch in testing_data_loader:\n t0 = time.time()\n input, flow_f, flow_b, filename, d_dir = batch[0], batch[1], batch[2], batch[3], batch[4]\n \n with torch.no_grad():\n if cuda:\n t_im1 = Variable(input[0]).cuda(gpus_list[0])\n t_im2 = Variable(input[1]).cuda(gpus_list[0])\n t_flow_f = Variable(flow_f).cuda(gpus_list[0]).float()\n t_flow_b = Variable(flow_b).cuda(gpus_list[0]).float()\n else:\n t_im1 = Variable(input[0])\n t_im2 = Variable(input[1])\n t_flow_f = Variable(flow_f).float()\n t_flow_b = Variable(flow_b).float()\n \n if opt.chop_forward:\n with torch.no_grad():\n pred_ht, pred_h1, pred_h2, pred_l = chop_forward(t_im1, t_im2, t_flow_f, t_flow_b, model)\n else:\n with torch.no_grad():\n pred_ht, pred_h1, pred_h2, pred_l = model(t_im1, t_im2, t_flow_f, t_flow_b, train=False)\n \n t1 = time.time()\n \n print(\"===> Processing: %s || Timer: %.4f sec.\" % (d_dir[0]+'/frame10i11.png', (t1 - t0)))\n pred_ht = utils.denorm(pred_ht[0].cpu().data,vgg=True)\n pred_h1 = utils.denorm(pred_h1[0].cpu().data,vgg=True)\n pred_h2 = utils.denorm(pred_h2[0].cpu().data,vgg=True)\n pred_l = utils.denorm(pred_l[0].cpu().data,vgg=True)\n\n #save_img(pred, d_dir[0],'frame10i11.png', True)\n save_img(pred_ht, d_dir[0],'im2.png', False)\n save_img(pred_h1, d_dir[0],'im1.png', False)\n save_img(pred_h2, d_dir[0],'im3.png', False)\n save_img(pred_l, d_dir[0],'im_l.png', False)\n #save_img(target, str(count), False)\n \n #print(\"PSNR_predicted=\", avg_psnr_predicted/count)\n\ndef save_img(img, d_dir,img_name, pred_flag):\n save_img = img.squeeze().clamp(0, 1).numpy().transpose(1,2,0)\n filename = os.path.splitext(img_name)\n\n # save img\n save_dir=os.path.join(opt.output, d_dir)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n \n if pred_flag:\n save_fn = save_dir +'/'+ filename[0]+'_'+opt.model_type+filename[1]\n else:\n save_fn = save_dir +'/'+ img_name\n cv2.imwrite(save_fn, cv2.cvtColor(save_img*255, cv2.COLOR_BGR2RGB), [cv2.IMWRITE_PNG_COMPRESSION, 0])\n \ndef chop_forward(t_im1, t_im2, t_flow_f, t_flow_b, iter, model, shave=8, min_size=160000, nGPUs=opt.gpus):\n b, c, h, w = t_im1.size()\n h_half, w_half = h // 2, w // 2\n h_size, w_size = h_half + shave, w_half + shave\n \n mod_size = 4\n if h_size%mod_size:\n h_size = ceil(h_size/mod_size)*mod_size\n if w_size%mod_size:\n w_size = ceil(w_size/mod_size)*mod_size\n \n inputlist = [\n [t_im1[:, :, 0:h_size, 0:w_size], t_im2[:, :, 0:h_size, 0:w_size], t_flow_f[:, :, 0:h_size, 0:w_size], t_flow_b[:, :, 0:h_size, 
0:w_size], iter],\n [t_im1[:, :, 0:h_size, (w - w_size):w],t_im2[:, :, 0:h_size, (w - w_size):w],t_flow_f[:, :, 0:h_size, (w - w_size):w],t_flow_b[:, :, 0:h_size, (w - w_size):w],iter ],\n [t_im1[:, :, (h - h_size):h, 0:w_size],t_im2[:, :, (h - h_size):h, 0:w_size],t_flow_f[:, :, (h - h_size):h, 0:w_size],t_flow_b[:, :, (h - h_size):h, 0:w_size],iter ],\n [t_im1[:, :, (h - h_size):h, (w - w_size):w],t_im2[:, :, (h - h_size):h, (w - w_size):w],t_flow_f[:, :, (h - h_size):h, (w - w_size):w],t_flow_b[:, :, (h - h_size):h, (w - w_size):w],iter ]]\n\n if w_size * h_size < min_size:\n outputlist = []\n for i in range(0, 4, nGPUs):\n with torch.no_grad():\n input_batch = inputlist[i]#torch.cat(inputlist[i:(i + nGPUs)], dim=0)\n output_batch = model(input_batch[0], input_batch[1], input_batch[2], input_batch[3], train=False)\n outputlist.extend(output_batch.chunk(nGPUs, dim=0))\n else:\n outputlist = [\n chop_forward(patch[0], patch[1], patch[2],patch[3],patch[4], model, shave, min_size, nGPUs) \\\n for patch in inputlist]\n\n scale=1\n h, w = scale * h, scale * w\n h_half, w_half = scale * h_half, scale * w_half\n h_size, w_size = scale * h_size, scale * w_size\n shave *= scale\n\n with torch.no_grad():\n output = Variable(t_im1.data.new(b, c, h, w))\n output[:, :, 0:h_half, 0:w_half] \\\n = outputlist[0][:, :, 0:h_half, 0:w_half]\n output[:, :, 0:h_half, w_half:w] \\\n = outputlist[1][:, :, 0:h_half, (w_size - w + w_half):w_size]\n output[:, :, h_half:h, 0:w_half] \\\n = outputlist[2][:, :, (h_size - h + h_half):h_size, 0:w_half]\n output[:, :, h_half:h, w_half:w] \\\n = outputlist[3][:, :, (h_size - h + h_half):h_size, (w_size - w + w_half):w_size]\n\n return output\n\n##Eval Start!!!!\neval()\n" ]
[ [ "torch.cuda.manual_seed", "torch.autograd.Variable", "torch.no_grad", "torch.manual_seed", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.load", "torch.nn.DataParallel" ] ]
goncaloperes/robustness_metrics
[ "5ee77294432e1265e432b6e84e06e2a5ae2af387" ]
[ "robustness_metrics/metrics/uncertainty_test.py" ]
[ "# coding=utf-8\n# Copyright 2021 The Robustness Metrics Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for uncertainty metrics.\"\"\"\nimport itertools\nimport math\n\nfrom absl import logging\nfrom absl.testing import parameterized\nimport numpy as np\nimport robustness_metrics as rm\nimport sklearn.model_selection\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\n\n_GCE_DEFAULT = (\"gce(binning_scheme='adaptive',max_prob=True,\"\n \"class_conditional=False,norm='l2',num_bins=2,threshold=0.0)\")\n_GCE_EXPLICIT_DEFAULT = (\"gce(binning_scheme='adaptive',max_prob=True,\"\n \"class_conditional=False,norm='l2',num_bins=2,\"\n \"threshold=0.0,recalibration_method=None)\")\n_GCE_TEMP_SCALING_ALL = (\n \"gce(binning_scheme='adaptive',max_prob=True,\"\n \"class_conditional=False,norm='l2',num_bins=2,threshold=0.0,\"\n \"recalibration_method='temperature_scaling')\")\n_GCE_ISOTONIC_REGR_ALL = (\n \"gce(binning_scheme='adaptive',max_prob=True,\"\n \"class_conditional=False,norm='l2',num_bins=2,threshold=0.0,\"\n \"recalibration_method='isotonic_regression')\")\n_GCE_TEMP_SCALING_SPLIT = (\n \"gce(binning_scheme='adaptive',max_prob=True,\"\n \"class_conditional=False,norm='l2',num_bins=2,\"\n \"threshold=0.0,recalibration_method='temperature_scaling',\"\n \"fit_on_percent=60.0,seed=3765486)\")\n_GCE_ISOTONIC_REGR_SPLIT = (\"gce(binning_scheme='adaptive',max_prob=True,\"\n \"class_conditional=False,norm='l2',num_bins=2,\"\n \"threshold=0.0,recalibration_method=\"\n \"'isotonic_regression',fit_on_percent=60.0,\"\n \"seed=3765486)\")\n\n_UNCERTAINTY_METRICS = [\n \"ece\", \"nll\", \"brier\", _GCE_DEFAULT, _GCE_EXPLICIT_DEFAULT,\n _GCE_TEMP_SCALING_ALL, _GCE_ISOTONIC_REGR_ALL, _GCE_TEMP_SCALING_SPLIT,\n _GCE_ISOTONIC_REGR_SPLIT, \"temperature_scaling\"]\n\n\ndef _get_info(num_classes=2):\n return rm.datasets.base.DatasetInfo(num_classes=num_classes)\n\n\ndef _normalize(x):\n return [x_i / sum(x) for x_i in x]\n\n\ndef _with_labelset(name):\n if \"(\" in name:\n return name[:-1] + \",use_dataset_labelset=True)\"\n else:\n return f\"{name}(use_dataset_labelset=True)\"\n\n\ndef _with_tempdir(name, tempdir):\n if \"(\" in name:\n return name[:-1] + f\",pickle_path='{tempdir}')\"\n else:\n return f\"{name}(pickle_path='{tempdir}')\"\n\n\nclass KerasMetricTest(parameterized.TestCase, tf.test.TestCase):\n\n def assertDictsAlmostEqual(self, dict_1, dict_2):\n self.assertEqual(dict_1.keys(), dict_2.keys())\n for key in dict_1:\n self.assertAlmostEqual(dict_1[key], dict_2[key], places=5)\n\n @parameterized.parameters([(name,) for name in _UNCERTAINTY_METRICS])\n def test_binary_prediction_two_predictions_per_element(self, name):\n if \"gce\" == name[:3]:\n tempdir = self.create_tempdir().full_path\n # Need unmodified `name` to look up expected output.\n tempname = _with_tempdir(name, tempdir)\n else:\n tempname = name\n metric = rm.metrics.get(tempname, _get_info(2))\n metric_ls = rm.metrics.get(\n _with_labelset(tempname),\n 
rm.datasets.base.DatasetInfo(num_classes=3, appearing_classes=[0, 1]))\n metric.add_predictions(\n rm.common.types.ModelPredictions(predictions=[[.2, .8], [.7, .3]]),\n metadata={\"label\": 1, \"element_id\": 1})\n metric_ls.add_predictions(\n rm.common.types.ModelPredictions(\n predictions=[_normalize([.2, .8, .3]),\n _normalize([.7, .3, .5])]),\n metadata={\"label\": 1, \"element_id\": 1})\n expected_output = {\n _GCE_DEFAULT: {\"gce\": .45},\n _GCE_EXPLICIT_DEFAULT: {\"gce\": 0.45},\n _GCE_TEMP_SCALING_ALL: {\"gce\": 0.0, \"beta\": 1.6040830888247386e+33},\n _GCE_ISOTONIC_REGR_ALL: {\"gce\": 0.0},\n _GCE_TEMP_SCALING_SPLIT: {\"gce\": 0.0},\n _GCE_ISOTONIC_REGR_SPLIT: {\"gce\": 0.0},\n \"ece\": {\"ece\": .45},\n \"nll\": {\"nll\": -math.log((.8 + .3) / 2)},\n \"brier\": {\"brier\": ((.2 + .7) / 2)**2 + ((.2 + .7) / 2)**2},\n \"temperature_scaling\": {\"beta\": 1.6040830888247386e+33},\n }[name]\n if name == _GCE_TEMP_SCALING_SPLIT:\n self.assertAlmostEqual(metric.result()[\"gce\"],\n expected_output[\"gce\"])\n self.assertAllGreater(metric.result()[\"beta\"], 0)\n self.assertAlmostEqual(metric_ls.result()[\"gce\"],\n expected_output[\"gce\"])\n self.assertAllGreater(metric_ls.result()[\"beta\"], 0)\n else:\n self.assertDictsAlmostEqual(metric.result(), expected_output)\n self.assertDictsAlmostEqual(metric_ls.result(), expected_output)\n\n @parameterized.parameters([(name,) for name in _UNCERTAINTY_METRICS])\n def test_binary_predictions_on_different_predictions(self, name):\n if \"gce\" == name[:3]:\n tempdir = self.create_tempdir().full_path\n # Need unmodified `name` to look up expected output.\n tempname = _with_tempdir(name, tempdir)\n else:\n tempname = name\n metric = rm.metrics.get(tempname, _get_info(2))\n metric_ls = rm.metrics.get(\n _with_labelset(tempname),\n rm.datasets.base.DatasetInfo(num_classes=3, appearing_classes=[0, 2]))\n metric.add_predictions(\n rm.common.types.ModelPredictions(predictions=[[.2, .8]]),\n metadata={\"label\": 1, \"element_id\": 1})\n metric_ls.add_predictions(\n rm.common.types.ModelPredictions(\n predictions=[_normalize([.2, .5, .8])]),\n metadata={\"label\": 2, \"element_id\": 1})\n metric.add_predictions(\n rm.common.types.ModelPredictions(predictions=[[.3, .7]]),\n metadata={\"label\": 0, \"element_id\": 2})\n metric_ls.add_predictions(\n rm.common.types.ModelPredictions(\n predictions=[_normalize([.3, .8, .7])]),\n metadata={\"label\": 0, \"element_id\": 2})\n expected_output = {\n _GCE_DEFAULT: {\"gce\": 0.51478150},\n _GCE_EXPLICIT_DEFAULT: {\"gce\": 0.5147815},\n _GCE_TEMP_SCALING_ALL: {\"gce\": 0.48694175, \"beta\": 0.4177706241607666},\n _GCE_ISOTONIC_REGR_ALL: {\"gce\": 0.0},\n _GCE_TEMP_SCALING_SPLIT: {\"gce\": 1.0, \"beta\": 0.4177706241607666},\n _GCE_ISOTONIC_REGR_SPLIT: {\"gce\": 1.0},\n \"ece\": {\"ece\": .45},\n \"nll\": {\"nll\": 0.5 * (-math.log(.8) - math.log(.3))},\n \"brier\": {\"brier\": 0.5 * (.2**2 + .2**2 + .7**2 + .7**2)},\n \"temperature_scaling\": {\"beta\": 0.4177706241607666},\n }[name]\n if name == _GCE_TEMP_SCALING_SPLIT:\n self.assertAlmostEqual(metric.result()[\"gce\"],\n expected_output[\"gce\"])\n self.assertAllGreater(metric.result()[\"beta\"], 0)\n self.assertAlmostEqual(metric_ls.result()[\"gce\"],\n expected_output[\"gce\"])\n self.assertAllGreater(metric_ls.result()[\"beta\"], 0)\n else:\n self.assertDictsAlmostEqual(metric.result(), expected_output)\n self.assertDictsAlmostEqual(metric_ls.result(), expected_output)\n\n @parameterized.parameters([(name,) for name in _UNCERTAINTY_METRICS])\n def 
test_tertiary_prediction(self, name):\n if \"gce\" == name[:3]:\n tempdir = self.create_tempdir().full_path\n # Need unmodified `name` to look up expected output.\n tempname = _with_tempdir(name, tempdir)\n else:\n tempname = name\n metric = rm.metrics.get(tempname, _get_info(3))\n metric_ls = rm.metrics.get(\n _with_labelset(tempname),\n rm.datasets.base.DatasetInfo(num_classes=4,\n appearing_classes=[1, 2, 3]))\n metric.add_predictions(\n rm.common.types.ModelPredictions(\n predictions=[[.2, .4, .4], [.5, .3, .2]]),\n metadata={\"label\": 2})\n metric_ls.add_predictions(\n rm.common.types.ModelPredictions(\n predictions=[_normalize([.5, .2, .4, .4]),\n _normalize([.9, .5, .3, .2])]),\n metadata={\"label\": 3, \"element_id\": 1})\n metric.add_predictions(\n rm.common.types.ModelPredictions(predictions=[[.8, .15, .05]]),\n metadata={\"label\": 1, \"element_id\": 2})\n metric_ls.add_predictions(\n rm.common.types.ModelPredictions(\n predictions=[_normalize([.4, .8, .15, .05])]),\n metadata={\"label\": 2, \"element_id\": 2})\n expected_output = {\n _GCE_DEFAULT:\n {\"gce\": 0.6174544},\n _GCE_EXPLICIT_DEFAULT:\n {\"gce\": 0.6174544},\n _GCE_TEMP_SCALING_ALL:\n {\"gce\": 0.5588576, \"beta\": -0.23755066096782684},\n _GCE_ISOTONIC_REGR_ALL:\n {\"gce\": 0.23570},\n _GCE_TEMP_SCALING_SPLIT:\n {\"gce\": 1.0, \"beta\": -0.23755066096782684},\n _GCE_ISOTONIC_REGR_SPLIT:\n {\"gce\": 1.0},\n \"ece\":\n {\"ece\": 0.575},\n \"nll\":\n {\"nll\": -0.5 * (math.log((.4 + .2) / 2) + math.log(.15))},\n \"brier\":\n {\"brier\": 0.5 * (((.2 + .5) / 2)**2 + ((.4 + .3) / 2)**2 +\n ((.6 + .8) / 2)**2 + .8**2 + .85**2 + .05**2)},\n \"temperature_scaling\": {\"beta\": -0.23755066096782684}\n }[name]\n if name == _GCE_TEMP_SCALING_SPLIT:\n self.assertAlmostEqual(metric.result()[\"gce\"],\n expected_output[\"gce\"])\n self.assertAllLess(metric.result()[\"beta\"], 0)\n self.assertAlmostEqual(metric_ls.result()[\"gce\"],\n expected_output[\"gce\"])\n self.assertAllLess(metric_ls.result()[\"beta\"], 0)\n else:\n self.assertDictsAlmostEqual(metric.result(), expected_output)\n self.assertDictsAlmostEqual(metric_ls.result(), expected_output)\n\n\nclass IsotonicRegressionTest(tf.test.TestCase):\n\n def test_IR_class(self):\n fit_predictions = np.array([[0.42610548, 0.41748077, 0.15641374],\n [0.44766216, 0.47721294, 0.0751249],\n [0.1862702, 0.15139402, 0.66233578],\n [0.05753544, 0.8561222, 0.08634236],\n [0.18697925, 0.29836466, 0.51465609]])\n fit_labels = np.array([0, 1, 2, 1, 2])\n scale_predictions = np.array([[0.1215652, 0.21415779, 0.66427702],\n [0.70361542, 0.21748313, 0.07890145],\n [0.46009217, 0.12798458, 0.41192324],\n [0.29240777, 0.31575023, 0.391842],\n [0.70334041, 0.13486871, 0.16179089]])\n scale_labels = np.array([2, 0, 0, 1, 0])\n pickle_path = self.create_tempdir()\n ir = rm.metrics.IsotonicRegression(pickle_path=pickle_path.full_path)\n ir.fit(fit_predictions, fit_labels)\n calibrated_predictions = ir.scale(scale_predictions)\n\n # Test calibration error should go down.\n ece_calibrated = rm.metrics.ExpectedCalibrationError()\n ece_calibrated.add_batch(calibrated_predictions, label=scale_labels)\n ece_scale = rm.metrics.ExpectedCalibrationError()\n ece_scale.add_batch(scale_predictions, label=scale_labels)\n self.assertLess(ece_calibrated.result()[\"ece\"], ece_scale.result()[\"ece\"])\n\n\nclass CRPSTest(tf.test.TestCase):\n\n def test_crps_increases_with_increasing_deviation_in_mean(self):\n \"\"\"Assert that the CRPS score increases when we increase the mean.\n \"\"\"\n tf.random.set_seed(1)\n\n 
nspacing = 10\n npredictive_samples = 10000\n ntrue_samples = 1000\n\n # (nspacing,npredictive_samples) samples from N(mu_i, 1)\n predictive_samples = tf.random.normal((nspacing, npredictive_samples))\n predictive_samples += tf.expand_dims(tf.linspace(0.0, 5.0, nspacing), 1)\n\n crps_samples = []\n for _ in range(ntrue_samples):\n labels = tf.random.normal((nspacing,))\n metric = rm.metrics.get(\"crps\", _get_info(None))\n rm.metrics.add_batch(metric, predictive_samples, label=labels)\n crps_sample = metric.result()[\"crps\"]\n crps_samples.append(crps_sample)\n\n crps_samples = tf.stack(crps_samples, 1)\n crps_average = tf.reduce_mean(crps_samples, axis=1)\n crps_average = crps_average.numpy()\n\n # The average should be monotonically increasing\n for i in range(1, len(crps_average)):\n crps_cur = crps_average[i]\n crps_prev = crps_average[i-1]\n self.assertLessEqual(crps_prev, crps_cur,\n msg=\"CRPS violates monotonicity in mean\")\n\n\nclass KerasECEMetricTest(parameterized.TestCase, tf.test.TestCase):\n\n _TEMPERATURES = [0.01, 1.0, 5.0]\n _NLABELS = [2, 4]\n _NSAMPLES = [8192, 16384]\n\n def _generate_perfect_calibration_logits(self, nsamples, nclasses):\n \"\"\"Generate well distributed and well calibrated probabilities.\n\n Args:\n nsamples: int, >= 1, number of samples to generate.\n nclasses: int, >= 2, number of classes.\n\n Returns:\n logits: Tensor, shape (nsamples, nclasses), tf.float32, unnormalized log\n probabilities (logits) of the probabilistic predictions.\n labels: Tensor, shape (nsamples,), tf.int32, the true class labels. Each\n element is in the range 0,..,nclasses-1.\n \"\"\"\n tf.random.set_seed(1)\n\n logits = 2.0*tf.random.normal((nsamples, nclasses))\n py = tfp.distributions.Categorical(logits=logits)\n labels = py.sample()\n\n return logits, labels\n\n def _generate_random_calibration_logits(self, nsamples, nclasses):\n \"\"\"Generate well distributed and poorly calibrated probabilities.\n\n Args:\n nsamples: int, >= 1, number of samples to generate.\n nclasses: int, >= 2, number of classes.\n\n Returns:\n logits: Tensor, shape (nsamples, nclasses), tf.float32, unnormalized log\n probabilities (logits) of the probabilistic predictions.\n labels: Tensor, shape (nsamples,), tf.int32, the true class labels. Each\n element is in the range 0,..,nclasses-1.\n \"\"\"\n tf.random.set_seed(1)\n\n logits = 2.0*tf.random.normal((nsamples, nclasses))\n py = tfp.distributions.Categorical(logits=logits)\n labels = py.sample()\n logits_other = 2.0*tf.random.normal((nsamples, nclasses))\n\n return logits_other, labels\n\n def test_binary_classification(self):\n num_bins = 10\n pred_probs = np.array([0.51, 0.45, 0.39, 0.66, 0.68, 0.29, 0.81, 0.85])\n # max_pred_probs: [0.51, 0.55, 0.61, 0.66, 0.68, 0.71, 0.81, 0.85]\n # pred_class: [1, 0, 0, 1, 1, 0, 1, 1]\n labels = np.array([0., 0., 0., 1., 0., 1., 1., 1.])\n n = len(pred_probs)\n\n # Bins for the max predicted probabilities are (0, 0.1), [0.1, 0.2), ...,\n # [0.9, 1) and are numbered starting at zero.\n bin_counts = np.array([0, 0, 0, 0, 0, 2, 3, 1, 2, 0])\n bin_correct_sums = np.array([0, 0, 0, 0, 0, 1, 2, 0, 2, 0])\n bin_prob_sums = np.array([0, 0, 0, 0, 0, 0.51 + 0.55, 0.61 + 0.66 + 0.68,\n 0.71, 0.81 + 0.85, 0])\n\n correct_ece = 0.\n bin_accs = np.array([0.] * num_bins)\n bin_confs = np.array([0.] 
* num_bins)\n for i in range(num_bins):\n if bin_counts[i] > 0:\n bin_accs[i] = bin_correct_sums[i] / bin_counts[i]\n bin_confs[i] = bin_prob_sums[i] / bin_counts[i]\n correct_ece += bin_counts[i] / n * abs(bin_accs[i] - bin_confs[i])\n\n metric = rm.metrics.uncertainty._KerasECEMetric(\n num_bins, name=\"ECE\", dtype=tf.float64)\n self.assertLen(metric.variables, 3)\n\n ece1 = metric(labels, pred_probs)\n self.assertAllClose(ece1, correct_ece)\n\n actual_bin_counts = tf.convert_to_tensor(metric.counts)\n actual_bin_correct_sums = tf.convert_to_tensor(metric.correct_sums)\n actual_bin_prob_sums = tf.convert_to_tensor(metric.prob_sums)\n self.assertAllEqual(bin_counts, actual_bin_counts)\n self.assertAllEqual(bin_correct_sums, actual_bin_correct_sums)\n self.assertAllClose(bin_prob_sums, actual_bin_prob_sums)\n\n # Test various types of input shapes.\n metric.reset_states()\n metric.update_state(labels[:2], pred_probs[:2])\n metric.update_state(labels[2:6].reshape(2, 2),\n pred_probs[2:6].reshape(2, 2))\n metric.update_state(labels[6:7], pred_probs[6:7])\n ece2 = metric(labels[7:, np.newaxis], pred_probs[7:, np.newaxis])\n ece3 = metric.result()\n self.assertAllClose(ece2, ece3)\n self.assertAllClose(ece3, correct_ece)\n\n actual_bin_counts = tf.convert_to_tensor(metric.counts)\n actual_bin_correct_sums = tf.convert_to_tensor(metric.correct_sums)\n actual_bin_prob_sums = tf.convert_to_tensor(metric.prob_sums)\n self.assertAllEqual(bin_counts, actual_bin_counts)\n self.assertAllEqual(bin_correct_sums, actual_bin_correct_sums)\n self.assertAllClose(bin_prob_sums, actual_bin_prob_sums)\n\n def test_binary_classification_binning_rule(self):\n num_bins = 10\n pred_probs = np.array([0.51, 0.45, 0.39, 0.66, 0.68, 0.29, 0.81, 0.85])\n # max_pred_probs: [0.51, 0.55, 0.61, 0.66, 0.68, 0.71, 0.81, 0.85]\n # pred_class: [1, 0, 0, 1, 1, 0, 1, 1]\n labels = np.array([0., 0., 0., 1., 0., 1., 1., 1.])\n n = len(pred_probs)\n\n custom_binning_score = np.array(\n [0.05, 0.11, 0.37, 0.52, 0.26, 0.47, 0.73, 0.23])\n # Bins for the max predicted probabilities are (0, 0.1), [0.1, 0.2), ...,\n # [0.9, 1) and are numbered starting at zero.\n bin_counts = np.array([1, 1, 2, 1, 1, 1, 0, 1, 0, 0])\n # pred_probs is correct at indices 1, 2, 3, 6, and 7.\n bin_correct_sums = np.array([0, 1, 1, 1, 0, 1, 0, 1, 0, 0])\n bin_prob_sums = np.array(\n [0.51, 1 - 0.45, 0.68 + 0.85, 1 - 0.39, 1 - 0.29, 0.66, 0, 0.81, 0, 0])\n\n correct_ece = 0.\n bin_accs = np.array([0.] * num_bins)\n bin_confs = np.array([0.] 
* num_bins)\n for i in range(num_bins):\n if bin_counts[i] > 0:\n bin_accs[i] = bin_correct_sums[i] / bin_counts[i]\n bin_confs[i] = bin_prob_sums[i] / bin_counts[i]\n correct_ece += bin_counts[i] / n * abs(bin_accs[i] - bin_confs[i])\n\n metric = rm.metrics.uncertainty._KerasECEMetric(\n num_bins, name=\"RebinnedECE\", dtype=tf.float64)\n self.assertLen(metric.variables, 3)\n\n ece1 = metric(labels, pred_probs, custom_binning_score=custom_binning_score)\n self.assertAllClose(ece1, correct_ece)\n\n actual_bin_counts = tf.convert_to_tensor(metric.counts)\n actual_bin_correct_sums = tf.convert_to_tensor(metric.correct_sums)\n actual_bin_prob_sums = tf.convert_to_tensor(metric.prob_sums)\n self.assertAllEqual(bin_counts, actual_bin_counts)\n self.assertAllEqual(bin_correct_sums, actual_bin_correct_sums)\n self.assertAllClose(bin_prob_sums, actual_bin_prob_sums)\n\n # Test various types of input shapes.\n metric.reset_states()\n metric.update_state(\n labels[:1],\n pred_probs[:1],\n custom_binning_score=custom_binning_score[:1])\n metric.update_state(\n labels[1:5].reshape(2, 2),\n pred_probs[1:5].reshape(2, 2),\n custom_binning_score=custom_binning_score[1:5].reshape(2, 2))\n metric.update_state(\n labels[5:7],\n pred_probs[5:7],\n custom_binning_score=custom_binning_score[5:7])\n ece2 = metric(\n labels[7:, np.newaxis],\n pred_probs[7:, np.newaxis],\n custom_binning_score=custom_binning_score[7:, np.newaxis])\n ece3 = metric.result()\n self.assertAllClose(ece2, ece3)\n self.assertAllClose(ece3, correct_ece)\n\n actual_bin_counts = tf.convert_to_tensor(metric.counts)\n actual_bin_correct_sums = tf.convert_to_tensor(metric.correct_sums)\n actual_bin_prob_sums = tf.convert_to_tensor(metric.prob_sums)\n self.assertAllEqual(bin_counts, actual_bin_counts)\n self.assertAllEqual(bin_correct_sums, actual_bin_correct_sums)\n self.assertAllClose(bin_prob_sums, actual_bin_prob_sums)\n\n def test_binary_classification_keras_model(self):\n num_bins = 10\n pred_probs = np.array([0.51, 0.45, 0.39, 0.66, 0.68, 0.29, 0.81, 0.85])\n # max_pred_probs: [0.51, 0.55, 0.61, 0.66, 0.68, 0.71, 0.81, 0.85]\n # pred_class: [1, 0, 0, 1, 1, 0, 1, 1]\n labels = np.array([0., 0., 0., 1., 0., 1., 1., 1.])\n n = len(pred_probs)\n\n # Bins for the max predicted probabilities are (0, 0.1), [0.1, 0.2), ...,\n # [0.9, 1) and are numbered starting at zero.\n bin_counts = [0, 0, 0, 0, 0, 2, 3, 1, 2, 0]\n bin_correct_sums = [0, 0, 0, 0, 0, 1, 2, 0, 2, 0]\n bin_prob_sums = [0, 0, 0, 0, 0, 0.51 + 0.55, 0.61 + 0.66 + 0.68, 0.71,\n 0.81 + 0.85, 0]\n\n correct_ece = 0.\n bin_accs = [0.] * num_bins\n bin_confs = [0.] 
* num_bins\n for i in range(num_bins):\n if bin_counts[i] > 0:\n bin_accs[i] = bin_correct_sums[i] / bin_counts[i]\n bin_confs[i] = bin_prob_sums[i] / bin_counts[i]\n correct_ece += bin_counts[i] / n * abs(bin_accs[i] - bin_confs[i])\n\n metric = rm.metrics.uncertainty._KerasECEMetric(num_bins, name=\"ECE\")\n self.assertLen(metric.variables, 3)\n\n model = tf.keras.models.Sequential([tf.keras.layers.Lambda(lambda x: 1*x)])\n model.compile(loss=\"binary_crossentropy\", optimizer=\"sgd\", metrics=[metric])\n outputs = model.predict(pred_probs)\n self.assertAllClose(pred_probs, outputs)\n _, ece = model.evaluate(pred_probs, labels)\n self.assertAllClose(ece, correct_ece)\n\n actual_bin_counts = tf.convert_to_tensor(metric.counts)\n actual_bin_correct_sums = tf.convert_to_tensor(metric.correct_sums)\n actual_bin_prob_sums = tf.convert_to_tensor(metric.prob_sums)\n self.assertAllEqual(bin_counts, actual_bin_counts)\n self.assertAllEqual(bin_correct_sums, actual_bin_correct_sums)\n self.assertAllClose(bin_prob_sums, actual_bin_prob_sums)\n\n def test_ece_multiclass_classification(self):\n num_bins = 10\n pred_probs = [\n [0.31, 0.32, 0.27],\n [0.37, 0.33, 0.30],\n [0.30, 0.31, 0.39],\n [0.61, 0.38, 0.01],\n [0.10, 0.65, 0.25],\n [0.91, 0.05, 0.04],\n ]\n # max_pred_probs: [0.32, 0.37, 0.39, 0.61, 0.65, 0.91]\n # pred_class: [1, 0, 2, 0, 1, 0]\n labels = [1., 0, 0., 1., 0., 0.]\n n = len(pred_probs)\n\n # Bins for the max predicted probabilities are (0, 0.1), [0.1, 0.2), ...,\n # [0.9, 1) and are numbered starting at zero.\n bin_counts = [0, 0, 0, 3, 0, 0, 2, 0, 0, 1]\n bin_correct_sums = [0, 0, 0, 2, 0, 0, 0, 0, 0, 1]\n bin_prob_sums = [0, 0, 0, 0.32 + 0.37 + 0.39, 0, 0, 0.61 + 0.65, 0, 0, 0.91]\n\n correct_ece = 0.\n bin_accs = [0.] * num_bins\n bin_confs = [0.] 
* num_bins\n for i in range(num_bins):\n if bin_counts[i] > 0:\n bin_accs[i] = bin_correct_sums[i] / bin_counts[i]\n bin_confs[i] = bin_prob_sums[i] / bin_counts[i]\n correct_ece += bin_counts[i] / n * abs(bin_accs[i] - bin_confs[i])\n\n metric = rm.metrics.uncertainty._KerasECEMetric(\n num_bins, name=\"ECE\", dtype=tf.float64)\n self.assertLen(metric.variables, 3)\n\n metric.update_state(labels[:4], pred_probs[:4])\n ece1 = metric(labels[4:], pred_probs[4:])\n ece2 = metric.result()\n self.assertAllClose(ece1, ece2)\n self.assertAllClose(ece2, correct_ece)\n\n actual_bin_counts = tf.convert_to_tensor(metric.counts)\n actual_bin_correct_sums = tf.convert_to_tensor(metric.correct_sums)\n actual_bin_prob_sums = tf.convert_to_tensor(metric.prob_sums)\n self.assertAllEqual(bin_counts, actual_bin_counts)\n self.assertAllEqual(bin_correct_sums, actual_bin_correct_sums)\n self.assertAllClose(bin_prob_sums, actual_bin_prob_sums)\n\n\nclass BrierDecompositionTest(parameterized.TestCase, tf.test.TestCase):\n _TEMPERATURES = [0.01, 1.0, 5.0]\n _NLABELS = [2, 4]\n _NSAMPLES = [8192, 16384]\n\n @parameterized.parameters(\n itertools.product(_TEMPERATURES, _NLABELS, _NSAMPLES)\n )\n def test_brier_decomposition(self, temperature, nlabels, nsamples):\n \"\"\"Test the accuracy of the estimated Brier decomposition.\"\"\"\n tf.random.set_seed(1)\n logits = tf.random.normal((nsamples, nlabels)) / temperature\n labels = tf.random.uniform((nsamples,), maxval=nlabels, dtype=tf.int32)\n\n metric = rm.metrics.get(\"brier_decomposition\", _get_info(nlabels))\n rm.metrics.add_batch(metric,\n tf.nn.softmax(logits, axis=-1).numpy(),\n label=labels.numpy())\n result = metric.result()\n uncertainty = result[\"uncertainty\"]\n resolution = result[\"resolution\"]\n reliability = result[\"reliability\"]\n\n # Recover an estimate of the Brier score from the decomposition\n brier = uncertainty - resolution + reliability\n\n # Estimate Brier score directly-\n metric = rm.metrics.get(\"brier\", _get_info(nlabels))\n rm.metrics.add_batch(metric,\n tf.nn.softmax(logits, axis=-1).numpy(),\n label=labels.numpy())\n brier_direct = metric.result()[\"brier\"]\n\n logging.info(\"Brier, n=%d k=%d T=%.2f, Unc %.4f - Res %.4f + Rel %.4f = \"\n \"Brier %.4f, Brier-direct %.4f\",\n nsamples, nlabels, temperature,\n uncertainty, resolution, reliability,\n brier, brier_direct)\n\n self.assertGreaterEqual(resolution, 0.0, msg=\"Brier resolution negative\")\n self.assertGreaterEqual(reliability, 0.0, msg=\"Brier reliability negative\")\n self.assertAlmostEqual(\n brier + 1, brier_direct, delta=1.0e-2,\n msg=\"Brier from decomposition (%.4f) and Brier direct (%.4f) disagree \"\n \"beyond estimation error.\" % (brier, brier_direct))\n\n\nclass SemiParametricCalibrationErrorTest(tf.test.TestCase):\n\n def test_zero_one(self):\n n = 2000\n probs = np.random.rand(n)\n calibration_error = 0.7 * probs ** 2 + 0.3 * probs\n # Simulate outcomes according to this model.\n labels = (np.random.rand(n) <= calibration_error).astype(np.float)\n metric = rm.metrics.get(\"semiparametric_ce(smoothing='spline')\",\n _get_info(None))\n rm.metrics.add_batch(metric, probs, label=labels)\n est = metric.result()[\"ce\"]\n # est = ce.rms_calibration_error(probs, labels)\n self.assertGreaterEqual(est, 0)\n self.assertLessEqual(est, 1)\n\n def test_simple_call(self):\n n = 2000\n probs = np.random.rand(n)\n calibration_error = 0.7 * probs ** 2 + 0.3 * probs\n # Simulate outcomes according to this model.\n labels = (np.random.rand(n) <= 
calibration_error).astype(np.float)\n metric = rm.metrics.get(\"semiparametric_ce(smoothing='spline')\",\n _get_info(None))\n rm.metrics.add_batch(metric, probs, label=labels)\n est = metric.result()[\"ce\"]\n self.assertGreaterEqual(est, 0)\n self.assertLessEqual(est, 1)\n\n def test_conf_int(self):\n n = 2000\n probs = np.random.rand(n)\n calibration_error = 0.7 * probs ** 2 + 0.3 * probs\n # Simulate outcomes according to this model.\n labels = (np.random.rand(n) <= calibration_error).astype(np.float)\n metric = rm.metrics.get(\"semiparametric_ce_ci(smoothing='spline')\",\n _get_info(None))\n rm.metrics.add_batch(metric, probs, label=labels)\n results = metric.result()\n self.assertGreaterEqual(results[\"low\"], 0)\n self.assertLessEqual(results[\"low\"], 1)\n self.assertGreaterEqual(results[\"high\"], 0)\n self.assertLessEqual(results[\"high\"], 1)\n\n def test_mean_plug_in(self):\n n = 2000\n probs = np.random.rand(n)\n calibration_error = 0.7 * probs ** 2 + 0.3 * probs\n # Continuous outcomes previously weren't allowed because StratifiedKFold\n # only allows discrete outcomes. Useful for testing to have an oracle\n # that passes in true calibration probabilities as outcomes, which are\n # continuous. Therefore, pass in a KFold object.\n metric = rm.metrics.SemiParametricCalibrationError(\n _get_info(None),\n smoothing=\"spline\",\n fold_generator=sklearn.model_selection.KFold(5, shuffle=True))\n rm.metrics.add_batch(metric, probs, label=calibration_error)\n est = metric.result()[\"ce\"]\n self.assertGreaterEqual(est, 0)\n self.assertLessEqual(est, 1)\n\n\ndef _get_adaptive_bins_test_parameters():\n np.random.seed(0)\n predictions = np.random.rand(500)\n # Test small number of bins:\n for num_bins in range(1, 50):\n yield {\"predictions\": predictions, \"num_bins\": num_bins}\n # Test large numbers of bins, including ones where some bins are empty:\n for num_bins in range(495, 505):\n yield {\"predictions\": predictions, \"num_bins\": num_bins}\n # Test where most bins are empty:\n yield {\"predictions\": np.random.rand(5), \"num_bins\": 30}\n\n\ndef _get_bin_counts(predictions, num_bins):\n bin_edges = rm.metrics.uncertainty._get_adaptive_bins(predictions, num_bins)\n # Bins should work with np.digitize:\n bin_indices = np.digitize(predictions, bin_edges)\n return np.bincount(bin_indices)\n\n\nclass GetAdaptiveBinsTest(parameterized.TestCase, tf.test.TestCase):\n\n @parameterized.parameters(_get_adaptive_bins_test_parameters())\n def test_number_of_bins(self, predictions, num_bins):\n bin_counts = _get_bin_counts(predictions, num_bins)\n self.assertLen(bin_counts, num_bins)\n\n @parameterized.parameters(_get_adaptive_bins_test_parameters())\n def test_bins_include_all_datapoints(self, predictions, num_bins):\n bin_counts = _get_bin_counts(predictions, num_bins)\n self.assertLen(\n predictions, sum(bin_counts),\n msg=\"Sum of bin counts does not match length of predictions \"\n f\"({len(predictions)}): {bin_counts}\")\n\n @parameterized.parameters(_get_adaptive_bins_test_parameters())\n def test_bins_have_similar_size(self, predictions, num_bins):\n bin_counts = _get_bin_counts(predictions, num_bins)\n self.assertAlmostEqual(\n np.max(bin_counts), np.min(bin_counts), delta=1,\n msg=f\"Bin counts should differ by at most 1 but are {bin_counts}\")\n\n\nclass GeneralCalibrationErrorTest(parameterized.TestCase, tf.test.TestCase):\n\n def test_consistency(self):\n probs = np.array([[0.42610548, 0.41748077, 0.15641374],\n [0.44766216, 0.47721294, 0.0751249],\n [0.1862702, 0.15139402, 
0.66233578],\n [0.05753544, 0.8561222, 0.08634236],\n [0.18697925, 0.29836466, 0.51465609]])\n labels = np.array([0, 1, 2, 1, 2])\n metric = rm.metrics.GeneralCalibrationError(\n _get_info(3),\n num_bins=30, binning_scheme=\"even\",\n class_conditional=False, max_prob=True, norm=\"l1\", threshold=0.)\n rm.metrics.add_batch(metric, probs, label=labels)\n self.assertAlmostEqual(metric.result()[\"gce\"], 0.412713502)\n\n def test_sweep(self):\n probs = np.array([[0.42610548, 0.41748077, 0.15641374],\n [0.44766216, 0.47721294, 0.0751249],\n [0.1862702, 0.15139402, 0.66233578],\n [0.05753544, 0.8561222, 0.08634236],\n [0.18697925, 0.29836466, 0.51465609]])\n labels = np.array([0, 1, 2, 1, 2])\n metric = rm.metrics.GeneralCalibrationError(\n _get_info(3), num_bins=None, binning_scheme=\"even\",\n class_conditional=False, max_prob=True, norm=\"l1\", threshold=0.)\n rm.metrics.add_batch(metric, probs, label=labels)\n self.assertAlmostEqual(metric.result()[\"gce\"], 0.412713502)\n\n def test_binary_1d(self):\n probs = np.array([.91, .32, .66, .67, .57, .98, .41, .19])\n labels = np.array([1, 0, 1, 1, 0, 1, 0, 0])\n metric = rm.metrics.GeneralCalibrationError(\n _get_info(2), num_bins=30, binning_scheme=\"even\",\n class_conditional=False, max_prob=True, norm=\"l1\", threshold=0.)\n rm.metrics.add_batch(metric, probs, label=labels)\n self.assertAlmostEqual(metric.result()[\"gce\"], 0.18124999999999997)\n\n def test_binary_2d(self):\n probs = np.array(\n [.91, .32, .66, .67, .57, .98, .41, .19]).reshape(8, 1)\n labels = np.array([1, 0, 1, 1, 0, 1, 0, 0])\n metric = rm.metrics.GeneralCalibrationError(\n _get_info(2), num_bins=30, binning_scheme=\"even\",\n class_conditional=False, max_prob=True, norm=\"l1\", threshold=0.)\n rm.metrics.add_batch(metric, probs, label=labels)\n self.assertAlmostEqual(metric.result()[\"gce\"], 0.18124999999999997)\n\n def test_correctness_ece(self):\n num_bins = 10\n pred_probs = [\n [0.31, 0.32, 0.27],\n [0.37, 0.33, 0.30],\n [0.30, 0.31, 0.39],\n [0.61, 0.38, 0.01],\n [0.10, 0.65, 0.25],\n [0.91, 0.05, 0.04],\n ]\n # max_pred_probs: [0.32, 0.37, 0.39, 0.61, 0.65, 0.91]\n # pred_class: [1, 0, 2, 0, 1, 0]\n labels = [1., 0, 0., 1., 0., 0.]\n n = len(pred_probs)\n\n # Bins for the max predicted probabilities are (0, 0.1), [0.1, 0.2), ...,\n # [0.9, 1) and are numbered starting at zero.\n bin_counts = [0, 0, 0, 3, 0, 0, 2, 0, 0, 1]\n bin_correct_sums = [0, 0, 0, 2, 0, 0, 0, 0, 0, 1]\n bin_prob_sums = [0, 0, 0, 0.32 + 0.37 + 0.39, 0, 0, 0.61 + 0.65, 0, 0, 0.91]\n\n correct_ece = 0.\n bin_accs = [0.] * num_bins\n bin_confs = [0.] 
* num_bins\n for i in range(num_bins):\n if bin_counts[i] > 0:\n bin_accs[i] = bin_correct_sums[i] / bin_counts[i]\n bin_confs[i] = bin_prob_sums[i] / bin_counts[i]\n correct_ece += bin_counts[i] / n * abs(bin_accs[i] - bin_confs[i])\n\n metric_ece = rm.metrics.get(\"ece\", _get_info(2))\n rm.metrics.add_batch(metric_ece, pred_probs, label=[int(i) for i in labels])\n self.assertAlmostEqual(correct_ece, metric_ece.result()[\"ece\"])\n\n def test_correctness_rmsce(self):\n num_bins = 10\n pred_probs = [\n [0.31, 0.32, 0.27],\n [0.37, 0.33, 0.30],\n [0.30, 0.31, 0.39],\n [0.61, 0.38, 0.01],\n [0.10, 0.65, 0.25],\n [0.91, 0.05, 0.04],\n ]\n # max_pred_probs: [0.32, 0.37, 0.39, 0.61, 0.65, 0.91]\n # pred_class: [1, 0, 2, 0, 1, 0]\n labels = [1., 0, 0., 1., 0., 0.]\n n = len(pred_probs)\n\n # Adaptive bins, so every datapoint is on its own:\n bin_counts = [1, 0, 1, 0, 1, 1, 0, 1, 0, 1]\n bin_correct_sums = [1, 0, 1, 0, 0, 0, 0, 0, 0, 1]\n bin_prob_sums = [0.32, 0, 0.37, 0, 0.39, 0.61, 0, 0.65, 0, 0.91]\n\n correct_ece = 0.\n bin_accs = [0.] * num_bins\n bin_confs = [0.] * num_bins\n for i in range(num_bins):\n if bin_counts[i] > 0:\n bin_accs[i] = bin_correct_sums[i] / bin_counts[i]\n bin_confs[i] = bin_prob_sums[i] / bin_counts[i]\n correct_ece += bin_counts[i] / n * np.square(bin_accs[i] - bin_confs[i])\n\n correct_rmsce = np.sqrt(correct_ece)\n\n metric_rmsce = rm.metrics.get(\"rmsce\", _get_info(2))\n rm.metrics.add_batch(\n metric_rmsce, pred_probs, label=[int(i) for i in labels])\n\n self.assertAlmostEqual(correct_rmsce, metric_rmsce.result()[\"gce\"])\n\n def generate_params(): # pylint: disable=no-method-argument\n # \"self\" object cannot be passes to parameterized.\n names = [\"binning_scheme\", \"max_probs\", \"class_conditional\",\n \"threshold\", \"norm\", \"num_bins\"]\n parameters = [[\"even\", \"adaptive\"], [True, False], [True, False],\n [0.0, 0.01], [\"l1\", \"l2\"], [30, None]]\n list(itertools.product(*parameters))\n count = 0\n dict_list = []\n for params in itertools.product(*parameters):\n param_dict = {}\n for i, v in enumerate(params):\n param_dict[names[i]] = v\n count += 1\n dict_list.append(param_dict)\n return dict_list\n\n @parameterized.parameters(generate_params())\n def test_generatable_metrics(self, class_conditional, threshold, max_probs,\n norm, binning_scheme, num_bins):\n probs = np.array([[0.42610548, 0.41748077, 0.15641374, 0],\n [0.44766216, 0.47721294, 0.0751249, 0],\n [0.1862702, 0.15139402, 0.66233578, 0],\n [0.05753544, 0.8561222, 0.08634236, 0],\n [0.18697925, 0.29836466, 0.51465609, 0]])\n\n labels = np.array([0, 1, 2, 1, 2])\n metric = rm.metrics.GeneralCalibrationError(\n _get_info(4),\n binning_scheme=binning_scheme, max_prob=max_probs,\n class_conditional=class_conditional, threshold=threshold, norm=norm,\n num_bins=num_bins)\n rm.metrics.add_batch(metric, probs, label=labels)\n calibration_error = metric.result()[\"gce\"]\n self.assertGreaterEqual(calibration_error, 0)\n self.assertLessEqual(calibration_error, 1)\n\n def test_get_bin_edges(self):\n bin_edges = rm.metrics.uncertainty._get_bin_edges([0, 0, 1, 1, 2, 2],\n [.2, .4, .6, .7, .9, .95])\n self.assertAlmostEqual(bin_edges, [.5, .8, .95])\n\n def test_monotonic(self):\n probs = np.array([[0.42610548, 0.41748077, 0.15641374],\n [0.44766216, 0.47721294, 0.0751249],\n [0.1862702, 0.15139402, 0.66233578],\n [0.05753544, 0.8561222, 0.08634236],\n [0.18697925, 0.29836466, 0.51465609]])\n labels = np.array([0, 1, 2, 1, 2])\n\n bin_assignment = [0, 0, 1, 2, 1]\n is_monotonic = 
rm.metrics.uncertainty._is_monotonic(\n 3, bin_assignment, labels)\n self.assertEqual(is_monotonic, False)\n\n bin_assign = rm.metrics.uncertainty._em_monotonic_sweep(\n probs.max(axis=1), labels)\n self.assertListEqual(bin_assign.tolist(), [0, 0, 1, 1, 0])\n\n bin_assign = rm.metrics.uncertainty._ew_monotonic_sweep(\n probs.max(axis=1), labels)\n self.assertListEqual(bin_assign.tolist(), [0, 0, 1, 1, 1])\n\n\nclass OracleCollaborativeAccuracyTest(parameterized.TestCase, tf.test.TestCase):\n\n def setUp(self):\n super().setUp()\n\n self.num_bins = 10\n self.fraction = 0.4\n self.dtype = \"float32\"\n\n self.pred_probs = np.array([0.51, 0.45, 0.39, 0.66, 0.68, 0.29, 0.81, 0.85],\n dtype=self.dtype)\n # max_pred_probs: [0.51, 0.55, 0.61, 0.66, 0.68, 0.71, 0.81, 0.85]\n # pred_class: [1, 0, 0, 1, 1, 0, 1, 1]\n self.labels = np.array([0., 0., 0., 1., 0., 1., 1., 1.], dtype=self.dtype)\n\n def test_oracle_collaborative_accuracy(self):\n # Bins for the max predicted probabilities are (0, 0.1), [0.1, 0.2), ...,\n # [0.9, 1) and are numbered starting at zero.\n bin_counts = np.array([0, 0, 0, 0, 0, 2, 3, 1, 2, 0])\n bin_correct_sums = np.array([0, 0, 0, 0, 0, 1, 2, 0, 2, 0])\n bin_prob_sums = np.array(\n [0, 0, 0, 0, 0, 0.51 + 0.55, 0.61 + 0.66 + 0.68, 0.71, 0.81 + 0.85, 0])\n # `(3 - 1)` refers to the rest examples in this bin\n # (minus the examples sent to the moderators), while `2/3` is\n # the accuracy in this bin.\n bin_collab_correct_sums = np.array(\n [0, 0, 0, 0, 0, 2, 1 * 1.0 + (3 - 1) * (2 / 3), 0, 2, 0])\n\n correct_acc = np.sum(bin_collab_correct_sums) / np.sum(bin_counts)\n\n metric = rm.metrics.uncertainty._KerasOracleCollaborativeAccuracyMetric(\n self.fraction, self.num_bins, name=\"collab_acc\", dtype=tf.float64)\n\n acc = metric(self.labels, self.pred_probs)\n\n actual_bin_counts = tf.convert_to_tensor(metric.counts)\n actual_bin_correct_sums = tf.convert_to_tensor(metric.correct_sums)\n actual_bin_prob_sums = tf.convert_to_tensor(metric.prob_sums)\n actual_bin_bin_collab_correct_sums = tf.convert_to_tensor(\n metric.collab_correct_sums)\n\n self.assertAllEqual(bin_counts, actual_bin_counts)\n self.assertAllEqual(bin_correct_sums, actual_bin_correct_sums)\n self.assertAllClose(bin_prob_sums, actual_bin_prob_sums)\n self.assertAllClose(bin_collab_correct_sums,\n actual_bin_bin_collab_correct_sums)\n\n self.assertAllClose(acc, correct_acc)\n\n def test_wrapped_oracle_collaborative_accuracy(self):\n # Bins for the max predicted probabilities are (0, 0.1), [0.1, 0.2), ...,\n # [0.9, 1) and are numbered starting at zero.\n bin_counts = np.array([0, 0, 0, 0, 0, 2, 3, 1, 2, 0])\n # `(3 - 1)` refers to the rest examples in this bin\n # (minus the examples sent to the moderators), while `2/3` is\n # the accuracy in this bin.\n bin_collab_correct_sums = np.array(\n [0, 0, 0, 0, 0, 2, 1 * 1.0 + (3 - 1) * (2 / 3), 0, 2, 0])\n\n correct_acc = np.sum(bin_collab_correct_sums) / np.sum(bin_counts)\n\n wrapped_metric = rm.metrics.OracleCollaborativeAccuracy(\n fraction=self.fraction, num_bins=self.num_bins)\n\n wrapped_metric.add_batch(self.pred_probs, label=self.labels)\n wrapped_metric_acc = wrapped_metric.result()[\"collaborative_accuracy\"]\n\n self.assertAllClose(wrapped_metric_acc, correct_acc)\n\n def test_wrapped_oracle_collaborative_accuracy_custom_binning_score(self):\n binning_score = tf.abs(self.pred_probs - 0.5)\n\n bin_counts = np.array([2, 3, 1, 2, 0, 0, 0, 0, 0, 0], dtype=self.dtype)\n bin_correct_sums = np.array([1, 2, 0, 2, 0, 0, 0, 0, 0, 0],\n dtype=self.dtype)\n bin_prob_sums 
= np.array(\n [0.51 + 0.55, 0.61 + 0.66 + 0.68, 0.71, 0.81 + 0.85, 0, 0, 0, 0, 0, 0],\n dtype=self.dtype)\n # `(3 - 1)` refers to the rest examples in this bin\n # (minus the examples sent to the moderators), while `2/3` is\n # the accuracy in this bin.\n bin_collab_correct_sums = np.array(\n [2, 1 * 1.0 + (3 - 1) * (2 / 3), 0, 2, 0, 0, 0, 0, 0, 0],\n dtype=self.dtype)\n\n correct_acc = np.sum(bin_collab_correct_sums) / np.sum(bin_counts)\n\n metric = rm.metrics.OracleCollaborativeAccuracy(\n fraction=self.fraction, num_bins=self.num_bins)\n\n metric.add_batch(\n self.pred_probs, label=self.labels, custom_binning_score=binning_score)\n acc = metric.result()[\"collaborative_accuracy\"]\n\n actual_bin_counts = tf.convert_to_tensor(metric._metric.counts)\n actual_bin_correct_sums = tf.convert_to_tensor(metric._metric.correct_sums)\n actual_bin_prob_sums = tf.convert_to_tensor(metric._metric.prob_sums)\n actual_bin_bin_collab_correct_sums = tf.convert_to_tensor(\n metric._metric.collab_correct_sums)\n\n self.assertAllEqual(bin_counts, actual_bin_counts)\n self.assertAllEqual(bin_correct_sums, actual_bin_correct_sums)\n self.assertAllClose(bin_prob_sums, actual_bin_prob_sums)\n self.assertAllClose(bin_collab_correct_sums,\n actual_bin_bin_collab_correct_sums)\n\n self.assertAllClose(acc, correct_acc)\n\n\nclass OracleCollaborativeAUCTest(tf.test.TestCase):\n\n def setUp(self):\n super().setUp()\n\n self.y_true = np.array([0., 1., 0., 1., 0., 1., 1., 0.])\n self.y_pred = np.array([0.31, 0.42, 0.33, 0.84, 0.75, 0.86, 0.57, 0.68])\n\n def test_no_examples_ROC(self):\n num_thresholds = 7\n num_bins = 14\n oracle_auc_roc = rm.metrics.uncertainty._KerasOracleCollaborativeAUCMetric(\n oracle_fraction=0.5,\n num_thresholds=num_thresholds,\n num_bins=14,\n curve=\"ROC\")\n result = oracle_auc_roc.result()\n\n self.assertAllClose(oracle_auc_roc.binned_true_positives,\n tf.zeros([num_thresholds, num_bins]))\n self.assertAllClose(oracle_auc_roc.true_positives,\n tf.zeros([num_thresholds]))\n self.assertEqual(result, 0.)\n\n def test_no_examples_PR(self):\n num_thresholds = 8\n num_bins = 23\n oracle_auc_pr = rm.metrics.uncertainty._KerasOracleCollaborativeAUCMetric(\n oracle_fraction=0.5,\n num_thresholds=num_thresholds,\n curve=\"PR\",\n num_bins=num_bins)\n result = oracle_auc_pr.result()\n\n self.assertAllClose(oracle_auc_pr.binned_true_positives,\n tf.zeros([num_thresholds, num_bins]))\n self.assertAllClose(oracle_auc_pr.true_positives,\n tf.zeros([num_thresholds]))\n self.assertEqual(result, 0.)\n\n def test_reduces_to_AUC_zero_oracle_fraction(self):\n num_thresholds = 11\n oracle_auc = rm.metrics.uncertainty._KerasOracleCollaborativeAUCMetric(\n oracle_fraction=0., num_thresholds=num_thresholds, num_bins=7)\n regular_auc = tf.keras.metrics.AUC(num_thresholds=num_thresholds)\n\n oracle_auc.update_state(self.y_true, self.y_pred)\n regular_auc.update_state(self.y_true, self.y_pred)\n\n self.assertAllClose(\n tf.reduce_sum(oracle_auc.binned_true_positives, axis=1),\n regular_auc.true_positives)\n self.assertAllClose(\n tf.reduce_sum(oracle_auc.binned_true_negatives, axis=1),\n regular_auc.true_negatives)\n self.assertAllClose(\n tf.reduce_sum(oracle_auc.binned_false_positives, axis=1),\n regular_auc.false_positives)\n self.assertAllClose(\n tf.reduce_sum(oracle_auc.binned_false_negatives, axis=1),\n regular_auc.false_negatives)\n\n oracle_auc_result = oracle_auc.result()\n regular_auc_result = regular_auc.result()\n\n self.assertAllClose(oracle_auc.true_positives, regular_auc.true_positives)\n 
self.assertAllClose(oracle_auc.true_negatives, regular_auc.true_negatives)\n self.assertAllClose(oracle_auc.false_positives, regular_auc.false_positives)\n self.assertAllClose(oracle_auc.false_negatives, regular_auc.false_negatives)\n self.assertEqual(oracle_auc_result, regular_auc_result)\n\n def test_ROC_perfect_AUC_with_unit_oracle_fraction(self):\n num_thresholds = 11\n curve = \"ROC\"\n oracle_auc = rm.metrics.uncertainty._KerasOracleCollaborativeAUCMetric(\n oracle_fraction=1.,\n num_thresholds=num_thresholds,\n num_bins=7,\n curve=curve)\n\n result = oracle_auc(self.y_true, self.y_pred)\n self.assertAllClose(oracle_auc.true_positives,\n [sum(self.y_true == 1)] * (num_thresholds - 1) + [0])\n self.assertAllClose(oracle_auc.true_negatives,\n [0] + [sum(self.y_true == 0)] * (num_thresholds - 1))\n self.assertAllClose(oracle_auc.false_positives,\n [sum(self.y_true == 0)] + [0] * (num_thresholds - 1))\n self.assertAllClose(oracle_auc.false_negatives,\n [0] * (num_thresholds - 1) + [sum(self.y_true == 1)])\n\n self.assertEqual(result, 1.)\n\n def test_PR_perfect_AUC_with_unit_oracle_fraction(self):\n num_thresholds = 11\n curve = \"PR\"\n oracle_auc = rm.metrics.uncertainty._KerasOracleCollaborativeAUCMetric(\n oracle_fraction=1.,\n num_thresholds=num_thresholds,\n num_bins=7,\n curve=curve)\n\n result = oracle_auc(self.y_true, self.y_pred)\n self.assertAllClose(oracle_auc.true_positives,\n [sum(self.y_true == 1)] * (num_thresholds - 1) + [0])\n self.assertAllClose(oracle_auc.true_negatives,\n [0] + [sum(self.y_true == 0)] * (num_thresholds - 1))\n self.assertAllClose(oracle_auc.false_positives,\n [sum(self.y_true == 0)] + [0] * (num_thresholds - 1))\n self.assertAllClose(oracle_auc.false_negatives,\n [0] * (num_thresholds - 1) + [sum(self.y_true == 1)])\n\n self.assertEqual(result, 1.)\n\n def test_reset_state(self):\n num_thresholds = 12\n num_bins = 8\n oracle_auc = rm.metrics.uncertainty._KerasOracleCollaborativeAUCMetric(\n oracle_fraction=0.37, num_thresholds=num_thresholds, num_bins=num_bins)\n\n oracle_auc.update_state(self.y_true, self.y_pred)\n _ = oracle_auc.result()\n\n oracle_auc.reset_state()\n\n self.assertAllClose(oracle_auc.binned_true_positives,\n tf.zeros((num_thresholds, num_bins)))\n self.assertAllClose(oracle_auc.binned_true_negatives,\n tf.zeros((num_thresholds, num_bins)))\n self.assertAllClose(oracle_auc.binned_true_negatives,\n tf.zeros((num_thresholds, num_bins)))\n self.assertAllClose(oracle_auc.binned_false_negatives,\n tf.zeros((num_thresholds, num_bins)))\n\n self.assertAllClose(oracle_auc.true_positives, tf.zeros((num_thresholds,)))\n self.assertAllClose(oracle_auc.true_negatives, tf.zeros((num_thresholds,)))\n self.assertAllClose(oracle_auc.false_positives, tf.zeros((num_thresholds,)))\n self.assertAllClose(oracle_auc.false_negatives, tf.zeros((num_thresholds,)))\n\n def test_PR_oracle_fraction_two_thirds(self):\n y_true = np.array([0., 0., 1., 1., 0., 1., 1., 0.])\n y_pred = np.array([0.31, 0.33, 0.42, 0.58, 0.69, 0.76, 0.84, 0.87])\n\n num_thresholds = 5 # -1e-7, 0.25, 0.5, 0.75, 1.0000001\n num_bins = 3\n curve = \"PR\"\n oracle_auc = rm.metrics.uncertainty._KerasOracleCollaborativeAUCMetric(\n oracle_fraction=0.67, # floor(0.67 * 8) = 5 examples sent to oracle\n num_thresholds=num_thresholds,\n num_bins=num_bins,\n curve=curve)\n\n result = oracle_auc(y_true, y_pred)\n self.assertAllClose(\n oracle_auc.binned_true_positives,\n # y_true's positives are 0.42, 0.58, 0.76, and 0.84 in y_pred.\n np.array([\n [0., 2., 2.], # Threshold -1e-7; bins are 
unmodified\n [2., 2., 0.], # Threshold 0.25; bins [0, 0.58), [0.58, 0.91)\n [2., 1., 0.], # Threshold 0.5: 0.42 is now a false positive.\n [2., 0., 0.], # Threshold 0.75: only 0.76 and 0.84 are positive.\n [0., 0., 0.], # Threshold 1.0000001: no positives.\n ]))\n self.assertAllClose(\n oracle_auc.binned_true_negatives,\n # The possible true negatives are 0.31, 0.33, 0.69, and 0.87.\n np.array([\n [0., 0., 0.], # There are no negatives for threshold -1e-7.\n [0., 0., 0.], # Threshold 0.25: still no negatives.\n [2., 0., 0.], # Threshold 0.5: 0.31 and 0.33 are negative.\n [1., 2., 0.], # Threshold 0.75: only 0.69 in first bin.\n [2., 0., 2.], # Threshold 1.0000001: 0.76 and 0.84 in first bin.\n ]))\n self.assertAllClose(\n oracle_auc.binned_false_positives,\n # Compare these values with oracle_auc.binned_true_negatives.\n # For example, the total across their rows must always be 4.\n np.array([\n [2., 0., 2.], # 0.76 and 0.84 in bin 3 (greater than -1e-7 + 0.66).\n [2., 2., 0.], # Threshold 0.25: 0.76 and 0.84 move to second bin.\n [1., 1., 0.], # Threshold 0.5: 0.76 (0.84) in first (second) bin.\n [1., 0., 0.], # Threshold 0.75: only 0.87 remains in first bin.\n [0., 0., 0.], # Threshold 1.0000001: no more positives.\n ]))\n self.assertAllClose(\n oracle_auc.binned_false_negatives,\n # Compare these values with oracle_auc.binned_true_positives.\n np.array([\n [0., 0., 0.], # No negatives\n [0., 0., 0.], # No negatives\n [1., 0., 0.], # Threshold 0.5: only 0.42 is below threshold.\n [2., 0., 0.], # Threshold 0.75: 0.42 still in bin 1; 0.58 joins it.\n [2., 2., 0.], # Threshold 1.0000001: 0.42 and 0.58 in second bin.\n ]))\n\n # The first and last threshold are outside [0, 1] and are never corrected.\n # Second threshold: 0.5 corrected from fp to tn\n # Third threshold: 0.83 corrected from fp and fn each to tp and tn\n # Fourth threshold: 0.83 corrected from fp->tn, 1.67 corrected from fn->tp\n self.assertAllClose(oracle_auc.true_positives,\n np.array([4., 4., 3. + 5 / 6, 2. + 5 / 3, 0.]))\n self.assertAllClose(oracle_auc.true_negatives,\n np.array([0., 2. + 0.5, 2. + 5 / 6, 3. + 5 / 6, 4.]))\n self.assertAllClose(oracle_auc.false_positives,\n np.array([4., 2. - 0.5, 2. - 5 / 6, 1. - 5 / 6, 0.]))\n self.assertAllClose(oracle_auc.false_negatives,\n np.array([0., 0., 1. - 5 / 6, 2. 
- 5 / 3, 4.]))\n\n self.assertEqual(result, 0.9434595)\n\n def test_custom_binning_score(self):\n y_true = np.array([1., 0., 0., 1.])\n y_pred = np.array([0.31, 0.32, 0.83, 0.64])\n\n oracle_auc = rm.metrics.uncertainty._KerasOracleCollaborativeAUCMetric(\n oracle_fraction=0.5, # 2 examples sent to oracle\n num_bins=4, # (-inf, 0.25), [0.25, 0.5), [0.5, 0.75), [0.75, inf)\n num_thresholds=4, # -1e-7, 0.33, 0.67, 1.0000001\n )\n\n # This custom_binning_score means 0.31 and 0.32 are always sent to oracle.\n result = oracle_auc(y_true, y_pred, custom_binning_score=y_pred)\n\n self.assertAllClose(\n oracle_auc.binned_true_positives,\n # y_true's positives are 0.31 and 0.64 in y_pred.\n np.array([\n [0., 1., 1., 0.],\n [0., 0., 1., 0.], # 0.31 is no longer above threshold 0.33\n [0., 0., 0., 0.], # 0.64 is below threshold 0.67\n [0., 0., 0., 0.],\n ]))\n self.assertAllClose(\n oracle_auc.binned_true_negatives,\n # The possible true negatives are 0.32 and 0.83.\n np.array([\n [0., 0., 0., 0.],\n [0., 1., 0., 0.], # 0.32 is below threshold 0.33\n [0., 1., 0., 0.], # 0.84 is still above threshold 0.67\n [0., 1., 0., 1.],\n ]))\n self.assertAllClose(\n oracle_auc.binned_false_positives,\n # Compare these values with oracle_auc.binned_true_negatives.\n # For example, the total across their rows must always be 2.\n np.array([\n [0., 1., 0., 1.], # 0.32 and 0.84 are both above threshold -1e-7\n [0., 0., 0., 1.], # 0.32 moves to true_negatives\n [0., 0., 0., 1.], # 0.84 still above threshold\n [0., 0., 0., 0.], # all examples moved to true_negatives\n ]))\n self.assertAllClose(\n oracle_auc.binned_false_negatives,\n # Compare these values with oracle_auc.binned_true_positives.\n np.array([\n [0., 0., 0., 0.],\n [0., 1., 0., 0.], # 0.31 becomes a false negative at threshold 0.33\n [0., 1., 1., 0.], # 0.64 becomes a false negative at threshold 0.67\n [0., 1., 1., 0.],\n ]))\n\n # 0.31 is always corrected from false_positives to true_negatives.\n self.assertAllClose(oracle_auc.true_positives, np.array([2., 2., 1., 0.]))\n self.assertAllClose(oracle_auc.true_negatives, np.array([0., 1., 1., 2.]))\n self.assertAllClose(oracle_auc.false_positives, np.array([2., 1., 1., 0.]))\n self.assertAllClose(oracle_auc.false_negatives, np.array([0., 0., 1., 2.]))\n\n self.assertEqual(result, 0.625)\n\n def test_monotonic_with_increasing_oracle_fraction_and_dtype(self):\n y_true = np.array([1., 0., 0., 1., 1., 0., 1., 0., 1.])\n y_pred = np.array([0.11, 0.62, 0.33, 0.74, 0.35, 0.26, 0.67, 0.58, 0.89])\n tf_dtype = tf.float16\n np_dtype = np.float16\n\n auc00, auc03, auc06, auc09 = [\n rm.metrics.uncertainty._KerasOracleCollaborativeAUCMetric(\n oracle_fraction=frac, num_thresholds=11, dtype=tf_dtype)\n for frac in np.array([0.0, 0.3, 0.6, 0.9])\n ]\n\n result00, result03, result06, result09 = [\n auc(y_true, y_pred) for auc in (auc00, auc03, auc06, auc09)\n ]\n\n self.assertDTypeEqual(auc00.binned_true_positives, np_dtype)\n self.assertDTypeEqual(auc00.true_positives, np_dtype)\n self.assertDTypeEqual(result00, np_dtype)\n self.assertBetween(result00, minv=0., maxv=result03)\n self.assertBetween(result06, minv=result03, maxv=result09)\n self.assertLessEqual(result09, 1.)\n\n def test_oracle_fraction_and_max_count_both_set(self):\n y_true = np.array([0., 0., 1., 1., 0., 1., 1., 0.])\n y_pred = np.array([0.31, 0.33, 0.42, 0.58, 0.69, 0.76, 0.84, 0.87])\n\n num_thresholds = 5 # -1e-7, 0.25, 0.5, 0.75, 1.0000001\n num_bins = 3\n curve = \"PR\"\n oracle_auc = rm.metrics.uncertainty._KerasOracleCollaborativeAUCMetric(\n 
oracle_fraction=0.9, # floor(0.9 * 8) = 7 examples sent to oracle\n max_oracle_count=5, # 5 overrides the limit 7 set on the line above\n num_thresholds=num_thresholds,\n num_bins=num_bins,\n curve=curve)\n\n result = oracle_auc(y_true, y_pred)\n self.assertAllClose(\n oracle_auc.binned_true_positives,\n # y_true's positives are 0.42, 0.58, 0.76, and 0.84 in y_pred.\n np.array([\n [0., 2., 2.], # Threshold -1e-7; bins are unmodified\n [2., 2., 0.], # Threshold 0.25; bins [0, 0.58), [0.58, 0.91)\n [2., 1., 0.], # Threshold 0.5: 0.42 is now a false positive.\n [2., 0., 0.], # Threshold 0.75: only 0.76 and 0.84 are positive.\n [0., 0., 0.], # Threshold 1.0000001: no positives.\n ]))\n self.assertAllClose(\n oracle_auc.binned_true_negatives,\n # The possible true negatives are 0.31, 0.33, 0.69, and 0.87.\n np.array([\n [0., 0., 0.], # There are no negatives for threshold -1e-7.\n [0., 0., 0.], # Threshold 0.25: still no negatives.\n [2., 0., 0.], # Threshold 0.5: 0.31 and 0.33 are negative.\n [1., 2., 0.], # Threshold 0.75: only 0.69 in first bin.\n [2., 0., 2.], # Threshold 1.0000001: 0.76 and 0.84 in first bin.\n ]))\n self.assertAllClose(\n oracle_auc.binned_false_positives,\n # Compare these values with oracle_auc.binned_true_negatives.\n # For example, the total across their rows must always be 4.\n np.array([\n [2., 0., 2.], # 0.76 and 0.84 in bin 3 (greater than -1e-7 + 0.66).\n [2., 2., 0.], # Threshold 0.25: 0.76 and 0.84 move to second bin.\n [1., 1., 0.], # Threshold 0.5: 0.76 (0.84) in first (second) bin.\n [1., 0., 0.], # Threshold 0.75: only 0.87 remains in first bin.\n [0., 0., 0.], # Threshold 1.0000001: no more positives.\n ]))\n self.assertAllClose(\n oracle_auc.binned_false_negatives,\n # Compare these values with oracle_auc.binned_true_positives.\n np.array([\n [0., 0., 0.], # No negatives\n [0., 0., 0.], # No negatives\n [1., 0., 0.], # Threshold 0.5: only 0.42 is below threshold.\n [2., 0., 0.], # Threshold 0.75: 0.42 still in bin 1; 0.58 joins it.\n [2., 2., 0.], # Threshold 1.0000001: 0.42 and 0.58 in second bin.\n ]))\n\n # The first and last threshold are outside [0, 1] and are never corrected.\n # Second threshold: 0.5 corrected from fp to tn\n # Third threshold: 0.83 corrected from fp and fn each to tp and tn\n # Fourth threshold: 0.83 corrected from fp->tn, 1.67 corrected from fn->tp\n self.assertAllClose(oracle_auc.true_positives,\n np.array([4., 4., 3. + 5 / 6, 2. + 5 / 3, 0.]))\n self.assertAllClose(oracle_auc.true_negatives,\n np.array([0., 2. + 0.5, 2. + 5 / 6, 3. + 5 / 6, 4.]))\n self.assertAllClose(oracle_auc.false_positives,\n np.array([4., 2. - 0.5, 2. - 5 / 6, 1. - 5 / 6, 0.]))\n self.assertAllClose(oracle_auc.false_negatives,\n np.array([0., 0., 1. - 5 / 6, 2. 
- 5 / 3, 4.]))\n\n self.assertEqual(result, 0.9434595)\n\n def test_oracle_threshold_zero_reduces_to_regular_auc(self):\n num_thresholds = 5 # -1e-7, 0.25, 0.5, 0.75, 1.0000001\n num_bins = 3 # setting oracle_threshold will override this to 2\n curve = \"ROC\"\n oracle_auc = rm.metrics.uncertainty._KerasOracleCollaborativeAUCMetric(\n oracle_fraction=0.9,\n max_oracle_count=5,\n oracle_threshold=0.,\n num_thresholds=num_thresholds,\n num_bins=num_bins,\n curve=curve)\n regular_auc = tf.keras.metrics.AUC(num_thresholds=num_thresholds)\n\n oracle_auc.update_state(self.y_true, self.y_pred)\n regular_auc.update_state(self.y_true, self.y_pred)\n\n self.assertEqual(oracle_auc.num_bins, 2)\n self.assertAllClose(\n tf.reduce_sum(oracle_auc.binned_true_positives, axis=1),\n regular_auc.true_positives)\n self.assertAllClose(\n tf.reduce_sum(oracle_auc.binned_true_negatives, axis=1),\n regular_auc.true_negatives)\n self.assertAllClose(\n tf.reduce_sum(oracle_auc.binned_false_positives, axis=1),\n regular_auc.false_positives)\n self.assertAllClose(\n tf.reduce_sum(oracle_auc.binned_false_negatives, axis=1),\n regular_auc.false_negatives)\n\n oracle_auc_result = oracle_auc.result()\n regular_auc_result = regular_auc.result()\n\n self.assertAllClose(oracle_auc.true_positives, regular_auc.true_positives)\n self.assertAllClose(oracle_auc.true_negatives, regular_auc.true_negatives)\n self.assertAllClose(oracle_auc.false_positives, regular_auc.false_positives)\n self.assertAllClose(oracle_auc.false_negatives, regular_auc.false_negatives)\n self.assertEqual(oracle_auc_result, regular_auc_result)\n\n def test_oracle_threshold_one_corrects_all_examples_perfect_auc(self):\n num_thresholds = 5 # -1e-7, 0.25, 0.5, 0.75, 1.0000001\n num_bins = 3 # setting oracle_threshold will override this to 2\n curve = \"ROC\"\n oracle_auc = rm.metrics.uncertainty._KerasOracleCollaborativeAUCMetric(\n oracle_fraction=0.9,\n max_oracle_count=5,\n oracle_threshold=1.,\n num_thresholds=num_thresholds,\n num_bins=num_bins,\n curve=curve)\n\n result = oracle_auc(self.y_true, self.y_pred)\n\n self.assertEqual(oracle_auc.num_bins, 2)\n self.assertAllClose(oracle_auc.true_positives,\n [sum(self.y_true == 1)] * (num_thresholds - 1) + [0])\n self.assertAllClose(oracle_auc.true_negatives,\n [0] + [sum(self.y_true == 0)] * (num_thresholds - 1))\n self.assertAllClose(oracle_auc.false_positives,\n [sum(self.y_true == 0)] + [0] * (num_thresholds - 1))\n self.assertAllClose(oracle_auc.false_negatives,\n [0] * (num_thresholds - 1) + [sum(self.y_true == 1)])\n\n self.assertEqual(result, 1.)\n\n def test_oracle_threshold_set(self):\n y_true = np.array([1., 0., 1., 1., 0., 0.])\n y_pred = np.array([0.5, 0.7, 0.2, 0.4, 0.3, 0.9])\n certainty_score = np.linspace(0.6, 0.7, 6) # 0.6, 0.62, 0.64, ..., 0.7\n\n num_thresholds = 4 # -1e-7, 0.33, 0.67, 1.0000001\n # Always send first three examples (0.5, 0.7, 0.2) to the oracle.\n # Because of this, they'll always be in the left confusion matrix bin.\n # Prediction 0.2 is included since its score is <= the oracle_threshold.\n oracle_threshold = 0.64\n\n oracle_auc = rm.metrics.uncertainty._KerasOracleCollaborativeAUCMetric(\n oracle_threshold=oracle_threshold,\n num_thresholds=num_thresholds,\n curve=\"PR\")\n result = oracle_auc(y_true, y_pred, custom_binning_score=certainty_score)\n\n self.assertAllClose(\n oracle_auc.binned_true_positives,\n np.array([\n [2., 1.], # Threshold -1e-7. All examples above threshold.\n [1., 1.], # Threshold 0.33. 0.2 moves below threshold.\n [0., 0.], # Threshold 0.67. 
0.5 moves below threshold.\n [0., 0.], # Threshold 1.0000001: no positives.\n ]))\n self.assertAllClose(\n oracle_auc.binned_true_negatives,\n np.array([\n [0., 0.], # Threshold -1e-7\n [0., 1.], # Threshold 0.33. 0.3 now a true negative.\n [0., 1.], # Threshold 0.67\n [1., 2.], # Threshold 1.0000001: no positives.\n ]))\n self.assertAllClose(\n oracle_auc.binned_false_positives,\n np.array([\n [1., 2.], # Threshold -1e-7\n [1., 1.], # Threshold 0.33\n [1., 1.], # Threshold 0.67\n [0., 0.], # Threshold 1.0000001: no positives.\n ]))\n self.assertAllClose(\n oracle_auc.binned_false_negatives,\n np.array([\n [0., 0.], # Threshold -1e-7\n [1., 0.], # Threshold 0.33\n [2., 1.], # Threshold 0.67\n [2., 1.], # Threshold 1.0000001: no positives.\n ]))\n\n # The first and last threshold are outside [0, 1] and are never corrected.\n # Predictions 0.5, 0.7, and 0.2 are always sent to the oracle.\n self.assertAllClose(oracle_auc.true_positives, np.array([3., 3., 2., 0.]))\n self.assertAllClose(oracle_auc.true_negatives, np.array([0., 2., 2., 3.]))\n self.assertAllClose(oracle_auc.false_positives, np.array([3., 1., 1., 0.]))\n self.assertAllClose(oracle_auc.false_negatives, np.array([0., 0., 1., 3.]))\n\n self.assertEqual(result, 0.68188375)\n\n def test_wrapped_oracle_collaborative_auc(self):\n y_true = np.array([0., 0., 1., 1., 0., 1., 1., 0.])\n y_pred = np.array([0.31, 0.33, 0.42, 0.58, 0.69, 0.76, 0.84, 0.87])\n\n num_thresholds = 5 # -1e-7, 0.25, 0.5, 0.75, 1.0000001\n num_bins = 3\n curve = \"PR\"\n wrapped_oracle_auc = rm.metrics.OracleCollaborativeAUC(\n oracle_fraction=0.67, # floor(0.67 * 8) = 5 examples sent to oracle\n num_thresholds=num_thresholds,\n num_bins=num_bins,\n curve=curve)\n\n wrapped_oracle_auc.add_batch(y_pred, label=y_true)\n result = wrapped_oracle_auc.result()[\"collaborative_auc\"]\n\n self.assertAllClose(\n wrapped_oracle_auc._metric.binned_true_positives,\n # y_true's positives are 0.42, 0.58, 0.76, and 0.84 in y_pred.\n np.array([\n [0., 2., 2.], # Threshold -1e-7; bins are unmodified\n [2., 2., 0.], # Threshold 0.25; bins [0, 0.58), [0.58, 0.91)\n [2., 1., 0.], # Threshold 0.5: 0.42 is now a false positive.\n [2., 0., 0.], # Threshold 0.75: only 0.76 and 0.84 are positive.\n [0., 0., 0.], # Threshold 1.0000001: no positives.\n ]))\n self.assertAllClose(\n wrapped_oracle_auc._metric.binned_true_negatives,\n # The possible true negatives are 0.31, 0.33, 0.69, and 0.87.\n np.array([\n [0., 0., 0.], # There are no negatives for threshold -1e-7.\n [0., 0., 0.], # Threshold 0.25: still no negatives.\n [2., 0., 0.], # Threshold 0.5: 0.31 and 0.33 are negative.\n [1., 2., 0.], # Threshold 0.75: only 0.69 in first bin.\n [2., 0., 2.], # Threshold 1.0000001: 0.76 and 0.84 in first bin.\n ]))\n self.assertAllClose(\n wrapped_oracle_auc._metric.binned_false_positives,\n # Compare these values with oracle_auc.binned_true_negatives.\n # For example, the total across their rows must always be 4.\n np.array([\n [2., 0., 2.], # 0.76 and 0.84 in bin 3 (greater than -1e-7 + 0.66).\n [2., 2., 0.], # Threshold 0.25: 0.76 and 0.84 move to second bin.\n [1., 1., 0.], # Threshold 0.5: 0.76 (0.84) in first (second) bin.\n [1., 0., 0.], # Threshold 0.75: only 0.87 remains in first bin.\n [0., 0., 0.], # Threshold 1.0000001: no more positives.\n ]))\n self.assertAllClose(\n wrapped_oracle_auc._metric.binned_false_negatives,\n # Compare these values with oracle_auc.binned_true_positives.\n np.array([\n [0., 0., 0.], # No negatives\n [0., 0., 0.], # No negatives\n [1., 0., 0.], # Threshold 0.5: 
only 0.42 is below threshold.\n [2., 0., 0.], # Threshold 0.75: 0.42 still in bin 1; 0.58 joins it.\n [2., 2., 0.], # Threshold 1.0000001: 0.42 and 0.58 in second bin.\n ]))\n\n # The first and last threshold are outside [0, 1] and are never corrected.\n # Second threshold: 0.5 corrected from fp to tn\n # Third threshold: 0.83 corrected from fp and fn each to tp and tn\n # Fourth threshold: 0.83 corrected from fp->tn, 1.67 corrected from fn->tp\n self.assertAllClose(wrapped_oracle_auc._metric.true_positives,\n np.array([4., 4., 3. + 5 / 6, 2. + 5 / 3, 0.]))\n self.assertAllClose(wrapped_oracle_auc._metric.true_negatives,\n np.array([0., 2. + 0.5, 2. + 5 / 6, 3. + 5 / 6, 4.]))\n self.assertAllClose(wrapped_oracle_auc._metric.false_positives,\n np.array([4., 2. - 0.5, 2. - 5 / 6, 1. - 5 / 6, 0.]))\n self.assertAllClose(wrapped_oracle_auc._metric.false_negatives,\n np.array([0., 0., 1. - 5 / 6, 2. - 5 / 3, 4.]))\n\n self.assertAllClose(result, 0.9434595)\n\n def test_wrapped_oracle_collaborative_auc_custom_binning_score(self):\n y_true = np.array([1., 0., 0., 1.])\n y_pred = np.array([0.31, 0.32, 0.83, 0.64])\n\n wrapped_oracle_auc = rm.metrics.OracleCollaborativeAUC(\n oracle_fraction=0.5, # 2 examples sent to oracle\n num_bins=4, # (-inf, 0.25), [0.25, 0.5), [0.5, 0.75), [0.75, inf)\n num_thresholds=4, # -1e-7, 0.33, 0.67, 1.0000001\n )\n\n # This custom_binning_score means 0.31 and 0.32 are always sent to oracle.\n wrapped_oracle_auc.add_batch(\n y_pred, label=y_true, custom_binning_score=y_pred)\n result = wrapped_oracle_auc.result()[\"collaborative_auc\"]\n\n self.assertAllClose(\n wrapped_oracle_auc._metric.binned_true_positives,\n # y_true's positives are 0.31 and 0.64 in y_pred.\n np.array([\n [0., 1., 1., 0.],\n [0., 0., 1., 0.], # 0.31 is no longer above threshold 0.33\n [0., 0., 0., 0.], # 0.64 is below threshold 0.67\n [0., 0., 0., 0.],\n ]))\n self.assertAllClose(\n wrapped_oracle_auc._metric.binned_true_negatives,\n # The possible true negatives are 0.32 and 0.83.\n np.array([\n [0., 0., 0., 0.],\n [0., 1., 0., 0.], # 0.32 is below threshold 0.33\n [0., 1., 0., 0.], # 0.84 is still above threshold 0.67\n [0., 1., 0., 1.],\n ]))\n self.assertAllClose(\n wrapped_oracle_auc._metric.binned_false_positives,\n # Compare these values with oracle_auc.binned_true_negatives.\n # For example, the total across their rows must always be 2.\n np.array([\n [0., 1., 0., 1.], # 0.32 and 0.84 are both above threshold -1e-7\n [0., 0., 0., 1.], # 0.32 moves to true_negatives\n [0., 0., 0., 1.], # 0.84 still above threshold\n [0., 0., 0., 0.], # all examples moved to true_negatives\n ]))\n self.assertAllClose(\n wrapped_oracle_auc._metric.binned_false_negatives,\n # Compare these values with oracle_auc.binned_true_positives.\n np.array([\n [0., 0., 0., 0.],\n [0., 1., 0., 0.], # 0.31 becomes a false negative at threshold 0.33\n [0., 1., 1., 0.], # 0.64 becomes a false negative at threshold 0.67\n [0., 1., 1., 0.],\n ]))\n\n # 0.31 is always corrected from false_positives to true_negatives.\n self.assertAllClose(wrapped_oracle_auc._metric.true_positives,\n np.array([2., 2., 1., 0.]))\n self.assertAllClose(wrapped_oracle_auc._metric.true_negatives,\n np.array([0., 1., 1., 2.]))\n self.assertAllClose(wrapped_oracle_auc._metric.false_positives,\n np.array([2., 1., 1., 0.]))\n self.assertAllClose(wrapped_oracle_auc._metric.false_negatives,\n np.array([0., 0., 1., 2.]))\n\n self.assertAllClose(result, 0.625)\n\n\nclass CalibrationAUCTest(tf.test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n 
super().setUp()\n self.num_thresholds = 10\n self.y_true = [0, 0, 0, 1, 1]\n self.y_pred = [0, 1, 1, 0, 1]\n\n @parameterized.named_parameters((\"perfect\", [1, 0, 0, 0, 1], 1.),\n (\"decent\", [1, 0, 0, 0.1, 0], 0.75),\n (\"medium\", [1, 0.5, 0.5, 0.5, 0], 0.5),\n (\"poor\", [0.5, 0.5, 0.5, 0.5, 0.5], 0.5),\n (\"wrong\", [0.1, 0.9, 0.9, 0.9, 0.1], 0.))\n def testAUCROC(self, confidence, auc_expected):\n # Tests the keras metric.\n m_auroc = rm.metrics.uncertainty._KerasCalibrationAUCMetric(\n num_thresholds=self.num_thresholds, curve=\"ROC\")\n m_auroc.update_state(self.y_true, self.y_pred, confidence)\n keras_result = m_auroc.result().numpy()\n\n # Tests the wrapped rm metric.\n m_auroc_wrapped = rm.metrics.CalibrationAUC(\n num_thresholds=self.num_thresholds, curve=\"ROC\")\n m_auroc_wrapped.add_batch(\n self.y_pred, label=self.y_true, confidence=confidence)\n rm_result = m_auroc_wrapped.result()[\"calibration_auc\"]\n\n self.assertEqual(keras_result, auc_expected)\n self.assertEqual(rm_result, auc_expected)\n\n @parameterized.named_parameters((\"perfect\", [1, 0, 0, 0, 1], 1.),\n (\"decent\", [1, 0, 0, 0.1, 1], 1.),\n (\"medium\", [1, 0.8, 0.5, 0.1, 0.5], 0.75),\n (\"poor\", [0.5, 0.5, 0.5, 0.5, 0.5], 0.4),\n (\"wrong\", [0.1, 0.9, 0.9, 0.9, 0.1], 0.234))\n def testAUCPR(self, confidence, auc_expected):\n # Tests the keras metric.\n m_aupr = rm.metrics.uncertainty._KerasCalibrationAUCMetric(\n num_thresholds=self.num_thresholds, curve=\"PR\")\n m_aupr.update_state(self.y_true, self.y_pred, confidence)\n keras_result = m_aupr.result().numpy()\n\n # Tests the wrapped rm metric.\n m_aupr_wrapped = rm.metrics.CalibrationAUC(\n num_thresholds=self.num_thresholds, curve=\"PR\")\n m_aupr_wrapped.add_batch(\n self.y_pred, label=self.y_true, confidence=confidence)\n rm_result = m_aupr_wrapped.result()[\"calibration_auc\"]\n\n self.assertAllClose(keras_result, auc_expected, atol=1e-3)\n self.assertAllClose(rm_result, auc_expected, atol=1e-3)\n\n def testAUCRankTwo(self):\n \"\"\"Checks if AUC indeed does not accept tensors with rank >= 2.\"\"\"\n y_pred_rank_2 = [self.y_pred]\n confidence = [0, 1, 1, 1, 0]\n\n m_auc = rm.metrics.uncertainty._KerasCalibrationAUCMetric(\n num_thresholds=self.num_thresholds)\n m_auc_wrapped = rm.metrics.CalibrationAUC(\n num_thresholds=self.num_thresholds)\n\n with self.assertRaises(ValueError):\n m_auc.update_state(self.y_true, y_pred_rank_2, confidence)\n\n with self.assertRaises(ValueError):\n m_auc_wrapped.add_batch(\n y_pred_rank_2, label=self.y_true, confidence=confidence)\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "numpy.random.rand", "numpy.min", "tensorflow.stack", "tensorflow.nn.softmax", "numpy.bincount", "numpy.max", "tensorflow.random.set_seed", "numpy.sqrt", "numpy.square", "tensorflow.abs", "numpy.array", "tensorflow.zeros", "tensorflow.random.uniform", "tensorflow.reduce_sum", "tensorflow.test.main", "tensorflow.keras.metrics.AUC", "tensorflow.convert_to_tensor", "tensorflow.linspace", "tensorflow.keras.layers.Lambda", "numpy.random.seed", "tensorflow.random.normal", "numpy.sum", "numpy.digitize", "numpy.linspace", "tensorflow.reduce_mean" ] ]
apoz00003/banana
[ "50bf516cc4f7d4d93985e42d0c4dcbc62fb8058a" ]
[ "banana/interfaces/custom/coils.py" ]
[ "import os\nimport os.path as op\nfrom copy import deepcopy\nfrom collections import defaultdict\nimport logging\nimport re\nimport numpy as np\nimport nibabel as nib\nfrom nipype.interfaces.base import (\n TraitedSpec, traits, BaseInterface, BaseInterfaceInputSpec, File,\n Directory, isdefined)\nfrom banana.exceptions import BananaUsageError\nlogger = logging.getLogger('banana')\n\n\nclass ToPolarCoordsInputSpec(BaseInterfaceInputSpec):\n in_dir = Directory(exists=True, mandatory=True)\n in_fname_re = traits.Str(\n r'.*_(?P<channel>\\d+)_(?P<echo>\\d+)_(?P<axis>[A-Z]+)\\.nii\\.gz',\n usedefault=True, desc=(\n \"Regex to extract the channel, echo and axis \"\n \"(i.e. real or imaginary) information from the input file name. \"\n \"Must incluce named groups for 'channel', 'echo' and 'axis'\"))\n out_fname_str = traits.Str(\n 'coil_{channel}_{echo}.nii.gz', usedefault=True,\n desc=(\"The format string used to generate the save channel filenames. \"\n \"Must use the 'channel' and 'echo' field names\"))\n real_label = traits.Str('REAL', usedefault=True, desc=(\n \"The label used to specify the real component image\"))\n imaginary_label = traits.Str('IMAGINARY', usedefault=True, desc=(\n \"The label used to specify the real component image\"))\n combined_dir = Directory(genfile=True, desc=(\n \"Output directory for coil magnitude and phase images. \"\n \"Files will be saved with the name \"\n \"'Raw_Coil_<channel>_<echo>.nii.gz'\"))\n magnitudes_dir = Directory(genfile=True, desc=(\n \"Output directory for coil magnitude images.\"))\n phases_dir = Directory(genfile=True, desc=(\n \"Output directory for coil phase images\"))\n\n\nclass ToPolarCoordsOutputSpec(TraitedSpec):\n combined_images = traits.List(\n File(exists=True),\n desc=\"List of combined images for each echo using least squares\")\n first_echo = File(exists=True,\n desc=\"The first echo of the combined images\")\n last_echo = File(exists=True,\n desc=\"The last echo of the combined images\")\n coil_magnitudes = traits.List(\n traits.List(File(exists=True)),\n desc=(\"List of magnitude images for each coil for each echo\"))\n coil_phases = traits.List(\n traits.List(File(exists=True)),\n desc=(\"List of magnitude images for each coil for each echo\"))\n combined_dir = Directory(exists=True, desc=(\n \"Output directory for combined magnitude images for each echo time \"))\n magnitudes_dir = Directory(exists=True, desc=(\n \"Output directory for coil magnitude images\"))\n phases_dir = Directory(exists=True, desc=(\n \"Output directory for coil phase images\"))\n\n\nclass ToPolarCoords(BaseInterface):\n \"\"\"\n Takes all REAL and IMAGINARY pairs in current directory and prepares\n them for Phase and QSM processing.\n\n 1. Existence of pairs is checked\n 2. Files are load/save cycled for formatting and rename for consistency\n 3. Magnitude and Phase components are produced\n 4. 
Coils are combined for single magnitude images per echo\n \"\"\"\n input_spec = ToPolarCoordsInputSpec\n output_spec = ToPolarCoordsOutputSpec\n\n def _run_interface(self, runtime):\n return runtime\n\n def _list_outputs(self):\n print(\"in-dir: \" + self.inputs.in_dir)\n outputs = self._outputs().get()\n # Get names for output directories\n combined_dir = outputs['combined_dir'] = self._gen_filename(\n 'combined_dir')\n mags_dir = outputs['magnitudes_dir'] = self._gen_filename(\n 'magnitudes_dir')\n phases_dir = outputs['phases_dir'] = self._gen_filename(\n 'phases_dir')\n # Ensure output directories exist\n os.makedirs(combined_dir, exist_ok=True)\n os.makedirs(mags_dir, exist_ok=True)\n os.makedirs(phases_dir, exist_ok=True)\n outputs['combined_images'] = []\n coil_mags = outputs['coil_magnitudes'] = []\n coil_phases = outputs['coil_phases'] = []\n # A default dict with three levels of keys to hold the file names\n # sorted into echo, channel and complex axis\n paths = defaultdict(lambda: defaultdict(dict))\n # Compile regular expression for extracting channel, echo and\n # complex axis indices from input file names\n fname_re = re.compile(self.inputs.in_fname_re)\n for fname in os.listdir(self.inputs.in_dir):\n match = fname_re.match(fname)\n if match is None:\n logger.warning(\"Skipping '{}' file in '{}' as it doesn't \"\n \"match expected filename pattern for raw \"\n \"channel files ('{}')\"\n .format(fname, self.inputs.in_dir,\n self.inputs.in_fname_re))\n continue\n paths[match.group('echo')][match.group('channel')][\n match.group('axis')] = op.join(self.inputs.in_dir, fname)\n\n first_echo_index = min(paths.keys())\n last_echo_index = max(paths.keys())\n\n for echo_i, channels in paths.items():\n # Variables to hold combined coil images\n combined_array = None\n normaliser_array = None\n echo_coil_mags = []\n echo_coil_phases = []\n for channel_i, axes in channels.items():\n # Load image real and imaginary data and remove extreme values\n img_arrays = {}\n for ax, fname in axes.items():\n img = nib.load(fname)\n img_array = img.get_fdata()\n # Replace extreme values with random value\n img_array[img_array == 2048] = 0.02 * np.random.rand()\n img_arrays[ax] = img_array\n\n # Calculate magnitude and phase from coil data\n cmplx = (img_arrays[self.inputs.real_label] +\n img_arrays[self.inputs.imaginary_label] * 1j)\n\n # Calculate and save magnitude image\n mag_array = np.abs(cmplx)\n mag_img = nib.Nifti1Image(mag_array, img.affine, img.header)\n mag_path = op.join(\n mags_dir,\n self.inputs.out_fname_str.format(channel=channel_i,\n echo=echo_i))\n echo_coil_mags.append(mag_path)\n nib.save(mag_img, mag_path)\n\n # Save phase image\n phase_array = np.angle(cmplx)\n phase_img = nib.Nifti1Image(phase_array, img.affine,\n img.header)\n phase_path = op.join(\n phases_dir,\n self.inputs.out_fname_str.format(channel=channel_i,\n echo=echo_i))\n echo_coil_phases.append(phase_path)\n nib.save(phase_img, phase_path)\n\n # Add coil data to combined coil data\n if combined_array is None:\n combined_array = deepcopy(mag_array) ** 2\n normaliser_array = deepcopy(mag_array)\n else:\n combined_array += mag_array ** 2\n normaliser_array += mag_array\n coil_mags.append(echo_coil_mags)\n coil_phases.append(echo_coil_phases)\n # Normalise combined sum of squares image, save and append\n # to list of combined echoes\n combined_array /= normaliser_array\n combined_array[np.isnan(combined_array)] = 0\n # Generate filename and append ot list of combined coil images\n combined_fname = 
op.join(combined_dir,\n 'echo_{}.nii.gz'.format(echo_i))\n combined_img = nib.Nifti1Image(combined_array, img.affine,\n img.header)\n nib.save(combined_img, combined_fname)\n outputs['combined_images'].append(combined_fname)\n if echo_i == first_echo_index:\n outputs['first_echo'] = combined_fname\n if echo_i == last_echo_index:\n outputs['last_echo'] = combined_fname\n return outputs\n\n def _gen_filename(self, name):\n if name == 'combined_dir':\n fname = op.abspath(self.inputs.combined_dir\n if isdefined(self.inputs.combined_dir)\n else 'combined_images')\n elif name == 'magnitudes_dir':\n fname = op.abspath(self.inputs.magnitudes_dir\n if isdefined(self.inputs.magnitudes_dir)\n else 'magnitudes_dir')\n elif name == 'phases_dir':\n fname = op.abspath(self.inputs.phases_dir\n if isdefined(self.inputs.phases_dir)\n else 'phases_dir')\n else:\n assert False\n return fname\n\n\nclass HIPCombineChannelsInputSpec(BaseInterfaceInputSpec):\n\n magnitudes_dir = Directory(exists=True, desc=(\n \"Input directory containing coil magnitude images.\"))\n phases_dir = Directory(exists=True, desc=(\n \"Input directory containing coil phase images.\"))\n in_fname_re = traits.Str(\n r'coil_(?P<channel>\\d+)_(?P<echo>\\d+)\\.nii\\.gz', usedefault=True,\n desc=(\"The format string used to generate the save channel filenames. \"\n \"Must use the 'channel' and 'echo' field names\"))\n magnitude = File(genfile=True, desc=\"Combined magnitude image\")\n phase = File(genfile=True, desc=\"Combined phase image\")\n q = File(genfile=True, desc=\"Q image\")\n\n\nclass HIPCombineChannelsOutputSpec(TraitedSpec):\n\n magnitude = File(exists=True, desc=\"Combined magnitude image\")\n phase = File(exists=True, desc=\"Combined phase image\")\n q = File(exists=True, desc=\"Q image\")\n\n\nclass HIPCombineChannels(BaseInterface):\n \"\"\"\n Apply Laplacian unwrapping from STI suite to each coil\n \"\"\"\n input_spec = HIPCombineChannelsInputSpec\n output_spec = HIPCombineChannelsOutputSpec\n\n def _run_interface(self, runtime):\n return runtime\n\n def _list_outputs(self):\n outputs = self._outputs().get()\n mag_fname = outputs['magnitude'] = self._gen_filename('magnitude')\n phase_fname = outputs['phase'] = self._gen_filename('phase')\n q_fname = outputs['q'] = self._gen_filename('q')\n mag_paths = defaultdict(dict)\n phase_paths = defaultdict(dict)\n # Compile regular expression for extracting channel, echo and\n # complex axis indices from input file names\n fname_re = re.compile(self.inputs.in_fname_re)\n for dpath, dct in ((self.inputs.magnitudes_dir, mag_paths),\n (self.inputs.phases_dir, phase_paths)):\n for fname in os.listdir(dpath):\n match = fname_re.match(fname)\n if match is None:\n logger.warning(\"Skipping '{}' file in '{}' as it doesn't \"\n \"match expected filename pattern for raw \"\n \"channel files ('{}')\"\n .format(fname, dpath,\n self.inputs.in_fname_re))\n continue\n dct[match.group('channel')][match.group('echo')] = op.join(dpath,\n fname)\n if len(mag_paths) != len(phase_paths):\n raise BananaUsageError(\n \"Mismatching number of channels between magnitude and phase \"\n \"channels\")\n hip = None\n for chann_i in mag_paths:\n if len(mag_paths[chann_i]) != 2:\n raise BananaUsageError(\n \"Expected exactly two echos for channel magnitude {}, \"\n \"found {}\".format(chann_i, len(mag_paths[chann_i])))\n if len(phase_paths[chann_i]) != 2:\n raise BananaUsageError(\n \"Expected exactly two echos for channel magnitude {}, \"\n \"found {}\".format(chann_i, len(phase_paths[chann_i])))\n mag1 = 
nib.load(mag_paths[chann_i][0])\n phase1 = nib.load(phase_paths[chann_i][0])\n mag2 = nib.load(mag_paths[chann_i][1])\n phase2 = nib.load(phase_paths[chann_i][1])\n\n # Get array data\n mag1_array = mag1.get_fdata()\n phase1_array = phase1.get_fdata()\n mag2_array = mag2.get_fdata()\n phase2_array = phase2.get_fdata()\n\n if hip is None:\n hip = np.zeros(mag1_array.shape)\n sum_mag = np.zeros(mag1_array.shape)\n hip += mag1_array * mag2_array * np.exp(\n -1j * (phase1_array - phase2_array))\n sum_mag += mag1_array * mag2_array\n # Get magnitude and phase\n phase = np.angle(hip)\n mag = np.abs(hip)\n q = mag / sum_mag\n # Create NIfTI images\n phase_img = nib.Nifti1Image(phase, phase1.affine, phase1.header)\n mag_img = nib.Nifti1Image(mag, mag1.affine, mag1.header)\n q_img = nib.Nifti1Image(q, mag1.affine, mag1.header)\n # Save NIfTIs\n nib.save(phase_img, phase_fname)\n nib.save(mag_img, mag_fname)\n nib.save(q_img, q_fname)\n return outputs\n\n def _gen_filename(self, name):\n if name == 'magnitude':\n fname = op.abspath(self.inputs.magnitude\n if isdefined(self.inputs.magnitude)\n else 'magnitude')\n elif name == 'phase':\n fname = op.abspath(self.inputs.phase\n if isdefined(self.inputs.phase) else 'phase')\n elif name == 'q':\n fname = op.abspath(self.inputs.q if isdefined(self.inputs.q)\n else 'q')\n else:\n assert False\n return fname\n" ]
[ [ "numpy.isnan", "numpy.angle", "numpy.random.rand", "numpy.zeros", "numpy.exp", "numpy.abs" ] ]
Chick-star/sagemaker-xgboost-container
[ "e06e278b3a34515f79fa73ab770b574b9aafe5f0" ]
[ "test/unit/algorithm_mode/test_custom_metrics.py" ]
[ "# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License'). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the 'license' file accompanying this file. This file is\n# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport numpy as np\nimport xgboost as xgb\n\nfrom sagemaker_xgboost_container.metrics.custom_metrics import accuracy, f1, mse\n\n\nbinary_train_data = np.random.rand(10, 2)\nbinary_train_label = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])\nbinary_dtrain = xgb.DMatrix(binary_train_data, label=binary_train_label)\nbinary_preds = np.ones(10)\n\n\ndef test_binary_accuracy():\n accuracy_name, accuracy_result = accuracy(binary_preds, binary_dtrain)\n assert accuracy_name == 'accuracy'\n assert accuracy_result == .5\n\n\ndef test_binary_f1():\n f1_score_name, f1_score_result = f1(binary_preds, binary_dtrain)\n assert f1_score_name == 'f1'\n assert f1_score_result == 1/3\n\n\ndef test_mse():\n mse_score_name, mse_score_result = mse(binary_preds, binary_dtrain)\n assert mse_score_name == 'mse'\n assert mse_score_result == .5\n\n\nmulticlass_train_data = np.random.rand(10, 2)\nmulticlass_train_label = np.array([0, 0, 1, 1, 1, 1, 1, 2, 2, 2])\nmulticlass_dtrain = xgb.DMatrix(multiclass_train_data, label=multiclass_train_label)\nmulticlass_preds = np.ones(10)\n\n\ndef test_multiclass_accuracy():\n accuracy_name, accuracy_result = accuracy(multiclass_preds, multiclass_dtrain)\n assert accuracy_name == 'accuracy'\n assert accuracy_result == .5\n\n\ndef test_multiclass_f1():\n f1_score_name, f1_score_result = f1(multiclass_preds, multiclass_dtrain)\n assert f1_score_name == 'f1'\n assert f1_score_result == 2/9\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.random.rand" ] ]
MECLabTUDA/QA_Seg
[ "ef1b029e78a6fc5db07d127df375fef3b2c5b7a2", "72a961e081ac814243ae65b46e0276079af5680f" ]
[ "mp/utils/pytorch/compute_normalization_values.py", "mp/utils/preprocess_utility_functions.py" ]
[ "# ------------------------------------------------------------------------------\n# Torchvision requires the mean and standard deviation to be calculated manually \n# for normalization. This method can be used for that. However, this is mainly \n# for colored 2D images and therefore rarely relevant for medical data.\n# ------------------------------------------------------------------------------\n\nimport torch\n\ndef normalization_values(dataset):\n r\"\"\"Compute normalization values for a dataset.\"\"\"\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=10, shuffle=False)\n count = 0\n mean = torch.empty(3)\n std = torch.empty(3)\n\n for data, _ in dataloader:\n b, c, h, w = data.shape\n nb_pixels = b * h * w\n sum_ = torch.sum(data, dim=[0, 2, 3])\n sum_of_square = torch.sum(data ** 2, dim=[0, 2, 3])\n mean = (count * mean + sum_) / (count + nb_pixels)\n std = (count * std + sum_of_square) / (count + nb_pixels)\n count += nb_pixels\n\n return {'mean': mean, 'std': torch.sqrt(std - mean ** 2)}", "import shutil\r\nimport time\r\nimport os \r\nimport SimpleITK as sitk \r\nimport numpy as np \r\nimport torch\r\nimport json \r\nfrom mp.utils.feature_extractor import Feature_extractor\r\nfrom mp.utils.Iterators import Dataset_Iterator\r\nfrom mp.eval.metrics.simple_scores import dice_score\r\nfrom mp.data.pytorch.transformation import resize_3d\r\nfrom mp.utils.lung_captured import _extract_lung_segmentation\r\nimport multiprocessing as mup\r\n\r\ndef basic_preprocessing(label=1):\r\n '''does the basic preprocessing steps of copying the data into the right directory,\r\n resizing the images, masking the label and computing a segmentation of the lung '''\r\n copy_data_into_preprocess_dir()\r\n bring_all_data_into_right_size()\r\n mask_out_labels_all_seg(label=label)\r\n compute_lung_segmentations()\r\n # scale_all_images() # in the paper the images are NOT scaled\r\n \r\n# COPYING DATA\r\ndef copy_data_into_preprocess_dir():\r\n r\"\"\"Copies the data into the preprocess dir in order to iterate over it\r\n \"\"\"\r\n if os.environ[\"INFERENCE_OR_TRAIN\"] == 'inference':\r\n #get the paths \r\n input_path = os.path.join(os.environ[\"WORKFLOW_DIR\"],os.environ[\"OPERATOR_IN_DIR\"])\r\n output_path = os.path.join(os.environ[\"PREPROCESSED_WORKFLOW_DIR\"],os.environ[\"PREPROCESSED_OPERATOR_OUT_SCALED_DIR\"])\r\n if not os.path.isdir(output_path):\r\n os.makedirs(output_path)\r\n _delete_images_and_labels(output_path)\r\n\r\n #copy the images and segmentations into right format\r\n ds_iterator = Dataset_Iterator(input_path, mode='JIP')\r\n ds_iterator.iterate_images(copy_img_seg,preprocess_mode=True)\r\n\r\n if os.environ[\"INFERENCE_OR_TRAIN\"] == 'train':\r\n # get the paths \r\n\r\n # for NEW DATA FORMAT on JIP platform\r\n input_path = os.environ[\"TRAIN_WORKFLOW_DIR\"]\r\n output_path = os.path.join(os.environ[\"PREPROCESSED_WORKFLOW_DIR\"],os.environ[\"PREPROCESSED_OPERATOR_OUT_SCALED_DIR_TRAIN\"])\r\n if not os.path.isdir(output_path):\r\n os.makedirs(output_path)\r\n _delete_images_and_labels(output_path)\r\n\r\n #copy the images and segmentations into right format\r\n for id in os.listdir(input_path):\r\n start_time = time.time()\r\n id_path = os.path.join(input_path,id)\r\n img_path = os.path.join(id_path,'img','img.nii.gz')\r\n seg_path = os.path.join(id_path,'seg','001.nii.gz')\r\n copy_img_seg(img_path,seg_path,id)\r\n for pred in os.listdir(os.path.join(id_path,'pred')):\r\n new_pred_path = os.path.join(output_path,id,'pred',pred)\r\n if not 
os.path.isdir(new_pred_path):\r\n os.makedirs(new_pred_path)\r\n shutil.copyfile(os.path.join(id_path,'pred',pred,'pred.nii.gz'),os.path.join(new_pred_path,'pred.nii.gz'))\r\n end_time = time.time()\r\n dur = end_time-start_time\r\n with open('logging_info_private.txt','a') as file: \r\n file.write('Copying on {} took {}'.format(id,dur))\r\n file.write(\"\\r\")\r\n\r\n # # For OLD DATA format OLD TRAIN PROCEDURE \r\n # output_path = os.path.join(os.environ[\"PREPROCESSED_WORKFLOW_DIR\"],os.environ[\"PREPROCESSED_OPERATOR_OUT_SCALED_DIR_TRAIN\"])\r\n # gt_data = os.path.join(os.environ[\"TRAIN_WORKFLOW_DIR\"],os.environ[\"TRAIN_WORKFLOW_DIR_GT\"])\r\n # _delete_images_and_labels(output_path)\r\n\r\n # #copy the images and segmentations into right format\r\n # for task in os.listdir(gt_data):\r\n # ids = [id.split('_')[0] for id in os.listdir(os.path.join(gt_data,task,'imagesTr'))]\r\n # for id in ids:\r\n # start_time = time.time()\r\n # img_path = os.path.join(gt_data,task,'imagesTr',id+'_0000.'+os.environ[\"INPUT_FILE_ENDING\"])\r\n # seg_path = os.path.join(gt_data,task,'labelsTr',id+'.'+os.environ[\"INPUT_FILE_ENDING\"])\r\n # name = task + '_' + id\r\n # copy_img_seg(img_path,seg_path,name)\r\n # copy_predictions(task,id,name)\r\n # end_time = time.time()\r\n # dur = end_time-start_time\r\n # with open('logging_info_private.txt','a') as file: \r\n # file.write('Copying on {} {} took {}'.format(task,id,dur))\r\n # file.write(\"\\r\")\r\n\r\ndef copy_img_seg(img_path,seg_path,name):\r\n r\"\"\"Copies an img and a segmentation into the new folder structure\r\n\r\n Args:\r\n img_path (str): path to image \r\n seg_path (str): path to segmentation\r\n name (str): string to identify the image-seg pair \r\n \"\"\"\r\n if os.environ[\"INFERENCE_OR_TRAIN\"] == 'inference':\r\n save_path_i = os.path.join(os.environ[\"PREPROCESSED_WORKFLOW_DIR\"],os.environ[\"PREPROCESSED_OPERATOR_OUT_SCALED_DIR\"],name,'img')\r\n save_path_s = os.path.join(os.environ[\"PREPROCESSED_WORKFLOW_DIR\"],os.environ[\"PREPROCESSED_OPERATOR_OUT_SCALED_DIR\"],name,'seg')\r\n if os.environ[\"INFERENCE_OR_TRAIN\"] == 'train':\r\n save_path_i = os.path.join(os.environ[\"PREPROCESSED_WORKFLOW_DIR\"],os.environ[\"PREPROCESSED_OPERATOR_OUT_SCALED_DIR_TRAIN\"],name,'img')\r\n save_path_s = os.path.join(os.environ[\"PREPROCESSED_WORKFLOW_DIR\"],os.environ[\"PREPROCESSED_OPERATOR_OUT_SCALED_DIR_TRAIN\"],name,'seg')\r\n save_path_img = os.path.join(save_path_i,'img.nii.gz')\r\n save_path_seg = os.path.join(save_path_s,'001.nii.gz')\r\n #make directories \r\n if not os.path.isdir(save_path_i):\r\n os.makedirs(save_path_i)\r\n if not os.path.isdir(save_path_s):\r\n os.makedirs(save_path_s)\r\n \r\n shutil.copyfile(img_path,save_path_img)\r\n shutil.copyfile(seg_path,save_path_seg)\r\n\r\ndef _delete_images_and_labels(path):\r\n r\"\"\"This function deletes every nifti and json (labels) file in the path.\r\n\r\n Args:\r\n path (str): the path to go through and delete\r\n \"\"\"\r\n # Walk through path and delete all .nii files\r\n print('Walk trough directory \\'{}\\' and delete nifti files..'.format(path))\r\n for dname, dirs, files in os.walk(path):\r\n for num, fname in enumerate(files):\r\n msg = str(num + 1) + '_ of ' + str(len(files)) + '_ file(s).'\r\n print (msg, end = '\\r')\r\n # Check if file is a nifti file and delete it\r\n if '.nii' in fname or '.json' in fname:\r\n fpath = os.path.dirname(dname)\r\n shutil.rmtree(fpath)\r\n\r\ndef copy_predictions (task,id,name):\r\n r\"\"\"Copies all predictiosn in task, id into the new 
data format\r\n\r\n Args:\r\n task (str): The name of the data source\r\n id (str): the id of the image \r\n name (str): a combination of both from above (task_id) which is the destination of the copying process\r\n \"\"\"\r\n pred_data = os.path.join(os.environ[\"TRAIN_WORKFLOW_DIR\"],os.environ[\"TRAIN_WORKFLOW_DIR_PRED\"])\r\n #iterate over all models, that made predictions \r\n for model in os.listdir(pred_data):\r\n \r\n #look up, if there is a prediction for the img-seg pair\r\n origin_pred_path = os.path.join(pred_data,model,task,id+'.'+os.environ[\"INPUT_FILE_ENDING\"])\r\n if os.path.exists(origin_pred_path):\r\n path_to_id = os.path.join(os.environ[\"PREPROCESSED_WORKFLOW_DIR\"],os.environ[\"PREPROCESSED_OPERATOR_OUT_SCALED_DIR_TRAIN\"],name)\r\n pred_path = os.path.join(path_to_id,'pred',model)\r\n\r\n #copy the prediction\r\n dst_pred_path = os.path.join(pred_path,'pred.nii.gz')\r\n if not os.path.isdir(pred_path):\r\n os.makedirs(pred_path)\r\n shutil.copyfile(origin_pred_path,dst_pred_path)\r\n\r\n## RESIZING \r\n#now resize images, pred, seg who are too big\r\ndef bring_all_data_into_right_size():\r\n r\"\"\"Goes through the working directory and resizes all images necessary \"\"\"\r\n work_path = get_workflow_dir()\r\n for id in os.listdir(work_path):\r\n start_time=time.time()\r\n id_path = os.path.join(work_path,id)\r\n img_path = os.path.join(id_path,'img','img.nii.gz')\r\n img = sitk.GetArrayFromImage(sitk.ReadImage(img_path))\r\n if check_needs_downsize(img):\r\n downsize_img_seg_pred(id_path,img) \r\n end_time = time.time()\r\n dur = end_time-start_time\r\n with open('logging_info_private.txt','a') as file: \r\n file.write('potential resizing on {} took {}'.format(id,dur))\r\n file.write(\"\\r\")\r\n\r\ndef check_needs_downsize(img):\r\n '''checks whether an image is too big and needs resizing \r\n Args: \r\n img(nd.array): the image\r\n \r\n Returns(bool): whether the image is too big\r\n '''\r\n shape = np.shape(img)\r\n if shape[0] > 50 or shape[1] > 512 or shape[2] > 512:\r\n return True \r\n else:\r\n return False \r\n\r\ndef downsize_img_seg_pred(id_path,img):\r\n '''if the image is too large in dimensions, the image is downsized\r\n\r\n Args: \r\n id_path (str): path to the directory where img,seg,pred are\r\n size (tuple): the size we want to resize to\r\n\r\n Returns 2(nd.array): img,seg resized'''\r\n #get the right shape \r\n shape = np.shape(img)\r\n new_shape_d = min(50,shape[0])\r\n new_shape_wh = min(512,shape[1])\r\n size = (1,new_shape_d,new_shape_wh,new_shape_wh)\r\n\r\n #downsize the image\r\n img_path = os.path.join(id_path,'img','img.nii.gz')\r\n img = torch.from_numpy(img)\r\n img.unsqueeze_(0)\r\n img = resize_3d(img,size,label=False)\r\n img = torch.squeeze(img)\r\n img = img.numpy()\r\n img = sitk.GetImageFromArray(img)\r\n sitk.WriteImage(img,img_path)\r\n\r\n #downsize the segmentation \r\n seg_path = os.path.join(id_path,'seg','001.nii.gz')\r\n seg = sitk.GetArrayFromImage(sitk.ReadImage(seg_path))\r\n seg = torch.from_numpy(seg)\r\n seg.unsqueeze_(0)\r\n seg = seg.type(torch.float64)\r\n seg = resize_3d(seg,size,label=True)\r\n seg = torch.squeeze(seg)\r\n seg = seg.numpy()\r\n seg = sitk.GetImageFromArray(seg)\r\n sitk.WriteImage(seg,seg_path)\r\n \r\n #downsize possible predictions, its l8, bad code\r\n all_pred_path = os.path.join(id_path,'pred')\r\n if os.path.exists(all_pred_path):\r\n for model in os.listdir(all_pred_path):\r\n pred_path = os.path.join(id_path,'pred',model,'pred.nii.gz')\r\n pred = 
sitk.GetArrayFromImage(sitk.ReadImage(pred_path))\r\n pred = torch.from_numpy(pred)\r\n pred.unsqueeze_(0)\r\n pred = pred.type(torch.float64)\r\n pred = resize_3d(pred,size,label=True)\r\n pred = torch.squeeze(pred)\r\n pred = pred.numpy()\r\n pred = sitk.GetImageFromArray(pred)\r\n sitk.WriteImage(pred,pred_path) \r\n\r\n## MASKING \r\n# mask out every label except for 0 (background) and the given label for all segmentations\r\ndef mask_out_labels_all_seg(label):\r\n \"\"\"Goes through work path and masks out all labels except the one given. \r\n All labels are mapped to 0, except label, which is mapped to 1 \r\n\r\n Args:\r\n arg_1 (type): The label to map to 1 instead of 0\r\n \"\"\"\r\n work_path = get_workflow_dir()\r\n for id in os.listdir(work_path):\r\n start_time = time.time()\r\n seg_path = os.path.join(work_path,id,'seg','001.nii.gz')\r\n mask_out_label(seg_path,label)\r\n # now do the same for predictions\r\n all_pred_path = os.path.join(work_path,id,'pred')\r\n if os.path.exists(all_pred_path):\r\n for model in os.listdir(all_pred_path):\r\n pred_path = os.path.join(all_pred_path,model,'pred.nii.gz')\r\n mask_out_label(pred_path, label)\r\n end_time = time.time()\r\n dur = end_time-start_time\r\n with open('logging_info_private.txt','a') as file: \r\n file.write('Masking labels on {} took {}'.format(id,dur))\r\n file.write(\"\\r\")\r\n\r\ndef mask_out_label(seg_path,label):\r\n \"\"\"Masks out all labels excpet the one we want to use, which is converted to 1\r\n\r\n Args:\r\n seg_path (str): path to segmentation\r\n label (int): label to use\r\n \"\"\"\r\n seg = sitk.GetArrayFromImage(sitk.ReadImage(seg_path))\r\n seg = np.ma.masked_not_equal(seg,label)\r\n seg = np.ma.filled(seg,0)\r\n seg = np.ma.masked_not_equal(seg,0)\r\n seg = np.ma.filled(seg,1)\r\n sitk.WriteImage(sitk.GetImageFromArray(seg),seg_path)\r\n\r\n##COMPUTING LUNG SEGMENTATIONS\r\ndef compute_lung_segmentations():\r\n r\"\"\"Goes through work dir and computes the segmentations for all lung volumes.\r\n Saves them in the respectie folders\r\n \"\"\"\r\n work_path = get_workflow_dir()\r\n for id in os.listdir(work_path):\r\n start_time = time.time()\r\n compute_lung_segmentation(work_path,id)\r\n end_time = time.time()\r\n dur = end_time-start_time\r\n with open('logging_info_private.txt','a') as file: \r\n file.write('lung segmentation on {} took {}'.format(id,dur))\r\n file.write(\"\\r\")\r\n\r\ndef compute_lung_segmentation(work_path,id):\r\n r\"\"\"Utility function for compute lung segmentations. It computes the segmentations of the \r\n lung volumes given an imahe.\r\n\r\n Args:\r\n work_path (str): the working dir \r\n id (str): the id identifying the image \r\n \"\"\"\r\n img_path = os.path.join(work_path,id,'img','img.nii.gz')\r\n lung_seg_path = os.path.join(work_path,id,'lung_seg')\r\n lung_seg_save_path = os.path.join(lung_seg_path,'lung_seg.nii.gz')\r\n if not os.path.exists(lung_seg_path):\r\n os.makedirs(lung_seg_path)\r\n if torch.cuda.is_available():\r\n segmentation = _extract_lung_segmentation(img_path, gpu=True, cuda=os.environ[\"CUDA_FOR_LUNG_SEG\"])\r\n else:\r\n segmentation = _extract_lung_segmentation(img_path, gpu=False, cuda='cpu')\r\n segmentation[segmentation==2] = 1\r\n segmentation = sitk.GetImageFromArray(segmentation)\r\n sitk.WriteImage(segmentation,lung_seg_save_path)\r\n \r\n#FEATURE EXTRACTION\r\ndef extract_features_all_data():\r\n r\"\"\"Goes through the working dir and computes all features for \r\n every image. 
The features are saved as dictionaries in the respective folders\r\n \"\"\"\r\n work_path = get_workflow_dir()\r\n feat_extr = Feature_extractor()\r\n for id in os.listdir(work_path):\r\n start_time = time.time()\r\n feat_extr.compute_features_id(id)\r\n end_time = time.time()\r\n dur = end_time-start_time\r\n with open('logging_info_private.txt','a') as file: \r\n file.write('Feat_extr on {} took {}'.format(id,dur))\r\n file.write(\"\\r\")\r\n\r\n# TRAINING OF MODELS \r\n# !!!only for train time!!! compute the dice scores between the predictions and the ground truths\r\ndef compute_all_prediction_dice_scores():\r\n r\"\"\"Computes the dice score for all predictions. This is used to train the dice predictor.\r\n Saves the computed dice scores in the respective folders\r\n \"\"\"\r\n if os.environ[\"INFERENCE_OR_TRAIN\"] == 'train':\r\n work_path = os.path.join(os.environ[\"PREPROCESSED_WORKFLOW_DIR\"],os.environ[\"PREPROCESSED_OPERATOR_OUT_SCALED_DIR_TRAIN\"])\r\n else:\r\n print('this can only be done during train time')\r\n RuntimeError\r\n for id in os.listdir(work_path):\r\n start_time = time.time()\r\n id_path = os.path.join(work_path,id)\r\n compute_prediction_dice_scores_for_id(id_path)\r\n end_time = time.time()\r\n dur = end_time-start_time\r\n with open('logging_info_private.txt','a') as file: \r\n file.write('dice scores on predictions on {} took {}'.format(id,dur))\r\n file.write(\"\\r\")\r\n\r\ndef compute_prediction_dice_scores_for_id(id_path): \r\n r\"\"\"Recieves the path to an image and computes the dice score for every prediction on it.\r\n Then saves the computed dice scores \r\n\r\n Args:\r\n id_path (str): the path to the folder containing everything related to this id\r\n \"\"\"\r\n seg_path = os.path.join(id_path,'seg','001.nii.gz')\r\n all_pred_path = os.path.join(id_path,'pred')\r\n if os.path.exists(all_pred_path):\r\n seg = sitk.GetArrayFromImage(sitk.ReadImage(seg_path))\r\n for model in os.listdir(all_pred_path):\r\n pred_path = os.path.join(id_path,'pred',model,'pred.nii.gz')\r\n dice_score_save_path = os.path.join(id_path,'pred',model,'dice_score.json')\r\n prediction = sitk.GetArrayFromImage(sitk.ReadImage(pred_path))\r\n dice = dice_score(seg,prediction)\r\n with open(dice_score_save_path,'w') as file:\r\n json.dump(dice,file)\r\n\r\n#UTILITY\r\ndef get_workflow_dir():\r\n r\"\"\"depending on os.environ[\"INFERENCE_OR_TRAIN\"] return the work dir\r\n\r\n Returns (str): the path to the work dir \r\n \"\"\"\r\n if os.environ[\"INFERENCE_OR_TRAIN\"] == 'inference':\r\n work_path = os.path.join(os.environ[\"PREPROCESSED_WORKFLOW_DIR\"],os.environ[\"PREPROCESSED_OPERATOR_OUT_SCALED_DIR\"])\r\n if os.environ[\"INFERENCE_OR_TRAIN\"] == 'train':\r\n work_path = os.path.join(os.environ[\"PREPROCESSED_WORKFLOW_DIR\"],os.environ[\"PREPROCESSED_OPERATOR_OUT_SCALED_DIR_TRAIN\"])\r\n return work_path\r\n\r\n\r\n\r\n#then scale all images\r\ndef scale_all_images():\r\n '''now we can assume, all data is in the same JIP data - format'''\r\n #set the right directory\r\n work_path = get_workflow_dir()\r\n for id in os.listdir(work_path):\r\n start_time = time.time()\r\n img_path = os.path.join(work_path,id,'img','img.nii.gz')\r\n scale_image(img_path)\r\n end_time = time.time()\r\n dur = end_time-start_time\r\n with open('logging_info_private.txt','a') as file: \r\n file.write('Scaling on {} took {}'.format(id,dur))\r\n file.write(\"\\r\")\r\n\r\ndef scale_image(img_path,d_type=np.float32):\r\n ''' takes a path to an image, computes the a version with the values scaled to 
[0,1] \r\n and saves it in the same path \r\n Args:\r\n img_path(str): the path to the image\r\n d_type (np.datatype): a datatype the image shall have as output\r\n '''\r\n img = sitk.ReadImage(img_path)\r\n img = sitk.GetArrayFromImage(img)\r\n\r\n max_val = np.max(img)\r\n min_val = np.min(img)\r\n span = max_val - min_val\r\n if span == 0:\r\n print('The image has only one intensity value and thus cannot be rescaled')\r\n return RuntimeError\r\n \r\n shape = np.shape(img)\r\n\r\n add_array = np.ones(shape)*min_val\r\n img = img - add_array\r\n img = img * 1/span \r\n img = np.around(img,decimals=4)\r\n if d_type:\r\n img = np.array(img,dtype=d_type)\r\n \r\n img = sitk.GetImageFromArray(img)\r\n sitk.WriteImage(img,img_path)\r\n\r\n\r\n# Below unused functions in normal case \r\n\r\ndef copy_useable_predictions(task,id,name):\r\n # DEPRECATED\r\n global nr_train\r\n global nr_test\r\n nr_pred = 0\r\n pred_data = os.path.join(os.environ[\"TRAIN_WORKFLOW_DIR\"],os.environ[\"TRAIN_WORKFLOW_DIR_PRED\"])\r\n #iterate over all models, that made predictions \r\n for model in os.listdir(pred_data):\r\n \r\n #look up, if there is a prediction for the img-seg pair\r\n origin_pred_path = os.path.join(pred_data,model,task,id+'.'+os.environ[\"INPUT_FILE_ENDING\"])\r\n if os.path.exists(origin_pred_path) and is_useable_prediction(task,model):\r\n path_to_id = os.path.join(os.environ[\"PREPROCESSED_WORKFLOW_DIR\"],os.environ[\"PREPROCESSED_OPERATOR_OUT_SCALED_DIR_TRAIN\"],name)\r\n pred_path = os.path.join(path_to_id,'pred','pred_{}'.format(nr_pred))\r\n\r\n #copy the prediction\r\n dst_pred_path = os.path.join(pred_path,'pred_'+str(nr_pred)+'.nii.gz')\r\n if not os.path.isdir(pred_path):\r\n os.makedirs(pred_path)\r\n shutil.copyfile(origin_pred_path,dst_pred_path)\r\n\r\n if get_task(task)==7:\r\n nr_train += 1\r\n if get_task(task) in [0,1,2,5]:\r\n nr_test += 1 \r\ndef get_task(task):\r\n #returns the first digit of the task number\r\n return int(task.split('_')[0][4])\r\n\r\n# returns whether there exists a prediction for that specific task\r\ndef useable_prediction_exists(task,id): \r\n pred_data = os.path.join(os.environ[\"TRAIN_WORKFLOW_DIR\"],os.environ[\"TRAIN_WORKFLOW_DIR_PRED\"])\r\n for model in os.listdir(pred_data):\r\n #look up, if there is a prediction for the img-seg pair\r\n origin_pred_path = os.path.join(pred_data,model,task,id+'.'+os.environ[\"INPUT_FILE_ENDING\"])\r\n if get_task(task) == 7: \r\n if get_task(task) == get_task(model) and os.path.exists(origin_pred_path):\r\n return True\r\n if get_task(task) in [0,1,2,5] and get_task(model) in [0,1,2,5]:\r\n if os.path.exists(origin_pred_path):\r\n return True\r\n return False\r\n \r\ndef is_useable_prediction(task,model):\r\n if get_task(task) == 7: \r\n return get_task(task) == get_task(model)\r\n if get_task(task) in [0,1,2,5]:\r\n return get_task(model) in [0,1,2,5]" ]
[ [ "torch.empty", "torch.utils.data.DataLoader", "torch.sqrt", "torch.sum" ], [ "numpy.max", "numpy.array", "numpy.ma.masked_not_equal", "numpy.ones", "numpy.min", "numpy.shape", "torch.from_numpy", "torch.squeeze", "numpy.ma.filled", "torch.cuda.is_available", "numpy.around" ] ]
khakhulin/Text2Img
[ "acb002904122e1f2c0abed3fff69daccfff88c12" ]
[ "modules/self_attention.py" ]
[ "# https://github.com/voletiv/self-attention-GAN-pytorch/blob/master/sagan_models.py\n\nimport torch\nimport torch.nn as nn\n\nfrom torch.nn.utils import spectral_norm\nfrom torch.nn.init import xavier_uniform_\n\n\ndef init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2d:\n xavier_uniform_(m.weight)\n m.bias.data.fill_(0.)\n\n\ndef snconv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):\n return spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,\n stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias))\n\n\ndef snlinear(in_features, out_features):\n return spectral_norm(nn.Linear(in_features=in_features, out_features=out_features))\n\n\ndef sn_embedding(num_embeddings, embedding_dim):\n return spectral_norm(nn.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim))\n\n\nclass SelfAttn(nn.Module):\n \"\"\"\n Self attention Layer\n https://github.com/voletiv/self-attention-GAN-pytorch/blob/master/sagan_models.py#L32\n \"\"\"\n\n def __init__(self, in_channels):\n super(SelfAttn, self).__init__()\n self.in_channels = in_channels\n self.snconv1x1_theta = snconv2d(in_channels=in_channels, out_channels=in_channels//8, kernel_size=1, stride=1, padding=0)\n self.snconv1x1_phi = snconv2d(in_channels=in_channels, out_channels=in_channels//8, kernel_size=1, stride=1, padding=0)\n self.snconv1x1_g = snconv2d(in_channels=in_channels, out_channels=in_channels//2, kernel_size=1, stride=1, padding=0)\n self.snconv1x1_attn = snconv2d(in_channels=in_channels//2, out_channels=in_channels, kernel_size=1, stride=1, padding=0)\n self.maxpool = nn.MaxPool2d(2, stride=2, padding=0)\n self.softmax = nn.Softmax(dim=-1)\n self.sigma = nn.Parameter(torch.zeros(1))\n\n def forward(self, x):\n \"\"\"\n inputs :\n x : input feature maps(B X C X W X H)\n returns :\n out : self attention value + input feature\n attention: B X N X N (N is Width*Height)\n \"\"\"\n _, ch, h, w = x.size()\n # Theta path\n theta = self.snconv1x1_theta(x)\n theta = theta.view(-1, ch//8, h*w)\n # Phi path\n phi = self.snconv1x1_phi(x)\n phi = self.maxpool(phi)\n phi = phi.view(-1, ch//8, h*w//4)\n # Attn map\n attn = torch.bmm(theta.permute(0, 2, 1), phi)\n attn = self.softmax(attn)\n # g path\n g = self.snconv1x1_g(x)\n g = self.maxpool(g)\n g = g.view(-1, ch//2, h*w//4)\n # Attn_g\n attn_g = torch.bmm(g, attn.permute(0, 2, 1))\n attn_g = attn_g.view(-1, ch//2, h, w)\n attn_g = self.snconv1x1_attn(attn_g)\n # Out\n out = x + self.sigma*attn_g\n return out\n" ]
[ [ "torch.nn.Linear", "torch.zeros", "torch.nn.Softmax", "torch.nn.MaxPool2d", "torch.nn.init.xavier_uniform_", "torch.nn.Conv2d", "torch.nn.Embedding" ] ]
jmuhlich/gr_metrics
[ "fb6175b08d036e83aa97a1fdc435250a1b6029a2" ]
[ "SRC/python/examples/plot_toy_example.py" ]
[ "import csv\nimport numpy as np\nimport pandas as pd\nimport os.path as path\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport gr50\n\n# Read the data file.\nbase_path = path.join(path.dirname(path.abspath(__file__)), '..', '..', '..')\ninput_path = path.join(base_path, 'OUTPUT', 'toy_example_output.tsv')\ndf = pd.read_csv(input_path, delimiter='\\t')\n\n# Filter down to only a manageable subset of the experiments.\nfilters = (('time', 72), ('perturbation', 0), ('replicate', 1))\nfor column, value in filters:\n df = df[df[column] == value]\n del df[column]\n\n# Compute the GR metrics from the data.\ngr_metrics = gr50.gr_metrics(df)\n\n# Produce a trellis plot showing the fitted curves and some of the metrics\n# across the different cell lines and drugs.\nsns.set(style=\"ticks\")\ngrid = sns.FacetGrid(df, row=\"cell_line\", col=\"agent\", margin_titles=True)\ngrid.set(xscale=\"log\")\ngrid.map(plt.plot, \"concentration\", \"GRvalue\", lw=0, marker='o', ms=4)\nx_min = df.concentration.min() / 10\nx_max = df.concentration.max() * 10\nfit_x = np.logspace(np.log10(x_min), np.log10(x_max))\nfor cell_line, row_axes in zip(grid.row_names, grid.axes):\n for agent, ax in zip(grid.col_names, row_axes):\n for m in gr_metrics[(gr_metrics.agent == agent) &\n (gr_metrics.cell_line == cell_line)].itertuples():\n fit_y = gr50.logistic(fit_x, [m.GRinf, np.log10(m.GEC50), m.h_GR])\n ax.hlines(0, x_min, x_max, '#707070', lw=0.5)\n ax.hlines(m.GRinf, x_min, x_max, '#ff00ff', linestyles='dashed',\n lw=0.5)\n ax.vlines(m.GEC50, -1, 1, 'b', linestyles='dashed', lw=0.5)\n ax.vlines(m.GR50, -1, 1, 'g', linestyles='dashed', lw=0.5)\n ax.plot(fit_x, fit_y, 'r', lw=1)\ngrid.set(ylim=(-1, 1.1))\ngrid.fig.tight_layout(w_pad=1)\nplt.show()\n" ]
[ [ "numpy.log10", "matplotlib.pyplot.show", "pandas.read_csv" ] ]
rdn86/tensorflow
[ "adefac94c838c2c353ca62f8cc23dbb39a8cf6cd" ]
[ "tensorflow/contrib/gan/python/estimator/__init__.py" ]
[ "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TF-GAN estimator module.\n\nGANEstimator provides all the infrastructure support of a TensorFlow Estimator\nwith the feature support of TF-GAN.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Collapse `estimator` into a single namespace.\n# pylint: disable=unused-import,wildcard-import\nfrom tensorflow.contrib.gan.python.estimator.python import gan_estimator\nfrom tensorflow.contrib.gan.python.estimator.python import head\nfrom tensorflow.contrib.gan.python.estimator.python import stargan_estimator\nfrom tensorflow.contrib.gan.python.estimator.python import tpu_gan_estimator\n\nfrom tensorflow.contrib.gan.python.estimator.python.gan_estimator import *\nfrom tensorflow.contrib.gan.python.estimator.python.head import *\nfrom tensorflow.contrib.gan.python.estimator.python.stargan_estimator import *\nfrom tensorflow.contrib.gan.python.estimator.python.tpu_gan_estimator import *\n# pylint: enable=unused-import,wildcard-import\n\nfrom tensorflow.python.util.all_util import remove_undocumented\n\n_allowed_symbols = ([\n 'gan_estimator',\n 'stargan_estimator',\n 'tpu_gan_estimator',\n 'head',\n] + gan_estimator.__all__ + stargan_estimator.__all__ + head.__all__ +\n tpu_gan_estimator.__all__)\nremove_undocumented(__name__, _allowed_symbols)\n" ]
[ [ "tensorflow.python.util.all_util.remove_undocumented" ] ]
cutz-j/ACCV
[ "3d20bccef0de85e667c5b42d4837e2fe3373e27b" ]
[ "critic/resnet_generator.py" ]
[ "\"\"\"\nStarGAN v2\nCopyright (c) 2020-present NAVER Corp.\n\nThis work is licensed under the Creative Commons Attribution-NonCommercial\n4.0 International License. To view a copy of this license, visit\nhttp://creativecommons.org/licenses/by-nc/4.0/ or send a letter to\nCreative Commons, PO Box 1866, Mountain View, CA 94042, USA.\n\"\"\"\n\nimport copy\nimport math\n\nfrom munch import Munch\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ResBlk(nn.Module):\n def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2),\n normalize=False, downsample=False):\n super().__init__()\n self.actv = actv\n self.normalize = normalize\n self.downsample = downsample\n self.learned_sc = dim_in != dim_out\n self._build_weights(dim_in, dim_out)\n\n def _build_weights(self, dim_in, dim_out):\n self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)\n self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)\n if self.normalize:\n self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)\n self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)\n if self.learned_sc:\n self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)\n\n def _shortcut(self, x):\n if self.learned_sc:\n x = self.conv1x1(x)\n if self.downsample:\n x = F.avg_pool2d(x, 2)\n return x\n\n def _residual(self, x):\n if self.normalize:\n x = self.norm1(x)\n x = self.actv(x)\n x = self.conv1(x)\n if self.downsample:\n x = F.avg_pool2d(x, 2)\n if self.normalize:\n x = self.norm2(x)\n x = self.actv(x)\n x = self.conv2(x)\n return x\n\n def forward(self, x):\n x = self._shortcut(x) + self._residual(x)\n return x / math.sqrt(2) # unit variance\n\n\nclass AdaIN(nn.Module):\n def __init__(self, style_dim, num_features):\n super().__init__()\n self.norm = nn.InstanceNorm2d(num_features, affine=False)\n self.fc = nn.Linear(style_dim, num_features*2)\n\n def forward(self, x, s):\n h = self.fc(s)\n h = h.view(h.size(0), h.size(1), 1, 1)\n gamma, beta = torch.chunk(h, chunks=2, dim=1)\n return (1 + gamma) * self.norm(x) + beta\n\n### Motion Encoder로 주입할 때 사용할 것 ##\nclass AdainResBlk(nn.Module):\n def __init__(self, dim_in, dim_out, style_dim=64, w_hpf=0,\n actv=nn.LeakyReLU(0.2), upsample=False):\n super().__init__()\n self.w_hpf = w_hpf\n self.actv = actv\n self.upsample = upsample\n self.learned_sc = dim_in != dim_out\n self._build_weights(dim_in, dim_out, style_dim)\n\n def _build_weights(self, dim_in, dim_out, style_dim=64):\n self.conv1 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)\n self.conv2 = nn.Conv2d(dim_out, dim_out, 3, 1, 1)\n self.norm1 = nn.InstanceNorm2d(dim_in, affine=False)\n self.norm2 = nn.InstanceNorm2d(dim_out, affine=False)\n if self.learned_sc:\n self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)\n\n def _shortcut(self, x):\n if self.upsample:\n x = F.interpolate(x, scale_factor=2, mode='nearest')\n if self.learned_sc:\n x = self.conv1x1(x)\n return x\n\n def _residual(self, x):\n x = self.norm1(x)\n x = self.actv(x)\n if self.upsample:\n x = F.interpolate(x, scale_factor=2, mode='nearest')\n x = self.conv1(x)\n x = self.norm2(x)\n x = self.actv(x)\n x = self.conv2(x)\n return x\n\n def forward(self, x):\n out = self._residual(x)\n if self.w_hpf == 0:\n out = (out + self._shortcut(x)) / math.sqrt(2)\n return out\n\n\nclass Generator(nn.Module):\n # based on resnet\n def __init__(self, args, img_size=600, max_conv_dim=512):\n super().__init__()\n dim_in = 2**14 // img_size # ?\n self.img_size = img_size\n self.from_rgb = nn.Conv2d(3, dim_in, 3, 1, 1)\n self.encode = nn.ModuleList()\n 
self.decode = nn.ModuleList()\n self.to_rgb = nn.Sequential(\n nn.InstanceNorm2d(dim_in, affine=True),\n nn.LeakyReLU(0.2),\n nn.Conv2d(dim_in, 3, 1, 1, 0),\n nn.Tanh(),)\n\n # down/up-sampling blocks\n repeat_num = 3\n for _ in range(repeat_num):\n dim_out = min(dim_in*2, max_conv_dim)\n self.encode.append(ResBlk(dim_in, dim_out, normalize=True, downsample=True))\n self.decode.insert(0, AdainResBlk(dim_out, dim_in, upsample=True))\n\n dim_in = dim_out\n\n # bottleneck blocks\n for _ in range(2):\n self.encode.append(ResBlk(dim_out, dim_out, normalize=True))\n self.decode.insert(0, AdainResBlk(dim_out, dim_out))\n\n def forward(self, x):\n x = self.from_rgb(x)\n for block in self.encode:\n x = block(x)\n for block in self.decode:\n x = block(x)\n return self.to_rgb(x)" ]
[ [ "torch.nn.Linear", "torch.nn.functional.avg_pool2d", "torch.nn.ModuleList", "torch.nn.Tanh", "torch.nn.LeakyReLU", "torch.nn.functional.interpolate", "torch.nn.Conv2d", "torch.nn.InstanceNorm2d", "torch.chunk" ] ]
cristipp/decaNLP
[ "d776d9f600c127a0a3e28c85960cf0ea615e8f63" ]
[ "models/self_attentive_pointer_generator.py" ]
[ "import os\nimport math\nimport numpy as np\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\n\nfrom .common import positional_encodings_like, INF, EPSILON, TransformerEncoder, TransformerDecoder, PackedLSTM, LSTMDecoderAttention, LSTMDecoder, Embedding, Feedforward, mask\n\n\nclass SelfAttentivePointerGenerator(nn.Module):\n \n def __init__(self, field, args):\n super().__init__()\n self.field = field\n self.args = args\n self.pad_idx = self.field.vocab.stoi[self.field.pad_token]\n\n self.encoder_embeddings = Embedding(field, args.dimension, \n dropout=args.dropout_ratio)\n self.decoder_embeddings = Embedding(field, args.dimension, \n dropout=args.dropout_ratio)\n\n \n self.bilstm_before_coattention = PackedLSTM(args.dimension, args.dimension,\n batch_first=True, dropout=args.dropout_ratio, bidirectional=True, num_layers=1)\n dim = args.dimension + args.dimension\n\n self.context_bilstm_after_coattention = PackedLSTM(dim, args.dimension,\n batch_first=True, dropout=args.dropout_ratio, bidirectional=True, \n num_layers=args.rnn_layers)\n self.self_attentive_encoder_context = TransformerEncoder(args.dimension, args.transformer_heads, args.transformer_hidden, args.transformer_layers, args.dropout_ratio)\n self.bilstm_context = PackedLSTM(args.dimension, args.dimension,\n batch_first=True, dropout=args.dropout_ratio, bidirectional=True, \n num_layers=args.rnn_layers)\n\n self.self_attentive_decoder = TransformerDecoder(args.dimension, args.transformer_heads, args.transformer_hidden, args.transformer_layers, args.dropout_ratio)\n self.dual_ptr_rnn_decoder = DualPtrRNNDecoder(args.dimension, args.dimension,\n dropout=args.dropout_ratio, num_layers=args.rnn_layers)\n\n self.generative_vocab_size = min(len(field.vocab), args.max_generative_vocab)\n self.out = nn.Linear(args.dimension, self.generative_vocab_size)\n\n self.dropout = nn.Dropout(0.4)\n\n def set_embeddings(self, embeddings):\n self.encoder_embeddings.set_embeddings(embeddings)\n self.decoder_embeddings.set_embeddings(embeddings)\n\n\n def forward(self, batch):\n context, context_lengths, context_limited = batch.context_question, batch.context_question_lengths, batch.context_question_limited\n answer, answer_lengths, answer_limited = batch.answer, batch.answer_lengths, batch.answer_limited\n oov_to_limited_idx, limited_idx_to_full_idx = batch.oov_to_limited_idx, batch.limited_idx_to_full_idx\n\n def map_to_full(x):\n return limited_idx_to_full_idx[x]\n self.map_to_full = map_to_full\n\n context_embedded = self.encoder_embeddings(context)\n\n context_encoded = self.bilstm_before_coattention(context_embedded, context_lengths)[0]\n\n context_padding = context.data == self.pad_idx\n\n context_summary = torch.cat([context_encoded, context_embedded], -1)\n condensed_context, _ = self.context_bilstm_after_coattention(context_summary, context_lengths)\n self_attended_context = self.self_attentive_encoder_context(condensed_context, padding=context_padding)\n final_context, (context_rnn_h, context_rnn_c) = self.bilstm_context(self_attended_context[-1], context_lengths)\n context_rnn_state = [self.reshape_rnn_state(x) for x in (context_rnn_h, context_rnn_c)]\n\n context_indices = context_limited if context_limited is not None else context\n answer_indices = answer_limited if answer_limited is not None else answer\n\n pad_idx = self.field.decoder_stoi[self.field.pad_token]\n context_padding = context_indices.data == pad_idx\n\n 
self.dual_ptr_rnn_decoder.applyMasks(context_padding)\n\n if self.training:\n answer_padding = answer_indices.data == pad_idx\n answer_embedded = self.decoder_embeddings(answer)\n self_attended_decoded = self.self_attentive_decoder(answer_embedded[:, :-1].contiguous(), self_attended_context, context_padding=context_padding, answer_padding=answer_padding[:, :-1], positional_encodings=True)\n decoder_outputs = self.dual_ptr_rnn_decoder(self_attended_decoded, \n final_context, hidden=context_rnn_state)\n rnn_output, context_attention, context_alignment, vocab_pointer_switch, rnn_state = decoder_outputs\n\n probs = self.probs(self.out, rnn_output, vocab_pointer_switch, \n context_attention, \n context_indices, \n oov_to_limited_idx)\n\n probs, targets = mask(answer_indices[:, 1:].contiguous(), probs.contiguous(), pad_idx=pad_idx)\n loss = F.nll_loss(probs.log(), targets)\n return loss, None\n else:\n return None, self.greedy(self_attended_context, final_context, \n context_indices,\n oov_to_limited_idx, rnn_state=context_rnn_state).data\n \n def reshape_rnn_state(self, h):\n return h.view(h.size(0) // 2, 2, h.size(1), h.size(2)) \\\n .transpose(1, 2).contiguous() \\\n .view(h.size(0) // 2, h.size(1), h.size(2) * 2).contiguous()\n\n def probs(self, generator, outputs, vocab_pointer_switches, \n context_attention, \n context_indices, \n oov_to_limited_idx):\n\n\n size = list(outputs.size())\n\n size[-1] = self.generative_vocab_size\n scores = generator(outputs.view(-1, outputs.size(-1))).view(size)\n p_vocab = F.softmax(scores, dim=scores.dim()-1)\n scaled_p_vocab = vocab_pointer_switches.expand_as(p_vocab) * p_vocab\n\n effective_vocab_size = self.generative_vocab_size + len(oov_to_limited_idx)\n if self.generative_vocab_size < effective_vocab_size:\n size[-1] = effective_vocab_size - self.generative_vocab_size\n buff = Variable(scaled_p_vocab.data.new(*size).fill_(EPSILON))\n scaled_p_vocab = torch.cat([scaled_p_vocab, buff], dim=buff.dim()-1)\n\n p_context_ptr = Variable(scaled_p_vocab.data.new(*scaled_p_vocab.size()).fill_(EPSILON))\n p_context_ptr.scatter_add_(p_context_ptr.dim()-1, context_indices.unsqueeze(1).expand_as(context_attention), context_attention)\n scaled_p_context_ptr = (1 - vocab_pointer_switches).expand_as(p_context_ptr) * p_context_ptr\n\n probs = scaled_p_vocab + scaled_p_context_ptr\n return probs\n\n\n def greedy(self, self_attended_context, context, context_indices, oov_to_limited_idx, rnn_state=None):\n B, TC, C = context.size()\n T = self.args.max_output_length\n outs = Variable(context.data.new(B, T).long().fill_(\n self.field.decoder_stoi['<pad>']), volatile=True)\n hiddens = [Variable(self_attended_context[0].data.new(B, T, C).zero_(), volatile=True)\n for l in range(len(self.self_attentive_decoder.layers) + 1)]\n hiddens[0] = hiddens[0] + positional_encodings_like(hiddens[0])\n eos_yet = context.data.new(B).byte().zero_()\n\n rnn_output, context_alignment = None, None\n for t in range(T):\n if t == 0:\n embedding = self.decoder_embeddings(Variable(\n self_attended_context[-1].data.new(B).long().fill_(\n self.field.vocab.stoi['<init>']), volatile=True).unsqueeze(1), [1]*B)\n else:\n embedding = self.decoder_embeddings(outs[:, t - 1].unsqueeze(1), [1]*B)\n hiddens[0][:, t] = hiddens[0][:, t] + (math.sqrt(self.self_attentive_decoder.d_model) * embedding).squeeze(1)\n for l in range(len(self.self_attentive_decoder.layers)):\n hiddens[l + 1][:, t] = self.self_attentive_decoder.layers[l].feedforward(\n self.self_attentive_decoder.layers[l].attention(\n 
self.self_attentive_decoder.layers[l].selfattn(hiddens[l][:, t], hiddens[l][:, :t + 1], hiddens[l][:, :t + 1])\n , self_attended_context[l], self_attended_context[l]))\n decoder_outputs = self.dual_ptr_rnn_decoder(hiddens[-1][:, t].unsqueeze(1),\n context, \n context_alignment=context_alignment,\n hidden=rnn_state, output=rnn_output)\n\n rnn_output, context_attention, context_alignment, vocab_pointer_switch, rnn_state = decoder_outputs\n probs = self.probs(self.out, rnn_output, vocab_pointer_switch, \n context_attention, \n context_indices, \n oov_to_limited_idx)\n pred_probs, preds = probs.max(-1)\n eos_yet = eos_yet | (preds.data == self.field.decoder_stoi['<eos>'])\n outs[:, t] = Variable(preds.data.cpu().apply_(self.map_to_full), volatile=True)\n if eos_yet.all():\n break\n return outs\n\n\nclass CoattentiveLayer(nn.Module):\n\n def __init__(self, d, dropout=0.2):\n super().__init__()\n self.proj = Feedforward(d, d, dropout=0.0)\n self.embed_sentinel = nn.Embedding(2, d)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, context, question, context_padding, question_padding): \n context_padding = torch.cat([context.data.new(context.size(0)).long().fill_(0).unsqueeze(1).long()==1, context_padding], 1)\n question_padding = torch.cat([question.data.new(question.size(0)).long().fill_(0).unsqueeze(1)==1, question_padding], 1)\n\n context_sentinel = self.embed_sentinel(Variable(context.data.new(context.size(0)).long().fill_(0)))\n context = torch.cat([context_sentinel.unsqueeze(1), self.dropout(context)], 1) # batch_size x (context_length + 1) x features\n\n question_sentinel = self.embed_sentinel(Variable(question.data.new(question.size(0)).long().fill_(1)))\n question = torch.cat([question_sentinel.unsqueeze(1), question], 1) # batch_size x (question_length + 1) x features\n question = F.tanh(self.proj(question)) # batch_size x (question_length + 1) x features\n\n affinity = context.bmm(question.transpose(1,2)) # batch_size x (context_length + 1) x (question_length + 1)\n attn_over_context = self.normalize(affinity, context_padding) # batch_size x (context_length + 1) x 1\n attn_over_question = self.normalize(affinity.transpose(1,2), question_padding) # batch_size x (question_length + 1) x 1\n sum_of_context = self.attn(attn_over_context, context) # batch_size x (question_length + 1) x features\n sum_of_question = self.attn(attn_over_question, question) # batch_size x (context_length + 1) x features\n coattn_context = self.attn(attn_over_question, sum_of_context) # batch_size x (context_length + 1) x features\n return torch.cat([coattn_context, sum_of_question], 2)[:, 1:]\n\n @staticmethod\n def attn(weights, candidates):\n w1, w2, w3 = weights.size()\n c1, c2, c3 = candidates.size()\n return weights.unsqueeze(3).expand(w1, w2, w3, c3).mul(candidates.unsqueeze(2).expand(c1, c2, w3, c3)).sum(1).squeeze(1)\n\n @staticmethod\n def normalize(original, padding):\n raw_scores = original.clone()\n raw_scores.data.masked_fill_(padding.unsqueeze(-1).expand_as(raw_scores), -INF)\n return F.softmax(raw_scores, dim=1)\n\nclass DualPtrRNNDecoder(nn.Module):\n\n def __init__(self, d_in, d_hid, dropout=0.0, num_layers=1):\n super().__init__()\n self.d_hid = d_hid\n self.d_in = d_in\n self.num_layers = num_layers\n self.dropout = nn.Dropout(dropout)\n\n self.input_feed = True\n if self.input_feed:\n d_in += 1 * d_hid\n\n self.rnn = LSTMDecoder(self.num_layers, d_in, d_hid, dropout)\n self.context_attn = LSTMDecoderAttention(d_hid, dot=True)\n\n self.vocab_pointer_switch = 
nn.Sequential(Feedforward(2 * self.d_hid + d_in, 1), nn.Sigmoid())\n\n def forward(self, input, context, output=None, hidden=None, context_alignment=None):\n context_output = output.squeeze(1) if output is not None else self.make_init_output(context)\n context_alignment = context_alignment if context_alignment is not None else self.make_init_output(context)\n\n context_outputs, vocab_pointer_switches, context_attentions, context_alignments = [], [], [], []\n for emb_t in input.split(1, dim=1):\n emb_t = emb_t.squeeze(1)\n context_output = self.dropout(context_output)\n if self.input_feed:\n emb_t = torch.cat([emb_t, context_output], 1)\n dec_state, hidden = self.rnn(emb_t, hidden)\n context_output, context_attention, context_alignment = self.context_attn(dec_state, context)\n vocab_pointer_switch = self.vocab_pointer_switch(torch.cat([dec_state, context_output, emb_t], -1))\n context_output = self.dropout(context_output)\n context_outputs.append(context_output)\n vocab_pointer_switches.append(vocab_pointer_switch)\n context_attentions.append(context_attention)\n context_alignments.append(context_alignment)\n context_outputs, vocab_pointer_switches, context_attention = [self.package_outputs(x) for x in [context_outputs, vocab_pointer_switches, context_attentions]]\n return context_outputs, context_attention, context_alignment, vocab_pointer_switches, hidden\n\n\n def applyMasks(self, context_mask):\n self.context_attn.applyMasks(context_mask)\n\n def make_init_output(self, context):\n batch_size = context.size(0)\n h_size = (batch_size, self.d_hid)\n return Variable(context.data.new(*h_size).zero_(), requires_grad=False)\n\n def package_outputs(self, outputs):\n outputs = torch.stack(outputs, dim=1)\n return outputs\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.cat", "torch.stack", "torch.nn.Sigmoid", "torch.nn.functional.softmax", "torch.nn.Embedding" ] ]
nicholasturner1/lecun1989-repro
[ "6931f1737964ba9e0da11411f3c91ab7c6aa4d3d" ]
[ "scripts/modern.py" ]
[ "\"\"\"\n\nrepro.py gives:\n23\neval: split train. loss 4.073383e-03. error 0.62%. misses: 45\neval: split test . loss 2.838382e-02. error 4.09%. misses: 82\n\nwe can try to use our knowledge from 33 years later to improve on this,\nbut keeping the model size same.\n\nChange 1: replace tanh on last layer with FC and use softmax. Had to\nlower the learning rate to 0.01 as well. This improves the optimization\nquite a lot, we now crush the training set:\n23\neval: split train. loss 9.536698e-06. error 0.00%. misses: 0\neval: split test . loss 9.536698e-06. error 4.38%. misses: 87\n\nChange 2: change from SGD to AdamW with LR 3e-4 because I find this\nto be significantly more stable and requires little to no tuning. Also\ndouble epochs to 46. I decay the LR to 1e-4 over course of training.\nThese changes make it so optimization is not culprit of bad performance\nwith high probability. We also seem to improve test set a bit:\n46\neval: split train. loss 0.000000e+00. error 0.00%. misses: 0\neval: split test . loss 0.000000e+00. error 3.59%. misses: 72\n\nChange 3: since we are overfitting we can introduce data augmentation,\ne.g. let's intro a shift by at most 1 pixel in both x/y directions. Also\nbecause we are augmenting we again want to bump up training time, e.g.\nto 60 epochs:\n60\neval: split train. loss 8.780676e-04. error 1.70%. misses: 123\neval: split test . loss 8.780676e-04. error 2.19%. misses: 43\n\nChange 4: we want to add dropout at the layer with most parameters (H3),\nbut in addition we also have to shift the activation function to relu so\nthat dropout makes sense. We also bring up iterations to 80:\n80\neval: split train. loss 2.601336e-03. error 1.47%. misses: 106\neval: split test . loss 2.601336e-03. error 1.59%. misses: 32\n\nTo be continued...\n\"\"\"\n\nimport os\nimport json\nimport argparse\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom tensorboardX import SummaryWriter # pip install tensorboardX\n\nfrom lecun1989repro import models\n\n\ndef main(learning_rate: float, output_dir: str) -> None:\n # init rng\n torch.manual_seed(1337)\n np.random.seed(1337)\n torch.use_deterministic_algorithms(True)\n\n # set up logging\n os.makedirs(args.output_dir, exist_ok=True)\n with open(os.path.join(args.output_dir, \"args.json\"), \"w\") as f:\n json.dump(vars(args), f, indent=2)\n writer = SummaryWriter(args.output_dir)\n\n # init a model\n model = models.ModernNet()\n print(\"model stats:\")\n print(\n \"# params: \", sum(p.numel() for p in model.parameters())\n ) # in paper total is 9,760\n print(\"# MACs: \", model.macs)\n print(\"# activations: \", model.acts)\n\n # init data\n Xtr, Ytr = torch.load(\"train1989.pt\")\n Xte, Yte = torch.load(\"test1989.pt\")\n\n # init optimizer\n optimizer = optim.AdamW(model.parameters(), lr=args.learning_rate)\n\n def eval_split(split):\n # eval the full train/test set, batched implementation for efficiency\n model.eval()\n X, Y = (Xtr, Ytr) if split == \"train\" else (Xte, Yte)\n Yhat = model(X)\n loss = F.cross_entropy(yhat, y.argmax(dim=1))\n err = torch.mean((Y.argmax(dim=1) != Yhat.argmax(dim=1)).float())\n print(\n f\"eval: split {split:5s}.\"\n f\" loss {loss.item():e}.\"\n f\" error {err.item()*100:.2f}%.\"\n f\" misses: {int(err.item()*Y.size(0))}\"\n )\n writer.add_scalar(f\"error/{split}\", err.item() * 100, pass_num)\n writer.add_scalar(f\"loss/{split}\", loss.item(), pass_num)\n\n # train\n for pass_num in range(80):\n\n # learning rate decay\n alpha = pass_num / 
79\n for g in optimizer.param_groups:\n g[\"lr\"] = (1 - alpha) * args.learning_rate + alpha * (\n args.learning_rate / 3\n )\n\n # perform one epoch of training\n model.train()\n for step_num in range(Xtr.size(0)):\n\n # fetch a single example into a batch of 1\n x, y = Xtr[[step_num]], Ytr[[step_num]]\n\n # forward the model and the loss\n yhat = model(x)\n loss = F.cross_entropy(yhat, y.argmax(dim=1))\n\n # calculate the gradient and update the parameters\n optimizer.zero_grad(set_to_none=True)\n loss.backward()\n optimizer.step()\n\n # after epoch epoch evaluate the train and test error / metrics\n print(pass_num + 1)\n eval_split(\"train\")\n eval_split(\"test\")\n\n # save final model to file\n torch.save(model.state_dict(), os.path.join(args.output_dir, \"model.pt\"))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Train a 2022 but mini ConvNet on digits\"\n )\n\n parser.add_argument(\n \"--learning-rate\", \"-l\", type=float, default=3e-4, help=\"Learning rate\"\n )\n parser.add_argument(\n \"--output-dir\",\n \"-o\",\n type=str,\n default=\"out/modern\",\n help=\"output directory for training logs\",\n )\n\n args = parser.parse_args()\n\n print(vars(args))\n main(**vars(args))\n" ]
[ [ "torch.manual_seed", "torch.load", "torch.use_deterministic_algorithms", "numpy.random.seed" ] ]
rassouly/exopy_hqc_legacy
[ "9c26ef65950400b20c43695b54c7dab744e3133f" ]
[ "exopy_hqc_legacy/instruments/drivers/visa/agilent_psa.py" ]
[ "# -*- coding: utf-8 -*-\n#==============================================================================\n# module : agilent_psa.py\n# author : Benjamin Huard\n# license : MIT license\n#==============================================================================\n\"\"\"\nThis module defines drivers for agilent PSA.\n\n:Contains:\n SpecDescriptor\n AgilentPSA\n\n\"\"\"\nfrom inspect import cleandoc\nimport numpy as np\nfrom ..driver_tools import (InstrIOError, secure_communication,\n instrument_property)\nfrom ..visa_tools import VisaInstrument\n\n\nDATA_FORMATTING_DICT = {'raw I/Q data': 0,\n 'descriptor': 1,\n '(I,Q) vs time': 3,\n 'log(mag) vs freq': 4,\n 'average of log(mag) vs freq': 7,\n 'mag vs freq in Vrms': 11,\n 'average of mag vs freq in Vrms': 12}\n\n\nclass SpecDescriptor():\n def __init__(self):\n self.initialized = False\n self.FFTpeak = 0\n self.FFTfreq = 0\n self.FFTnbrSteps = 2\n self.Firstfreq = 0\n self.Freqstep = 0\n self.TimenbrSteps = 2\n self.firsttime = 0\n self.TimeStep = 0.1\n self.timedomaincheck = 1\n self.totaltime = 1.0\n self.averagenbr = 1\n\n\nclass AgilentPSA(VisaInstrument):\n \"\"\"\n \"\"\"\n caching_permissions = {'start_frequency_SA': False,\n 'stop_frequency_SA': False,\n 'mode': False}\n\n def __init__(self, connection_info, caching_allowed=True,\n caching_permissions={}, auto_open=True):\n super(AgilentPSA, self).__init__(connection_info,\n caching_allowed,\n caching_permissions,\n auto_open)\n self.write(\"ROSC:SOURCE EXT\") # 10 MHz clock bandwidth external\n self.write(\"ROSC:OUTP ON\") # 10 MHz clock bandwidth internal ON\n self.write(\"FORM:DATA ASCii\") # lots of data must be read in\n # ASCii format\n self.write(\"FORM:BORD NORMAL\") # (TO CHECK)\n self.mode = self.mode # initialize PSA properly if SPEC or WAV mode\n self.spec_header = SpecDescriptor()\n\n @secure_communication(2)\n def get_spec_header(self):\n \"\"\"\n \"\"\"\n if self.mode == 'SPEC':\n answer = self.query_ascii_values(\"FETCH:SPEC1?\")\n if answer:\n self.spec_header.initialized = True\n self.spec_header.FFTpeak = answer[0]\n self.spec_header.FFTfreq = answer[1]/1e9\n self.spec_header.FFTnbrSteps = answer[2]\n self.spec_header.Firstfreq = answer[3]/1e9\n self.spec_header.Freqstep = answer[4]/1e9\n self.spec_header.TimenbrSteps = answer[5]\n self.spec_header.firsttime = answer[6]\n self.spec_header.TimeStep = answer[7]\n self.spec_header.timedomaincheck = answer[8]\n self.spec_header.totaltime = answer[9]\n self.spec_header.averagenbr = answer[10]\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return its\n mode'''))\n else:\n raise '''PSA is not in Spectrum mode'''\n\n @secure_communication()\n def read_data(self, trace):\n \"\"\"\n \"\"\"\n DATA_FORMAT = ['raw I/Q data', 'descriptor', '0', '(I,Q) vs time',\n 'log(mag) vs freq', '0', '0',\n 'average of log(mag) vs freq', '0', '0', '0',\n 'mag vs freq in Vrms', 'average of mag vs freq in Vrms']\n if self.mode == 'SA':\n\n # must be read in ASCii format\n self.write(\"FORM:DATA ASCii\")\n # stop all the measurements\n self.write(\":ABORT\")\n # go to the \"Single sweep\" mode\n self.write(\":INIT:CONT OFF\")\n # initiate measurement\n self.write(\":INIT\")\n\n #\n self.query(\"SWEEP:TIME?\")\n\n self.write(\"*WAI\") # SA waits until the averaging is done\n # Loop to see when the averaging is done\n while True:\n try:\n self.query(\"SWEEP:TIME?\")\n break\n except:\n pass\n\n data = self.query_ascii_values('trace? 
trace{}'.format(trace))\n\n if data:\n freq = np.linspace(self.start_frequency_SA,\n self.stop_frequency_SA,\n self.sweep_points_SA)\n return np.rec.fromarrays([freq, np.array(data)],\n names=['Frequency',\n DATA_FORMAT[trace]])\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return the\n trace {} data'''.format(trace)))\n\n elif self.mode == 'SPEC':\n self.get_spec_header()\n self.write(\"INIT:IMM;*WAI\") # start the acquisition and wait until\n # over\n # Check how *OPC? works\n self.query(\"*OPC?\")\n data = self.query_ascii_values(\"FETCH:SPEC{}?\".format(trace))\n if data:\n if trace in (4, 7, 11, 12):\n header = self.spec_header\n stop = header.Firstfreq +\\\n header.Freqstep*(header.FFTnbrSteps-1)\n freq = np.linspace(header.Firstfreq, stop,\n header.FFTnbrSteps)\n return np.rec.fromarrays([freq, np.array(data)],\n names=['Freq',\n DATA_FORMAT[trace]])\n elif trace in (0, 3):\n header = self.spec_header\n stop = header.firsttime +\\\n header.TimeStep*(header.TimenbrSteps-1)\n freq = np.linspace(header.firsttime, stop,\n header.TimenbrSteps)\n return np.rec.fromarrays([freq, np.array(data)],\n names=['Time',\n DATA_FORMAT[trace]])\n else:\n raise InstrIOError(cleandoc('''Wrong parameters for trace\n in Agilent E4440'''))\n\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return the\n trace data'''))\n else:\n self.get_spec_header()\n self.write(\"INIT:IMM;*WAI\") # start the acquisition and wait until\n # over\n #Check how *OPC? works\n self.query(\"*OPC?\")\n\n # this will get the (I,Q) as a function of freq\n data = self.query_ascii_values(\"FETCH:WAV0?\")\n if data:\n return np.rec.fromarrays([data[::2], data[1::2]],\n 'Q', 'I')\n # one should get all the even indices (Q)\n # and odd indices (I) separately\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return the\n trace data'''))\n\n @instrument_property\n @secure_communication()\n def mode(self):\n \"\"\"\n \"\"\"\n SAorBASIC = self.query('inst:sel?')\n if SAorBASIC == 'SA':\n return 'SA'\n elif SAorBASIC == 'BASIC':\n conf = self.query('conf?')\n if conf:\n return conf # SPEC if basic mode with spectral density\n # or WAV if basic mode with time domain\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return its\n mode'''))\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return its\n mode'''))\n\n @mode.setter\n @secure_communication()\n def mode(self, value):\n \"\"\"\n \"\"\"\n if value == 'SPEC':\n self.write('INST:SEL BASIC')\n self.write('CONF:SPECTRUM')\n self.write(\"INIT:CONT ON\") # set in continuous mode\n self.write(\"SENS:SPEC:IFP WIDE\") # set the wide bandWidth 80MHz\n # for spectrum\n self.write(\"SENS:SPEC:AVER OFF\") # set the average off\n # for spectrum\n self.write(\"INIT:CONT OFF\") # set in single sweep mode\n self.write(\"INIT:IMM\")\n elif value == \"WAV\":\n self.write('INST:SEL BASIC')\n self.write('CONF:WAV')\n self.write(\"SENS:WAV:IFP WIDE\") # set the wide bandWidth 80MHz\n # for timedomain\n self.write(\"SENS:WAV:AVER OFF\") # set the average off\n # for timedomain\n self.write(\"SENS:WAV:ADC:DITHER OFF\") # dither signal off\n self.write(\"INIT:CONT OFF\") # set in single sweep mode\n self.write(\"INIT:IMM\")\n else:\n self.write('INST:SEL SA')\n\n @instrument_property\n @secure_communication()\n def start_frequency_SA(self):\n \"\"\"Start frequency getter method\n\n \"\"\"\n\n if self.mode == 'SA':\n freq = self.query('FREQ:STAR?')\n if freq:\n return float(freq)/1e9\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return 
the\n start frequency'''))\n elif self.mode == 'SPEC':\n if not self.spec_header.initialized:\n self.get_spec_header()\n\n return self.spec_header.Firstfreq\n\n else:\n raise '''PSA is not in the appropriate mode to get correctly the\n start frequency'''\n\n @start_frequency_SA.setter\n @secure_communication()\n def start_frequency_SA(self, value):\n \"\"\"Start frequency setter method\n \"\"\"\n if self.mode == 'SA':\n self.write('FREQ:STAR {} GHz'.format(value))\n result = self.query('FREQ:STAR?')\n if result:\n if abs(float(result)/1e9 - value)/value > 10**-12:\n raise InstrIOError(cleandoc('''PSA did not set correctly\n the start frequency'''))\n else:\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n start frequency'''))\n else:\n raise '''PSA is not in the appropriate mode to set correctly the\n start frequency'''\n\n @instrument_property\n @secure_communication()\n def stop_frequency_SA(self):\n \"\"\"Stop frequency getter method\n \"\"\"\n if self.mode == 'SA':\n freq = self.query('FREQ:STOP?')\n if freq:\n return float(freq)/1e9\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return the\n stop frequency'''))\n\n else:\n raise '''PSA is not in the appropriate mode to get correctly the\n stop frequency'''\n\n @stop_frequency_SA.setter\n @secure_communication()\n def stop_frequency_SA(self, value):\n \"\"\"Stop frequency setter method\n\n \"\"\"\n if self.mode == 'SA':\n self.write('FREQ:STOP {} GHz'.format(value))\n result = self.query('FREQ:STOP?')\n if result:\n if abs(float(result)/1e9 - value)/value > 10**-12:\n raise InstrIOError(cleandoc('''PSA did not set correctly\n the stop frequency'''))\n else:\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n stop frequency'''))\n else:\n raise '''PSA is not in the appropriate mode to set correctly the\n stop frequency'''\n\n @instrument_property\n @secure_communication()\n def center_frequency(self):\n \"\"\"Center frequency getter method\n\n \"\"\"\n\n freq = self.query('FREQ:CENT?')\n if freq:\n return float(freq)/1e9\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return the\n center frequency'''))\n\n @center_frequency.setter\n @secure_communication()\n def center_frequency(self, value):\n \"\"\"center frequency setter method\n\n \"\"\"\n\n self.write('FREQ:CENT {} GHz'.format(value))\n result = self.query('FREQ:CENT?')\n if result:\n if abs(float(result)/1e9 - value)/value > 10**-12:\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n center frequency'''))\n else:\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n center frequency'''))\n\n @instrument_property\n @secure_communication()\n def span_frequency(self):\n \"\"\"Span frequency getter method\n\n \"\"\"\n\n if self.mode == 'SPEC':\n freq = self.query('SENS:SPEC:FREQ:SPAN?')\n if freq:\n return float(freq)/1e9\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return the\n span frequency'''))\n elif self.mode == 'SA':\n freq = self.query('FREQ:SPAN?')\n if freq:\n return float(freq)/1e9\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return the\n span frequency'''))\n\n else:\n raise '''PSA is not in the appropriate mode to get correctly the\n span frequency'''\n\n @span_frequency.setter\n @secure_communication()\n def span_frequency(self, value):\n \"\"\"span frequency setter method\n \"\"\"\n if self.mode == 'SA':\n self.write('FREQ:SPAN {} GHz'.format(value))\n result = self.query('FREQ:SPAN?')\n if result:\n if abs(float(result)/1e9 - value)/value > 10**-12:\n raise 
InstrIOError(cleandoc('''PSA did not set correctly\n the span frequency'''))\n else:\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n span frequency'''))\n\n elif self.mode == 'SPEC':\n self.write('SENS:SPEC:FREQ:SPAN {} GHz'.format(value))\n result = self.query('SENS:SPEC:FREQ:SPAN?')\n if result:\n if abs(float(result)/1e9 - value)/value > 10**-12:\n raise InstrIOError(cleandoc('''PSA did not set correctly\n the span frequency'''))\n else:\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n span frequency'''))\n\n else:\n raise '''PSA is not in the appropriate mode to set correctly the\n span frequency'''\n\n @instrument_property\n @secure_communication()\n def sweep_time(self):\n \"\"\"Sweep time getter method\n \"\"\"\n\n if self.mode == 'WAV':\n sweep = self.query('SENS:WAV:SWEEP:TIME?')\n if sweep:\n return float(sweep)\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return the\n sweep time'''))\n elif self.mode == 'SA':\n sweep = self.query('SWEEP:TIME?')\n if sweep:\n return float(sweep)\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return the\n sweep time'''))\n else:\n raise '''PSA is not in the appropriate mode to get correctly the\n sweep time'''\n\n @sweep_time.setter\n @secure_communication()\n def sweep_time(self, value):\n \"\"\"sweep time setter method\n \"\"\"\n\n if self.mode == 'WAV':\n self.write('SENS:WAV:SWEEP:TIME {}'.format(value))\n result = self.query('SENS:WAV:SWEEP:TIME?')\n if result:\n if abs(float(result) - value)/value > 10**-12:\n raise InstrIOError(cleandoc('''PSA did not set correctly\n the sweep time'''))\n else:\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n sweep time'''))\n elif self.mode == 'SA':\n self.write('SWEEP:TIME {}'.format(value))\n result = self.query('SWEEP:TIME?')\n if result:\n if abs(float(result) - value)/value > 10**-12:\n raise InstrIOError(cleandoc('''PSA did not set correctly\n the sweep time'''))\n else:\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n sweep time'''))\n else:\n raise '''PSA is not in the appropriate mode to set correctly the\n sweep time'''\n\n @instrument_property\n @secure_communication()\n def RBW(self):\n \"\"\"\n \"\"\"\n if self.mode == 'WAV':\n rbw = self.query('SENS:WAV:BWIDTH?')\n if rbw:\n return float(rbw)\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return the\n RBW'''))\n elif self.mode == 'SPEC':\n rbw = self.query('SENS:SPEC:BWIDTH?')\n if rbw:\n return float(rbw)\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return the\n RBW'''))\n else:\n rbw = self.query('BWIDTH?')\n if rbw:\n return float(rbw)\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return the\n channel Resolution bandwidth'''))\n\n @RBW.setter\n @secure_communication()\n def RBW(self, value):\n \"\"\"\n \"\"\"\n if self.mode == 'WAV':\n self.write('SENS:WAV:BWIDTH {}'.format(value))\n result = self.query('SENS:WAV:BWIDTH?')\n if result:\n if abs(float(result) - value) > 10**-12:\n raise InstrIOError(cleandoc('''PSA did not set correctly\n the channel Resolution bandwidth'''))\n else:\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n channel Resolution bandwidth'''))\n\n elif self.mode == 'SPEC':\n self.write('SENS:SPEC:BWIDTH {}'.format(value))\n result = self.query('SENS:SPEC:BWIDTH?')\n if result:\n if abs(float(result) - value) > 10**-12:\n raise InstrIOError(cleandoc('''PSA did not set correctly\n the channel Resolution bandwidth'''))\n else:\n raise InstrIOError(cleandoc('''PSA did 
not set correctly the\n channel Resolution bandwidth'''))\n else:\n self.write('BAND {}'.format(value))\n result = self.query('BWIDTH?')\n if result:\n if abs(float(result) - value) > 10**-12:\n raise InstrIOError(cleandoc('''PSA did not set correctly\n the channel Resolution bandwidth'''))\n else:\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n channel Resolution bandwidth'''))\n\n @instrument_property\n @secure_communication()\n def VBW_SA(self):\n \"\"\"\n \"\"\"\n if self.mode == 'SA':\n\n vbw = self.query('BAND:VID?')\n if vbw:\n return float(vbw)\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return the\n channel Video bandwidth'''))\n else:\n raise '''PSA is not in the appropriate mode to set correctly the\n sweep time'''\n\n @VBW_SA.setter\n @secure_communication()\n def VBW_SA(self, value):\n \"\"\"\n \"\"\"\n if self.mode == 'WAV':\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n channel Resolution bandwidth'''))\n elif self.mode == 'SPEC':\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n channel Resolution bandwidth'''))\n else:\n self.write('BAND:VID {}'.format(value))\n result = self.query('BAND:VID?')\n if result:\n if abs(float(result) - value) > 10**-12:\n raise InstrIOError(cleandoc('''PSA did not set correctly\n the channel Video bandwidth'''))\n else:\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n channel Video bandwidth'''))\n\n @instrument_property\n @secure_communication()\n def sweep_points_SA(self):\n \"\"\"\n \"\"\"\n points = self.query('SENSe:SWEep:POINts?')\n if points:\n return int(points)\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return the\n sweep point number'''))\n\n @sweep_points_SA.setter\n @secure_communication()\n def sweep_points_SA(self, value):\n \"\"\"\n \"\"\"\n self.write('SENSe:SWEep:POINts {}'.format(value))\n result = self.query('SENSe:SWEep:POINts?')\n if result:\n if int(result) != value:\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n sweep point number'''))\n else:\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n sweep point number'''))\n\n @instrument_property\n @secure_communication()\n def average_count_SA(self):\n \"\"\"\n \"\"\"\n count = self.query('AVERage:COUNt?')\n if count:\n return int(count)\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return the\n average count'''))\n\n @average_count_SA.setter\n @secure_communication()\n def average_count_SA(self, value):\n \"\"\"\n \"\"\"\n self.write('AVERage:COUNt {}'.format(value))\n result = self.query('AVERage:COUNt?')\n if result:\n if int(result) != value:\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n average count'''))\n else:\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n average count'''))\n\n @instrument_property\n @secure_communication()\n def average_state_SA(self):\n \"\"\"\n \"\"\"\n mode = self.query('AVERage?')\n if mode:\n return mode\n else:\n raise InstrIOError(cleandoc('''Agilent PSA did not return the\n average state'''))\n\n @average_state_SA.setter\n @secure_communication()\n def average_state_SA(self, value):\n \"\"\"\n \"\"\"\n self.write('AVERage:STATE {}'.format(value))\n result = self.query('AVERage?')\n\n if result.lower() != value.lower()[:len(result)]:\n raise InstrIOError(cleandoc('''PSA did not set correctly the\n average state'''))\n" ]
[ [ "numpy.array", "numpy.linspace", "numpy.rec.fromarrays" ] ]
Waztom/pipelines
[ "63ac14d05446ced622fd2acb86c9b84dcc5feae8" ]
[ "src/python/pipelines/rdkit/pbf_ev.py" ]
[ "#\n# Copyright (C) 2015 Greg Landrum\n#\n# @@ All Rights Reserved @@\n# This file is part of the RDKit.\n# The contents are covered by the terms of the BSD license\n# which is included in the file license.txt, found at the root\n# of the RDKit source tree.\n\nimport argparse\nfrom builtins import range\n\nfrom rdkit.Chem.Scaffolds import MurckoScaffold\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nimport numpy as np\nfrom numpy import linalg\n\nfrom pipelines_utils import parameter_utils, utils\nfrom pipelines_utils_rdkit import rdkit_utils\n\n\ndef write_out(mols,count,writer,file_format):\n for mol in mols:\n count += 1\n if mol is None: continue\n if file_format == 'sdf':\n writer.write(mol)\n elif file_format == 'json':\n writer.write(mol, format='mol')\n return count\n\ndef GetBestFitPlane(pts, weights=None):\n if weights is None:\n wSum = len(pts)\n origin = np.sum(pts, 0)\n origin /= wSum\n sums = np.zeros((3, 3), np.double)\n for pt in pts:\n dp = pt - origin\n for i in range(3):\n sums[i, i] += dp[i] * dp[i]\n for j in range(i + 1, 3):\n sums[i, j] += dp[i] * dp[j]\n sums[j, i] += dp[i] * dp[j]\n sums /= wSum\n vals, vects = linalg.eigh(sums)\n order = np.argsort(vals)\n normal = vects[:, order[0]]\n plane = np.zeros((4, ), np.double)\n plane[:3] = normal\n plane[3] = -1 * normal.dot(origin)\n return plane\n\ndef PBFRD(mol, confId=-1):\n conf = mol.GetConformer(confId)\n if not conf.Is3D():\n return 0\n\n pts = np.array([list(conf.GetAtomPosition(x)) for x in range(mol.GetNumAtoms())])\n plane = GetBestFitPlane(pts)\n denom = np.dot(plane[:3], plane[:3])\n denom = denom**0.5\n # add up the distance from the plane for each point:\n res = 0.0\n for pt in pts:\n res += np.abs(pt.dot(plane[:3]) + plane[3])\n res /= denom\n res /= len(pts)\n return res\n\ndef PBFev(mol):\n '''returns an array of exit vectors for this mol'''\n # Get murcko SMILES\n murcko = MurckoScaffold.GetScaffoldForMol(mol)\n\n # Get PBF plane for murcko scaffold only\n confId = -1\n conf = murcko.GetConformer(confId)\n if not conf.Is3D():\n print('This mol is not 3D - all PBFev angles will be 0 degrees')\n return [0]\n pts = np.array([list(conf.GetAtomPosition(i)) # Get atom coordinates\n for i in range(murcko.GetNumAtoms())])\n # GetBestFitPlane is in the RDKit Contrib directory as part of PBF\n # Plane is xyz vector with a c intercept adjustment\n plane = GetBestFitPlane(pts)\n\n # Map onto parent structure coords (this func adds exit vectors [*])\n murckoEv = Chem.ReplaceSidechains(mol, murcko)\n\n confId = -1 # embed 3D conf object with EVs (atom indices do not change)\n conf = murckoEv.GetConformer(confId)\n\n # Where [#0] matches exit vector SMILES [*]\n patt = Chem.MolFromSmarts('[#0]-[*]')\n matches = murckoEv.GetSubstructMatches(patt)\n if len(matches) == 0:\n return None\n\n # Calculate angles between exit vectors and the murcko plane of best fit\n exitVectors = np.zeros(len(matches))\n denom = np.dot(plane[:3], plane[:3])\n denom = denom**0.5\n for n, match in enumerate(matches):\n evCoords = conf.GetAtomPosition(match[0])\n anchorCoords = conf.GetAtomPosition(match[1])\n v = np.array(((evCoords[0]-anchorCoords[0]),\n (evCoords[1]-anchorCoords[1]),\n (evCoords[2]-anchorCoords[2])))\n angle = np.arcsin((np.dot(v, plane[:3])) /\n ((denom)*((np.dot(v, v))**0.5)))\n angle = np.abs(np.degrees(angle))\n exitVectors[n] = angle\n return exitVectors\n\ndef main():\n\n ### command line args defintions #########################################\n parser = argparse.ArgumentParser(description='Calculate 
plane of best fit for molecules')\n parameter_utils.add_default_io_args(parser)\n args = parser.parse_args()\n utils.log(\"PBFEV args: \", args)\n input ,output ,suppl ,writer ,output_base = rdkit_utils.default_open_input_output(args.input, args.informat, args.output, 'PBFEV', args.outformat)\n i=0\n count=0\n errors=0\n out_results = []\n for mol in suppl:\n i +=1\n AllChem.EmbedMolecule(mol)\n if mol is None:\n errors += 1\n continue\n out_vector = PBFev(mol)\n if out_vector is None:\n errors += 1\n continue\n rd = PBFRD(mol)\n mol.SetDoubleProp(\"distance\", rd)\n for j,angle in enumerate(out_vector):\n mol.SetDoubleProp(\"angle\" + \"_\" + str(j), angle)\n out_results.append(mol)\n count = write_out(out_results, count, writer, args.outformat)\n utils.log(\"Handled \" + str(i) + \" molecules, resulting in \"+ str(count)+ \" outputs and \" + str(errors) + ' errors')\n writer.flush()\n writer.close()\n input.close()\n output.close()\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.array", "numpy.dot", "numpy.zeros", "numpy.sum", "numpy.linalg.eigh", "numpy.degrees", "numpy.argsort" ] ]
seakers/dtnsim
[ "9ea1da84e0565d97b3ea184facb597302bf4d99e" ]
[ "RL/tabular_Q_learning.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\n\nimport gym\nimport gym_cdtn\nimport itertools\nimport matplotlib\nimport matplotlib.style\nimport numpy as np\nimport pandas as pd\nimport sys\nimport pickle\n\nfrom collections import defaultdict\nimport plotting\n\nmatplotlib.style.use('ggplot')\n\ndef createEpsilonGreedyPolicy(Q, num_actions):\n \"\"\"\n Creates an epsilon-greedy policy based\n on a given Q-function and epsilon.\n\n Returns a function that takes the state\n as an input and returns the probabilities\n for each action in the form of a numpy array\n of length of the action space(set of possible actions).\n \"\"\"\n\n def policyFunction(state, epsilon):\n Action_probabilities = np.ones(num_actions,\n dtype=float) * epsilon / num_actions\n\n best_action = np.argmax(Q[state])\n Action_probabilities[best_action] += (1.0 - epsilon)\n return Action_probabilities\n\n return policyFunction\n\n\ndef qLearning(env, num_episodes, log_dir, discount_factor=0.99,\n alpha=0.1, min_expl_rate=0.02, max_expl_rate=1, decay_rate=0.005):\n \"\"\"\n Q-Learning algorithm: Off-policy TD control.\n Finds the optimal greedy policy while improving\n following an epsilon-greedy policy\"\"\"\n\n # Action value function\n # A nested dictionary that maps\n # state -> (action -> action-value).\n Q = defaultdict(lambda: np.zeros(env.action_space.n))\n\n # Keeps track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n\n # Create an epsilon greedy policy function\n # appropriately for environment action space\n policy = createEpsilonGreedyPolicy(Q, env.action_space.n)\n epsilon = max_expl_rate\n\n history_rewards = []\n # For every episode\n for ith_episode in range(num_episodes):\n\n # Reset the environment and pick the first action\n state = env.reset()\n\n for t in itertools.count():\n\n # get probabilities of all actions from current state\n action_probabilities = policy(state, epsilon)\n\n # choose action according to\n # the probability distribution\n action = np.random.choice(np.arange(\n len(action_probabilities)),\n p=action_probabilities)\n\n # take action and get reward, transit to next state\n next_state, reward, done, _ = env.step(action)\n\n # Update statistics\n stats.episode_rewards[ith_episode] += reward\n stats.episode_lengths[ith_episode] = t\n\n # TD Update\n best_next_action = np.argmax(Q[next_state])\n td_target = reward + discount_factor * Q[next_state][best_next_action]\n td_delta = td_target - Q[state][action]\n Q[state][action] += alpha * td_delta\n\n # done is True if episode terminated\n if done:\n history_rewards.append(stats.episode_rewards[ith_episode])\n print(history_rewards)\n print('Saving history of rewards...')\n data = np.array(history_rewards)\n np.savez(log_dir + \"history_rewards\", data)\n print('Saving Q Table...')\n with open(log_dir + '/qtable.p', 'wb') as fp:\n pickle.dump(data, fp, protocol=pickle.HIGHEST_PROTOCOL)\n break\n state = next_state\n\n epsilon = min_expl_rate + (max_expl_rate - min_expl_rate) * np.exp(-decay_rate * ith_episode)\n\n return Q, stats\n\n\n# Create log dir\nlog_dir = \"./RL/results/logs_q_learning_tabular/\"\nos.makedirs(log_dir, exist_ok=True)\n\n# Create and wrap the environment\nenv = gym.make('cdtn-ASCEND2020-v0')\n\n# start Q-learning\nQ, stats = qLearning(env, 1000, log_dir, discount_factor=0.99,\n alpha=0.1, min_expl_rate=0.02, max_expl_rate=1, decay_rate=0.005)\n# plot results\nplotting.plot_episode_stats(stats)\n\n\n\n\n" ]
[ [ "numpy.array", "matplotlib.style.use", "numpy.zeros", "numpy.ones", "numpy.exp", "numpy.savez", "numpy.argmax" ] ]
IBM/geospatial-event-observations
[ "6e9bd8759f8b66c841ce0afea1ddd173c668f293" ]
[ "be-image/server/execution/execution.py" ]
[ "#\n# Licensed Materials - Property of IBM\n# 6949-04J\n# © Copyright IBM Corp. 2020 All Rights Reserved\n#\nimport glob\nimport json\nimport numpy as np\nimport os\nimport pandas as pd\nimport requests\nfrom sqlalchemy import text, create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.pool import QueuePool\nimport stomp\nimport sys\nimport time\nimport traceback\n\nsys.path.append('..')\n\nfrom classifier.image_classifier import evaluate, score, train_model\nfrom classifier.short_text_classifier import score_text_classifier, training_text_classifier, val_text_classifier\nfrom config import baseDir, get_diagnostics, set_diagnostics\n\ndb_path = os.path.join(baseDir, 'event-detection.db')\nBase = declarative_base()\nengine = create_engine('sqlite:///'+db_path, poolclass=QueuePool, connect_args={'check_same_thread': False})\nSession = sessionmaker(bind=engine)\n\ndef count_files(aPath):\n return len(os.listdir(aPath))\n\ndef create_activeMQ_connection(MQInfo):\n if get_diagnostics(): start = time.time()\n try:\n # parse broker info\n activeMQHost = MQInfo['brokerURL'].split(':')[1]\n # remove the stomp:// protocol declaration\n activeMQHost = activeMQHost[2:]\n print(activeMQHost)\n activeMQPort = int(MQInfo['brokerURL'].split(':')[2])\n print(activeMQPort)\n \n # connect to the activeMQ instance\n stompConnection = stomp.Connection([(activeMQHost, activeMQPort)])\n stompConnection.start()\n stompConnection.connect(MQInfo['brokerUsername'], MQInfo['brokerPassword'], wait=True)\n\n if get_diagnostics():\n end = time.time()\n print('%s Create ActiveMQ Connection completed in %f s' %(time.ctime(end), end-start), flush=True)\n\n return stompConnection\n except:\n traceback.print_exc()\n if get_diagnostics():\n end = time.time()\n print('%s Create ActiveMQ Connection completed in %f s' %(time.ctime(end), end-start), flush=True)\n return None\n\ndef send_execution_status(taskId, MQInfo, status):\n # set up the stomp connection\n MQConnection = create_activeMQ_connection(MQInfo)\n if MQConnection:\n print('Sending message to queue')\n # subscribe to the queue\n MQConnection.subscribe(MQInfo['brokerQueue'], taskId)\n \n # send the status\n MQConnection.send(MQInfo['brokerQueue'], json.dumps(status), 'application/json')\n #MQConnection.send(body=json.dumps(status), destination=MQInfo['brokerQueue'])\n\n # disconnect\n MQConnection.disconnect()\n\nif __name__ == \"__main__\":\n try:\n # MQ flag\n sendMQMessages = True\n # turn on Trace\n set_diagnostics(True)\n\n # parse command line args\n print(sys.argv[1])\n taskId = sys.argv[1]\n print(sys.argv[2])\n trainingDataPath = sys.argv[2]\n print(sys.argv[3])\n inputDataPath = sys.argv[3]\n print(sys.argv[4])\n outputDataPath = sys.argv[4]\n \n try:\n print(sys.argv[5])\n MQInfo = json.loads(sys.argv[5])\n except:\n print('There was a problem parsing MQ Info')\n traceback.print_exc()\n sendMQMessages = False\n\n print(sys.argv[6])\n status = json.loads(sys.argv[6])\n\n # classifier path\n classifierPath = os.path.dirname(os.path.abspath(__file__))\n classifierPath = classifierPath[:classifierPath.rfind('/')]\n classifierPath = os.path.join(classifierPath, 'classifier')\n print(classifierPath)\n\n # control flags\n builtImageModel = False\n builtTextModel = False\n\n # text training files\n short_text = 'training_text.csv'\n real_time = 'valid_text.csv'\n\n # image structure info\n image_size = (56, 56)\n imgFolder = 'Images'\n imgTrainFolder = 'train'\n imgValidFolder = 'valid'\n 
imgTestFolder = 'test'\n\n if trainingDataPath != '-':\n # try to estimate progress\n imagesFolder = os.path.join(trainingDataPath, imgFolder)\n keyword = os.listdir(os.path.join(imagesFolder, imgTrainFolder))[0]\n totalTrainImages = count_files(os.path.join(imagesFolder, imgTrainFolder, keyword))\n totalValidImages = count_files(os.path.join(imagesFolder, imgValidFolder, keyword))\n totalTestImages = count_files(os.path.join(imagesFolder, imgTestFolder, keyword))\n totalImages = totalTrainImages + totalValidImages + totalTestImages\n\n print('Building models...')\n if sendMQMessages:\n status = {\n \"operation\": \"INF_PROCESSING\",\n \"reason\": \"Training models...\",\n \"message\": \"Training models...\",\n \"percentage\": 0.0,\n \"eta\": 180\n }\n send_execution_status(taskId, MQInfo, status)\n\n # image training\n if trainingDataPath != '-' and totalImages:\n n_classes = 2\n model_path, loss, acc = train_model(classifierPath, imagesFolder, image_size, n_classes, True)\n builtImageModel = True\n \n print('Built image model...')\n if sendMQMessages:\n status = {\n \"operation\": \"INF_PROCESSING\",\n \"reason\": \"Image model created...\",\n \"message\": \"Image model created...\",\n \"percentage\": 0.2,\n \"eta\": 120\n }\n send_execution_status(taskId, MQInfo, status)\n\n # text training\n if trainingDataPath != '-' and os.path.exists(os.path.join(trainingDataPath, short_text)) and \\\n os.path.exists(os.path.join(trainingDataPath, real_time)):\n report = training_text_classifier(trainingDataPath, short_text, real_time)\n print(report)\n builtTextModel = True\n \n print('Built short text model...')\n if sendMQMessages:\n status = {\n \"operation\": \"INF_PROCESSING\",\n \"reason\": \"Text model created...\",\n \"message\": \"Text model created...\",\n \"percentage\": 0.4,\n \"eta\": 90\n }\n send_execution_status(taskId, MQInfo, status)\n \n # update the DB\n sql = text('UPDATE models SET status=\"Trained\", pid=-1 WHERE trainingDataPath=\"'+trainingDataPath+'\";')\n result = engine.execute(sql)\n\n # update the SSE API\n requests.post('http://localhost:5000/api/v1.0/updates', json={\"type\": \"models\"})\n\n if inputDataPath != '-':\n print('Scoring input data...')\n if sendMQMessages:\n status = {\n \"operation\": \"INF_PROCESSING\",\n \"reason\": \"Scoring input data...\",\n \"message\": \"Scoring input data...\",\n \"percentage\": 0.5,\n \"eta\": 80\n }\n send_execution_status(taskId, MQInfo, status)\n\n # text scroring\n text_file = None\n df = None\n try:\n text_file = glob.glob(os.path.join(inputDataPath, '*.csv'))[0]\n text_file = text_file[text_file.rfind(os.sep) + 1:]\n except:\n text_file = None\n\n if text_file:\n # filtering should be done at this point\n if trainingDataPath == '-':\n df = score_text_classifier(outputDataPath, inputDataPath, text_file)\n else:\n df = score_text_classifier(trainingDataPath, inputDataPath, text_file)\n df = df.reset_index()\n\n print('Text scoring completed...')\n if sendMQMessages:\n status = {\n \"operation\": \"INF_PROCESSING\",\n \"reason\": \"Input text scoring complete...\",\n \"message\": \"Input text scoring complete...\",\n \"percentage\": 0.75,\n \"eta\": 60\n }\n send_execution_status(taskId, MQInfo, status)\n\n # image scoring\n image_path = os.path.join(inputDataPath, 'Images')\n if len(glob.glob(os.path.join(inputDataPath, '*'))):\n n_classes = 1\n \n if df.empty:\n df = pd.read_csv(os.path.join(inputDataPath, text_file))\n\n if trainingDataPath == '-':\n df = score(df, outputDataPath, image_path, image_size, n_classes)\n 
else:\n df = score(df, trainingDataPath, image_path, image_size, n_classes)\n\n print('Scoring Completed')\n if sendMQMessages:\n status = {\n \"operation\": \"INF_PROCESSING\",\n \"reason\": \"Input image scoring complete...\",\n \"message\": \"Input image scoring complete...\",\n \"percentage\": 0.95,\n \"eta\": 30\n }\n send_execution_status(taskId, MQInfo, status)\n\n print('Writing output')\n if sendMQMessages:\n status = {\n \"operation\": \"INF_PROCESSING\",\n \"reason\": \"Writing output...\",\n \"message\": \"Writing output...\",\n \"percentage\": 0.99,\n \"eta\": 0\n }\n send_execution_status(taskId, MQInfo, status)\n\n # retrieve model information\n tPath = ''\n modelQueryString = 'SELECT name, icon, severity FROM models WHERE trainingDataPath=\"'\n if trainingDataPath == '-':\n tPath = outputDataPath\n else:\n tPath = trainingDataPath\n \n modelQueryString = modelQueryString + tPath + '\";'\n\n modelSelect = text(modelQueryString)\n modelResult = engine.execute(modelSelect)\n\n modelName = None\n modelIcon = None\n modelSev = None\n\n for row in modelResult:\n modelName = row['name']\n modelIcon = row['icon']\n modelSev = (row['severity'] + 1)\n\n # output\n if trainingDataPath == '-':\n opFile = os.path.join(inputDataPath, 'detected-events.out')\n else:\n opFile = os.path.join(outputDataPath, 'detected-events.out')\n df_sorted = df.sort_values(by=['PREDICTED_TOPIC','PROBABILITY','PREDICTED_IMAGE_CLASS','PROBABILITY_IMAGE'],\n ascending=[False,False,False,False])\n print(df_sorted)\n\n # anything above 0.8 prob gets included\n df_score = df_sorted.loc[((df_sorted['PREDICTED_TOPIC'] == modelName.lower()) & \n (df_sorted['PROBABILITY'] >= 0.8)) |\n ((df_sorted['PREDICTED_IMAGE_CLASS'] == modelName.lower()) &\n (df_sorted['PROBABILITY_IMAGE'] >= 0.9))]\n print(df_score) \n # insert to events DB\n conn = engine.connect()\n\n # event & item insert setup\n eventInsertString = 'INSERT INTO events (name, latitude, longitude, severity, icon) VALUES('\n insertStringEnd = ')'\n itemInsertString = 'INSERT INTO items (short_text, image_path, item_time, latitude, longitude, score, event) VALUES('\n\n # iterate over the df\n for index, row in df_score.iterrows():\n if pd.notna(row['LAT']) and pd.notna(row['LONG']):\n eventValues = '\"'+modelName+'\",'\n eventValues += str(row['LAT'])+','\n eventValues += str(row['LONG'])+','\n eventValues += str(modelSev)+','\n eventValues += str(modelIcon)\n\n eventInsert = text(eventInsertString + eventValues + insertStringEnd)\n eventResult = conn.execute(eventInsert)\n eventPK = eventResult.lastrowid\n\n # score\n score = 0.0\n if pd.notna(row['PROBABILITY']) and pd.notnull(row['PROBABILITY_IMAGE']):\n if row['PROBABILITY'] >= 0.8 and row['PROBABILITY_IMAGE'] >= 0.8:\n score = ( row['PROBABILITY'] + row['PROBABILITY_IMAGE']) / 2\n elif row['PROBABILITY'] >= 0.8:\n score = row['PROBABILITY']\n elif row['PROBABILITY_IMAGE'] >= 0.8:\n score = row['PROBABILITY_IMAGE']\n elif pd.notna(row['PROBABILITY']):\n score = row['PROBABILITY']\n elif pd.notna(row['PROBABILITY_IMAGE']):\n score = row['PROBABILITY_IMAGE']\n print(str(score))\n\n # datetime\n eventDatetime = time.gmtime(0)\n if pd.notna(row['date']):\n try:\n eventDatetime = time.strptime(str(row['date']), '%Y%m%d %H:%M:%S')\n except:\n pass\n \n # image\n eventImage = ''\n if pd.notna(row['IMAGE']):\n dataUUID = inputDataPath[inputDataPath.rfind('/')+1:]\n eventImage = os.path.join(dataUUID, \"Images\", row['IMAGE'])\n print(eventImage)\n\n itemValues = '\"'+row['TEXT']+'\",'\n itemValues += 
'\"'+eventImage+'\",'\n itemValues += '\"' + time.strftime('%Y%m%d %H:%M:%S', eventDatetime) +'\",'\n itemValues += str(row['LAT'])+','\n itemValues += str(row['LONG'])+','\n itemValues += str(score)+','\n itemValues += str(eventPK)\n\n itemInsert = text(itemInsertString + itemValues + insertStringEnd)\n itemResult = conn.execute(itemInsert)\n \n # cleanup\n conn.close()\n\n df_score.to_csv(opFile, index=None, header=True) \n \n print('Event Detection completed')\n if sendMQMessages:\n status = {\n \"operation\": \"RES_COMPLETED\",\n \"reason\": \"Event detection job complete.\",\n \"data\": {\n \"output_Folder\": opFile\n }\n }\n send_execution_status(taskId, MQInfo, status)\n \n # update the SSE API\n requests.post('http://localhost:5000/api/v1.0/updates', json={\"type\": \"events\"})\n except:\n traceback.print_exc()\n error_reason = 'An exception was thrown'\n error_message = traceback.format_exc()\n\n if trainingDataPath != '-':\n sql = text('UPDATE models SET status=\"Created\", pid=-1 WHERE trainingDataPath=\"'+trainingDataPath+'\";')\n result = engine.execute(sql)\n\n if sendMQMessages:\n status = {\n \"operation\": \"RES_FAILED\",\n \"reason\": error_reason,\n \"message\": error_message\n }\n send_execution_status(taskId, MQInfo, status)" ]
[ [ "pandas.notnull", "pandas.notna" ] ]