repo_name | hexsha | file_path | code | apis
---|---|---|---|---
anaeliaovalle/atc-mt-dti | ["755bd175e852ef2a6792be7244b006ebed252d8d"] | ["src/bert/tfrecord_smiles.py"] | [
"import os\nimport csv\nimport json\nimport pickle\nimport random\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nimport _pickle as cPickle\nfrom copy import deepcopy\nimport collections\nfrom collections import OrderedDict\n\n__author__ = 'Bonggun Shin'\n\n\nflags = tf.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_float(\"masked_lm_prob\", 0.15, \"Masked LM probability.\")\nflags.DEFINE_integer(\"max_seq_length\", 100, \"Maximum sequence length.\")\nflags.DEFINE_integer(\"max_predictions_per_seq\", 15,\n \"Maximum number of masked LM predictions per sequence.\")\nflags.DEFINE_integer(\"random_seed\", 12345, \"Random seed for data generation.\")\n# flags.DEFINE_string(\n# \"output_file\", \"./smiles01.tfrecord,./smiles02.tfrecord\",\n# \"Output TF example file (or comma-separated list of files).\")\nflags.DEFINE_string(\"base_path\", \"../../../data/pretrain\", \"base path for dataset\")\nflags.DEFINE_string(\n \"output_file\", \"%s/smiles.tfrecord\" % FLAGS.base_path,\n \"Output TF example file (or comma-separated list of files).\")\n\n\n\ndef create_int_feature(values):\n feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n\ndef create_float_feature(values):\n feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))\n return feature\n\n\ndef write_instance_to_example_files(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, output_files):\n \"\"\"Create TF example files from `TrainingInstance`s.\"\"\"\n writers = []\n for output_file in output_files:\n writers.append(tf.python_io.TFRecordWriter(output_file))\n\n writer_index = 0\n\n total_written = 0\n for (inst_index, instance) in enumerate(instances):\n input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)\n input_mask = [1] * len(input_ids)\n assert len(input_ids) <= max_seq_length\n\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n\n masked_lm_positions = list(instance.masked_lm_positions)\n masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n\n while len(masked_lm_positions) < max_predictions_per_seq:\n masked_lm_positions.append(0)\n masked_lm_ids.append(0)\n masked_lm_weights.append(0.0)\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(input_ids)\n features[\"input_mask\"] = create_int_feature(input_mask)\n features[\"masked_lm_positions\"] = create_int_feature(masked_lm_positions)\n features[\"masked_lm_ids\"] = create_int_feature(masked_lm_ids)\n features[\"masked_lm_weights\"] = create_float_feature(masked_lm_weights)\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n\n writers[writer_index].write(tf_example.SerializeToString())\n writer_index = (writer_index + 1) % len(writers)\n\n total_written += 1\n\n if inst_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"tokens: %s\" % \" \".join(instance.tokens))\n\n for feature_name in features.keys():\n feature = features[feature_name]\n values = []\n if feature.int64_list.value:\n values = feature.int64_list.value\n elif feature.float_list.value:\n values = feature.float_list.value\n tf.logging.info(\n \"%s: %s\" % (feature_name, \" \".join([str(x) for x in values])))\n\n for writer in writers:\n writer.close()\n\n tf.logging.info(\"Wrote %d total instances\", total_written)\n\n\ndef 
create_masked_lm_predictions(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng):\n # masked_lm_prob 0.15\n # max_seq_length\", 170\n # max_predictions_per_seq\", 26 (170*.15)\n # vocab_words = list(tokenizer.vocab.keys())\n # rng = random.Random(FLAGS.random_seed)\n MaskedLmInstance = collections.namedtuple(\"MaskedLmInstance\",\n [\"index\", \"label\"])\n\n cand_indexes = []\n for (i, token) in enumerate(tokens):\n if token == \"[BEGIN]\" or token == \"[END]\":\n continue\n cand_indexes.append(i)\n\n rng.shuffle(cand_indexes)\n\n output_tokens = list(tokens)\n\n num_to_predict = min(max_predictions_per_seq,\n max(1, int(round(len(tokens) * masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n if index in covered_indexes:\n continue\n covered_indexes.add(index)\n\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n masked_token = tokens[index]\n # 10% of the time, replace with random word\n else:\n masked_token = vocab_words[rng.randint(4, len(vocab_words) - 1)]\n\n output_tokens[index] = masked_token\n\n masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))\n\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n\n return (output_tokens, masked_lm_positions, masked_lm_labels)\n\n\nclass SmilesTokenizer(object):\n def __init__(self, vocab_file):\n self.vocab = self.load_vocab(vocab_file)\n self.inv_vocab = {v: k for k, v in self.vocab.items()}\n # self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n # self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)\n\n def tokenize(self, text):\n tokens = []\n tokens.append(\"[BEGIN]\")\n for c in text:\n tokens.append(c)\n tokens.append(\"[END]\")\n\n return tokens\n\n def convert_tokens_to_ids(self, tokens):\n return self.convert_by_vocab(self.vocab, tokens)\n\n def convert_ids_to_tokens(self, ids):\n return self.convert_by_vocab(self.inv_vocab, ids)\n\n def convert_by_vocab(self, vocab, items):\n \"\"\"Converts a sequence of [tokens|ids] using the vocab.\"\"\"\n output = []\n for item in items:\n output.append(vocab[item])\n return output\n\n def load_vocab(self, vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = OrderedDict()\n index = 0\n with tf.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = reader.readline()\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab\n\nclass TrainingInstance(object):\n \"\"\"A single training instance (sentence pair).\"\"\"\n\n def __init__(self, tokens, masked_lm_positions, masked_lm_labels):\n self.tokens = tokens\n self.masked_lm_positions = masked_lm_positions\n self.masked_lm_labels = masked_lm_labels\n\n def __str__(self):\n s = \"\"\n s += \"tokens: %s\\n\" % (\" \".join(self.tokens))\n s += \"masked_lm_positions: %s\\n\" % (\" \".join([str(x) for x in self.masked_lm_positions]))\n s += \"masked_lm_labels: %s\\n\" % (\" \".join(self.masked_lm_labels))\n s += \"\\n\"\n return s\n\n def __repr__(self):\n return self.__str__()\n\n\ndef truncate_seq_pair(tokens, max_num_tokens, rng):\n while True:\n total_length = len(tokens)\n if total_length <= max_num_tokens:\n break\n\n trunc_tokens = 
tokens\n assert len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if rng.random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()\n\ndef read_smiles(base_path):\n smiles_list = []\n len_list = []\n with open(\"%s/pretrain/smiles_sample.csv\" % (base_path), \"rt\") as f:\n # with open(\"%s/pretrain/00out_pubchem_cid_to_SMILES.csv\" % (base_path), \"rt\") as f: # 97,092,853 97M\n csvr = csv.reader(f, delimiter=',')\n for row in csvr:\n smiles_list.append(row[1])\n len_list.append(len(row[1]))\n\n tokenizer = SmilesTokenizer(\"%s/vocab_smiles.txt\" % FLAGS.base_path)\n vocab_words = list(tokenizer.vocab.keys())\n rng = random.Random(12345)\n max_num_tokens = FLAGS.max_seq_length - 1\n\n instances = []\n for s in smiles_list:\n tokens = tokenizer.tokenize(s)\n truncate_seq_pair(tokens, max_num_tokens, rng)\n\n tokens.insert(0, \"[CLS]\")\n\n (tokens, masked_lm_positions, masked_lm_labels) = create_masked_lm_predictions(tokens, FLAGS.masked_lm_prob,\n FLAGS.max_predictions_per_seq,\n vocab_words, rng)\n\n instance = TrainingInstance(\n tokens=tokens,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n # print(instance)\n instances.append(instance)\n\n return instances, tokenizer\n\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--base_path', type=str, default='../../../data', help='Directory for input data.')\n args, unparsed = parser.parse_known_args()\n\n instances, tokenizer = read_smiles(args.base_path)\n\n output_files = FLAGS.output_file.split(\",\")\n tf.logging.info(\"*** Writing to output files ***\")\n for output_file in output_files:\n tf.logging.info(\" %s\", output_file)\n\n write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length,\n FLAGS.max_predictions_per_seq, output_files)\n\n"
] | [["tensorflow.python_io.TFRecordWriter", "tensorflow.train.Features", "tensorflow.logging.info", "tensorflow.gfile.GFile"]] |
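The `apis` cell of the row above lists four TensorFlow 1.x calls extracted from `src/bert/tfrecord_smiles.py`. As a hedged illustration only (it assumes a TensorFlow 1.x install; `write_toy_record`, the paths, and the feature values are invented for the sketch and are not part of the dataset row), this is roughly how those four calls cooperate to serialize integer features into a TFRecord, mirroring `write_instance_to_example_files`:

```python
# Minimal sketch (assumes TensorFlow 1.x). write_toy_record, output_path and
# vocab_path are illustrative names, not taken from the row above.
import collections

import tensorflow as tf


def write_toy_record(output_path, vocab_path):
    # tf.gfile.GFile: read a vocabulary line by line, as SmilesTokenizer.load_vocab does.
    with tf.gfile.GFile(vocab_path, "r") as reader:
        vocab = [line.strip() for line in reader if line.strip()]

    # tf.train.Features: wrap an ordered dict of int64 features into an Example.
    features = collections.OrderedDict()
    features["input_ids"] = tf.train.Feature(
        int64_list=tf.train.Int64List(value=[1, 2, 3]))
    example = tf.train.Example(features=tf.train.Features(feature=features))

    # tf.python_io.TFRecordWriter: serialize the Example into the output file.
    with tf.python_io.TFRecordWriter(output_path) as writer:
        writer.write(example.SerializeToString())

    # tf.logging.info: progress logging, as the script does per written instance.
    tf.logging.info("Wrote 1 example using a vocab of %d tokens", len(vocab))
```

In the file above, the same pattern runs once per `TrainingInstance`, round-robining across writers when `output_file` is a comma-separated list.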
oasys-kit/OASYS-CRYSTALPY | ["52b27d225c090894bc6b6d0bdf8d828f19aa3972"] | ["orangecontrib/crystalpy/widgets/elements/PhotonViewer.py"] | [
"import matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\nimport orangecanvas.resources as resources\nimport os\nimport sys\nimport numpy as np\nfrom orangewidget.settings import Setting\nfrom crystalpy.util.PolarizedPhotonBunch import PolarizedPhotonBunch\nfrom orangewidget import gui, widget\n\n\nclass PlotType:\n \"\"\"Contains the different plot types that can be plotted by the viewer.\"\"\"\n # STOKES_DEVIATION = {'subplots': (2, 2), 'x values': \"deviations\", 'y values': \"stokes\", 'style': \"grid\"}\n STOKES_DEVIATION = {'subplots': (2, 2), 'x values': \"deviations\", 'y values': \"stokes\", 'style': \"scatter\"}\n STOKES_ENERGY = { 'subplots': (2, 2), 'x values': \"energies\", 'y values': \"stokes\", 'style': \"scatter\"}\n # POLAR_DEVIATION = { 'subplots': (1, 1), 'x values': \"deviations\", 'y values': \"polarization degrees\", 'style': \"grid\"}\n POLAR_DEVIATION = { 'subplots': (1, 1), 'x values': \"deviations\", 'y values': \"polarization degrees\", 'style': \"scatter\"}\n POLAR_ENERGY = { 'subplots': (1, 1), 'x values': \"energies\", 'y values': \"polarization degrees\", 'style': \"scatter\"}\n\n\nclass OWPhotonViewer(widget.OWWidget):\n name = \"PhotonViewer\"\n id = \"orange.widgets.data.widget_name\"\n description = \"\"\n icon = \"icons/PhotonViewer.png\"\n author = \"\"\n maintainer_email = \"[email protected]\"\n priority = 40\n category = \"\"\n keywords = [\"PhotonViewer\", \"crystalpy\", \"viewer\", \"oasyscrystalpy\"]\n inputs = [{\"name\": \"photon bunch\",\n \"type\": PolarizedPhotonBunch,\n \"doc\": \"\",\n \"handler\": \"_set_input\"},\n ]\n\n PLOT_TYPE = Setting(0) # Stokes parameters (deviation)\n\n def __init__(self):\n super().__init__()\n\n self._input_available = False\n\n self.figure_canvas = None\n\n print(\"PhotonViewer: Photon viewer initialized.\\n\")\n\n # box0 = gui.widgetBox(self.controlArea, \" \", orientation=\"horizontal\")\n # # box0 = gui.widgetBox(self.controlArea, \" \", orientation=\"vertical\")\n #\n\n # # widget buttons: plot, help\n # gui.button(box0, self, \"Plot\", callback=self.do_plot)\n # gui.button(box0, self, \"Help\", callback=self.get_doc)\n\n box1 = gui.widgetBox(self.controlArea, \" \", orientation=\"vertical\")\n\n gui.comboBox(box1, self, \"PLOT_TYPE\", addSpace=True,\n items=[\"Stokes(deviation)\", \"Stokes(energy)\",\n \"Polarization degree(deviation)\", \"Polarization degree(energy)\"],\n orientation=\"horizontal\",callback=self.do_plot)\n\n def _set_input(self, photon_bunch):\n \"\"\"This function is called when the widget receives an input.\"\"\"\n if photon_bunch is not None:\n self._input_available = True # The input is now available.\n print(\"PhotonViewer: The viewer has received the data.\\n\")\n\n # Retrieve the results from input data.\n self.photon_bunch = photon_bunch\n self.bunch_size = photon_bunch.getArrayByKey(\"number of photons\") # int\n self.energies = photon_bunch.getArrayByKey(\"energies\") # eV\n self.deviations = photon_bunch.getArrayByKey(\"deviations\") # urad\n self.deviations *= 1e+6\n self.stokes_vectors = [photon_bunch.getArrayByKey(\"s0\"),\n photon_bunch.getArrayByKey(\"s1\"),\n photon_bunch.getArrayByKey(\"s2\"),\n photon_bunch.getArrayByKey(\"s3\")]\n self.polarization_degrees = photon_bunch.getArrayByKey(\"polarization degree\")\n\n self.do_plot()\n\n def get_plot_type(self):\n\n if self.PLOT_TYPE == 0:\n return PlotType.STOKES_DEVIATION\n\n elif self.PLOT_TYPE == 1:\n return PlotType.STOKES_ENERGY\n\n elif self.PLOT_TYPE == 2:\n 
return PlotType.POLAR_DEVIATION\n\n elif self.PLOT_TYPE == 3:\n return PlotType.POLAR_ENERGY\n\n else:\n raise ValueError(\"PhotonViewer: Plot Type not recognized!\\n\")\n\n def do_plot(self):\n try:\n # Check whether the input is available.\n if not self._input_available:\n raise Exception(\"PhotonViewer: Input data not available!\\n\")\n\n # If there is already a FigureCanvas, remove it so it can be substituted by a new one.\n if self.figure_canvas is not None:\n self.mainArea.layout().removeWidget(self.figure_canvas)\n\n plot_type = self.get_plot_type()\n\n # Create the subplots according to the PlotType.\n n = plot_type['subplots'][0]\n m = plot_type['subplots'][1]\n fig, axes = plt.subplots(n, m, sharex=\"all\", sharey=\"all\")\n self.axes = np.array(axes).flatten()\n\n self.figure_canvas = FigureCanvas(fig)\n self.mainArea.layout().addWidget(self.figure_canvas)\n\n #\n # Initialize plotting parameters according to PlotType.\n #\n if plot_type['x values'] == \"deviations\":\n x_values = self.deviations\n x_label = \"deviations [urad]\"\n\n elif plot_type['x values'] == \"energies\":\n x_values = self.energies\n x_label = \"energies [eV]\"\n\n else:\n raise Exception(\"PhotonViewer: The PlotType class might be badly defined!\\n\")\n\n if plot_type['y values'] == \"stokes\":\n y_values_array = self.stokes_vectors\n titles = [\"Stokes parameter S0\",\n \"Stokes parameter S1\",\n \"Stokes parameter S2\",\n \"Stokes parameter S3\"]\n\n elif plot_type['y values'] == \"polarization degrees\":\n y_values_array = [self.polarization_degrees]\n titles = [\"Degree of circular polarization\"]\n\n else:\n raise Exception(\"PhotonViewer: The PlotType class might be badly defined!\\n\")\n\n #\n # Deal with the special cases, where the plotting is straightforward.\n #\n if self.photon_bunch.isMonochromatic(places=6) or \\\n self.photon_bunch.isUnidirectional(): # unidirectional or monochromatic.\n\n self.plot(x_values, y_values_array, x_label=x_label, titles=titles)\n\n #\n # General case.\n #\n else:\n if plot_type['x values'] == \"energies\":\n\n self.plot(x_values, y_values_array, x_label=x_label, titles=titles)\n\n elif plot_type['x values'] == \"deviations\":\n\n # Create the empty arrays.\n x_values = np.array([])\n polarization_degrees = np.array([])\n s0 = np.array([])\n s1 = np.array([])\n s2 = np.array([])\n s3 = np.array([])\n\n energy = self.photon_bunch[0].energy()\n\n for i in range(self.photon_bunch.getNumberOfPhotons()):\n polarized_photon = self.photon_bunch.getPhotonIndex(i)\n if polarized_photon.energy() == energy: # Iterate over a monochromatic portion.\n x_values = np.append(x_values, np.multiply(polarized_photon.deviation(), 1e+6))\n polarization_degrees = np.append(polarization_degrees,\n polarized_photon.polarizationDegree())\n stokes_vector = polarized_photon.stokesVector()\n s0 = np.append(s0, stokes_vector.s0)\n s1 = np.append(s1, stokes_vector.s1)\n s2 = np.append(s2, stokes_vector.s2)\n s3 = np.append(s3, stokes_vector.s3)\n else:\n if plot_type['y values'] == \"stokes\":\n y_values_array = [s0, s1, s2, s3]\n\n elif plot_type['y values'] == \"polarization degrees\":\n y_values_array = [polarization_degrees]\n\n self.plot(x_values, y_values_array, x_label=x_label, titles=titles)\n\n energy = polarized_photon.energy() # Update the energy.\n x_values = np.array([]) # Clear the arrays.\n polarization_degrees = np.array([])\n s0 = np.array([])\n s1 = np.array([])\n s2 = np.array([])\n s3 = np.array([])\n\n except Exception as e:\n QtGui.QMessageBox.critical(self, \"Error\", 
str(e))\n\n def plot(self, x_values, y_values_array, x_label=\"\", y_label=\"\", titles=None):\n \"\"\"y_values_array can be an array of arrays.\"\"\"\n\n for index in range(0, len(self.axes)):\n\n if self.get_plot_type()['style'] == \"grid\":\n self.axes[index].plot(x_values, y_values_array[index], \"-\")\n\n elif self.get_plot_type()['style'] == \"scatter\":\n self.axes[index].scatter(x_values, y_values_array[index], marker=\"o\")\n\n # Embellish.\n self.axes[index].set_xlabel(x_label)\n self.axes[index].set_ylabel(y_label)\n self.axes[index].set_title(titles[index])\n self.axes[index].set_xlim([x_values.min(), x_values.max()])\n\n self.figure_canvas.draw()\n\n def get_doc(self):\n print(\"PhotonViewer: help pressed.\\n\")\n home_doc = resources.package_dirname(\"orangecontrib.oasyscrystalpy\") + \"/doc_files/\"\n filename1 = os.path.join(home_doc, 'CrystalViewer'+'.txt')\n print(\"PhotonViewer: Opening file %s\\n\" % filename1)\n if sys.platform == 'darwin':\n command = \"open -a TextEdit \"+filename1+\" &\"\n elif sys.platform == 'linux':\n command = \"gedit \"+filename1+\" &\"\n else:\n raise Exception(\"PhotonViewer: sys.platform did not yield an acceptable value!\\n\")\n os.system(command)\n\nif __name__ == '__main__':\n from PyQt5.QtWidgets import QApplication\n\n from crystalpy.util.Vector import Vector\n from crystalpy.util.StokesVector import StokesVector\n from crystalpy.util.PolarizedPhoton import PolarizedPhoton\n from crystalpy.util.PolarizedPhotonBunch import PolarizedPhotonBunch\n\n app = QApplication([])\n ow = OWPhotonViewer()\n\n nphotons = 10\n\n from crystalpy.util.Vector import Vector\n from crystalpy.util.StokesVector import StokesVector\n\n bunch = PolarizedPhotonBunch([])\n for i in range(nphotons):\n polarized_photon = PolarizedPhoton(energy_in_ev=1000.0+i,\n direction_vector=Vector(0,1.0,0),\n stokes_vector=StokesVector([1.0,0,1.0,0]))\n bunch.addPhoton(polarized_photon)\n\n\n ow._set_input(bunch)\n ow.do_plot()\n ow.show()\n\n app.exec_()\n ow.saveSettings()\n"
] | [["matplotlib.backends.backend_qt4agg.FigureCanvasQTAgg", "numpy.array", "matplotlib.pyplot.subplots", "numpy.append"]] |
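The second row's `apis` cell pairs `FigureCanvasQTAgg` with `matplotlib.pyplot.subplots`, `numpy.array`, and `numpy.append`, which is the core of `OWPhotonViewer.do_plot`. Below is a minimal sketch of that pattern outside Orange, under stated assumptions: it uses PyQt5 and the qt5agg canvas (the row imports the qt4agg one, but its `__main__` block already builds a PyQt5 application), and the photon data is invented.

```python
# Minimal sketch (assumes PyQt5 + matplotlib's qt5agg backend; data is invented).
import sys

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from PyQt5.QtWidgets import QApplication, QMainWindow

app = QApplication(sys.argv)

# Accumulate per-photon values with np.append, as do_plot does in its general case.
deviations = np.array([])
s0 = np.array([])
for i in range(10):
    deviations = np.append(deviations, i * 0.5)  # urad
    s0 = np.append(s0, 1.0)

# One plt.subplots call builds the shared-axis grid the widget uses (2x2 for Stokes plots).
fig, axes = plt.subplots(2, 2, sharex="all", sharey="all")
axes = np.array(axes).flatten()
axes[0].scatter(deviations, s0, marker="o")
axes[0].set_xlabel("deviations [urad]")
axes[0].set_title("Stokes parameter S0")

# Embed the figure in a Qt window via the Agg canvas instead of calling plt.show().
window = QMainWindow()
window.setCentralWidget(FigureCanvas(fig))
window.show()
app.exec_()
```

The widget itself keeps a reference to the canvas so it can remove and rebuild it on every re-plot; the sketch only shows a single embedding.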
jahfet/pandas | ["1e4c50a56f7e953ab84308f000dff6fc1ac71171"] | ["pandas/tests/io/formats/test_format.py"] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"\nTest output formatting for Series/DataFrame, including to_string & reprs\n\"\"\"\n\nfrom __future__ import print_function\nimport re\n\nimport pytz\nimport dateutil\nimport itertools\nfrom operator import methodcaller\nimport os\nimport sys\nimport warnings\nfrom datetime import datetime\n\nimport pytest\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import (DataFrame, Series, Index, Timestamp, MultiIndex,\n date_range, NaT, read_table)\nfrom pandas.compat import (range, zip, lrange, StringIO, PY3,\n u, lzip, is_platform_windows,\n is_platform_32bit)\nimport pandas.compat as compat\n\nimport pandas.io.formats.format as fmt\nimport pandas.io.formats.printing as printing\n\nimport pandas.util.testing as tm\nfrom pandas.io.formats.terminal import get_terminal_size\nfrom pandas.core.config import (set_option, get_option, option_context,\n reset_option)\n\nuse_32bit_repr = is_platform_windows() or is_platform_32bit()\n\n_frame = DataFrame(tm.getSeriesData())\n\n\ndef curpath():\n pth, _ = os.path.split(os.path.abspath(__file__))\n return pth\n\n\ndef has_info_repr(df):\n r = repr(df)\n c1 = r.split('\\n')[0].startswith(\"<class\")\n c2 = r.split('\\n')[0].startswith(r\"<class\") # _repr_html_\n return c1 or c2\n\n\ndef has_non_verbose_info_repr(df):\n has_info = has_info_repr(df)\n r = repr(df)\n\n # 1. <class>\n # 2. Index\n # 3. Columns\n # 4. dtype\n # 5. memory usage\n # 6. trailing newline\n nv = len(r.split('\\n')) == 6\n return has_info and nv\n\n\ndef has_horizontally_truncated_repr(df):\n try: # Check header row\n fst_line = np.array(repr(df).splitlines()[0].split())\n cand_col = np.where(fst_line == '...')[0][0]\n except:\n return False\n # Make sure each row has this ... in the same place\n r = repr(df)\n for ix, l in enumerate(r.splitlines()):\n if not r.split()[cand_col] == '...':\n return False\n return True\n\n\ndef has_vertically_truncated_repr(df):\n r = repr(df)\n only_dot_row = False\n for row in r.splitlines():\n if re.match(r'^[\\.\\ ]+$', row):\n only_dot_row = True\n return only_dot_row\n\n\ndef has_truncated_repr(df):\n return has_horizontally_truncated_repr(\n df) or has_vertically_truncated_repr(df)\n\n\ndef has_doubly_truncated_repr(df):\n return has_horizontally_truncated_repr(\n df) and has_vertically_truncated_repr(df)\n\n\ndef has_expanded_repr(df):\n r = repr(df)\n for line in r.split('\\n'):\n if line.endswith('\\\\'):\n return True\n return False\n\n\nclass TestDataFrameFormatting(object):\n\n def setup_method(self, method):\n self.warn_filters = warnings.filters\n warnings.filterwarnings('ignore', category=FutureWarning,\n module=\".*format\")\n\n self.frame = _frame.copy()\n\n def teardown_method(self, method):\n warnings.filters = self.warn_filters\n\n def test_repr_embedded_ndarray(self):\n arr = np.empty(10, dtype=[('err', object)])\n for i in range(len(arr)):\n arr['err'][i] = np.random.randn(i)\n\n df = DataFrame(arr)\n repr(df['err'])\n repr(df)\n df.to_string()\n\n def test_eng_float_formatter(self):\n self.frame.loc[5] = 0\n\n fmt.set_eng_float_format()\n repr(self.frame)\n\n fmt.set_eng_float_format(use_eng_prefix=True)\n repr(self.frame)\n\n fmt.set_eng_float_format(accuracy=0)\n repr(self.frame)\n tm.reset_display_options()\n\n def test_show_null_counts(self):\n\n df = DataFrame(1, columns=range(10), index=range(10))\n df.iloc[1, 1] = np.nan\n\n def check(null_counts, result):\n buf = StringIO()\n df.info(buf=buf, null_counts=null_counts)\n assert ('non-null' in buf.getvalue()) is result\n\n with 
option_context('display.max_info_rows', 20,\n 'display.max_info_columns', 20):\n check(None, True)\n check(True, True)\n check(False, False)\n\n with option_context('display.max_info_rows', 5,\n 'display.max_info_columns', 5):\n check(None, False)\n check(True, False)\n check(False, False)\n\n def test_repr_tuples(self):\n buf = StringIO()\n\n df = DataFrame({'tups': lzip(range(10), range(10))})\n repr(df)\n df.to_string(col_space=10, buf=buf)\n\n def test_repr_truncation(self):\n max_len = 20\n with option_context(\"display.max_colwidth\", max_len):\n df = DataFrame({'A': np.random.randn(10),\n 'B': [tm.rands(np.random.randint(\n max_len - 1, max_len + 1)) for i in range(10)\n ]})\n r = repr(df)\n r = r[r.find('\\n') + 1:]\n\n adj = fmt._get_adjustment()\n\n for line, value in lzip(r.split('\\n'), df['B']):\n if adj.len(value) + 1 > max_len:\n assert '...' in line\n else:\n assert '...' not in line\n\n with option_context(\"display.max_colwidth\", 999999):\n assert '...' not in repr(df)\n\n with option_context(\"display.max_colwidth\", max_len + 2):\n assert '...' not in repr(df)\n\n def test_repr_chop_threshold(self):\n df = DataFrame([[0.1, 0.5], [0.5, -0.1]])\n pd.reset_option(\"display.chop_threshold\") # default None\n assert repr(df) == ' 0 1\\n0 0.1 0.5\\n1 0.5 -0.1'\n\n with option_context(\"display.chop_threshold\", 0.2):\n assert repr(df) == ' 0 1\\n0 0.0 0.5\\n1 0.5 0.0'\n\n with option_context(\"display.chop_threshold\", 0.6):\n assert repr(df) == ' 0 1\\n0 0.0 0.0\\n1 0.0 0.0'\n\n with option_context(\"display.chop_threshold\", None):\n assert repr(df) == ' 0 1\\n0 0.1 0.5\\n1 0.5 -0.1'\n\n def test_repr_chop_threshold_column_below(self):\n # GH 6839: validation case\n\n df = pd.DataFrame([[10, 20, 30, 40],\n [8e-10, -1e-11, 2e-9, -2e-11]]).T\n\n with option_context(\"display.chop_threshold\", 0):\n assert repr(df) == (' 0 1\\n'\n '0 10.0 8.000000e-10\\n'\n '1 20.0 -1.000000e-11\\n'\n '2 30.0 2.000000e-09\\n'\n '3 40.0 -2.000000e-11')\n\n with option_context(\"display.chop_threshold\", 1e-8):\n assert repr(df) == (' 0 1\\n'\n '0 10.0 0.000000e+00\\n'\n '1 20.0 0.000000e+00\\n'\n '2 30.0 0.000000e+00\\n'\n '3 40.0 0.000000e+00')\n\n with option_context(\"display.chop_threshold\", 5e-11):\n assert repr(df) == (' 0 1\\n'\n '0 10.0 8.000000e-10\\n'\n '1 20.0 0.000000e+00\\n'\n '2 30.0 2.000000e-09\\n'\n '3 40.0 0.000000e+00')\n\n def test_repr_obeys_max_seq_limit(self):\n with option_context(\"display.max_seq_items\", 2000):\n assert len(printing.pprint_thing(lrange(1000))) > 1000\n\n with option_context(\"display.max_seq_items\", 5):\n assert len(printing.pprint_thing(lrange(1000))) < 100\n\n def test_repr_set(self):\n assert printing.pprint_thing(set([1])) == '{1}'\n\n def test_repr_is_valid_construction_code(self):\n # for the case of Index, where the repr is traditional rather then\n # stylized\n idx = Index(['a', 'b'])\n res = eval(\"pd.\" + repr(idx))\n tm.assert_series_equal(Series(res), Series(idx))\n\n def test_repr_should_return_str(self):\n # https://docs.python.org/3/reference/datamodel.html#object.__repr__\n # \"...The return value must be a string object.\"\n\n # (str on py2.x, str (unicode) on py3)\n\n data = [8, 5, 3, 5]\n index1 = [u(\"\\u03c3\"), u(\"\\u03c4\"), u(\"\\u03c5\"), u(\"\\u03c6\")]\n cols = [u(\"\\u03c8\")]\n df = DataFrame(data, columns=cols, index=index1)\n assert type(df.__repr__()) == str # both py2 / 3\n\n def test_repr_no_backslash(self):\n with option_context('mode.sim_interactive', True):\n df = DataFrame(np.random.randn(10, 4))\n assert 
'\\\\' not in repr(df)\n\n def test_expand_frame_repr(self):\n df_small = DataFrame('hello', [0], [0])\n df_wide = DataFrame('hello', [0], lrange(10))\n df_tall = DataFrame('hello', lrange(30), lrange(5))\n\n with option_context('mode.sim_interactive', True):\n with option_context('display.max_columns', 10, 'display.width', 20,\n 'display.max_rows', 20,\n 'display.show_dimensions', True):\n with option_context('display.expand_frame_repr', True):\n assert not has_truncated_repr(df_small)\n assert not has_expanded_repr(df_small)\n assert not has_truncated_repr(df_wide)\n assert has_expanded_repr(df_wide)\n assert has_vertically_truncated_repr(df_tall)\n assert has_expanded_repr(df_tall)\n\n with option_context('display.expand_frame_repr', False):\n assert not has_truncated_repr(df_small)\n assert not has_expanded_repr(df_small)\n assert not has_horizontally_truncated_repr(df_wide)\n assert not has_expanded_repr(df_wide)\n assert has_vertically_truncated_repr(df_tall)\n assert not has_expanded_repr(df_tall)\n\n def test_repr_non_interactive(self):\n # in non interactive mode, there can be no dependency on the\n # result of terminal auto size detection\n df = DataFrame('hello', lrange(1000), lrange(5))\n\n with option_context('mode.sim_interactive', False, 'display.width', 0,\n 'display.max_rows', 5000):\n assert not has_truncated_repr(df)\n assert not has_expanded_repr(df)\n\n def test_repr_max_columns_max_rows(self):\n term_width, term_height = get_terminal_size()\n if term_width < 10 or term_height < 10:\n pytest.skip(\"terminal size too small, \"\n \"{0} x {1}\".format(term_width, term_height))\n\n def mkframe(n):\n index = ['{i:05d}'.format(i=i) for i in range(n)]\n return DataFrame(0, index, index)\n\n df6 = mkframe(6)\n df10 = mkframe(10)\n with option_context('mode.sim_interactive', True):\n with option_context('display.width', term_width * 2):\n with option_context('display.max_rows', 5,\n 'display.max_columns', 5):\n assert not has_expanded_repr(mkframe(4))\n assert not has_expanded_repr(mkframe(5))\n assert not has_expanded_repr(df6)\n assert has_doubly_truncated_repr(df6)\n\n with option_context('display.max_rows', 20,\n 'display.max_columns', 10):\n # Out off max_columns boundary, but no extending\n # since not exceeding width\n assert not has_expanded_repr(df6)\n assert not has_truncated_repr(df6)\n\n with option_context('display.max_rows', 9,\n 'display.max_columns', 10):\n # out vertical bounds can not result in exanded repr\n assert not has_expanded_repr(df10)\n assert has_vertically_truncated_repr(df10)\n\n # width=None in terminal, auto detection\n with option_context('display.max_columns', 100, 'display.max_rows',\n term_width * 20, 'display.width', None):\n df = mkframe((term_width // 7) - 2)\n assert not has_expanded_repr(df)\n df = mkframe((term_width // 7) + 2)\n printing.pprint_thing(df._repr_fits_horizontal_())\n assert has_expanded_repr(df)\n\n def test_str_max_colwidth(self):\n # GH 7856\n df = pd.DataFrame([{'a': 'foo',\n 'b': 'bar',\n 'c': 'uncomfortably long line with lots of stuff',\n 'd': 1}, {'a': 'foo',\n 'b': 'bar',\n 'c': 'stuff',\n 'd': 1}])\n df.set_index(['a', 'b', 'c'])\n assert str(df) == (\n ' a b c d\\n'\n '0 foo bar uncomfortably long line with lots of stuff 1\\n'\n '1 foo bar stuff 1')\n with option_context('max_colwidth', 20):\n assert str(df) == (' a b c d\\n'\n '0 foo bar uncomfortably lo... 
1\\n'\n '1 foo bar stuff 1')\n\n def test_auto_detect(self):\n term_width, term_height = get_terminal_size()\n fac = 1.05 # Arbitrary large factor to exceed term width\n cols = range(int(term_width * fac))\n index = range(10)\n df = DataFrame(index=index, columns=cols)\n with option_context('mode.sim_interactive', True):\n with option_context('max_rows', None):\n with option_context('max_columns', None):\n # Wrap around with None\n assert has_expanded_repr(df)\n with option_context('max_rows', 0):\n with option_context('max_columns', 0):\n # Truncate with auto detection.\n assert has_horizontally_truncated_repr(df)\n\n index = range(int(term_height * fac))\n df = DataFrame(index=index, columns=cols)\n with option_context('max_rows', 0):\n with option_context('max_columns', None):\n # Wrap around with None\n assert has_expanded_repr(df)\n # Truncate vertically\n assert has_vertically_truncated_repr(df)\n\n with option_context('max_rows', None):\n with option_context('max_columns', 0):\n assert has_horizontally_truncated_repr(df)\n\n def test_to_string_repr_unicode(self):\n buf = StringIO()\n\n unicode_values = [u('\\u03c3')] * 10\n unicode_values = np.array(unicode_values, dtype=object)\n df = DataFrame({'unicode': unicode_values})\n df.to_string(col_space=10, buf=buf)\n\n # it works!\n repr(df)\n\n idx = Index(['abc', u('\\u03c3a'), 'aegdvg'])\n ser = Series(np.random.randn(len(idx)), idx)\n rs = repr(ser).split('\\n')\n line_len = len(rs[0])\n for line in rs[1:]:\n try:\n line = line.decode(get_option(\"display.encoding\"))\n except:\n pass\n if not line.startswith('dtype:'):\n assert len(line) == line_len\n\n # it works even if sys.stdin in None\n _stdin = sys.stdin\n try:\n sys.stdin = None\n repr(df)\n finally:\n sys.stdin = _stdin\n\n def test_to_string_unicode_columns(self):\n df = DataFrame({u('\\u03c3'): np.arange(10.)})\n\n buf = StringIO()\n df.to_string(buf=buf)\n buf.getvalue()\n\n buf = StringIO()\n df.info(buf=buf)\n buf.getvalue()\n\n result = self.frame.to_string()\n assert isinstance(result, compat.text_type)\n\n def test_to_string_utf8_columns(self):\n n = u(\"\\u05d0\").encode('utf-8')\n\n with option_context('display.max_rows', 1):\n df = DataFrame([1, 2], columns=[n])\n repr(df)\n\n def test_to_string_unicode_two(self):\n dm = DataFrame({u('c/\\u03c3'): []})\n buf = StringIO()\n dm.to_string(buf)\n\n def test_to_string_unicode_three(self):\n dm = DataFrame(['\\xc2'])\n buf = StringIO()\n dm.to_string(buf)\n\n def test_to_string_with_formatters(self):\n df = DataFrame({'int': [1, 2, 3],\n 'float': [1.0, 2.0, 3.0],\n 'object': [(1, 2), True, False]},\n columns=['int', 'float', 'object'])\n\n formatters = [('int', lambda x: '0x{x:x}'.format(x=x)),\n ('float', lambda x: '[{x: 4.1f}]'.format(x=x)),\n ('object', lambda x: '-{x!s}-'.format(x=x))]\n result = df.to_string(formatters=dict(formatters))\n result2 = df.to_string(formatters=lzip(*formatters)[1])\n assert result == (' int float object\\n'\n '0 0x1 [ 1.0] -(1, 2)-\\n'\n '1 0x2 [ 2.0] -True-\\n'\n '2 0x3 [ 3.0] -False-')\n assert result == result2\n\n def test_to_string_with_datetime64_monthformatter(self):\n months = [datetime(2016, 1, 1), datetime(2016, 2, 2)]\n x = DataFrame({'months': months})\n\n def format_func(x):\n return x.strftime('%Y-%m')\n result = x.to_string(formatters={'months': format_func})\n expected = 'months\\n0 2016-01\\n1 2016-02'\n assert result.strip() == expected\n\n def test_to_string_with_datetime64_hourformatter(self):\n\n x = DataFrame({'hod': pd.to_datetime(['10:10:10.100', 
'12:12:12.120'],\n format='%H:%M:%S.%f')})\n\n def format_func(x):\n return x.strftime('%H:%M')\n\n result = x.to_string(formatters={'hod': format_func})\n expected = 'hod\\n0 10:10\\n1 12:12'\n assert result.strip() == expected\n\n def test_to_string_with_formatters_unicode(self):\n df = DataFrame({u('c/\\u03c3'): [1, 2, 3]})\n result = df.to_string(\n formatters={u('c/\\u03c3'): lambda x: '{x}'.format(x=x)})\n assert result == u(' c/\\u03c3\\n') + '0 1\\n1 2\\n2 3'\n\n def test_east_asian_unicode_frame(self):\n if PY3:\n _rep = repr\n else:\n _rep = unicode # noqa\n\n # not alighned properly because of east asian width\n\n # mid col\n df = DataFrame({'a': [u'あ', u'いいい', u'う', u'ええええええ'],\n 'b': [1, 222, 33333, 4]},\n index=['a', 'bb', 'c', 'ddd'])\n expected = (u\" a b\\na あ 1\\n\"\n u\"bb いいい 222\\nc う 33333\\n\"\n u\"ddd ええええええ 4\")\n assert _rep(df) == expected\n\n # last col\n df = DataFrame({'a': [1, 222, 33333, 4],\n 'b': [u'あ', u'いいい', u'う', u'ええええええ']},\n index=['a', 'bb', 'c', 'ddd'])\n expected = (u\" a b\\na 1 あ\\n\"\n u\"bb 222 いいい\\nc 33333 う\\n\"\n u\"ddd 4 ええええええ\")\n assert _rep(df) == expected\n\n # all col\n df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],\n 'b': [u'あ', u'いいい', u'う', u'ええええええ']},\n index=['a', 'bb', 'c', 'ddd'])\n expected = (u\" a b\\na あああああ あ\\n\"\n u\"bb い いいい\\nc う う\\n\"\n u\"ddd えええ ええええええ\")\n assert _rep(df) == expected\n\n # column name\n df = DataFrame({'b': [u'あ', u'いいい', u'う', u'ええええええ'],\n u'あああああ': [1, 222, 33333, 4]},\n index=['a', 'bb', 'c', 'ddd'])\n expected = (u\" b あああああ\\na あ 1\\n\"\n u\"bb いいい 222\\nc う 33333\\n\"\n u\"ddd ええええええ 4\")\n assert _rep(df) == expected\n\n # index\n df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],\n 'b': [u'あ', u'いいい', u'う', u'ええええええ']},\n index=[u'あああ', u'いいいいいい', u'うう', u'え'])\n expected = (u\" a b\\nあああ あああああ あ\\n\"\n u\"いいいいいい い いいい\\nうう う う\\n\"\n u\"え えええ ええええええ\")\n assert _rep(df) == expected\n\n # index name\n df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],\n 'b': [u'あ', u'いいい', u'う', u'ええええええ']},\n index=pd.Index([u'あ', u'い', u'うう', u'え'],\n name=u'おおおお'))\n expected = (u\" a b\\n\"\n u\"おおおお \\n\"\n u\"あ あああああ あ\\n\"\n u\"い い いいい\\n\"\n u\"うう う う\\n\"\n u\"え えええ ええええええ\")\n assert _rep(df) == expected\n\n # all\n df = DataFrame({u'あああ': [u'あああ', u'い', u'う', u'えええええ'],\n u'いいいいい': [u'あ', u'いいい', u'う', u'ええ']},\n index=pd.Index([u'あ', u'いいい', u'うう', u'え'],\n name=u'お'))\n expected = (u\" あああ いいいいい\\n\"\n u\"お \\n\"\n u\"あ あああ あ\\n\"\n u\"いいい い いいい\\n\"\n u\"うう う う\\n\"\n u\"え えええええ ええ\")\n assert _rep(df) == expected\n\n # MultiIndex\n idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (\n u'おおお', u'かかかか'), (u'き', u'くく')])\n df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],\n 'b': [u'あ', u'いいい', u'う', u'ええええええ']},\n index=idx)\n expected = (u\" a b\\n\"\n u\"あ いい あああああ あ\\n\"\n u\"う え い いいい\\n\"\n u\"おおお かかかか う う\\n\"\n u\"き くく えええ ええええええ\")\n assert _rep(df) == expected\n\n # truncate\n with option_context('display.max_rows', 3, 'display.max_columns', 3):\n df = pd.DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],\n 'b': [u'あ', u'いいい', u'う', u'ええええええ'],\n 'c': [u'お', u'か', u'ききき', u'くくくくくく'],\n u'ああああ': [u'さ', u'し', u'す', u'せ']},\n columns=['a', 'b', 'c', u'ああああ'])\n\n expected = (u\" a ... ああああ\\n0 あああああ ... さ\\n\"\n u\".. ... ... ...\\n3 えええ ... せ\\n\"\n u\"\\n[4 rows x 4 columns]\")\n assert _rep(df) == expected\n\n df.index = [u'あああ', u'いいいい', u'う', 'aaa']\n expected = (u\" a ... ああああ\\nあああ あああああ ... さ\\n\"\n u\".. ... ... ...\\naaa えええ ... 
せ\\n\"\n u\"\\n[4 rows x 4 columns]\")\n assert _rep(df) == expected\n\n # Emable Unicode option -----------------------------------------\n with option_context('display.unicode.east_asian_width', True):\n\n # mid col\n df = DataFrame({'a': [u'あ', u'いいい', u'う', u'ええええええ'],\n 'b': [1, 222, 33333, 4]},\n index=['a', 'bb', 'c', 'ddd'])\n expected = (u\" a b\\na あ 1\\n\"\n u\"bb いいい 222\\nc う 33333\\n\"\n u\"ddd ええええええ 4\")\n assert _rep(df) == expected\n\n # last col\n df = DataFrame({'a': [1, 222, 33333, 4],\n 'b': [u'あ', u'いいい', u'う', u'ええええええ']},\n index=['a', 'bb', 'c', 'ddd'])\n expected = (u\" a b\\na 1 あ\\n\"\n u\"bb 222 いいい\\nc 33333 う\\n\"\n u\"ddd 4 ええええええ\")\n assert _rep(df) == expected\n\n # all col\n df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],\n 'b': [u'あ', u'いいい', u'う', u'ええええええ']},\n index=['a', 'bb', 'c', 'ddd'])\n expected = (u\" a b\\n\"\n u\"a あああああ あ\\n\"\n u\"bb い いいい\\n\"\n u\"c う う\\n\"\n u\"ddd えええ ええええええ\")\n assert _rep(df) == expected\n\n # column name\n df = DataFrame({'b': [u'あ', u'いいい', u'う', u'ええええええ'],\n u'あああああ': [1, 222, 33333, 4]},\n index=['a', 'bb', 'c', 'ddd'])\n expected = (u\" b あああああ\\n\"\n u\"a あ 1\\n\"\n u\"bb いいい 222\\n\"\n u\"c う 33333\\n\"\n u\"ddd ええええええ 4\")\n assert _rep(df) == expected\n\n # index\n df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],\n 'b': [u'あ', u'いいい', u'う', u'ええええええ']},\n index=[u'あああ', u'いいいいいい', u'うう', u'え'])\n expected = (u\" a b\\n\"\n u\"あああ あああああ あ\\n\"\n u\"いいいいいい い いいい\\n\"\n u\"うう う う\\n\"\n u\"え えええ ええええええ\")\n assert _rep(df) == expected\n\n # index name\n df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],\n 'b': [u'あ', u'いいい', u'う', u'ええええええ']},\n index=pd.Index([u'あ', u'い', u'うう', u'え'],\n name=u'おおおお'))\n expected = (u\" a b\\n\"\n u\"おおおお \\n\"\n u\"あ あああああ あ\\n\"\n u\"い い いいい\\n\"\n u\"うう う う\\n\"\n u\"え えええ ええええええ\")\n assert _rep(df) == expected\n\n # all\n df = DataFrame({u'あああ': [u'あああ', u'い', u'う', u'えええええ'],\n u'いいいいい': [u'あ', u'いいい', u'う', u'ええ']},\n index=pd.Index([u'あ', u'いいい', u'うう', u'え'],\n name=u'お'))\n expected = (u\" あああ いいいいい\\n\"\n u\"お \\n\"\n u\"あ あああ あ\\n\"\n u\"いいい い いいい\\n\"\n u\"うう う う\\n\"\n u\"え えええええ ええ\")\n assert _rep(df) == expected\n\n # MultiIndex\n idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (\n u'おおお', u'かかかか'), (u'き', u'くく')])\n df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],\n 'b': [u'あ', u'いいい', u'う', u'ええええええ']},\n index=idx)\n expected = (u\" a b\\n\"\n u\"あ いい あああああ あ\\n\"\n u\"う え い いいい\\n\"\n u\"おおお かかかか う う\\n\"\n u\"き くく えええ ええええええ\")\n assert _rep(df) == expected\n\n # truncate\n with option_context('display.max_rows', 3, 'display.max_columns',\n 3):\n\n df = pd.DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],\n 'b': [u'あ', u'いいい', u'う', u'ええええええ'],\n 'c': [u'お', u'か', u'ききき', u'くくくくくく'],\n u'ああああ': [u'さ', u'し', u'す', u'せ']},\n columns=['a', 'b', 'c', u'ああああ'])\n\n expected = (u\" a ... ああああ\\n\"\n u\"0 あああああ ... さ\\n\"\n u\".. ... ... ...\\n\"\n u\"3 えええ ... せ\\n\"\n u\"\\n[4 rows x 4 columns]\")\n assert _rep(df) == expected\n\n df.index = [u'あああ', u'いいいい', u'う', 'aaa']\n expected = (u\" a ... ああああ\\n\"\n u\"あああ あああああ ... さ\\n\"\n u\"... ... ... ...\\n\"\n u\"aaa えええ ... 
せ\\n\"\n u\"\\n[4 rows x 4 columns]\")\n assert _rep(df) == expected\n\n # ambiguous unicode\n df = DataFrame({'b': [u'あ', u'いいい', u'¡¡', u'ええええええ'],\n u'あああああ': [1, 222, 33333, 4]},\n index=['a', 'bb', 'c', '¡¡¡'])\n expected = (u\" b あああああ\\n\"\n u\"a あ 1\\n\"\n u\"bb いいい 222\\n\"\n u\"c ¡¡ 33333\\n\"\n u\"¡¡¡ ええええええ 4\")\n assert _rep(df) == expected\n\n def test_to_string_buffer_all_unicode(self):\n buf = StringIO()\n\n empty = DataFrame({u('c/\\u03c3'): Series()})\n nonempty = DataFrame({u('c/\\u03c3'): Series([1, 2, 3])})\n\n print(empty, file=buf)\n print(nonempty, file=buf)\n\n # this should work\n buf.getvalue()\n\n def test_to_string_with_col_space(self):\n df = DataFrame(np.random.random(size=(1, 3)))\n c10 = len(df.to_string(col_space=10).split(\"\\n\")[1])\n c20 = len(df.to_string(col_space=20).split(\"\\n\")[1])\n c30 = len(df.to_string(col_space=30).split(\"\\n\")[1])\n assert c10 < c20 < c30\n\n # GH 8230\n # col_space wasn't being applied with header=False\n with_header = df.to_string(col_space=20)\n with_header_row1 = with_header.splitlines()[1]\n no_header = df.to_string(col_space=20, header=False)\n assert len(with_header_row1) == len(no_header)\n\n def test_to_string_truncate_indices(self):\n for index in [tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,\n tm.makeDateIndex, tm.makePeriodIndex]:\n for column in [tm.makeStringIndex]:\n for h in [10, 20]:\n for w in [10, 20]:\n with option_context(\"display.expand_frame_repr\",\n False):\n df = DataFrame(index=index(h), columns=column(w))\n with option_context(\"display.max_rows\", 15):\n if h == 20:\n assert has_vertically_truncated_repr(df)\n else:\n assert not has_vertically_truncated_repr(\n df)\n with option_context(\"display.max_columns\", 15):\n if w == 20:\n assert has_horizontally_truncated_repr(df)\n else:\n assert not (\n has_horizontally_truncated_repr(df))\n with option_context(\"display.max_rows\", 15,\n \"display.max_columns\", 15):\n if h == 20 and w == 20:\n assert has_doubly_truncated_repr(df)\n else:\n assert not has_doubly_truncated_repr(\n df)\n\n def test_to_string_truncate_multilevel(self):\n arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],\n ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]\n df = DataFrame(index=arrays, columns=arrays)\n with option_context(\"display.max_rows\", 7, \"display.max_columns\", 7):\n assert has_doubly_truncated_repr(df)\n\n def test_truncate_with_different_dtypes(self):\n\n # 11594, 12045\n # when truncated the dtypes of the splits can differ\n\n # 11594\n import datetime\n s = Series([datetime.datetime(2012, 1, 1)] * 10 +\n [datetime.datetime(1012, 1, 2)] + [\n datetime.datetime(2012, 1, 3)] * 10)\n\n with pd.option_context('display.max_rows', 8):\n result = str(s)\n assert 'object' in result\n\n # 12045\n df = DataFrame({'text': ['some words'] + [None] * 9})\n\n with pd.option_context('display.max_rows', 8,\n 'display.max_columns', 3):\n result = str(df)\n assert 'None' in result\n assert 'NaN' not in result\n\n def test_datetimelike_frame(self):\n\n # GH 12211\n df = DataFrame(\n {'date': [pd.Timestamp('20130101').tz_localize('UTC')] +\n [pd.NaT] * 5})\n\n with option_context(\"display.max_rows\", 5):\n result = str(df)\n assert '2013-01-01 00:00:00+00:00' in result\n assert 'NaT' in result\n assert '...' 
in result\n assert '[6 rows x 1 columns]' in result\n\n dts = [pd.Timestamp('2011-01-01', tz='US/Eastern')] * 5 + [pd.NaT] * 5\n df = pd.DataFrame({\"dt\": dts,\n \"x\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})\n with option_context('display.max_rows', 5):\n expected = (' dt x\\n'\n '0 2011-01-01 00:00:00-05:00 1\\n'\n '1 2011-01-01 00:00:00-05:00 2\\n'\n '.. ... ..\\n'\n '8 NaT 9\\n'\n '9 NaT 10\\n\\n'\n '[10 rows x 2 columns]')\n assert repr(df) == expected\n\n dts = [pd.NaT] * 5 + [pd.Timestamp('2011-01-01', tz='US/Eastern')] * 5\n df = pd.DataFrame({\"dt\": dts,\n \"x\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})\n with option_context('display.max_rows', 5):\n expected = (' dt x\\n'\n '0 NaT 1\\n'\n '1 NaT 2\\n'\n '.. ... ..\\n'\n '8 2011-01-01 00:00:00-05:00 9\\n'\n '9 2011-01-01 00:00:00-05:00 10\\n\\n'\n '[10 rows x 2 columns]')\n assert repr(df) == expected\n\n dts = ([pd.Timestamp('2011-01-01', tz='Asia/Tokyo')] * 5 +\n [pd.Timestamp('2011-01-01', tz='US/Eastern')] * 5)\n df = pd.DataFrame({\"dt\": dts,\n \"x\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})\n with option_context('display.max_rows', 5):\n expected = (' dt x\\n'\n '0 2011-01-01 00:00:00+09:00 1\\n'\n '1 2011-01-01 00:00:00+09:00 2\\n'\n '.. ... ..\\n'\n '8 2011-01-01 00:00:00-05:00 9\\n'\n '9 2011-01-01 00:00:00-05:00 10\\n\\n'\n '[10 rows x 2 columns]')\n assert repr(df) == expected\n\n @pytest.mark.parametrize('start_date', [\n '2017-01-01 23:59:59.999999999',\n '2017-01-01 23:59:59.99999999',\n '2017-01-01 23:59:59.9999999',\n '2017-01-01 23:59:59.999999',\n '2017-01-01 23:59:59.99999',\n '2017-01-01 23:59:59.9999',\n ])\n def test_datetimeindex_highprecision(self, start_date):\n # GH19030\n # Check that high-precision time values for the end of day are\n # included in repr for DatetimeIndex\n df = DataFrame({'A': date_range(start=start_date,\n freq='D', periods=5)})\n result = str(df)\n assert start_date in result\n\n dti = date_range(start=start_date,\n freq='D', periods=5)\n df = DataFrame({'A': range(5)}, index=dti)\n result = str(df.index)\n assert start_date in result\n\n def test_nonunicode_nonascii_alignment(self):\n df = DataFrame([[\"aa\\xc3\\xa4\\xc3\\xa4\", 1], [\"bbbb\", 2]])\n rep_str = df.to_string()\n lines = rep_str.split('\\n')\n assert len(lines[1]) == len(lines[2])\n\n def test_unicode_problem_decoding_as_ascii(self):\n dm = DataFrame({u('c/\\u03c3'): Series({'test': np.nan})})\n compat.text_type(dm.to_string())\n\n def test_string_repr_encoding(self):\n filepath = tm.get_data_path('unicode_series.csv')\n df = pd.read_csv(filepath, header=None, encoding='latin1')\n repr(df)\n repr(df[1])\n\n def test_repr_corner(self):\n # representing infs poses no problems\n df = DataFrame({'foo': [-np.inf, np.inf]})\n repr(df)\n\n def test_frame_info_encoding(self):\n index = ['\\'Til There Was You (1997)',\n 'ldum klaka (Cold Fever) (1994)']\n fmt.set_option('display.max_rows', 1)\n df = DataFrame(columns=['a', 'b', 'c'], index=index)\n repr(df)\n repr(df.T)\n fmt.set_option('display.max_rows', 200)\n\n def test_pprint_thing(self):\n from pandas.io.formats.printing import pprint_thing as pp_t\n\n if PY3:\n pytest.skip(\"doesn't work on Python 3\")\n\n assert pp_t('a') == u('a')\n assert pp_t(u('a')) == u('a')\n assert pp_t(None) == 'None'\n assert pp_t(u('\\u05d0'), quote_strings=True) == u(\"u'\\u05d0'\")\n assert pp_t(u('\\u05d0'), quote_strings=False) == u('\\u05d0')\n assert (pp_t((u('\\u05d0'), u('\\u05d1')), quote_strings=True) ==\n u(\"(u'\\u05d0', u'\\u05d1')\"))\n assert (pp_t((u('\\u05d0'), (u('\\u05d1'), u('\\u05d2'))),\n 
quote_strings=True) == u(\"(u'\\u05d0', \"\n \"(u'\\u05d1', u'\\u05d2'))\"))\n assert (pp_t(('foo', u('\\u05d0'), (u('\\u05d0'), u('\\u05d0'))),\n quote_strings=True) == u(\"(u'foo', u'\\u05d0', \"\n \"(u'\\u05d0', u'\\u05d0'))\"))\n\n # gh-2038: escape embedded tabs in string\n assert \"\\t\" not in pp_t(\"a\\tb\", escape_chars=(\"\\t\", ))\n\n def test_wide_repr(self):\n with option_context('mode.sim_interactive', True,\n 'display.show_dimensions', True):\n max_cols = get_option('display.max_columns')\n df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))\n set_option('display.expand_frame_repr', False)\n rep_str = repr(df)\n\n assert \"10 rows x {c} columns\".format(c=max_cols - 1) in rep_str\n set_option('display.expand_frame_repr', True)\n wide_repr = repr(df)\n assert rep_str != wide_repr\n\n with option_context('display.width', 120):\n wider_repr = repr(df)\n assert len(wider_repr) < len(wide_repr)\n\n reset_option('display.expand_frame_repr')\n\n def test_wide_repr_wide_columns(self):\n with option_context('mode.sim_interactive', True):\n df = DataFrame(np.random.randn(5, 3),\n columns=['a' * 90, 'b' * 90, 'c' * 90])\n rep_str = repr(df)\n\n assert len(rep_str.splitlines()) == 20\n\n def test_wide_repr_named(self):\n with option_context('mode.sim_interactive', True):\n max_cols = get_option('display.max_columns')\n df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))\n df.index.name = 'DataFrame Index'\n set_option('display.expand_frame_repr', False)\n\n rep_str = repr(df)\n set_option('display.expand_frame_repr', True)\n wide_repr = repr(df)\n assert rep_str != wide_repr\n\n with option_context('display.width', 150):\n wider_repr = repr(df)\n assert len(wider_repr) < len(wide_repr)\n\n for line in wide_repr.splitlines()[1::13]:\n assert 'DataFrame Index' in line\n\n reset_option('display.expand_frame_repr')\n\n def test_wide_repr_multiindex(self):\n with option_context('mode.sim_interactive', True):\n midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))\n max_cols = get_option('display.max_columns')\n df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)),\n index=midx)\n df.index.names = ['Level 0', 'Level 1']\n set_option('display.expand_frame_repr', False)\n rep_str = repr(df)\n set_option('display.expand_frame_repr', True)\n wide_repr = repr(df)\n assert rep_str != wide_repr\n\n with option_context('display.width', 150):\n wider_repr = repr(df)\n assert len(wider_repr) < len(wide_repr)\n\n for line in wide_repr.splitlines()[1::13]:\n assert 'Level 0 Level 1' in line\n\n reset_option('display.expand_frame_repr')\n\n def test_wide_repr_multiindex_cols(self):\n with option_context('mode.sim_interactive', True):\n max_cols = get_option('display.max_columns')\n midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))\n mcols = MultiIndex.from_arrays(\n tm.rands_array(3, size=(2, max_cols - 1)))\n df = DataFrame(tm.rands_array(25, (10, max_cols - 1)),\n index=midx, columns=mcols)\n df.index.names = ['Level 0', 'Level 1']\n set_option('display.expand_frame_repr', False)\n rep_str = repr(df)\n set_option('display.expand_frame_repr', True)\n wide_repr = repr(df)\n assert rep_str != wide_repr\n\n with option_context('display.width', 150):\n wider_repr = repr(df)\n assert len(wider_repr) < len(wide_repr)\n\n reset_option('display.expand_frame_repr')\n\n def test_wide_repr_unicode(self):\n with option_context('mode.sim_interactive', True):\n max_cols = get_option('display.max_columns')\n df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))\n 
set_option('display.expand_frame_repr', False)\n rep_str = repr(df)\n set_option('display.expand_frame_repr', True)\n wide_repr = repr(df)\n assert rep_str != wide_repr\n\n with option_context('display.width', 150):\n wider_repr = repr(df)\n assert len(wider_repr) < len(wide_repr)\n\n reset_option('display.expand_frame_repr')\n\n def test_wide_repr_wide_long_columns(self):\n with option_context('mode.sim_interactive', True):\n df = DataFrame({'a': ['a' * 30, 'b' * 30],\n 'b': ['c' * 70, 'd' * 80]})\n\n result = repr(df)\n assert 'ccccc' in result\n assert 'ddddd' in result\n\n def test_long_series(self):\n n = 1000\n s = Series(\n np.random.randint(-50, 50, n),\n index=['s{x:04d}'.format(x=x) for x in range(n)], dtype='int64')\n\n import re\n str_rep = str(s)\n nmatches = len(re.findall('dtype', str_rep))\n assert nmatches == 1\n\n def test_index_with_nan(self):\n # GH 2850\n df = DataFrame({'id1': {0: '1a3',\n 1: '9h4'},\n 'id2': {0: np.nan,\n 1: 'd67'},\n 'id3': {0: '78d',\n 1: '79d'},\n 'value': {0: 123,\n 1: 64}})\n\n # multi-index\n y = df.set_index(['id1', 'id2', 'id3'])\n result = y.to_string()\n expected = u(\n ' value\\nid1 id2 id3 \\n'\n '1a3 NaN 78d 123\\n9h4 d67 79d 64')\n assert result == expected\n\n # index\n y = df.set_index('id2')\n result = y.to_string()\n expected = u(\n ' id1 id3 value\\nid2 \\n'\n 'NaN 1a3 78d 123\\nd67 9h4 79d 64')\n assert result == expected\n\n # with append (this failed in 0.12)\n y = df.set_index(['id1', 'id2']).set_index('id3', append=True)\n result = y.to_string()\n expected = u(\n ' value\\nid1 id2 id3 \\n'\n '1a3 NaN 78d 123\\n9h4 d67 79d 64')\n assert result == expected\n\n # all-nan in mi\n df2 = df.copy()\n df2.loc[:, 'id2'] = np.nan\n y = df2.set_index('id2')\n result = y.to_string()\n expected = u(\n ' id1 id3 value\\nid2 \\n'\n 'NaN 1a3 78d 123\\nNaN 9h4 79d 64')\n assert result == expected\n\n # partial nan in mi\n df2 = df.copy()\n df2.loc[:, 'id2'] = np.nan\n y = df2.set_index(['id2', 'id3'])\n result = y.to_string()\n expected = u(\n ' id1 value\\nid2 id3 \\n'\n 'NaN 78d 1a3 123\\n 79d 9h4 64')\n assert result == expected\n\n df = DataFrame({'id1': {0: np.nan,\n 1: '9h4'},\n 'id2': {0: np.nan,\n 1: 'd67'},\n 'id3': {0: np.nan,\n 1: '79d'},\n 'value': {0: 123,\n 1: 64}})\n\n y = df.set_index(['id1', 'id2', 'id3'])\n result = y.to_string()\n expected = u(\n ' value\\nid1 id2 id3 \\n'\n 'NaN NaN NaN 123\\n9h4 d67 79d 64')\n assert result == expected\n\n def test_to_string(self):\n\n # big mixed\n biggie = DataFrame({'A': np.random.randn(200),\n 'B': tm.makeStringIndex(200)},\n index=lrange(200))\n\n biggie.loc[:20, 'A'] = np.nan\n biggie.loc[:20, 'B'] = np.nan\n s = biggie.to_string()\n\n buf = StringIO()\n retval = biggie.to_string(buf=buf)\n assert retval is None\n assert buf.getvalue() == s\n\n assert isinstance(s, compat.string_types)\n\n # print in right order\n result = biggie.to_string(columns=['B', 'A'], col_space=17,\n float_format='%.5f'.__mod__)\n lines = result.split('\\n')\n header = lines[0].strip().split()\n joined = '\\n'.join(re.sub(r'\\s+', ' ', x).strip() for x in lines[1:])\n recons = read_table(StringIO(joined), names=header,\n header=None, sep=' ')\n tm.assert_series_equal(recons['B'], biggie['B'])\n assert recons['A'].count() == biggie['A'].count()\n assert (np.abs(recons['A'].dropna() -\n biggie['A'].dropna()) < 0.1).all()\n\n # expected = ['B', 'A']\n # assert header == expected\n\n result = biggie.to_string(columns=['A'], col_space=17)\n header = result.split('\\n')[0].strip().split()\n expected = ['A']\n 
assert header == expected\n\n biggie.to_string(columns=['B', 'A'],\n formatters={'A': lambda x: '{x:.1f}'.format(x=x)})\n\n biggie.to_string(columns=['B', 'A'], float_format=str)\n biggie.to_string(columns=['B', 'A'], col_space=12, float_format=str)\n\n frame = DataFrame(index=np.arange(200))\n frame.to_string()\n\n def test_to_string_no_header(self):\n df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n\n df_s = df.to_string(header=False)\n expected = \"0 1 4\\n1 2 5\\n2 3 6\"\n\n assert df_s == expected\n\n def test_to_string_specified_header(self):\n df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n\n df_s = df.to_string(header=['X', 'Y'])\n expected = ' X Y\\n0 1 4\\n1 2 5\\n2 3 6'\n\n assert df_s == expected\n\n with pytest.raises(ValueError):\n df.to_string(header=['X'])\n\n def test_to_string_no_index(self):\n df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n\n df_s = df.to_string(index=False)\n expected = \"x y\\n1 4\\n2 5\\n3 6\"\n\n assert df_s == expected\n\n def test_to_string_line_width_no_index(self):\n df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n\n df_s = df.to_string(line_width=1, index=False)\n expected = \"x \\\\\\n1 \\n2 \\n3 \\n\\ny \\n4 \\n5 \\n6\"\n\n assert df_s == expected\n\n def test_to_string_float_formatting(self):\n tm.reset_display_options()\n fmt.set_option('display.precision', 5, 'display.column_space', 12,\n 'display.notebook_repr_html', False)\n\n df = DataFrame({'x': [0, 0.25, 3456.000, 12e+45, 1.64e+6, 1.7e+8,\n 1.253456, np.pi, -1e6]})\n\n df_s = df.to_string()\n\n # Python 2.5 just wants me to be sad. And debian 32-bit\n # sys.version_info[0] == 2 and sys.version_info[1] < 6:\n if _three_digit_exp():\n expected = (' x\\n0 0.00000e+000\\n1 2.50000e-001\\n'\n '2 3.45600e+003\\n3 1.20000e+046\\n4 1.64000e+006\\n'\n '5 1.70000e+008\\n6 1.25346e+000\\n7 3.14159e+000\\n'\n '8 -1.00000e+006')\n else:\n expected = (' x\\n0 0.00000e+00\\n1 2.50000e-01\\n'\n '2 3.45600e+03\\n3 1.20000e+46\\n4 1.64000e+06\\n'\n '5 1.70000e+08\\n6 1.25346e+00\\n7 3.14159e+00\\n'\n '8 -1.00000e+06')\n assert df_s == expected\n\n df = DataFrame({'x': [3234, 0.253]})\n df_s = df.to_string()\n\n expected = (' x\\n' '0 3234.000\\n' '1 0.253')\n assert df_s == expected\n\n tm.reset_display_options()\n assert get_option(\"display.precision\") == 6\n\n df = DataFrame({'x': [1e9, 0.2512]})\n df_s = df.to_string()\n # Python 2.5 just wants me to be sad. 
And debian 32-bit\n # sys.version_info[0] == 2 and sys.version_info[1] < 6:\n if _three_digit_exp():\n expected = (' x\\n'\n '0 1.000000e+009\\n'\n '1 2.512000e-001')\n else:\n expected = (' x\\n'\n '0 1.000000e+09\\n'\n '1 2.512000e-01')\n assert df_s == expected\n\n def test_to_string_small_float_values(self):\n df = DataFrame({'a': [1.5, 1e-17, -5.5e-7]})\n\n result = df.to_string()\n # sadness per above\n if '{x:.4g}'.format(x=1.7e8) == '1.7e+008':\n expected = (' a\\n'\n '0 1.500000e+000\\n'\n '1 1.000000e-017\\n'\n '2 -5.500000e-007')\n else:\n expected = (' a\\n'\n '0 1.500000e+00\\n'\n '1 1.000000e-17\\n'\n '2 -5.500000e-07')\n assert result == expected\n\n # but not all exactly zero\n df = df * 0\n result = df.to_string()\n expected = (' 0\\n' '0 0\\n' '1 0\\n' '2 -0')\n\n def test_to_string_float_index(self):\n index = Index([1.5, 2, 3, 4, 5])\n df = DataFrame(lrange(5), index=index)\n\n result = df.to_string()\n expected = (' 0\\n'\n '1.5 0\\n'\n '2.0 1\\n'\n '3.0 2\\n'\n '4.0 3\\n'\n '5.0 4')\n assert result == expected\n\n def test_to_string_ascii_error(self):\n data = [('0 ', u(' .gitignore '), u(' 5 '),\n ' \\xe2\\x80\\xa2\\xe2\\x80\\xa2\\xe2\\x80'\n '\\xa2\\xe2\\x80\\xa2\\xe2\\x80\\xa2')]\n df = DataFrame(data)\n\n # it works!\n repr(df)\n\n def test_to_string_int_formatting(self):\n df = DataFrame({'x': [-15, 20, 25, -35]})\n assert issubclass(df['x'].dtype.type, np.integer)\n\n output = df.to_string()\n expected = (' x\\n' '0 -15\\n' '1 20\\n' '2 25\\n' '3 -35')\n assert output == expected\n\n def test_to_string_index_formatter(self):\n df = DataFrame([lrange(5), lrange(5, 10), lrange(10, 15)])\n\n rs = df.to_string(formatters={'__index__': lambda x: 'abc' [x]})\n\n xp = \"\"\"\\\n 0 1 2 3 4\na 0 1 2 3 4\nb 5 6 7 8 9\nc 10 11 12 13 14\\\n\"\"\"\n\n assert rs == xp\n\n def test_to_string_left_justify_cols(self):\n tm.reset_display_options()\n df = DataFrame({'x': [3234, 0.253]})\n df_s = df.to_string(justify='left')\n expected = (' x \\n' '0 3234.000\\n' '1 0.253')\n assert df_s == expected\n\n def test_to_string_format_na(self):\n tm.reset_display_options()\n df = DataFrame({'A': [np.nan, -1, -2.1234, 3, 4],\n 'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})\n result = df.to_string()\n\n expected = (' A B\\n'\n '0 NaN NaN\\n'\n '1 -1.0000 foo\\n'\n '2 -2.1234 foooo\\n'\n '3 3.0000 fooooo\\n'\n '4 4.0000 bar')\n assert result == expected\n\n df = DataFrame({'A': [np.nan, -1., -2., 3., 4.],\n 'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})\n result = df.to_string()\n\n expected = (' A B\\n'\n '0 NaN NaN\\n'\n '1 -1.0 foo\\n'\n '2 -2.0 foooo\\n'\n '3 3.0 fooooo\\n'\n '4 4.0 bar')\n assert result == expected\n\n def test_to_string_line_width(self):\n df = DataFrame(123, lrange(10, 15), lrange(30))\n s = df.to_string(line_width=80)\n assert max(len(l) for l in s.split('\\n')) == 80\n\n def test_show_dimensions(self):\n df = DataFrame(123, lrange(10, 15), lrange(30))\n\n with option_context('display.max_rows', 10, 'display.max_columns', 40,\n 'display.width', 500, 'display.expand_frame_repr',\n 'info', 'display.show_dimensions', True):\n assert '5 rows' in str(df)\n assert '5 rows' in df._repr_html_()\n with option_context('display.max_rows', 10, 'display.max_columns', 40,\n 'display.width', 500, 'display.expand_frame_repr',\n 'info', 'display.show_dimensions', False):\n assert '5 rows' not in str(df)\n assert '5 rows' not in df._repr_html_()\n with option_context('display.max_rows', 2, 'display.max_columns', 2,\n 'display.width', 500, 'display.expand_frame_repr',\n 'info', 
'display.show_dimensions', 'truncate'):\n assert '5 rows' in str(df)\n assert '5 rows' in df._repr_html_()\n with option_context('display.max_rows', 10, 'display.max_columns', 40,\n 'display.width', 500, 'display.expand_frame_repr',\n 'info', 'display.show_dimensions', 'truncate'):\n assert '5 rows' not in str(df)\n assert '5 rows' not in df._repr_html_()\n\n def test_repr_html(self):\n self.frame._repr_html_()\n\n fmt.set_option('display.max_rows', 1, 'display.max_columns', 1)\n self.frame._repr_html_()\n\n fmt.set_option('display.notebook_repr_html', False)\n self.frame._repr_html_()\n\n tm.reset_display_options()\n\n df = DataFrame([[1, 2], [3, 4]])\n fmt.set_option('display.show_dimensions', True)\n assert '2 rows' in df._repr_html_()\n fmt.set_option('display.show_dimensions', False)\n assert '2 rows' not in df._repr_html_()\n\n tm.reset_display_options()\n\n def test_repr_html_wide(self):\n max_cols = get_option('display.max_columns')\n df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))\n reg_repr = df._repr_html_()\n assert \"...\" not in reg_repr\n\n wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))\n wide_repr = wide_df._repr_html_()\n assert \"...\" in wide_repr\n\n def test_repr_html_wide_multiindex_cols(self):\n max_cols = get_option('display.max_columns')\n\n mcols = MultiIndex.from_product([np.arange(max_cols // 2),\n ['foo', 'bar']],\n names=['first', 'second'])\n df = DataFrame(tm.rands_array(25, size=(10, len(mcols))),\n columns=mcols)\n reg_repr = df._repr_html_()\n assert '...' not in reg_repr\n\n mcols = MultiIndex.from_product((np.arange(1 + (max_cols // 2)),\n ['foo', 'bar']),\n names=['first', 'second'])\n df = DataFrame(tm.rands_array(25, size=(10, len(mcols))),\n columns=mcols)\n wide_repr = df._repr_html_()\n assert '...' in wide_repr\n\n def test_repr_html_long(self):\n with option_context('display.max_rows', 60):\n max_rows = get_option('display.max_rows')\n h = max_rows - 1\n df = DataFrame({'A': np.arange(1, 1 + h),\n 'B': np.arange(41, 41 + h)})\n reg_repr = df._repr_html_()\n assert '..' not in reg_repr\n assert str(41 + max_rows // 2) in reg_repr\n\n h = max_rows + 1\n df = DataFrame({'A': np.arange(1, 1 + h),\n 'B': np.arange(41, 41 + h)})\n long_repr = df._repr_html_()\n assert '..' in long_repr\n assert str(41 + max_rows // 2) not in long_repr\n assert u('{h} rows ').format(h=h) in long_repr\n assert u('2 columns') in long_repr\n\n def test_repr_html_float(self):\n with option_context('display.max_rows', 60):\n\n max_rows = get_option('display.max_rows')\n h = max_rows - 1\n df = DataFrame({'idx': np.linspace(-10, 10, h),\n 'A': np.arange(1, 1 + h),\n 'B': np.arange(41, 41 + h)}).set_index('idx')\n reg_repr = df._repr_html_()\n assert '..' not in reg_repr\n assert '<td>{val}</td>'.format(val=str(40 + h)) in reg_repr\n\n h = max_rows + 1\n df = DataFrame({'idx': np.linspace(-10, 10, h),\n 'A': np.arange(1, 1 + h),\n 'B': np.arange(41, 41 + h)}).set_index('idx')\n long_repr = df._repr_html_()\n assert '..' 
in long_repr\n assert '<td>{val}</td>'.format(val='31') not in long_repr\n assert u('{h} rows ').format(h=h) in long_repr\n assert u('2 columns') in long_repr\n\n def test_repr_html_long_multiindex(self):\n max_rows = get_option('display.max_rows')\n max_L1 = max_rows // 2\n\n tuples = list(itertools.product(np.arange(max_L1), ['foo', 'bar']))\n idx = MultiIndex.from_tuples(tuples, names=['first', 'second'])\n df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx,\n columns=['A', 'B'])\n reg_repr = df._repr_html_()\n assert '...' not in reg_repr\n\n tuples = list(itertools.product(np.arange(max_L1 + 1), ['foo', 'bar']))\n idx = MultiIndex.from_tuples(tuples, names=['first', 'second'])\n df = DataFrame(np.random.randn((max_L1 + 1) * 2, 2), index=idx,\n columns=['A', 'B'])\n long_repr = df._repr_html_()\n assert '...' in long_repr\n\n def test_repr_html_long_and_wide(self):\n max_cols = get_option('display.max_columns')\n max_rows = get_option('display.max_rows')\n\n h, w = max_rows - 1, max_cols - 1\n df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})\n assert '...' not in df._repr_html_()\n\n h, w = max_rows + 1, max_cols + 1\n df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})\n assert '...' in df._repr_html_()\n\n def test_info_repr(self):\n max_rows = get_option('display.max_rows')\n max_cols = get_option('display.max_columns')\n # Long\n h, w = max_rows + 1, max_cols - 1\n df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})\n assert has_vertically_truncated_repr(df)\n with option_context('display.large_repr', 'info'):\n assert has_info_repr(df)\n\n # Wide\n h, w = max_rows - 1, max_cols + 1\n df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})\n assert has_horizontally_truncated_repr(df)\n with option_context('display.large_repr', 'info'):\n assert has_info_repr(df)\n\n def test_info_repr_max_cols(self):\n # GH #6939\n df = DataFrame(np.random.randn(10, 5))\n with option_context('display.large_repr', 'info',\n 'display.max_columns', 1,\n 'display.max_info_columns', 4):\n assert has_non_verbose_info_repr(df)\n\n with option_context('display.large_repr', 'info',\n 'display.max_columns', 1,\n 'display.max_info_columns', 5):\n assert not has_non_verbose_info_repr(df)\n\n # test verbose overrides\n # fmt.set_option('display.max_info_columns', 4) # exceeded\n\n def test_info_repr_html(self):\n max_rows = get_option('display.max_rows')\n max_cols = get_option('display.max_columns')\n # Long\n h, w = max_rows + 1, max_cols - 1\n df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})\n assert r'<class' not in df._repr_html_()\n with option_context('display.large_repr', 'info'):\n assert r'<class' in df._repr_html_()\n\n # Wide\n h, w = max_rows - 1, max_cols + 1\n df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})\n assert '<class' not in df._repr_html_()\n with option_context('display.large_repr', 'info'):\n assert '<class' in df._repr_html_()\n\n def test_fake_qtconsole_repr_html(self):\n def get_ipython():\n return {'config': {'KernelApp':\n {'parent_appname': 'ipython-qtconsole'}}}\n\n repstr = self.frame._repr_html_()\n assert repstr is not None\n\n fmt.set_option('display.max_rows', 5, 'display.max_columns', 2)\n repstr = self.frame._repr_html_()\n\n assert 'class' in repstr # info fallback\n tm.reset_display_options()\n\n def test_pprint_pathological_object(self):\n \"\"\"\n If the test fails, it at least won't hang.\n \"\"\"\n\n class A:\n def __getitem__(self, key):\n return 3 # obviously simplified\n\n df = 
DataFrame([A()])\n repr(df) # just don't die\n\n def test_float_trim_zeros(self):\n vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10,\n 2.03954217305e+10, 5.59897817305e+10]\n skip = True\n for line in repr(DataFrame({'A': vals})).split('\\n')[:-2]:\n if line.startswith('dtype:'):\n continue\n if _three_digit_exp():\n assert ('+010' in line) or skip\n else:\n assert ('+10' in line) or skip\n skip = False\n\n def test_dict_entries(self):\n df = DataFrame({'A': [{'a': 1, 'b': 2}]})\n\n val = df.to_string()\n assert \"'a': 1\" in val\n assert \"'b': 2\" in val\n\n def test_period(self):\n # GH 12615\n df = pd.DataFrame({'A': pd.period_range('2013-01',\n periods=4, freq='M'),\n 'B': [pd.Period('2011-01', freq='M'),\n pd.Period('2011-02-01', freq='D'),\n pd.Period('2011-03-01 09:00', freq='H'),\n pd.Period('2011-04', freq='M')],\n 'C': list('abcd')})\n exp = (\" A B C\\n0 2013-01 2011-01 a\\n\"\n \"1 2013-02 2011-02-01 b\\n2 2013-03 2011-03-01 09:00 c\\n\"\n \"3 2013-04 2011-04 d\")\n assert str(df) == exp\n\n\ndef gen_series_formatting():\n s1 = pd.Series(['a'] * 100)\n s2 = pd.Series(['ab'] * 100)\n s3 = pd.Series(['a', 'ab', 'abc', 'abcd', 'abcde', 'abcdef'])\n s4 = s3[::-1]\n test_sers = {'onel': s1, 'twol': s2, 'asc': s3, 'desc': s4}\n return test_sers\n\n\nclass TestSeriesFormatting(object):\n\n def setup_method(self, method):\n self.ts = tm.makeTimeSeries()\n\n def test_repr_unicode(self):\n s = Series([u('\\u03c3')] * 10)\n repr(s)\n\n a = Series([u(\"\\u05d0\")] * 1000)\n a.name = 'title1'\n repr(a)\n\n def test_to_string(self):\n buf = StringIO()\n\n s = self.ts.to_string()\n\n retval = self.ts.to_string(buf=buf)\n assert retval is None\n assert buf.getvalue().strip() == s\n\n # pass float_format\n format = '%.4f'.__mod__\n result = self.ts.to_string(float_format=format)\n result = [x.split()[1] for x in result.split('\\n')[:-1]]\n expected = [format(x) for x in self.ts]\n assert result == expected\n\n # empty string\n result = self.ts[:0].to_string()\n assert result == 'Series([], Freq: B)'\n\n result = self.ts[:0].to_string(length=0)\n assert result == 'Series([], Freq: B)'\n\n # name and length\n cp = self.ts.copy()\n cp.name = 'foo'\n result = cp.to_string(length=True, name=True, dtype=True)\n last_line = result.split('\\n')[-1].strip()\n assert last_line == (\"Freq: B, Name: foo, \"\n \"Length: {cp}, dtype: float64\".format(cp=len(cp)))\n\n def test_freq_name_separation(self):\n s = Series(np.random.randn(10),\n index=date_range('1/1/2000', periods=10), name=0)\n\n result = repr(s)\n assert 'Freq: D, Name: 0' in result\n\n def test_to_string_mixed(self):\n s = Series(['foo', np.nan, -1.23, 4.56])\n result = s.to_string()\n expected = (u('0 foo\\n') + u('1 NaN\\n') + u('2 -1.23\\n') +\n u('3 4.56'))\n assert result == expected\n\n # but don't count NAs as floats\n s = Series(['foo', np.nan, 'bar', 'baz'])\n result = s.to_string()\n expected = (u('0 foo\\n') + '1 NaN\\n' + '2 bar\\n' + '3 baz')\n assert result == expected\n\n s = Series(['foo', 5, 'bar', 'baz'])\n result = s.to_string()\n expected = (u('0 foo\\n') + '1 5\\n' + '2 bar\\n' + '3 baz')\n assert result == expected\n\n def test_to_string_float_na_spacing(self):\n s = Series([0., 1.5678, 2., -3., 4.])\n s[::2] = np.nan\n\n result = s.to_string()\n expected = (u('0 NaN\\n') + '1 1.5678\\n' + '2 NaN\\n' +\n '3 -3.0000\\n' + '4 NaN')\n assert result == expected\n\n def test_to_string_without_index(self):\n # GH 11729 Test index=False option\n s = Series([1, 2, 3, 4])\n result = s.to_string(index=False)\n 
expected = (u('1\\n') + '2\\n' + '3\\n' + '4')\n assert result == expected\n\n def test_unicode_name_in_footer(self):\n s = Series([1, 2], name=u('\\u05e2\\u05d1\\u05e8\\u05d9\\u05ea'))\n sf = fmt.SeriesFormatter(s, name=u('\\u05e2\\u05d1\\u05e8\\u05d9\\u05ea'))\n sf._get_footer() # should not raise exception\n\n def test_east_asian_unicode_series(self):\n if PY3:\n _rep = repr\n else:\n _rep = unicode # noqa\n # not aligned properly because of east asian width\n\n # unicode index\n s = Series(['a', 'bb', 'CCC', 'D'],\n index=[u'あ', u'いい', u'ううう', u'ええええ'])\n expected = (u\"あ a\\nいい bb\\nううう CCC\\n\"\n u\"ええええ D\\ndtype: object\")\n assert _rep(s) == expected\n\n # unicode values\n s = Series([u'あ', u'いい', u'ううう', u'ええええ'],\n index=['a', 'bb', 'c', 'ddd'])\n expected = (u\"a あ\\nbb いい\\nc ううう\\n\"\n u\"ddd ええええ\\ndtype: object\")\n assert _rep(s) == expected\n\n # both\n s = Series([u'あ', u'いい', u'ううう', u'ええええ'],\n index=[u'ああ', u'いいいい', u'う', u'えええ'])\n expected = (u\"ああ あ\\nいいいい いい\\nう ううう\\n\"\n u\"えええ ええええ\\ndtype: object\")\n assert _rep(s) == expected\n\n # unicode footer\n s = Series([u'あ', u'いい', u'ううう', u'ええええ'],\n index=[u'ああ', u'いいいい', u'う', u'えええ'],\n name=u'おおおおおおお')\n expected = (u\"ああ あ\\nいいいい いい\\nう ううう\\n\"\n u\"えええ ええええ\\nName: おおおおおおお, dtype: object\")\n assert _rep(s) == expected\n\n # MultiIndex\n idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (\n u'おおお', u'かかかか'), (u'き', u'くく')])\n s = Series([1, 22, 3333, 44444], index=idx)\n expected = (u\"あ いい 1\\n\"\n u\"う え 22\\n\"\n u\"おおお かかかか 3333\\n\"\n u\"き くく 44444\\ndtype: int64\")\n assert _rep(s) == expected\n\n # object dtype, shorter than unicode repr\n s = Series([1, 22, 3333, 44444], index=[1, 'AB', np.nan, u'あああ'])\n expected = (u\"1 1\\nAB 22\\nNaN 3333\\n\"\n u\"あああ 44444\\ndtype: int64\")\n assert _rep(s) == expected\n\n # object dtype, longer than unicode repr\n s = Series([1, 22, 3333, 44444],\n index=[1, 'AB', pd.Timestamp('2011-01-01'), u'あああ'])\n expected = (u\"1 1\\n\"\n u\"AB 22\\n\"\n u\"2011-01-01 00:00:00 3333\\n\"\n u\"あああ 44444\\ndtype: int64\")\n assert _rep(s) == expected\n\n # truncate\n with option_context('display.max_rows', 3):\n s = Series([u'あ', u'いい', u'ううう', u'ええええ'],\n name=u'おおおおおおお')\n\n expected = (u\"0 あ\\n ... \\n\"\n u\"3 ええええ\\n\"\n u\"Name: おおおおおおお, Length: 4, dtype: object\")\n assert _rep(s) == expected\n\n s.index = [u'ああ', u'いいいい', u'う', u'えええ']\n expected = (u\"ああ あ\\n ... 
\\n\"\n u\"えええ ええええ\\n\"\n u\"Name: おおおおおおお, Length: 4, dtype: object\")\n assert _rep(s) == expected\n\n # Emable Unicode option -----------------------------------------\n with option_context('display.unicode.east_asian_width', True):\n\n # unicode index\n s = Series(['a', 'bb', 'CCC', 'D'],\n index=[u'あ', u'いい', u'ううう', u'ええええ'])\n expected = (u\"あ a\\nいい bb\\nううう CCC\\n\"\n u\"ええええ D\\ndtype: object\")\n assert _rep(s) == expected\n\n # unicode values\n s = Series([u'あ', u'いい', u'ううう', u'ええええ'],\n index=['a', 'bb', 'c', 'ddd'])\n expected = (u\"a あ\\nbb いい\\nc ううう\\n\"\n u\"ddd ええええ\\ndtype: object\")\n assert _rep(s) == expected\n\n # both\n s = Series([u'あ', u'いい', u'ううう', u'ええええ'],\n index=[u'ああ', u'いいいい', u'う', u'えええ'])\n expected = (u\"ああ あ\\n\"\n u\"いいいい いい\\n\"\n u\"う ううう\\n\"\n u\"えええ ええええ\\ndtype: object\")\n assert _rep(s) == expected\n\n # unicode footer\n s = Series([u'あ', u'いい', u'ううう', u'ええええ'],\n index=[u'ああ', u'いいいい', u'う', u'えええ'],\n name=u'おおおおおおお')\n expected = (u\"ああ あ\\n\"\n u\"いいいい いい\\n\"\n u\"う ううう\\n\"\n u\"えええ ええええ\\n\"\n u\"Name: おおおおおおお, dtype: object\")\n assert _rep(s) == expected\n\n # MultiIndex\n idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (\n u'おおお', u'かかかか'), (u'き', u'くく')])\n s = Series([1, 22, 3333, 44444], index=idx)\n expected = (u\"あ いい 1\\n\"\n u\"う え 22\\n\"\n u\"おおお かかかか 3333\\n\"\n u\"き くく 44444\\n\"\n u\"dtype: int64\")\n assert _rep(s) == expected\n\n # object dtype, shorter than unicode repr\n s = Series([1, 22, 3333, 44444], index=[1, 'AB', np.nan, u'あああ'])\n expected = (u\"1 1\\nAB 22\\nNaN 3333\\n\"\n u\"あああ 44444\\ndtype: int64\")\n assert _rep(s) == expected\n\n # object dtype, longer than unicode repr\n s = Series([1, 22, 3333, 44444],\n index=[1, 'AB', pd.Timestamp('2011-01-01'), u'あああ'])\n expected = (u\"1 1\\n\"\n u\"AB 22\\n\"\n u\"2011-01-01 00:00:00 3333\\n\"\n u\"あああ 44444\\ndtype: int64\")\n assert _rep(s) == expected\n\n # truncate\n with option_context('display.max_rows', 3):\n s = Series([u'あ', u'いい', u'ううう', u'ええええ'],\n name=u'おおおおおおお')\n expected = (u\"0 あ\\n ... \\n\"\n u\"3 ええええ\\n\"\n u\"Name: おおおおおおお, Length: 4, dtype: object\")\n assert _rep(s) == expected\n\n s.index = [u'ああ', u'いいいい', u'う', u'えええ']\n expected = (u\"ああ あ\\n\"\n u\" ... 
\\n\"\n u\"えええ ええええ\\n\"\n u\"Name: おおおおおおお, Length: 4, dtype: object\")\n assert _rep(s) == expected\n\n # ambiguous unicode\n s = Series([u'¡¡', u'い¡¡', u'ううう', u'ええええ'],\n index=[u'ああ', u'¡¡¡¡いい', u'¡¡', u'えええ'])\n expected = (u\"ああ ¡¡\\n\"\n u\"¡¡¡¡いい い¡¡\\n\"\n u\"¡¡ ううう\\n\"\n u\"えええ ええええ\\ndtype: object\")\n assert _rep(s) == expected\n\n def test_float_trim_zeros(self):\n vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10,\n 2.03954217305e+10, 5.59897817305e+10]\n for line in repr(Series(vals)).split('\\n'):\n if line.startswith('dtype:'):\n continue\n if _three_digit_exp():\n assert '+010' in line\n else:\n assert '+10' in line\n\n def test_datetimeindex(self):\n\n index = date_range('20130102', periods=6)\n s = Series(1, index=index)\n result = s.to_string()\n assert '2013-01-02' in result\n\n # nat in index\n s2 = Series(2, index=[Timestamp('20130111'), NaT])\n s = s2.append(s)\n result = s.to_string()\n assert 'NaT' in result\n\n # nat in summary\n result = str(s2.index)\n assert 'NaT' in result\n\n @pytest.mark.parametrize('start_date', [\n '2017-01-01 23:59:59.999999999',\n '2017-01-01 23:59:59.99999999',\n '2017-01-01 23:59:59.9999999',\n '2017-01-01 23:59:59.999999',\n '2017-01-01 23:59:59.99999',\n '2017-01-01 23:59:59.9999'\n ])\n def test_datetimeindex_highprecision(self, start_date):\n # GH19030\n # Check that high-precision time values for the end of day are\n # included in repr for DatetimeIndex\n s1 = Series(date_range(start=start_date, freq='D', periods=5))\n result = str(s1)\n assert start_date in result\n\n dti = date_range(start=start_date, freq='D', periods=5)\n s2 = Series(3, index=dti)\n result = str(s2.index)\n assert start_date in result\n\n def test_timedelta64(self):\n\n from datetime import datetime, timedelta\n\n Series(np.array([1100, 20], dtype='timedelta64[ns]')).to_string()\n\n s = Series(date_range('2012-1-1', periods=3, freq='D'))\n\n # GH2146\n\n # adding NaTs\n y = s - s.shift(1)\n result = y.to_string()\n assert '1 days' in result\n assert '00:00:00' not in result\n assert 'NaT' in result\n\n # with frac seconds\n o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)\n y = s - o\n result = y.to_string()\n assert '-1 days +23:59:59.999850' in result\n\n # rounding?\n o = Series([datetime(2012, 1, 1, 1)] * 3)\n y = s - o\n result = y.to_string()\n assert '-1 days +23:00:00' in result\n assert '1 days 23:00:00' in result\n\n o = Series([datetime(2012, 1, 1, 1, 1)] * 3)\n y = s - o\n result = y.to_string()\n assert '-1 days +22:59:00' in result\n assert '1 days 22:59:00' in result\n\n o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)\n y = s - o\n result = y.to_string()\n assert '-1 days +22:58:59.999850' in result\n assert '0 days 22:58:59.999850' in result\n\n # neg time\n td = timedelta(minutes=5, seconds=3)\n s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td\n y = s - s2\n result = y.to_string()\n assert '-1 days +23:54:57' in result\n\n td = timedelta(microseconds=550)\n s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td\n y = s - td\n result = y.to_string()\n assert '2012-01-01 23:59:59.999450' in result\n\n # no boxing of the actual elements\n td = Series(pd.timedelta_range('1 days', periods=3))\n result = td.to_string()\n assert result == u(\"0 1 days\\n1 2 days\\n2 3 days\")\n\n def test_mixed_datetime64(self):\n df = DataFrame({'A': [1, 2], 'B': ['2012-01-01', '2012-01-02']})\n df['B'] = pd.to_datetime(df.B)\n\n result = repr(df.loc[0])\n assert '2012-01-01' in result\n\n def 
test_period(self):\n # GH 12615\n index = pd.period_range('2013-01', periods=6, freq='M')\n s = Series(np.arange(6, dtype='int64'), index=index)\n exp = (\"2013-01 0\\n2013-02 1\\n2013-03 2\\n2013-04 3\\n\"\n \"2013-05 4\\n2013-06 5\\nFreq: M, dtype: int64\")\n assert str(s) == exp\n\n s = Series(index)\n exp = (\"0 2013-01\\n1 2013-02\\n2 2013-03\\n3 2013-04\\n\"\n \"4 2013-05\\n5 2013-06\\ndtype: object\")\n assert str(s) == exp\n\n # periods with mixed freq\n s = Series([pd.Period('2011-01', freq='M'),\n pd.Period('2011-02-01', freq='D'),\n pd.Period('2011-03-01 09:00', freq='H')])\n exp = (\"0 2011-01\\n1 2011-02-01\\n\"\n \"2 2011-03-01 09:00\\ndtype: object\")\n assert str(s) == exp\n\n def test_max_multi_index_display(self):\n # GH 7101\n\n # doc example (indexing.rst)\n\n # multi-index\n arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],\n ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]\n tuples = list(zip(*arrays))\n index = MultiIndex.from_tuples(tuples, names=['first', 'second'])\n s = Series(np.random.randn(8), index=index)\n\n with option_context(\"display.max_rows\", 10):\n assert len(str(s).split('\\n')) == 10\n with option_context(\"display.max_rows\", 3):\n assert len(str(s).split('\\n')) == 5\n with option_context(\"display.max_rows\", 2):\n assert len(str(s).split('\\n')) == 5\n with option_context(\"display.max_rows\", 1):\n assert len(str(s).split('\\n')) == 4\n with option_context(\"display.max_rows\", 0):\n assert len(str(s).split('\\n')) == 10\n\n # index\n s = Series(np.random.randn(8), None)\n\n with option_context(\"display.max_rows\", 10):\n assert len(str(s).split('\\n')) == 9\n with option_context(\"display.max_rows\", 3):\n assert len(str(s).split('\\n')) == 4\n with option_context(\"display.max_rows\", 2):\n assert len(str(s).split('\\n')) == 4\n with option_context(\"display.max_rows\", 1):\n assert len(str(s).split('\\n')) == 3\n with option_context(\"display.max_rows\", 0):\n assert len(str(s).split('\\n')) == 9\n\n # Make sure #8532 is fixed\n def test_consistent_format(self):\n s = pd.Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)\n with option_context(\"display.max_rows\", 10,\n \"display.show_dimensions\", False):\n res = repr(s)\n exp = ('0 1.0000\\n1 1.0000\\n2 1.0000\\n3 '\n '1.0000\\n4 1.0000\\n ... \\n125 '\n '1.0000\\n126 1.0000\\n127 0.9999\\n128 '\n '1.0000\\n129 1.0000\\ndtype: float64')\n assert res == exp\n\n def chck_ncols(self, s):\n with option_context(\"display.max_rows\", 10):\n res = repr(s)\n lines = res.split('\\n')\n lines = [line for line in repr(s).split('\\n')\n if not re.match(r'[^\\.]*\\.+', line)][:-1]\n ncolsizes = len({len(line.strip()) for line in lines})\n assert ncolsizes == 1\n\n def test_format_explicit(self):\n test_sers = gen_series_formatting()\n with option_context(\"display.max_rows\", 4,\n \"display.show_dimensions\", False):\n res = repr(test_sers['onel'])\n exp = '0 a\\n1 a\\n ..\\n98 a\\n99 a\\ndtype: object'\n assert exp == res\n res = repr(test_sers['twol'])\n exp = ('0 ab\\n1 ab\\n ..\\n98 ab\\n99 ab\\ndtype:'\n ' object')\n assert exp == res\n res = repr(test_sers['asc'])\n exp = ('0 a\\n1 ab\\n ... \\n4 abcde\\n5'\n ' abcdef\\ndtype: object')\n assert exp == res\n res = repr(test_sers['desc'])\n exp = ('5 abcdef\\n4 abcde\\n ... 
\\n1 ab\\n0'\n ' a\\ndtype: object')\n assert exp == res\n\n def test_ncols(self):\n test_sers = gen_series_formatting()\n for s in test_sers.values():\n self.chck_ncols(s)\n\n def test_max_rows_eq_one(self):\n s = Series(range(10), dtype='int64')\n with option_context(\"display.max_rows\", 1):\n strrepr = repr(s).split('\\n')\n exp1 = ['0', '0']\n res1 = strrepr[0].split()\n assert exp1 == res1\n exp2 = ['..']\n res2 = strrepr[1].split()\n assert exp2 == res2\n\n def test_truncate_ndots(self):\n def getndots(s):\n return len(re.match(r'[^\\.]*(\\.*)', s).groups()[0])\n\n s = Series([0, 2, 3, 6])\n with option_context(\"display.max_rows\", 2):\n strrepr = repr(s).replace('\\n', '')\n assert getndots(strrepr) == 2\n\n s = Series([0, 100, 200, 400])\n with option_context(\"display.max_rows\", 2):\n strrepr = repr(s).replace('\\n', '')\n assert getndots(strrepr) == 3\n\n def test_show_dimensions(self):\n # gh-7117\n s = Series(range(5))\n\n assert 'Length' not in repr(s)\n\n with option_context(\"display.max_rows\", 4):\n assert 'Length' in repr(s)\n\n with option_context(\"display.show_dimensions\", True):\n assert 'Length' in repr(s)\n\n with option_context(\"display.max_rows\", 4,\n \"display.show_dimensions\", False):\n assert 'Length' not in repr(s)\n\n def test_to_string_name(self):\n s = Series(range(100), dtype='int64')\n s.name = 'myser'\n res = s.to_string(max_rows=2, name=True)\n exp = '0 0\\n ..\\n99 99\\nName: myser'\n assert res == exp\n res = s.to_string(max_rows=2, name=False)\n exp = '0 0\\n ..\\n99 99'\n assert res == exp\n\n def test_to_string_dtype(self):\n s = Series(range(100), dtype='int64')\n res = s.to_string(max_rows=2, dtype=True)\n exp = '0 0\\n ..\\n99 99\\ndtype: int64'\n assert res == exp\n res = s.to_string(max_rows=2, dtype=False)\n exp = '0 0\\n ..\\n99 99'\n assert res == exp\n\n def test_to_string_length(self):\n s = Series(range(100), dtype='int64')\n res = s.to_string(max_rows=2, length=True)\n exp = '0 0\\n ..\\n99 99\\nLength: 100'\n assert res == exp\n\n def test_to_string_na_rep(self):\n s = pd.Series(index=range(100))\n res = s.to_string(na_rep='foo', max_rows=2)\n exp = '0 foo\\n ..\\n99 foo'\n assert res == exp\n\n def test_to_string_float_format(self):\n s = pd.Series(range(10), dtype='float64')\n res = s.to_string(float_format=lambda x: '{0:2.1f}'.format(x),\n max_rows=2)\n exp = '0 0.0\\n ..\\n9 9.0'\n assert res == exp\n\n def test_to_string_header(self):\n s = pd.Series(range(10), dtype='int64')\n s.index.name = 'foo'\n res = s.to_string(header=True, max_rows=2)\n exp = 'foo\\n0 0\\n ..\\n9 9'\n assert res == exp\n res = s.to_string(header=False, max_rows=2)\n exp = '0 0\\n ..\\n9 9'\n assert res == exp\n\n\ndef _three_digit_exp():\n return '{x:.4g}'.format(x=1.7e8) == '1.7e+008'\n\n\nclass TestFloatArrayFormatter(object):\n\n def test_misc(self):\n obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))\n result = obj.get_result()\n assert len(result) == 0\n\n def test_format(self):\n obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))\n result = obj.get_result()\n assert result[0] == \" 12.0\"\n assert result[1] == \" 0.0\"\n\n def test_output_significant_digits(self):\n # Issue #9764\n\n # In case default display precision changes:\n with pd.option_context('display.precision', 6):\n # DataFrame example from issue #9764\n d = pd.DataFrame(\n {'col1': [9.999e-8, 1e-7, 1.0001e-7, 2e-7, 4.999e-7, 5e-7,\n 5.0001e-7, 6e-7, 9.999e-7, 1e-6, 1.0001e-6, 2e-6,\n 4.999e-6, 5e-6, 5.0001e-6, 6e-6]})\n\n expected_output = {\n (0, 
6):\n ' col1\\n'\n '0 9.999000e-08\\n'\n '1 1.000000e-07\\n'\n '2 1.000100e-07\\n'\n '3 2.000000e-07\\n'\n '4 4.999000e-07\\n'\n '5 5.000000e-07',\n (1, 6):\n ' col1\\n'\n '1 1.000000e-07\\n'\n '2 1.000100e-07\\n'\n '3 2.000000e-07\\n'\n '4 4.999000e-07\\n'\n '5 5.000000e-07',\n (1, 8):\n ' col1\\n'\n '1 1.000000e-07\\n'\n '2 1.000100e-07\\n'\n '3 2.000000e-07\\n'\n '4 4.999000e-07\\n'\n '5 5.000000e-07\\n'\n '6 5.000100e-07\\n'\n '7 6.000000e-07',\n (8, 16):\n ' col1\\n'\n '8 9.999000e-07\\n'\n '9 1.000000e-06\\n'\n '10 1.000100e-06\\n'\n '11 2.000000e-06\\n'\n '12 4.999000e-06\\n'\n '13 5.000000e-06\\n'\n '14 5.000100e-06\\n'\n '15 6.000000e-06',\n (9, 16):\n ' col1\\n'\n '9 0.000001\\n'\n '10 0.000001\\n'\n '11 0.000002\\n'\n '12 0.000005\\n'\n '13 0.000005\\n'\n '14 0.000005\\n'\n '15 0.000006'\n }\n\n for (start, stop), v in expected_output.items():\n assert str(d[start:stop]) == v\n\n def test_too_long(self):\n # GH 10451\n with pd.option_context('display.precision', 4):\n # need both a number > 1e6 and something that normally formats to\n # having length > display.precision + 6\n df = pd.DataFrame(dict(x=[12345.6789]))\n assert str(df) == ' x\\n0 12345.6789'\n df = pd.DataFrame(dict(x=[2e6]))\n assert str(df) == ' x\\n0 2000000.0'\n df = pd.DataFrame(dict(x=[12345.6789, 2e6]))\n assert str(df) == ' x\\n0 1.2346e+04\\n1 2.0000e+06'\n\n\nclass TestRepr_timedelta64(object):\n\n def test_none(self):\n delta_1d = pd.to_timedelta(1, unit='D')\n delta_0d = pd.to_timedelta(0, unit='D')\n delta_1s = pd.to_timedelta(1, unit='s')\n delta_500ms = pd.to_timedelta(500, unit='ms')\n\n drepr = lambda x: x._repr_base()\n assert drepr(delta_1d) == \"1 days\"\n assert drepr(-delta_1d) == \"-1 days\"\n assert drepr(delta_0d) == \"0 days\"\n assert drepr(delta_1s) == \"0 days 00:00:01\"\n assert drepr(delta_500ms) == \"0 days 00:00:00.500000\"\n assert drepr(delta_1d + delta_1s) == \"1 days 00:00:01\"\n assert drepr(-delta_1d + delta_1s) == \"-1 days +00:00:01\"\n assert drepr(delta_1d + delta_500ms) == \"1 days 00:00:00.500000\"\n assert drepr(-delta_1d + delta_500ms) == \"-1 days +00:00:00.500000\"\n\n def test_sub_day(self):\n delta_1d = pd.to_timedelta(1, unit='D')\n delta_0d = pd.to_timedelta(0, unit='D')\n delta_1s = pd.to_timedelta(1, unit='s')\n delta_500ms = pd.to_timedelta(500, unit='ms')\n\n drepr = lambda x: x._repr_base(format='sub_day')\n assert drepr(delta_1d) == \"1 days\"\n assert drepr(-delta_1d) == \"-1 days\"\n assert drepr(delta_0d) == \"00:00:00\"\n assert drepr(delta_1s) == \"00:00:01\"\n assert drepr(delta_500ms) == \"00:00:00.500000\"\n assert drepr(delta_1d + delta_1s) == \"1 days 00:00:01\"\n assert drepr(-delta_1d + delta_1s) == \"-1 days +00:00:01\"\n assert drepr(delta_1d + delta_500ms) == \"1 days 00:00:00.500000\"\n assert drepr(-delta_1d + delta_500ms) == \"-1 days +00:00:00.500000\"\n\n def test_long(self):\n delta_1d = pd.to_timedelta(1, unit='D')\n delta_0d = pd.to_timedelta(0, unit='D')\n delta_1s = pd.to_timedelta(1, unit='s')\n delta_500ms = pd.to_timedelta(500, unit='ms')\n\n drepr = lambda x: x._repr_base(format='long')\n assert drepr(delta_1d) == \"1 days 00:00:00\"\n assert drepr(-delta_1d) == \"-1 days +00:00:00\"\n assert drepr(delta_0d) == \"0 days 00:00:00\"\n assert drepr(delta_1s) == \"0 days 00:00:01\"\n assert drepr(delta_500ms) == \"0 days 00:00:00.500000\"\n assert drepr(delta_1d + delta_1s) == \"1 days 00:00:01\"\n assert drepr(-delta_1d + delta_1s) == \"-1 days +00:00:01\"\n assert drepr(delta_1d + delta_500ms) == \"1 days 00:00:00.500000\"\n assert 
drepr(-delta_1d + delta_500ms) == \"-1 days +00:00:00.500000\"\n\n def test_all(self):\n delta_1d = pd.to_timedelta(1, unit='D')\n delta_0d = pd.to_timedelta(0, unit='D')\n delta_1ns = pd.to_timedelta(1, unit='ns')\n\n drepr = lambda x: x._repr_base(format='all')\n assert drepr(delta_1d) == \"1 days 00:00:00.000000000\"\n assert drepr(-delta_1d) == \"-1 days +00:00:00.000000000\"\n assert drepr(delta_0d) == \"0 days 00:00:00.000000000\"\n assert drepr(delta_1ns) == \"0 days 00:00:00.000000001\"\n assert drepr(-delta_1d + delta_1ns) == \"-1 days +00:00:00.000000001\"\n\n\nclass TestTimedelta64Formatter(object):\n\n def test_days(self):\n x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')\n result = fmt.Timedelta64Formatter(x, box=True).get_result()\n assert result[0].strip() == \"'0 days'\"\n assert result[1].strip() == \"'1 days'\"\n\n result = fmt.Timedelta64Formatter(x[1:2], box=True).get_result()\n assert result[0].strip() == \"'1 days'\"\n\n result = fmt.Timedelta64Formatter(x, box=False).get_result()\n assert result[0].strip() == \"0 days\"\n assert result[1].strip() == \"1 days\"\n\n result = fmt.Timedelta64Formatter(x[1:2], box=False).get_result()\n assert result[0].strip() == \"1 days\"\n\n def test_days_neg(self):\n x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')\n result = fmt.Timedelta64Formatter(-x, box=True).get_result()\n assert result[0].strip() == \"'0 days'\"\n assert result[1].strip() == \"'-1 days'\"\n\n def test_subdays(self):\n y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')\n result = fmt.Timedelta64Formatter(y, box=True).get_result()\n assert result[0].strip() == \"'00:00:00'\"\n assert result[1].strip() == \"'00:00:01'\"\n\n def test_subdays_neg(self):\n y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')\n result = fmt.Timedelta64Formatter(-y, box=True).get_result()\n assert result[0].strip() == \"'00:00:00'\"\n assert result[1].strip() == \"'-1 days +23:59:59'\"\n\n def test_zero(self):\n x = pd.to_timedelta(list(range(1)) + [pd.NaT], unit='D')\n result = fmt.Timedelta64Formatter(x, box=True).get_result()\n assert result[0].strip() == \"'0 days'\"\n\n x = pd.to_timedelta(list(range(1)), unit='D')\n result = fmt.Timedelta64Formatter(x, box=True).get_result()\n assert result[0].strip() == \"'0 days'\"\n\n\nclass TestDatetime64Formatter(object):\n\n def test_mixed(self):\n x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), pd.NaT])\n result = fmt.Datetime64Formatter(x).get_result()\n assert result[0].strip() == \"2013-01-01 00:00:00\"\n assert result[1].strip() == \"2013-01-01 12:00:00\"\n\n def test_dates(self):\n x = Series([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT])\n result = fmt.Datetime64Formatter(x).get_result()\n assert result[0].strip() == \"2013-01-01\"\n assert result[1].strip() == \"2013-01-02\"\n\n def test_date_nanos(self):\n x = Series([Timestamp(200)])\n result = fmt.Datetime64Formatter(x).get_result()\n assert result[0].strip() == \"1970-01-01 00:00:00.000000200\"\n\n def test_dates_display(self):\n\n # 10170\n # make sure that we are consistently display date formatting\n x = Series(date_range('20130101 09:00:00', periods=5, freq='D'))\n x.iloc[1] = np.nan\n result = fmt.Datetime64Formatter(x).get_result()\n assert result[0].strip() == \"2013-01-01 09:00:00\"\n assert result[1].strip() == \"NaT\"\n assert result[4].strip() == \"2013-01-05 09:00:00\"\n\n x = Series(date_range('20130101 09:00:00', periods=5, freq='s'))\n x.iloc[1] = np.nan\n result = fmt.Datetime64Formatter(x).get_result()\n assert 
result[0].strip() == \"2013-01-01 09:00:00\"\n assert result[1].strip() == \"NaT\"\n assert result[4].strip() == \"2013-01-01 09:00:04\"\n\n x = Series(date_range('20130101 09:00:00', periods=5, freq='ms'))\n x.iloc[1] = np.nan\n result = fmt.Datetime64Formatter(x).get_result()\n assert result[0].strip() == \"2013-01-01 09:00:00.000\"\n assert result[1].strip() == \"NaT\"\n assert result[4].strip() == \"2013-01-01 09:00:00.004\"\n\n x = Series(date_range('20130101 09:00:00', periods=5, freq='us'))\n x.iloc[1] = np.nan\n result = fmt.Datetime64Formatter(x).get_result()\n assert result[0].strip() == \"2013-01-01 09:00:00.000000\"\n assert result[1].strip() == \"NaT\"\n assert result[4].strip() == \"2013-01-01 09:00:00.000004\"\n\n x = Series(date_range('20130101 09:00:00', periods=5, freq='N'))\n x.iloc[1] = np.nan\n result = fmt.Datetime64Formatter(x).get_result()\n assert result[0].strip() == \"2013-01-01 09:00:00.000000000\"\n assert result[1].strip() == \"NaT\"\n assert result[4].strip() == \"2013-01-01 09:00:00.000000004\"\n\n def test_datetime64formatter_yearmonth(self):\n x = Series([datetime(2016, 1, 1), datetime(2016, 2, 2)])\n\n def format_func(x):\n return x.strftime('%Y-%m')\n\n formatter = fmt.Datetime64Formatter(x, formatter=format_func)\n result = formatter.get_result()\n assert result == ['2016-01', '2016-02']\n\n def test_datetime64formatter_hoursecond(self):\n\n x = Series(pd.to_datetime(['10:10:10.100', '12:12:12.120'],\n format='%H:%M:%S.%f'))\n\n def format_func(x):\n return x.strftime('%H:%M')\n\n formatter = fmt.Datetime64Formatter(x, formatter=format_func)\n result = formatter.get_result()\n assert result == ['10:10', '12:12']\n\n\nclass TestNaTFormatting(object):\n\n def test_repr(self):\n assert repr(pd.NaT) == \"NaT\"\n\n def test_str(self):\n assert str(pd.NaT) == \"NaT\"\n\n\nclass TestDatetimeIndexFormat(object):\n\n def test_datetime(self):\n formatted = pd.to_datetime([datetime(2003, 1, 1, 12), pd.NaT]).format()\n assert formatted[0] == \"2003-01-01 12:00:00\"\n assert formatted[1] == \"NaT\"\n\n def test_date(self):\n formatted = pd.to_datetime([datetime(2003, 1, 1), pd.NaT]).format()\n assert formatted[0] == \"2003-01-01\"\n assert formatted[1] == \"NaT\"\n\n def test_date_tz(self):\n formatted = pd.to_datetime([datetime(2013, 1, 1)], utc=True).format()\n assert formatted[0] == \"2013-01-01 00:00:00+00:00\"\n\n formatted = pd.to_datetime(\n [datetime(2013, 1, 1), pd.NaT], utc=True).format()\n assert formatted[0] == \"2013-01-01 00:00:00+00:00\"\n\n def test_date_explicit_date_format(self):\n formatted = pd.to_datetime([datetime(2003, 2, 1), pd.NaT]).format(\n date_format=\"%m-%d-%Y\", na_rep=\"UT\")\n assert formatted[0] == \"02-01-2003\"\n assert formatted[1] == \"UT\"\n\n\nclass TestDatetimeIndexUnicode(object):\n\n def test_dates(self):\n text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(2014, 1, 1)\n ]))\n assert \"['2013-01-01',\" in text\n assert \", '2014-01-01']\" in text\n\n def test_mixed(self):\n text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(\n 2014, 1, 1, 12), datetime(2014, 1, 1)]))\n assert \"'2013-01-01 00:00:00',\" in text\n assert \"'2014-01-01 00:00:00']\" in text\n\n\nclass TestStringRepTimestamp(object):\n\n def test_no_tz(self):\n dt_date = datetime(2013, 1, 2)\n assert str(dt_date) == str(Timestamp(dt_date))\n\n dt_datetime = datetime(2013, 1, 2, 12, 1, 3)\n assert str(dt_datetime) == str(Timestamp(dt_datetime))\n\n dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45)\n assert str(dt_datetime_us) == 
str(Timestamp(dt_datetime_us))\n\n ts_nanos_only = Timestamp(200)\n assert str(ts_nanos_only) == \"1970-01-01 00:00:00.000000200\"\n\n ts_nanos_micros = Timestamp(1200)\n assert str(ts_nanos_micros) == \"1970-01-01 00:00:00.000001200\"\n\n def test_tz_pytz(self):\n dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc)\n assert str(dt_date) == str(Timestamp(dt_date))\n\n dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc)\n assert str(dt_datetime) == str(Timestamp(dt_datetime))\n\n dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc)\n assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))\n\n def test_tz_dateutil(self):\n utc = dateutil.tz.tzutc()\n\n dt_date = datetime(2013, 1, 2, tzinfo=utc)\n assert str(dt_date) == str(Timestamp(dt_date))\n\n dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=utc)\n assert str(dt_datetime) == str(Timestamp(dt_datetime))\n\n dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=utc)\n assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))\n\n def test_nat_representations(self):\n for f in (str, repr, methodcaller('isoformat')):\n assert f(pd.NaT) == 'NaT'\n\n\ndef test_format_percentiles():\n result = fmt.format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])\n expected = ['1.999%', '2.001%', '50%', '66.667%', '99.99%']\n assert result == expected\n\n result = fmt.format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])\n expected = ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']\n assert result == expected\n\n pytest.raises(ValueError, fmt.format_percentiles, [0.1, np.nan, 0.5])\n pytest.raises(ValueError, fmt.format_percentiles, [-0.001, 0.1, 0.5])\n pytest.raises(ValueError, fmt.format_percentiles, [2, 0.1, 0.5])\n pytest.raises(ValueError, fmt.format_percentiles, [0.1, 0.5, 'a'])\n"
] | [
[
"pandas.compat.StringIO",
"pandas.reset_option",
"pandas.core.config.option_context",
"pandas.compat.u",
"pandas.core.config.set_option",
"pandas.io.formats.printing.pprint_thing",
"pandas.Timestamp",
"numpy.where",
"pandas.io.formats.format.Datetime64Formatter",
"pandas.util.testing.makeStringIndex",
"pandas.util.testing.getSeriesData",
"pandas.compat.range",
"pandas.read_csv",
"pandas.period_range",
"numpy.random.random",
"pandas.compat.lzip",
"numpy.empty",
"pandas.core.config.reset_option",
"pandas.util.testing.rands_array",
"pandas.DataFrame",
"pandas.compat.is_platform_windows",
"pandas.io.formats.format.format_percentiles",
"pandas.util.testing.get_data_path",
"pandas.timedelta_range",
"pandas.compat.is_platform_32bit",
"numpy.random.randint",
"numpy.arange",
"pandas.util.testing.makeTimeSeries",
"pandas.Period",
"pandas.core.config.get_option",
"pandas.io.formats.terminal.get_terminal_size",
"pandas.to_datetime",
"numpy.array",
"pandas.io.formats.format.set_option",
"pandas.util.testing.reset_display_options",
"pandas.MultiIndex.from_tuples",
"numpy.random.randn",
"pandas.compat.lrange",
"pandas.Index",
"pandas.io.formats.format.Timedelta64Formatter",
"pandas.io.formats.format._get_adjustment",
"pandas.date_range",
"pandas.to_timedelta",
"pandas.compat.zip",
"pandas.io.formats.format.set_eng_float_format",
"pandas.util.testing.assert_series_equal",
"pandas.option_context",
"pandas.Series",
"numpy.linspace"
]
] |
qguyk/entropy | [
"e43077026c83fe84de022cf8636b2c9d42f1d330"
] | [
"entropylab/pipeline/api/tests/test_plot.py"
] | [
"import numpy as np\nimport plotly\n\nfrom entropylab.pipeline.api.plot import CirclePlotGenerator, ImShowPlotGenerator\nfrom plotly.graph_objects import Figure\n\n\ndef test_circle_plot_plotly():\n target = CirclePlotGenerator()\n figure = Figure()\n data = [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]\n target.plot_plotly(figure, data)\n i = 0\n\n\ndef test_imshow_plot_plotly():\n target = ImShowPlotGenerator()\n figure = Figure()\n data = np.random.rand(10, 10)\n target.plot_plotly(figure, data)\n assert isinstance(figure.data[0], plotly.graph_objs._heatmap.Heatmap)\n"
] | [
[
"numpy.random.rand"
]
] |
richardqiu/pyjanitor | [
"aa3150e7b8e2adc4733ea206ea9c3093e21d4025"
] | [
"tests/functions/test_convert_unix_date.py"
] | [
"import os\n\nimport pandas as pd\nimport pytest\n\n\[email protected](\n os.name == \"nt\", reason=\"Skip *nix-specific tests on Windows\"\n)\ndef test_convert_unix_date():\n unix = [\n \"1284101485\",\n 1_284_101_486,\n \"1284101487000\",\n 1_284_101_488_000,\n \"1284101489\",\n \"1284101490\",\n -2_147_483_648,\n 2_147_483_648,\n ]\n df = pd.DataFrame(unix, columns=[\"dates\"]).convert_unix_date(\"dates\")\n\n assert df[\"dates\"].dtype == \"M8[ns]\"\n"
] | [
[
"pandas.DataFrame"
]
] |
MatthijsdeJ/GNN_PN_Operation_MSc_Thesis | [
"593857abfb15290dde2800cbbbaba0f4b480c990"
] | [
"data_preprocessing_analysis/imitation_data_preprocessing.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 28 16:30:55 2021\n\n@author: matthijs\n\"\"\"\nimport grid2op\nimport numpy as np\nfrom typing import List, Tuple, Callable, Sequence\nfrom pathlib import Path, PosixPath\nimport re\nimport json\nimport auxiliary.grid2op_util as g2o_util\nimport auxiliary.util as util\nfrom auxiliary.util import NumpyEncoder\nfrom auxiliary.config import get_config\nfrom tqdm import tqdm\nfrom auxiliary.generate_action_space import action_identificator\nfrom collections import Counter\nimport os\nfrom random import shuffle\nimport shutil\n\n\ndef get_filepaths(tutor_data_path: str) -> List[Path]:\n \"\"\"\n Get the paths of the .npy data files in the directory, with recursive\n effect.\n\n Parameters\n ----------\n tutor_data_path : str\n String representing the directory path.\n\n Returns\n -------\n List\n List of the paths of the files.\n\n \"\"\"\n return list(Path(tutor_data_path).rglob('*.npy'))\n\n\ndef extract_data_from_filepath(relat_fp: PosixPath) \\\n -> Tuple[int, float, int, int]:\n \"\"\"\n Given a relative filepath, extract the information contained in this\n filepath.\n\n Parameters\n ----------\n relat_fp : PosixPath\n The relative filepath.\n\n Returns\n -------\n Tuple[int, float, int, int]\n Tuple containing the values of the index of the disabled line,\n the threshold at which no actions were taken, the id of the chronic,\n and the number of days completed.\n \"\"\"\n regex_str = 'records_chronics_lout:(.*)_dnthreshold:(.*)' + \\\n '/records_chronic:(.*)_dayscomp:(.*).npy'\n line_disabled, dn_threshold, chronic_id, dayscomp = \\\n re.search(regex_str, str(relat_fp)).groups()\n return int(line_disabled), float(dn_threshold), int(chronic_id), \\\n int(dayscomp)\n\n\ndef extract_data_from_single_ts(ts_vect: np.array, grid2op_vect_len: int,\n vect2obs_func: Callable, line_disabled: int,\n env_info_dict: dict, thermal_limits: Sequence[int]) -> dict:\n \"\"\"\n Given the vector of a datapoint representing a single timestep, extract\n the interesting data from this vector and return it as a dictionary.\n\n Parameters\n ----------\n ts_vect : np.array\n The vector.\n grid2op_vect_len : int\n The length of the vector that represents a grid2op observation.\n vect2obs_func : Callable\n Function for transferring a vector representation of a grid2op\n observation to the corresponding grid2op observation object.\n line_disabled : int\n The line index to be disabled. -1 if no line is disabled.\n env_info_dict: dict\n Dictionary with variables from the environment. 
Important here,\n the index in the topo vect of the disabled line origin/extremity.\n thermal_limits : Sequence[int]\n Sequence with the thermal limits of the lines.\n\n Returns\n -------\n dict\n The dictionary containing the relevant data.\n \"\"\"\n grid2op_obs_vect = ts_vect[-grid2op_vect_len:]\n obs = vect2obs_func(grid2op_obs_vect)\n obs_dict = obs.to_dict()\n\n data = {'action_index': int(ts_vect[0]),\n 'timestep': int(ts_vect[4]),\n 'gen_features': g2o_util.extract_gen_features(obs_dict),\n 'load_features': g2o_util.extract_load_features(obs_dict),\n 'or_features': g2o_util.extract_or_features(obs_dict,\n thermal_limits),\n 'ex_features': g2o_util.extract_ex_features(obs_dict,\n thermal_limits),\n 'topo_vect': obs_dict['topo_vect'].copy()\n }\n\n # Remove the disabled line from the data, if necessary\n if line_disabled != -1:\n data['or_features'] = np.delete(data['or_features'], line_disabled, axis=0)\n data['ex_features'] = np.delete(data['ex_features'], line_disabled, axis=0)\n data['topo_vect'] = np.delete(data['topo_vect'], [\n env_info_dict['dis_line_or_tv'],\n env_info_dict['dis_line_ex_tv']])\n\n # Assert the topo_vect has the same length as the features\n assert len(data['topo_vect']) == len(data['gen_features']) + \\\n len(data['load_features']) + \\\n len(data['or_features']) + \\\n len(data['ex_features'])\n return data\n\n\ndef env_info_line_disabled(env: grid2op.Environment.Environment,\n line_disabled: int) -> dict:\n \"\"\"\n Generates the adapted grid2op environment variables for the possible\n disablement of a line. This essentially removes the corresponding\n origin and extremity from the variables.\n\n Parameters\n ----------\n env : grid2op.Environment.Environment\n The grid2op environment.\n line_disabled : int\n The line index to be disabled. -1 if no line is disabled.\n\n Returns\n -------\n dict\n The dictionary with the information. 
Entries:\n 'sub_info': number of elements per substation\n 'gen_pos_topo_vect': indices in the topo vect for each generator\n object\n 'load_pos_topo_vect': indices in the topo vect for each load\n object\n 'line_or_pos_topo_vect': indices in the topo vect for each origin\n object\n 'line_ex_pos_topo_vect': indices in the topo vect for each extremity\n object\n POSSIBLY:\n 'dis_line_or_tv': index in the topo vect of the disabled line origin\n 'dis_line_ex_tv': index in the topo vect of the disabled line extremity\n \"\"\"\n sub_info = env.sub_info.copy()\n gen_pos_topo_vect = env.gen_pos_topo_vect.copy()\n load_pos_topo_vect = env.load_pos_topo_vect.copy()\n line_or_pos_topo_vect = env.line_or_pos_topo_vect.copy()\n line_ex_pos_topo_vect = env.line_ex_pos_topo_vect.copy()\n\n if line_disabled != -1:\n dis_line_or_tv = line_or_pos_topo_vect[line_disabled]\n dis_line_ex_tv = line_ex_pos_topo_vect[line_disabled]\n\n # Remove line at index from line_or/ex_pos_topo_vect\n line_or_pos_topo_vect = np.delete(line_or_pos_topo_vect, line_disabled)\n line_ex_pos_topo_vect = np.delete(line_ex_pos_topo_vect, line_disabled)\n\n # Lowering numbers in the sub_info array\n sub_info[env.line_or_to_subid[line_disabled]] -= 1\n sub_info[env.line_ex_to_subid[line_disabled]] -= 1\n\n # Lowering indices in the rest of the arrays indexing the topo_vect\n gen_pos_topo_vect = np.array([i - (i > dis_line_or_tv) -\n (i > dis_line_ex_tv) for i in gen_pos_topo_vect])\n load_pos_topo_vect = np.array([i - (i > dis_line_or_tv) -\n (i > dis_line_ex_tv) for i in load_pos_topo_vect])\n line_or_pos_topo_vect = np.array([i - (i > dis_line_or_tv) -\n (i > dis_line_ex_tv) for i in line_or_pos_topo_vect])\n line_ex_pos_topo_vect = np.array([i - (i > dis_line_or_tv) -\n (i > dis_line_ex_tv) for i in line_ex_pos_topo_vect])\n\n concat_ptvs = np.concatenate([gen_pos_topo_vect, load_pos_topo_vect,\n line_or_pos_topo_vect, line_ex_pos_topo_vect])\n # Check that the arrays indexing the topo vect are disjoint\n assert len(set(concat_ptvs)) == len(gen_pos_topo_vect) + len(load_pos_topo_vect) + \\\n len(line_or_pos_topo_vect) + len(line_ex_pos_topo_vect)\n # Check that the sub_info max. index (plus one) equals the nr. of indices\n # equals the sum of objects\n assert max(concat_ptvs) + 1 == len(concat_ptvs) == sum(sub_info)\n\n info_dict = {'sub_info': sub_info,\n 'gen_pos_topo_vect': gen_pos_topo_vect,\n 'load_pos_topo_vect': load_pos_topo_vect,\n 'line_or_pos_topo_vect': line_or_pos_topo_vect,\n 'line_ex_pos_topo_vect': line_ex_pos_topo_vect}\n if line_disabled != -1:\n info_dict['dis_line_or_tv'] = dis_line_or_tv\n info_dict['dis_line_ex_tv'] = dis_line_ex_tv\n\n return info_dict\n\n\nclass ConMatrixCache:\n \"\"\"\n Connectivity matrices are expensive to compute and store and many\n datapoints might share the same connectivity matrix. For this reason, we\n only compute/store each con. matrix once, and instead provide data points\n with a hash pointing to the correct con. matrix.\n \"\"\"\n\n def __init__(self):\n self.con_matrices = {}\n\n def get_key_add_to_dict(self, topo_vect: np.array,\n line_disabled: int,\n sub_info: np.array,\n line_or_pos_topo_vect: np.array,\n line_ex_pos_topo_vect: np.array\n ) -> int:\n \"\"\"\n This function fulfils two purposes: (1) if the corresponding con.\n matrix hasn't been stored yet, compute it and store it;\n (2) return the hash key of the corresponding con. matrix.\n\n Parameters\n ----------\n topo_vect : np.array\n The topology vector from which to compute the con. 
matrix.\n line_disabled : int\n The line index to be disabled. -1 if no line is disabled.\n sub_info : np.array\n Vector representing the number of objects per substation.\n line_or_pos_topo_vect : np.array\n Vector representing the indices of the line origins in the topo\n vect.\n line_ex_pos_topo_vect : np.array\n Vector representing the indices of the line extremities in the topo\n vect.\n\n Returns\n -------\n h_topo_vect : int\n The hash of the topology vector, which can be used to index\n for con. matrices.\n\n \"\"\"\n # Check that line_or_pos_topo_vect and line_ex_pos_topo_vect\n # have no overlap\n assert set(line_or_pos_topo_vect).isdisjoint(set(line_ex_pos_topo_vect))\n # And have the same size\n assert len(line_or_pos_topo_vect) == len(line_ex_pos_topo_vect)\n # Check that the number of objects according to sub_info and topo_vect\n # are equal\n assert sum(sub_info) == len(topo_vect)\n\n h_topo_vect = hash((line_disabled, util.hash_nparray(topo_vect)))\n if h_topo_vect not in self.con_matrices:\n con_matrices = g2o_util.connectivity_matrices(sub_info.astype(int),\n topo_vect.astype(int),\n line_or_pos_topo_vect.astype(int),\n line_ex_pos_topo_vect.astype(int))\n if line_disabled != -1:\n raise NotImplementedError(\"Next function needs to be adapted to use the adapted env variables for\"\n \"lines disabled.\")\n hetero_con_matrices = g2o_util.connectivity_matrices_to_hetero_connectivity_matrices(\n {'same_busbar': con_matrices[0],\n 'other_busbar': con_matrices[1],\n 'line': con_matrices[2]}\n )\n\n # Convert hetero_con_matrices keys from tuples to strings, because of json\n hetero_con_matrices = dict([(\",\".join(k), v) for k, v in hetero_con_matrices.items()])\n self.con_matrices[h_topo_vect] = (topo_vect, con_matrices, hetero_con_matrices)\n\n return h_topo_vect\n\n\n def save(self, fpath: str = ''):\n \"\"\"\n Save the dictionary of connectivity matrices as a json file.\n\n Parameters\n ----------\n fpath : str, optional\n Where to store the json file. The default is the working directory.\n\n \"\"\"\n with open(fpath, 'w') as outfile:\n json.dump(self.con_matrices, outfile, cls=NumpyEncoder)\n\n @classmethod\n def load(cls, fpath: str):\n \"\"\"\n Factory class: initialize a ConMatrixCache based on a file.\n\n Parameters\n ----------\n fpath : str\n The filepath of the file.\n \"\"\"\n cmc = cls()\n with open(fpath, 'r') as file:\n cmc.con_matrices = json.loads(file.read())\n\n # Change the hetero_con_matrices back from strings to tuples\n for cm in cmc.con_matrices.values():\n cm[2] = dict([(tuple(k.split(',')), v) for k, v in cm[2].items()])\n return cmc\n\n\ndef save_data_to_file(data: List[dict], output_data_path: str):\n \"\"\"\n Given a list of dictionaries, representing various data points,\n save these to a json file. 
If the list is empty, save nothing.\n\n Parameters\n ----------\n data : List[dict]\n Various data points.\n output_data_path : str\n The output directory where to save the file.\n \"\"\"\n if not data:\n return\n\n filename = f'data_lout{data[0][\"line_disabled\"]}_' + \\\n f'chr{data[0][\"chronic_id\"]}.json'\n with open(output_data_path + filename, 'w') as outfile:\n json.dump(data, outfile, cls=NumpyEncoder)\n\n\nclass FeatureStatistics:\n \"\"\"\n Used to track the statistics about features (N, mean, std), which are used\n in feature normalization.\n\n Since the dataset is too large to hold in memory completely, the feature\n statistics are computed iteratively.\n \"\"\"\n\n def __init__(self):\n # Initialize statistics about features\n self.N_gen, self.N_load, self.N_line = 0, 0, 0\n self.S_gen, self.S_load, self.S_or, self.S_ex = None, None, None, None\n self.S2_gen, self.S2_load, self.S2_or, self.S2_ex = None, None, None, None\n\n def update_feature_statistics(self, data: dict):\n \"\"\"\n Update the statistics (number, sum, sum of squares) of the feature\n values.\n\n Parameters\n ----------\n data : np.array\n Dictionary representing the datapoints, containing the features.\n\n \"\"\"\n features = [data['gen_features'], data['load_features'],\n data['or_features'], data['ex_features']]\n\n # Update number of objects\n self.N_gen, self.N_load, self.N_line = [n + f.shape[0] for f, n in\n zip(features[:-1], [self.N_gen, self.N_load, self.N_line])]\n\n if self.S_gen is None:\n # Initialize sum\n self.S_gen, self.S_load, self.S_or, self.S_ex = \\\n [f.sum(axis=0) for f in features]\n # Initialize sum of squares\n self.S2_gen, self.S2_load, self.S2_or, self.S2_ex = \\\n [(f ** 2).sum(axis=0) for f in features]\n else:\n # Increase the sum\n self.S_gen, self.S_load, self.S_or, self.S_ex = \\\n [s + f.sum(axis=0) for f, s in zip(features,\n [self.S_gen, self.S_load, self.S_or, self.S_ex])]\n # Increase the sum of squares\n self.S2_gen, self.S2_load, self.S2_or, self.S2_ex = \\\n [s2 + (f ** 2).sum(axis=0) for f, s2 in zip(features,\n [self.S2_gen, self.S2_load, self.S2_or, self.S2_ex])]\n\n def save_feature_statistics(self, fpath: str):\n \"\"\"\n Save the feature statistics in the form of the mean and standard\n deviation per object type to a specified location.\n\n Parameters\n ----------\n fpath : str\n The filepath to save to.\n \"\"\"\n\n def std(num, sm, sum2):\n return np.sqrt(sum2 / num - (sm / num) ** 2)\n\n stats = {}\n for name, N, S, S2 in [('gen', self.N_gen, self.S_gen, self.S2_gen),\n ('load', self.N_load, self.S_load, self.S2_load),\n ('or', self.N_line, self.S_or, self.S2_or),\n ('ex', self.N_line, self.S_ex, self.S2_ex)]:\n stats[name] = {'mean': S / N,\n 'std': std(N, S, S2)}\n with open(fpath, 'w') as outfile:\n json.dump(stats, outfile, cls=NumpyEncoder)\n\n\ndef process_raw_tutor_data():\n \"\"\"\n Process the raw datapoints and store the processed datapoints.\n \"\"\"\n\n # Specify paths\n config = get_config()\n tutor_data_path = config['paths']['tutor_imitation']\n output_data_path = config['paths']['processed_tutor_imitation']\n con_matrix_path = config['paths']['con_matrix_cache']\n fstats_path = config['paths']['feature_statistics']\n ac_path = config['paths']['action_counter']\n\n # Initialize environment and environment variables\n env = g2o_util.init_env(grid2op.Rules.AlwaysLegal)\n grid2op_vect_size = len(env.get_obs().to_vect())\n thermal_limits = config['rte_case14_realistic']['thermal_limits']\n\n # Create an object for caching connectivity matrices\n 
cmc = ConMatrixCache()\n # Create a dictionary used for finding actions corresponding to action ids\n action_iders = {}\n # Create object for tracking the feature statistics\n fstats = FeatureStatistics()\n # Object for tracking action frequencies. Can be used to filter out\n # rare actions\n action_counter = Counter()\n\n for fp in tqdm(get_filepaths(tutor_data_path)):\n line_disabled, _, chronic_id, dayscomp = \\\n extract_data_from_filepath(fp.relative_to(tutor_data_path))\n\n # Load a single file with raw datapoints\n chr_ldis_raw_dps = np.load(fp)\n\n # If it doesn't already exit, create action_identificator for this\n # particular line disabled\n # Action identificator give the action corresponding to an action index\n if line_disabled not in action_iders:\n action_iders[line_disabled] = action_identificator(env, line_disabled)\n\n # Create a list wherein to store the processed datapoints for this\n # particular file\n file_dps = []\n\n # Env information specifically for a line removed\n env_info_dict = env_info_line_disabled(env, line_disabled)\n\n # Loop over the datapoints\n for raw_dp in chr_ldis_raw_dps:\n # Extract information dictionary from the datapoint\n dp = extract_data_from_single_ts(raw_dp,\n grid2op_vect_size,\n env.observation_space.from_vect,\n line_disabled,\n env_info_dict,\n thermal_limits)\n\n # Add the data from the filepath and environment to the data dictionary\n dp.update({'line_disabled': line_disabled,\n 'chronic_id': chronic_id,\n 'dayscomp': dayscomp})\n dp.update({'sub_info': env_info_dict['sub_info'],\n 'gen_pos_topo_vect': env_info_dict['gen_pos_topo_vect'],\n 'load_pos_topo_vect': env_info_dict['load_pos_topo_vect'],\n 'line_or_pos_topo_vect': env_info_dict['line_or_pos_topo_vect'],\n 'line_ex_pos_topo_vect': env_info_dict['line_ex_pos_topo_vect'],\n })\n\n # Update the feature statistics.\n fstats.update_feature_statistics(dp)\n\n # Find the set action topology vector and add it to the datapoint\n if dp['action_index'] != -1:\n action_ider = action_iders[line_disabled]\n dp['set_topo_vect'] = action_ider.get_set_topo_vect(dp['action_index'])\n # Remove disables lines from topo vect objects\n if line_disabled != -1:\n dp['set_topo_vect'] = np.delete(dp['set_topo_vect'], [\n env_info_dict['dis_line_or_tv'],\n env_info_dict['dis_line_ex_tv']])\n else:\n dp['set_topo_vect'] = np.zeros_like(dp['topo_vect'])\n\n dp['change_topo_vect'] = np.array([0 if s == 0 else abs(t - s) for t, s in\n zip(dp['topo_vect'], dp['set_topo_vect'])])\n dp['res_topo_vect'] = np.array([t if s == 0 else s for t, s in\n zip(dp['topo_vect'], dp['set_topo_vect'])])\n\n # Update action counter\n # TODO: Change this for scenarios with different topologies\n if line_disabled != -1:\n raise NotImplementedError\n act_hash = util.hash_nparray(dp['change_topo_vect'])\n action_counter[act_hash] += 1\n dp['act_hash'] = act_hash\n\n # Skip datapoint if any other line is disabled\n if -1 in dp['topo_vect']:\n continue\n\n assert len(dp['set_topo_vect']) == len(dp['topo_vect']) == len(dp['change_topo_vect']) \\\n == len(dp['res_topo_vect']), \"Not equal lengths\"\n assert len(dp['topo_vect']) == (56 if line_disabled == -1 else 54), \\\n \"Incorrect length\"\n assert all([(o in [0, 1, 2]) for o in dp['set_topo_vect']]), \\\n \"Incorrect element in set_topo_vect\"\n assert all([(o in [1, 2]) for o in dp['topo_vect']]), \\\n \"Incorrect element in topo_vect\"\n assert all([(o in [0, 1]) for o in dp['change_topo_vect']]), \\\n \"Incorrect element in change_topo_vect\"\n assert all([(o in [1, 2]) 
for o in dp['res_topo_vect']]), \\\n \"Incorrect element in res_topo_vect\"\n\n # Add the index of the connectivity matrix to the data object\n cm_index = cmc.get_key_add_to_dict(dp['topo_vect'],\n line_disabled,\n env_info_dict['sub_info'],\n env_info_dict['line_or_pos_topo_vect'],\n env_info_dict['line_ex_pos_topo_vect'])\n dp['cm_index'] = cm_index\n assert dp['cm_index'] in cmc.con_matrices\n\n # Append datapoint to the datapoints for a particular chronic and\n # line disabled, update summary object\n file_dps.append(dp)\n\n # Save the processed datapoints for a particular chronic and line disabled\n save_data_to_file(file_dps, output_data_path)\n\n cmc.save(con_matrix_path)\n fstats.save_feature_statistics(fstats_path)\n with open(ac_path, 'w') as outfile:\n json.dump(action_counter,\n outfile,\n cls=NumpyEncoder)\n\n\ndef divide_files_train_val_test():\n \"\"\"\n Divide the processed data files over train, val, and test subdirectories. If these subdirectories already exist,\n then they are first removed.\n\n Raises\n ------\n RuntimeError\n Whenever there are files in the existing train/val/test folders which are not .json files.\n \"\"\"\n config = get_config()\n processed_path = config['paths']['processed_tutor_imitation']\n\n # Remove directories including existing processed datapoints\n if os.path.exists(processed_path + 'train'):\n if not all([file.endswith('.json') for file in os.listdir(processed_path + 'train')]):\n raise RuntimeError('All files in the train folder to be overwritten must be .json files.')\n shutil.rmtree(processed_path + 'train')\n if os.path.exists(processed_path + 'val'):\n if not all([file.endswith('.json') for file in os.listdir(processed_path + 'val')]):\n raise RuntimeError('All files in the val folder to be overwritten must be .json files.')\n shutil.rmtree(processed_path + 'val')\n if os.path.exists(processed_path + 'test'):\n if not all([file.endswith('.json') for file in os.listdir(processed_path + 'test')]):\n raise RuntimeError('All files in the test folder to be overwritten must be .json files.')\n shutil.rmtree(processed_path + 'test')\n\n # List data files, shuffle them\n data_files = os.listdir(processed_path)\n assert all([file.endswith('.json') for file in data_files]), \"All files in the directory of\" \\\n \"processed files must be .json files.\"\n assert data_files, \"The directory with processed files cannot be empty.\"\n shuffle(data_files)\n\n # Create the train, val, test directories\n os.mkdir(processed_path + 'train')\n os.mkdir(processed_path + 'val')\n os.mkdir(processed_path + 'test')\n\n # Divide shuffled files over the three subdirectories\n train_range = config['dataset']['train_perc'] * len(data_files)\n val_range = train_range + config['dataset']['val_perc'] * len(data_files)\n for i, f in enumerate(data_files):\n if i > val_range:\n os.rename(processed_path + f, processed_path + 'test/' + f)\n elif i > train_range:\n os.rename(processed_path + f, processed_path + 'val/' + f)\n else:\n os.rename(processed_path + f, processed_path + 'train/' + f)\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.delete",
"numpy.zeros_like",
"numpy.load",
"numpy.sqrt"
]
] |
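The FeatureStatistics class in the file above documents a streaming computation of per-feature statistics: only the count N, the sum S, and the sum of squares S2 are kept, and the standard deviation is recovered as sqrt(S2/N - (S/N)^2). As a minimal standalone sketch of that accumulation pattern (the toy batches and their shapes below are made up for illustration and are not taken from the dataset entry), it might look like:

    import numpy as np

    # Accumulate count, sum, and sum of squares across batches (assumed toy data).
    N, S, S2 = 0, None, None
    for batch in (np.random.rand(4, 3), np.random.rand(7, 3)):
        N += batch.shape[0]
        if S is None:
            S = batch.sum(axis=0)
            S2 = (batch ** 2).sum(axis=0)
        else:
            S += batch.sum(axis=0)
            S2 += (batch ** 2).sum(axis=0)

    mean = S / N
    std = np.sqrt(S2 / N - mean ** 2)  # population std, as in save_feature_statistics
    print(mean, std)

Keeping only N, S, and S2 per feature lets the statistics be updated one file at a time without holding the full dataset in memory, which is the motivation stated in the class docstring.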
brgcode/compas_vol | [
"258138f10ac6000534586ff3322d4aeac92a8343"
] | [
"src/compas_vol/combinations/smoothunion.py"
] | [
"from compas import PRECISION\n\n\nclass SmoothUnion(object):\n \"\"\"The smooth union between two volumetric objects.\n\n Parameters\n ----------\n a: volumetric object\n First object to add.\n b: volumetric object\n Second object to add.\n r: float\n Intensity factor, the higher the number, the smoother the result. Default value `1.0`\n\n Examples\n --------\n >>> s = Sphere(Point(5, 6, 0), 9)\n >>> b = Box(Frame.worldXY(), 20, 15, 10)\n >>> vs = VolSphere(s)\n >>> vb = VolBox(b, 2.5)\n >>> u = SmoothUnion(vs, vb, 1.5)\n \"\"\"\n def __init__(self, a=None, b=None, r=1.0):\n self.a = a\n self.b = b\n self.r = r\n\n def __repr__(self):\n return 'SmoothUnion({0},{1},{2:.{3}f})'.format(str(self.a), str(self.b), self.r, PRECISION[:1])\n\n def get_distance_alt(self, x, y, z):\n da = self.a.get_distance(x, y, z)\n db = self.b.get_distance(x, y, z)\n e = max(self.r - abs(da - db), 0)\n return min(da, db) - e**2 * 0.25 / self.r\n\n def get_distance(self, point):\n \"\"\"\n single point distance function\n \"\"\"\n da = self.a.get_distance(point)\n db = self.b.get_distance(point)\n k = self.r\n h = min(max(0.5 + 0.5 * (db - da) / k, 0), 1)\n return (db * (1 - h) + h * da) - k * h * (1 - h)\n\n def get_distance_numpy(self, x, y, z):\n \"\"\"\n vectorized distance function\n \"\"\"\n import numpy as np\n\n da = self.a.get_distance_numpy(x, y, z)\n db = self.b.get_distance_numpy(x, y, z)\n h = np.minimum(np.maximum(0.5 + 0.5 * (db - da)/self.r, 0), 1)\n return (db * (1 - h) + h * da) - self.r * h * (1 - h)\n"
] | [
[
"numpy.maximum"
]
] |
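The SmoothUnion.get_distance method in the file above blends two signed distances with a polynomial smooth minimum: h = clamp(0.5 + 0.5*(db - da)/r, 0, 1), then db*(1 - h) + h*da - r*h*(1 - h). A small sketch of that same blend on made-up sample distances (the helper name smooth_union and the values below are assumptions, not part of the file) could be:

    def smooth_union(da, db, k=1.0):
        # h -> 1 when da << db, h -> 0 when db << da, blended over a band of width k
        h = min(max(0.5 + 0.5 * (db - da) / k, 0.0), 1.0)
        return db * (1.0 - h) + h * da - k * h * (1.0 - h)

    print(smooth_union(0.4, 0.6, k=1.0))  # comparable distances: 0.24, below min(da, db)
    print(smooth_union(0.4, 5.0, k=1.0))  # far apart: 0.4, i.e. plain min(da, db)

When the two distances differ by more than r the clamp drives h to 0 or 1 and the blend reduces to an ordinary minimum; inside the band the result dips below both inputs, which is what produces the smooth fillet between the two volumes.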
Anirudhsekar96/pandas | [
"2db3b0a0378487e269997700b14777af70838e95"
] | [
"pandas/tests/groupby/test_groupby.py"
] | [
"from datetime import datetime\nfrom decimal import Decimal\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import IS64\nfrom pandas.errors import PerformanceWarning\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n Grouper,\n Index,\n MultiIndex,\n RangeIndex,\n Series,\n Timedelta,\n Timestamp,\n date_range,\n to_datetime,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import (\n BooleanArray,\n FloatingArray,\n IntegerArray,\n)\nfrom pandas.core.base import SpecificationError\nimport pandas.core.common as com\nfrom pandas.core.groupby.base import maybe_normalize_deprecated_kernels\n\n\ndef test_group_by_copy():\n # GH#44803\n df = DataFrame(\n {\n \"name\": [\"Alice\", \"Bob\", \"Carl\"],\n \"age\": [20, 21, 20],\n }\n ).set_index(\"name\")\n\n grp_by_same_value = df.groupby([\"age\"]).apply(lambda group: group)\n grp_by_copy = df.groupby([\"age\"]).apply(lambda group: group.copy())\n tm.assert_frame_equal(grp_by_same_value, grp_by_copy)\n\n\ndef test_repr():\n # GH18203\n result = repr(Grouper(key=\"A\", level=\"B\"))\n expected = \"Grouper(key='A', level='B', axis=0, sort=False)\"\n assert result == expected\n\n\[email protected](\"dtype\", [\"int64\", \"int32\", \"float64\", \"float32\"])\ndef test_basic(dtype):\n\n data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)\n\n index = np.arange(9)\n np.random.shuffle(index)\n data = data.reindex(index)\n\n grouped = data.groupby(lambda x: x // 3)\n\n for k, v in grouped:\n assert len(v) == 3\n\n agged = grouped.aggregate(np.mean)\n assert agged[1] == 1\n\n tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand\n tm.assert_series_equal(agged, grouped.mean())\n tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())\n\n expected = grouped.apply(lambda x: x * x.sum())\n transformed = grouped.transform(lambda x: x * x.sum())\n assert transformed[7] == 12\n tm.assert_series_equal(transformed, expected)\n\n value_grouped = data.groupby(data)\n tm.assert_series_equal(\n value_grouped.aggregate(np.mean), agged, check_index_type=False\n )\n\n # complex agg\n agged = grouped.aggregate([np.mean, np.std])\n\n msg = r\"nested renamer is not supported\"\n with pytest.raises(SpecificationError, match=msg):\n grouped.aggregate({\"one\": np.mean, \"two\": np.std})\n\n group_constants = {0: 10, 1: 20, 2: 30}\n agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())\n assert agged[1] == 21\n\n # corner cases\n msg = \"Must produce aggregated value\"\n # exception raised is type Exception\n with pytest.raises(Exception, match=msg):\n grouped.aggregate(lambda x: x * 2)\n\n\ndef test_groupby_nonobject_dtype(mframe, df_mixed_floats):\n key = mframe.index.codes[0]\n grouped = mframe.groupby(key)\n result = grouped.sum()\n\n expected = mframe.groupby(key.astype(\"O\")).sum()\n tm.assert_frame_equal(result, expected)\n\n # GH 3911, mixed frame non-conversion\n df = df_mixed_floats.copy()\n df[\"value\"] = range(len(df))\n\n def max_value(group):\n return group.loc[group[\"value\"].idxmax()]\n\n applied = df.groupby(\"A\").apply(max_value)\n result = applied.dtypes\n expected = df.dtypes\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_return_type():\n\n # GH2893, return a reduced type\n df1 = DataFrame(\n [\n {\"val1\": 1, \"val2\": 20},\n {\"val1\": 1, \"val2\": 19},\n {\"val1\": 2, \"val2\": 27},\n {\"val1\": 2, \"val2\": 12},\n ]\n )\n\n def func(dataf):\n return dataf[\"val2\"] - dataf[\"val2\"].mean()\n\n with tm.assert_produces_warning(FutureWarning):\n result = 
df1.groupby(\"val1\", squeeze=True).apply(func)\n assert isinstance(result, Series)\n\n df2 = DataFrame(\n [\n {\"val1\": 1, \"val2\": 20},\n {\"val1\": 1, \"val2\": 19},\n {\"val1\": 1, \"val2\": 27},\n {\"val1\": 1, \"val2\": 12},\n ]\n )\n\n def func(dataf):\n return dataf[\"val2\"] - dataf[\"val2\"].mean()\n\n with tm.assert_produces_warning(FutureWarning):\n result = df2.groupby(\"val1\", squeeze=True).apply(func)\n assert isinstance(result, Series)\n\n # GH3596, return a consistent type (regression in 0.11 from 0.10.1)\n df = DataFrame([[1, 1], [1, 1]], columns=[\"X\", \"Y\"])\n with tm.assert_produces_warning(FutureWarning):\n result = df.groupby(\"X\", squeeze=False).count()\n assert isinstance(result, DataFrame)\n\n\ndef test_inconsistent_return_type():\n # GH5592\n # inconsistent return type\n df = DataFrame(\n {\n \"A\": [\"Tiger\", \"Tiger\", \"Tiger\", \"Lamb\", \"Lamb\", \"Pony\", \"Pony\"],\n \"B\": Series(np.arange(7), dtype=\"int64\"),\n \"C\": date_range(\"20130101\", periods=7),\n }\n )\n\n def f(grp):\n return grp.iloc[0]\n\n expected = df.groupby(\"A\").first()[[\"B\"]]\n result = df.groupby(\"A\").apply(f)[[\"B\"]]\n tm.assert_frame_equal(result, expected)\n\n def f(grp):\n if grp.name == \"Tiger\":\n return None\n return grp.iloc[0]\n\n result = df.groupby(\"A\").apply(f)[[\"B\"]]\n e = expected.copy()\n e.loc[\"Tiger\"] = np.nan\n tm.assert_frame_equal(result, e)\n\n def f(grp):\n if grp.name == \"Pony\":\n return None\n return grp.iloc[0]\n\n result = df.groupby(\"A\").apply(f)[[\"B\"]]\n e = expected.copy()\n e.loc[\"Pony\"] = np.nan\n tm.assert_frame_equal(result, e)\n\n # 5592 revisited, with datetimes\n def f(grp):\n if grp.name == \"Pony\":\n return None\n return grp.iloc[0]\n\n result = df.groupby(\"A\").apply(f)[[\"C\"]]\n e = df.groupby(\"A\").first()[[\"C\"]]\n e.loc[\"Pony\"] = pd.NaT\n tm.assert_frame_equal(result, e)\n\n # scalar outputs\n def f(grp):\n if grp.name == \"Pony\":\n return None\n return grp.iloc[0].loc[\"C\"]\n\n result = df.groupby(\"A\").apply(f)\n e = df.groupby(\"A\").first()[\"C\"].copy()\n e.loc[\"Pony\"] = np.nan\n e.name = None\n tm.assert_series_equal(result, e)\n\n\ndef test_pass_args_kwargs(ts, tsframe):\n def f(x, q=None, axis=0):\n return np.percentile(x, q, axis=axis)\n\n g = lambda x: np.percentile(x, 80, axis=0)\n\n # Series\n ts_grouped = ts.groupby(lambda x: x.month)\n agg_result = ts_grouped.agg(np.percentile, 80, axis=0)\n apply_result = ts_grouped.apply(np.percentile, 80, axis=0)\n trans_result = ts_grouped.transform(np.percentile, 80, axis=0)\n\n agg_expected = ts_grouped.quantile(0.8)\n trans_expected = ts_grouped.transform(g)\n\n tm.assert_series_equal(apply_result, agg_expected)\n tm.assert_series_equal(agg_result, agg_expected)\n tm.assert_series_equal(trans_result, trans_expected)\n\n agg_result = ts_grouped.agg(f, q=80)\n apply_result = ts_grouped.apply(f, q=80)\n trans_result = ts_grouped.transform(f, q=80)\n tm.assert_series_equal(agg_result, agg_expected)\n tm.assert_series_equal(apply_result, agg_expected)\n tm.assert_series_equal(trans_result, trans_expected)\n\n # DataFrame\n for as_index in [True, False]:\n df_grouped = tsframe.groupby(lambda x: x.month, as_index=as_index)\n agg_result = df_grouped.agg(np.percentile, 80, axis=0)\n apply_result = df_grouped.apply(DataFrame.quantile, 0.8)\n expected = df_grouped.quantile(0.8)\n tm.assert_frame_equal(apply_result, expected, check_names=False)\n tm.assert_frame_equal(agg_result, expected)\n\n apply_result = df_grouped.apply(DataFrame.quantile, [0.4, 0.8])\n 
expected_seq = df_grouped.quantile([0.4, 0.8])\n tm.assert_frame_equal(apply_result, expected_seq, check_names=False)\n\n agg_result = df_grouped.agg(f, q=80)\n apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)\n tm.assert_frame_equal(agg_result, expected)\n tm.assert_frame_equal(apply_result, expected, check_names=False)\n\n\[email protected](\"as_index\", [True, False])\ndef test_pass_args_kwargs_duplicate_columns(tsframe, as_index):\n # go through _aggregate_frame with self.axis == 0 and duplicate columns\n tsframe.columns = [\"A\", \"B\", \"A\", \"C\"]\n gb = tsframe.groupby(lambda x: x.month, as_index=as_index)\n\n res = gb.agg(np.percentile, 80, axis=0)\n\n ex_data = {\n 1: tsframe[tsframe.index.month == 1].quantile(0.8),\n 2: tsframe[tsframe.index.month == 2].quantile(0.8),\n }\n expected = DataFrame(ex_data).T\n if not as_index:\n # TODO: try to get this more consistent?\n expected.index = Index(range(2))\n\n tm.assert_frame_equal(res, expected)\n\n\ndef test_len():\n df = tm.makeTimeDataFrame()\n grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])\n assert len(grouped) == len(df)\n\n grouped = df.groupby([lambda x: x.year, lambda x: x.month])\n expected = len({(x.year, x.month) for x in df.index})\n assert len(grouped) == expected\n\n # issue 11016\n df = DataFrame({\"a\": [np.nan] * 3, \"b\": [1, 2, 3]})\n assert len(df.groupby(\"a\")) == 0\n assert len(df.groupby(\"b\")) == 3\n assert len(df.groupby([\"a\", \"b\"])) == 3\n\n\ndef test_basic_regression():\n # regression\n result = Series([1.0 * x for x in list(range(1, 10)) * 10])\n\n data = np.random.random(1100) * 10.0\n groupings = Series(data)\n\n grouped = result.groupby(groupings)\n grouped.mean()\n\n\[email protected](\n \"dtype\", [\"float64\", \"float32\", \"int64\", \"int32\", \"int16\", \"int8\"]\n)\ndef test_with_na_groups(dtype):\n index = Index(np.arange(10))\n values = Series(np.ones(10), index, dtype=dtype)\n labels = Series(\n [np.nan, \"foo\", \"bar\", \"bar\", np.nan, np.nan, \"bar\", \"bar\", np.nan, \"foo\"],\n index=index,\n )\n\n # this SHOULD be an int\n grouped = values.groupby(labels)\n agged = grouped.agg(len)\n expected = Series([4, 2], index=[\"bar\", \"foo\"])\n\n tm.assert_series_equal(agged, expected, check_dtype=False)\n\n # assert issubclass(agged.dtype.type, np.integer)\n\n # explicitly return a float from my function\n def f(x):\n return float(len(x))\n\n agged = grouped.agg(f)\n expected = Series([4.0, 2.0], index=[\"bar\", \"foo\"])\n\n tm.assert_series_equal(agged, expected)\n\n\ndef test_indices_concatenation_order():\n\n # GH 2808\n\n def f1(x):\n y = x[(x.b % 2) == 1] ** 2\n if y.empty:\n multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=[\"b\", \"c\"])\n res = DataFrame(columns=[\"a\"], index=multiindex)\n return res\n else:\n y = y.set_index([\"b\", \"c\"])\n return y\n\n def f2(x):\n y = x[(x.b % 2) == 1] ** 2\n if y.empty:\n return DataFrame()\n else:\n y = y.set_index([\"b\", \"c\"])\n return y\n\n def f3(x):\n y = x[(x.b % 2) == 1] ** 2\n if y.empty:\n multiindex = MultiIndex(\n levels=[[]] * 2, codes=[[]] * 2, names=[\"foo\", \"bar\"]\n )\n res = DataFrame(columns=[\"a\", \"b\"], index=multiindex)\n return res\n else:\n return y\n\n df = DataFrame({\"a\": [1, 2, 2, 2], \"b\": range(4), \"c\": range(5, 9)})\n\n df2 = DataFrame({\"a\": [3, 2, 2, 2], \"b\": range(4), \"c\": range(5, 9)})\n\n # correct result\n result1 = df.groupby(\"a\").apply(f1)\n result2 = df2.groupby(\"a\").apply(f1)\n tm.assert_frame_equal(result1, result2)\n\n # 
should fail (not the same number of levels)\n msg = \"Cannot concat indices that do not have the same number of levels\"\n with pytest.raises(AssertionError, match=msg):\n df.groupby(\"a\").apply(f2)\n with pytest.raises(AssertionError, match=msg):\n df2.groupby(\"a\").apply(f2)\n\n # should fail (incorrect shape)\n with pytest.raises(AssertionError, match=msg):\n df.groupby(\"a\").apply(f3)\n with pytest.raises(AssertionError, match=msg):\n df2.groupby(\"a\").apply(f3)\n\n\ndef test_attr_wrapper(ts):\n grouped = ts.groupby(lambda x: x.weekday())\n\n result = grouped.std()\n expected = grouped.agg(lambda x: np.std(x, ddof=1))\n tm.assert_series_equal(result, expected)\n\n # this is pretty cool\n result = grouped.describe()\n expected = {name: gp.describe() for name, gp in grouped}\n expected = DataFrame(expected).T\n tm.assert_frame_equal(result, expected)\n\n # get attribute\n result = grouped.dtype\n expected = grouped.agg(lambda x: x.dtype)\n tm.assert_series_equal(result, expected)\n\n # make sure raises error\n msg = \"'SeriesGroupBy' object has no attribute 'foo'\"\n with pytest.raises(AttributeError, match=msg):\n getattr(grouped, \"foo\")\n\n\ndef test_frame_groupby(tsframe):\n grouped = tsframe.groupby(lambda x: x.weekday())\n\n # aggregate\n aggregated = grouped.aggregate(np.mean)\n assert len(aggregated) == 5\n assert len(aggregated.columns) == 4\n\n # by string\n tscopy = tsframe.copy()\n tscopy[\"weekday\"] = [x.weekday() for x in tscopy.index]\n stragged = tscopy.groupby(\"weekday\").aggregate(np.mean)\n tm.assert_frame_equal(stragged, aggregated, check_names=False)\n\n # transform\n grouped = tsframe.head(30).groupby(lambda x: x.weekday())\n transformed = grouped.transform(lambda x: x - x.mean())\n assert len(transformed) == 30\n assert len(transformed.columns) == 4\n\n # transform propagate\n transformed = grouped.transform(lambda x: x.mean())\n for name, group in grouped:\n mean = group.mean()\n for idx in group.index:\n tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)\n\n # iterate\n for weekday, group in grouped:\n assert group.index[0].weekday() == weekday\n\n # groups / group_indices\n groups = grouped.groups\n indices = grouped.indices\n\n for k, v in groups.items():\n samething = tsframe.index.take(indices[k])\n assert (samething == v).all()\n\n\ndef test_frame_groupby_columns(tsframe):\n mapping = {\"A\": 0, \"B\": 0, \"C\": 1, \"D\": 1}\n grouped = tsframe.groupby(mapping, axis=1)\n\n # aggregate\n aggregated = grouped.aggregate(np.mean)\n assert len(aggregated) == len(tsframe)\n assert len(aggregated.columns) == 2\n\n # transform\n tf = lambda x: x - x.mean()\n groupedT = tsframe.T.groupby(mapping, axis=0)\n tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))\n\n # iterate\n for k, v in grouped:\n assert len(v.columns) == 2\n\n\ndef test_frame_set_name_single(df):\n grouped = df.groupby(\"A\")\n\n result = grouped.mean()\n assert result.index.name == \"A\"\n\n result = df.groupby(\"A\", as_index=False).mean()\n assert result.index.name != \"A\"\n\n result = grouped.agg(np.mean)\n assert result.index.name == \"A\"\n\n result = grouped.agg({\"C\": np.mean, \"D\": np.std})\n assert result.index.name == \"A\"\n\n result = grouped[\"C\"].mean()\n assert result.index.name == \"A\"\n result = grouped[\"C\"].agg(np.mean)\n assert result.index.name == \"A\"\n result = grouped[\"C\"].agg([np.mean, np.std])\n assert result.index.name == \"A\"\n\n msg = r\"nested renamer is not supported\"\n with pytest.raises(SpecificationError, 
match=msg):\n grouped[\"C\"].agg({\"foo\": np.mean, \"bar\": np.std})\n\n\ndef test_multi_func(df):\n col1 = df[\"A\"]\n col2 = df[\"B\"]\n\n grouped = df.groupby([col1.get, col2.get])\n agged = grouped.mean()\n expected = df.groupby([\"A\", \"B\"]).mean()\n\n # TODO groupby get drops names\n tm.assert_frame_equal(\n agged.loc[:, [\"C\", \"D\"]], expected.loc[:, [\"C\", \"D\"]], check_names=False\n )\n\n # some \"groups\" with no data\n df = DataFrame(\n {\n \"v1\": np.random.randn(6),\n \"v2\": np.random.randn(6),\n \"k1\": np.array([\"b\", \"b\", \"b\", \"a\", \"a\", \"a\"]),\n \"k2\": np.array([\"1\", \"1\", \"1\", \"2\", \"2\", \"2\"]),\n },\n index=[\"one\", \"two\", \"three\", \"four\", \"five\", \"six\"],\n )\n # only verify that it works for now\n grouped = df.groupby([\"k1\", \"k2\"])\n grouped.agg(np.sum)\n\n\ndef test_multi_key_multiple_functions(df):\n grouped = df.groupby([\"A\", \"B\"])[\"C\"]\n\n agged = grouped.agg([np.mean, np.std])\n expected = DataFrame({\"mean\": grouped.agg(np.mean), \"std\": grouped.agg(np.std)})\n tm.assert_frame_equal(agged, expected)\n\n\ndef test_frame_multi_key_function_list():\n data = DataFrame(\n {\n \"A\": [\n \"foo\",\n \"foo\",\n \"foo\",\n \"foo\",\n \"bar\",\n \"bar\",\n \"bar\",\n \"bar\",\n \"foo\",\n \"foo\",\n \"foo\",\n ],\n \"B\": [\n \"one\",\n \"one\",\n \"one\",\n \"two\",\n \"one\",\n \"one\",\n \"one\",\n \"two\",\n \"two\",\n \"two\",\n \"one\",\n ],\n \"C\": [\n \"dull\",\n \"dull\",\n \"shiny\",\n \"dull\",\n \"dull\",\n \"shiny\",\n \"shiny\",\n \"dull\",\n \"shiny\",\n \"shiny\",\n \"shiny\",\n ],\n \"D\": np.random.randn(11),\n \"E\": np.random.randn(11),\n \"F\": np.random.randn(11),\n }\n )\n\n grouped = data.groupby([\"A\", \"B\"])\n funcs = [np.mean, np.std]\n with tm.assert_produces_warning(\n FutureWarning, match=r\"\\['C'\\] did not aggregate successfully\"\n ):\n agged = grouped.agg(funcs)\n expected = pd.concat(\n [grouped[\"D\"].agg(funcs), grouped[\"E\"].agg(funcs), grouped[\"F\"].agg(funcs)],\n keys=[\"D\", \"E\", \"F\"],\n axis=1,\n )\n assert isinstance(agged.index, MultiIndex)\n assert isinstance(expected.index, MultiIndex)\n tm.assert_frame_equal(agged, expected)\n\n\[email protected](\"op\", [lambda x: x.sum(), lambda x: x.mean()])\ndef test_groupby_multiple_columns(df, op):\n data = df\n grouped = data.groupby([\"A\", \"B\"])\n\n result1 = op(grouped)\n\n keys = []\n values = []\n for n1, gp1 in data.groupby(\"A\"):\n for n2, gp2 in gp1.groupby(\"B\"):\n keys.append((n1, n2))\n values.append(op(gp2.loc[:, [\"C\", \"D\"]]))\n\n mi = MultiIndex.from_tuples(keys, names=[\"A\", \"B\"])\n expected = pd.concat(values, axis=1).T\n expected.index = mi\n\n # a little bit crude\n for col in [\"C\", \"D\"]:\n result_col = op(grouped[col])\n pivoted = result1[col]\n exp = expected[col]\n tm.assert_series_equal(result_col, exp)\n tm.assert_series_equal(pivoted, exp)\n\n # test single series works the same\n result = data[\"C\"].groupby([data[\"A\"], data[\"B\"]]).mean()\n expected = data.groupby([\"A\", \"B\"]).mean()[\"C\"]\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_as_index_select_column():\n # GH 5764\n df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=[\"A\", \"B\"])\n result = df.groupby(\"A\", as_index=False)[\"B\"].get_group(1)\n expected = Series([2, 4], name=\"B\")\n tm.assert_series_equal(result, expected)\n\n result = df.groupby(\"A\", as_index=False)[\"B\"].apply(lambda x: x.cumsum())\n expected = Series(\n [2, 6, 6], name=\"B\", index=MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])\n )\n 
tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_as_index_select_column_sum_empty_df():\n # GH 35246\n df = DataFrame(columns=Index([\"A\", \"B\", \"C\"], name=\"alpha\"))\n left = df.groupby(by=\"A\", as_index=False)[\"B\"].sum(numeric_only=False)\n\n expected = DataFrame(columns=df.columns[:2], index=range(0))\n tm.assert_frame_equal(left, expected)\n\n\ndef test_groupby_as_index_agg(df):\n grouped = df.groupby(\"A\", as_index=False)\n\n # single-key\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n result2 = grouped.agg({\"C\": np.mean, \"D\": np.sum})\n expected2 = grouped.mean()\n expected2[\"D\"] = grouped.sum()[\"D\"]\n tm.assert_frame_equal(result2, expected2)\n\n grouped = df.groupby(\"A\", as_index=True)\n\n msg = r\"nested renamer is not supported\"\n with pytest.raises(SpecificationError, match=msg):\n grouped[\"C\"].agg({\"Q\": np.sum})\n\n # multi-key\n\n grouped = df.groupby([\"A\", \"B\"], as_index=False)\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n result2 = grouped.agg({\"C\": np.mean, \"D\": np.sum})\n expected2 = grouped.mean()\n expected2[\"D\"] = grouped.sum()[\"D\"]\n tm.assert_frame_equal(result2, expected2)\n\n expected3 = grouped[\"C\"].sum()\n expected3 = DataFrame(expected3).rename(columns={\"C\": \"Q\"})\n result3 = grouped[\"C\"].agg({\"Q\": np.sum})\n tm.assert_frame_equal(result3, expected3)\n\n # GH7115 & GH8112 & GH8582\n df = DataFrame(np.random.randint(0, 100, (50, 3)), columns=[\"jim\", \"joe\", \"jolie\"])\n ts = Series(np.random.randint(5, 10, 50), name=\"jim\")\n\n gr = df.groupby(ts)\n gr.nth(0) # invokes set_selection_from_grouper internally\n tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))\n\n for attr in [\"mean\", \"max\", \"count\", \"idxmax\", \"cumsum\", \"all\"]:\n gr = df.groupby(ts, as_index=False)\n left = getattr(gr, attr)()\n\n gr = df.groupby(ts.values, as_index=True)\n right = getattr(gr, attr)().reset_index(drop=True)\n\n tm.assert_frame_equal(left, right)\n\n\ndef test_ops_not_as_index(reduction_func):\n # GH 10355, 21090\n # Using as_index=False should not modify grouped column\n\n if reduction_func in (\"corrwith\",):\n pytest.skip(\"Test not applicable\")\n\n if reduction_func in (\"nth\", \"ngroup\"):\n pytest.skip(\"Skip until behavior is determined (GH #5755)\")\n\n df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=[\"a\", \"b\"])\n expected = getattr(df.groupby(\"a\"), reduction_func)()\n if reduction_func == \"size\":\n expected = expected.rename(\"size\")\n expected = expected.reset_index()\n\n if reduction_func != \"size\":\n # 32 bit compat -> groupby preserves dtype whereas reset_index casts to int64\n expected[\"a\"] = expected[\"a\"].astype(df[\"a\"].dtype)\n\n g = df.groupby(\"a\", as_index=False)\n\n result = getattr(g, reduction_func)()\n tm.assert_frame_equal(result, expected)\n\n result = g.agg(reduction_func)\n tm.assert_frame_equal(result, expected)\n\n result = getattr(g[\"b\"], reduction_func)()\n tm.assert_frame_equal(result, expected)\n\n result = g[\"b\"].agg(reduction_func)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_as_index_series_return_frame(df):\n grouped = df.groupby(\"A\", as_index=False)\n grouped2 = df.groupby([\"A\", \"B\"], as_index=False)\n\n result = grouped[\"C\"].agg(np.sum)\n expected = grouped.agg(np.sum).loc[:, [\"A\", \"C\"]]\n assert isinstance(result, DataFrame)\n tm.assert_frame_equal(result, expected)\n\n 
result2 = grouped2[\"C\"].agg(np.sum)\n expected2 = grouped2.agg(np.sum).loc[:, [\"A\", \"B\", \"C\"]]\n assert isinstance(result2, DataFrame)\n tm.assert_frame_equal(result2, expected2)\n\n result = grouped[\"C\"].sum()\n expected = grouped.sum().loc[:, [\"A\", \"C\"]]\n assert isinstance(result, DataFrame)\n tm.assert_frame_equal(result, expected)\n\n result2 = grouped2[\"C\"].sum()\n expected2 = grouped2.sum().loc[:, [\"A\", \"B\", \"C\"]]\n assert isinstance(result2, DataFrame)\n tm.assert_frame_equal(result2, expected2)\n\n\ndef test_as_index_series_column_slice_raises(df):\n # GH15072\n grouped = df.groupby(\"A\", as_index=False)\n msg = r\"Column\\(s\\) C already selected\"\n\n with pytest.raises(IndexError, match=msg):\n grouped[\"C\"].__getitem__(\"D\")\n\n\ndef test_groupby_as_index_cython(df):\n data = df\n\n # single-key\n grouped = data.groupby(\"A\", as_index=False)\n result = grouped.mean()\n expected = data.groupby([\"A\"]).mean()\n expected.insert(0, \"A\", expected.index)\n expected.index = np.arange(len(expected))\n tm.assert_frame_equal(result, expected)\n\n # multi-key\n grouped = data.groupby([\"A\", \"B\"], as_index=False)\n result = grouped.mean()\n expected = data.groupby([\"A\", \"B\"]).mean()\n\n arrays = list(zip(*expected.index.values))\n expected.insert(0, \"A\", arrays[0])\n expected.insert(1, \"B\", arrays[1])\n expected.index = np.arange(len(expected))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_as_index_series_scalar(df):\n grouped = df.groupby([\"A\", \"B\"], as_index=False)\n\n # GH #421\n\n result = grouped[\"C\"].agg(len)\n expected = grouped.agg(len).loc[:, [\"A\", \"B\", \"C\"]]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_as_index_corner(df, ts):\n msg = \"as_index=False only valid with DataFrame\"\n with pytest.raises(TypeError, match=msg):\n ts.groupby(lambda x: x.weekday(), as_index=False)\n\n msg = \"as_index=False only valid for axis=0\"\n with pytest.raises(ValueError, match=msg):\n df.groupby(lambda x: x.lower(), as_index=False, axis=1)\n\n\ndef test_groupby_multiple_key(df):\n df = tm.makeTimeDataFrame()\n grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])\n agged = grouped.sum()\n tm.assert_almost_equal(df.values, agged.values)\n\n grouped = df.T.groupby(\n [lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1\n )\n\n agged = grouped.agg(lambda x: x.sum())\n tm.assert_index_equal(agged.index, df.columns)\n tm.assert_almost_equal(df.T.values, agged.values)\n\n agged = grouped.agg(lambda x: x.sum())\n tm.assert_almost_equal(df.T.values, agged.values)\n\n\ndef test_groupby_multi_corner(df):\n # test that having an all-NA column doesn't mess you up\n df = df.copy()\n df[\"bad\"] = np.nan\n agged = df.groupby([\"A\", \"B\"]).mean()\n\n expected = df.groupby([\"A\", \"B\"]).mean()\n expected[\"bad\"] = np.nan\n\n tm.assert_frame_equal(agged, expected)\n\n\ndef test_omit_nuisance(df):\n grouped = df.groupby(\"A\")\n agged = grouped.agg(np.mean)\n exp = grouped.mean()\n tm.assert_frame_equal(agged, exp)\n\n df = df.loc[:, [\"A\", \"C\", \"D\"]]\n df[\"E\"] = datetime.now()\n grouped = df.groupby(\"A\")\n result = grouped.agg(np.sum)\n expected = grouped.sum()\n tm.assert_frame_equal(result, expected)\n\n # won't work with axis = 1\n grouped = df.groupby({\"A\": 0, \"C\": 0, \"D\": 1, \"E\": 1}, axis=1)\n msg = \"does not support reduction 'sum'\"\n with pytest.raises(TypeError, match=msg):\n grouped.agg(lambda x: x.sum(0, numeric_only=False))\n\n\[email protected](\n 
\"agg_function\",\n [\"max\", \"min\"],\n)\ndef test_keep_nuisance_agg(df, agg_function):\n # GH 38815\n grouped = df.groupby(\"A\")\n result = getattr(grouped, agg_function)()\n expected = result.copy()\n expected.loc[\"bar\", \"B\"] = getattr(df.loc[df[\"A\"] == \"bar\", \"B\"], agg_function)()\n expected.loc[\"foo\", \"B\"] = getattr(df.loc[df[\"A\"] == \"foo\", \"B\"], agg_function)()\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"agg_function\",\n [\"sum\", \"mean\", \"prod\", \"std\", \"var\", \"median\"],\n)\ndef test_omit_nuisance_agg(df, agg_function):\n # GH 38774, GH 38815\n grouped = df.groupby(\"A\")\n result = getattr(grouped, agg_function)()\n expected = getattr(df.loc[:, [\"A\", \"C\", \"D\"]].groupby(\"A\"), agg_function)()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_omit_nuisance_warnings(df):\n # GH 38815\n with tm.assert_produces_warning(FutureWarning, filter_level=\"always\"):\n grouped = df.groupby(\"A\")\n result = grouped.skew()\n expected = df.loc[:, [\"A\", \"C\", \"D\"]].groupby(\"A\").skew()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_omit_nuisance_python_multiple(three_group):\n grouped = three_group.groupby([\"A\", \"B\"])\n\n agged = grouped.agg(np.mean)\n exp = grouped.mean()\n tm.assert_frame_equal(agged, exp)\n\n\ndef test_empty_groups_corner(mframe):\n # handle empty groups\n df = DataFrame(\n {\n \"k1\": np.array([\"b\", \"b\", \"b\", \"a\", \"a\", \"a\"]),\n \"k2\": np.array([\"1\", \"1\", \"1\", \"2\", \"2\", \"2\"]),\n \"k3\": [\"foo\", \"bar\"] * 3,\n \"v1\": np.random.randn(6),\n \"v2\": np.random.randn(6),\n }\n )\n\n grouped = df.groupby([\"k1\", \"k2\"])\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n grouped = mframe[3:5].groupby(level=0)\n agged = grouped.apply(lambda x: x.mean())\n agged_A = grouped[\"A\"].apply(np.mean)\n tm.assert_series_equal(agged[\"A\"], agged_A)\n assert agged.index.name == \"first\"\n\n\ndef test_nonsense_func():\n df = DataFrame([0])\n msg = r\"unsupported operand type\\(s\\) for \\+: 'int' and 'str'\"\n with pytest.raises(TypeError, match=msg):\n df.groupby(lambda x: x + \"foo\")\n\n\ndef test_wrap_aggregated_output_multindex(mframe):\n df = mframe.T\n df[\"baz\", \"two\"] = \"peekaboo\"\n\n keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]\n agged = df.groupby(keys).agg(np.mean)\n assert isinstance(agged.columns, MultiIndex)\n\n def aggfun(ser):\n if ser.name == (\"foo\", \"one\"):\n raise TypeError\n else:\n return ser.sum()\n\n with tm.assert_produces_warning(FutureWarning, match=\"Dropping invalid columns\"):\n agged2 = df.groupby(keys).aggregate(aggfun)\n assert len(agged2.columns) + 1 == len(df.columns)\n\n\ndef test_groupby_level_apply(mframe):\n\n result = mframe.groupby(level=0).count()\n assert result.index.name == \"first\"\n result = mframe.groupby(level=1).count()\n assert result.index.name == \"second\"\n\n result = mframe[\"A\"].groupby(level=0).count()\n assert result.index.name == \"first\"\n\n\ndef test_groupby_level_mapper(mframe):\n deleveled = mframe.reset_index()\n\n mapper0 = {\"foo\": 0, \"bar\": 0, \"baz\": 1, \"qux\": 1}\n mapper1 = {\"one\": 0, \"two\": 0, \"three\": 1}\n\n result0 = mframe.groupby(mapper0, level=0).sum()\n result1 = mframe.groupby(mapper1, level=1).sum()\n\n mapped_level0 = np.array([mapper0.get(x) for x in deleveled[\"first\"]])\n mapped_level1 = np.array([mapper1.get(x) for x in deleveled[\"second\"]])\n expected0 = mframe.groupby(mapped_level0).sum()\n expected1 = 
mframe.groupby(mapped_level1).sum()\n expected0.index.name, expected1.index.name = \"first\", \"second\"\n\n tm.assert_frame_equal(result0, expected0)\n tm.assert_frame_equal(result1, expected1)\n\n\ndef test_groupby_level_nonmulti():\n # GH 1313, GH 13901\n s = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1, 4, 5, 2, 6], name=\"foo\"))\n expected = Series([11, 22, 3, 4, 5, 6], Index(range(1, 7), name=\"foo\"))\n\n result = s.groupby(level=0).sum()\n tm.assert_series_equal(result, expected)\n result = s.groupby(level=[0]).sum()\n tm.assert_series_equal(result, expected)\n result = s.groupby(level=-1).sum()\n tm.assert_series_equal(result, expected)\n result = s.groupby(level=[-1]).sum()\n tm.assert_series_equal(result, expected)\n\n msg = \"level > 0 or level < -1 only valid with MultiIndex\"\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=1)\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=-2)\n msg = \"No group keys passed!\"\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=[])\n msg = \"multiple levels only valid with MultiIndex\"\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=[0, 0])\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=[0, 1])\n msg = \"level > 0 or level < -1 only valid with MultiIndex\"\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=[1])\n\n\ndef test_groupby_complex():\n # GH 12902\n a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])\n expected = Series((1 + 2j, 5 + 10j))\n\n result = a.groupby(level=0).sum()\n tm.assert_series_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning):\n result = a.sum(level=0)\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_complex_numbers():\n # GH 17927\n df = DataFrame(\n [\n {\"a\": 1, \"b\": 1 + 1j},\n {\"a\": 1, \"b\": 1 + 2j},\n {\"a\": 4, \"b\": 1},\n ]\n )\n expected = DataFrame(\n np.array([1, 1, 1], dtype=np.int64),\n index=Index([(1 + 1j), (1 + 2j), (1 + 0j)], dtype=\"object\", name=\"b\"),\n columns=Index([\"a\"], dtype=\"object\"),\n )\n result = df.groupby(\"b\", sort=False).count()\n tm.assert_frame_equal(result, expected)\n\n # Sorted by the magnitude of the complex numbers\n # Complex Index dtype is cast to object\n expected.index = Index([(1 + 0j), (1 + 1j), (1 + 2j)], dtype=\"object\", name=\"b\")\n result = df.groupby(\"b\", sort=True).count()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_series_indexed_differently():\n s1 = Series(\n [5.0, -9.0, 4.0, 100.0, -5.0, 55.0, 6.7],\n index=Index([\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\"]),\n )\n s2 = Series(\n [1.0, 1.0, 4.0, 5.0, 5.0, 7.0], index=Index([\"a\", \"b\", \"d\", \"f\", \"g\", \"h\"])\n )\n\n grouped = s1.groupby(s2)\n agged = grouped.mean()\n exp = s1.groupby(s2.reindex(s1.index).get).mean()\n tm.assert_series_equal(agged, exp)\n\n\ndef test_groupby_with_hier_columns():\n tuples = list(\n zip(\n *[\n [\"bar\", \"bar\", \"baz\", \"baz\", \"foo\", \"foo\", \"qux\", \"qux\"],\n [\"one\", \"two\", \"one\", \"two\", \"one\", \"two\", \"one\", \"two\"],\n ]\n )\n )\n index = MultiIndex.from_tuples(tuples)\n columns = MultiIndex.from_tuples(\n [(\"A\", \"cat\"), (\"B\", \"dog\"), (\"B\", \"cat\"), (\"A\", \"dog\")]\n )\n df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)\n\n result = df.groupby(level=0).mean()\n tm.assert_index_equal(result.columns, columns)\n\n result = df.groupby(level=0, axis=1).mean()\n tm.assert_index_equal(result.index, df.index)\n\n result = 
df.groupby(level=0).agg(np.mean)\n tm.assert_index_equal(result.columns, columns)\n\n result = df.groupby(level=0).apply(lambda x: x.mean())\n tm.assert_index_equal(result.columns, columns)\n\n result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))\n tm.assert_index_equal(result.columns, Index([\"A\", \"B\"]))\n tm.assert_index_equal(result.index, df.index)\n\n # add a nuisance column\n sorted_columns, _ = columns.sortlevel(0)\n df[\"A\", \"foo\"] = \"bar\"\n result = df.groupby(level=0).mean()\n tm.assert_index_equal(result.columns, df.columns[:-1])\n\n\ndef test_grouping_ndarray(df):\n grouped = df.groupby(df[\"A\"].values)\n\n result = grouped.sum()\n expected = df.groupby(\"A\").sum()\n tm.assert_frame_equal(\n result, expected, check_names=False\n ) # Note: no names when grouping by value\n\n\ndef test_groupby_wrong_multi_labels():\n\n index = Index([0, 1, 2, 3, 4], name=\"index\")\n data = DataFrame(\n {\n \"foo\": [\"foo1\", \"foo1\", \"foo2\", \"foo1\", \"foo3\"],\n \"bar\": [\"bar1\", \"bar2\", \"bar2\", \"bar1\", \"bar1\"],\n \"baz\": [\"baz1\", \"baz1\", \"baz1\", \"baz2\", \"baz2\"],\n \"spam\": [\"spam2\", \"spam3\", \"spam2\", \"spam1\", \"spam1\"],\n \"data\": [20, 30, 40, 50, 60],\n },\n index=index,\n )\n\n grouped = data.groupby([\"foo\", \"bar\", \"baz\", \"spam\"])\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_series_with_name(df):\n result = df.groupby(df[\"A\"]).mean()\n result2 = df.groupby(df[\"A\"], as_index=False).mean()\n assert result.index.name == \"A\"\n assert \"A\" in result2\n\n result = df.groupby([df[\"A\"], df[\"B\"]]).mean()\n result2 = df.groupby([df[\"A\"], df[\"B\"]], as_index=False).mean()\n assert result.index.names == (\"A\", \"B\")\n assert \"A\" in result2\n assert \"B\" in result2\n\n\ndef test_seriesgroupby_name_attr(df):\n # GH 6265\n result = df.groupby(\"A\")[\"C\"]\n assert result.count().name == \"C\"\n assert result.mean().name == \"C\"\n\n testFunc = lambda x: np.sum(x) * 2\n assert result.agg(testFunc).name == \"C\"\n\n\ndef test_consistency_name():\n # GH 12363\n\n df = DataFrame(\n {\n \"A\": [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"two\", \"two\", \"two\", \"one\", \"two\"],\n \"C\": np.random.randn(8) + 1.0,\n \"D\": np.arange(8),\n }\n )\n\n expected = df.groupby([\"A\"]).B.count()\n result = df.B.groupby(df.A).count()\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_name_propagation(df):\n # GH 6124\n def summarize(df, name=None):\n return Series({\"count\": 1, \"mean\": 2, \"omissions\": 3}, name=name)\n\n def summarize_random_name(df):\n # Provide a different name for each Series. 
In this case, groupby\n # should not attempt to propagate the Series name since they are\n # inconsistent.\n return Series({\"count\": 1, \"mean\": 2, \"omissions\": 3}, name=df.iloc[0][\"A\"])\n\n metrics = df.groupby(\"A\").apply(summarize)\n assert metrics.columns.name is None\n metrics = df.groupby(\"A\").apply(summarize, \"metrics\")\n assert metrics.columns.name == \"metrics\"\n metrics = df.groupby(\"A\").apply(summarize_random_name)\n assert metrics.columns.name is None\n\n\ndef test_groupby_nonstring_columns():\n df = DataFrame([np.arange(10) for x in range(10)])\n grouped = df.groupby(0)\n result = grouped.mean()\n expected = df.groupby(df[0]).mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_mixed_type_columns():\n # GH 13432, unorderable types in py3\n df = DataFrame([[0, 1, 2]], columns=[\"A\", \"B\", 0])\n expected = DataFrame([[1, 2]], columns=[\"B\", 0], index=Index([0], name=\"A\"))\n\n result = df.groupby(\"A\").first()\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby(\"A\").sum()\n tm.assert_frame_equal(result, expected)\n\n\n# TODO: Ensure warning isn't emitted in the first place\[email protected](\"ignore:Mean of:RuntimeWarning\")\ndef test_cython_grouper_series_bug_noncontig():\n arr = np.empty((100, 100))\n arr.fill(np.nan)\n obj = Series(arr[:, 0])\n inds = np.tile(range(10), 10)\n\n result = obj.groupby(inds).agg(Series.median)\n assert result.isna().all()\n\n\ndef test_series_grouper_noncontig_index():\n index = Index(tm.rands_array(10, 100))\n\n values = Series(np.random.randn(50), index=index[::2])\n labels = np.random.randint(0, 5, 50)\n\n # it works!\n grouped = values.groupby(labels)\n\n # accessing the index elements causes segfault\n f = lambda x: len(set(map(id, x.index)))\n grouped.agg(f)\n\n\ndef test_convert_objects_leave_decimal_alone():\n\n s = Series(range(5))\n labels = np.array([\"a\", \"b\", \"c\", \"d\", \"e\"], dtype=\"O\")\n\n def convert_fast(x):\n return Decimal(str(x.mean()))\n\n def convert_force_pure(x):\n # base will be length 0\n assert len(x.values.base) > 0\n return Decimal(str(x.mean()))\n\n grouped = s.groupby(labels)\n\n result = grouped.agg(convert_fast)\n assert result.dtype == np.object_\n assert isinstance(result[0], Decimal)\n\n result = grouped.agg(convert_force_pure)\n assert result.dtype == np.object_\n assert isinstance(result[0], Decimal)\n\n\ndef test_groupby_dtype_inference_empty():\n # GH 6733\n df = DataFrame({\"x\": [], \"range\": np.arange(0, dtype=\"int64\")})\n assert df[\"x\"].dtype == np.float64\n\n result = df.groupby(\"x\").first()\n exp_index = Index([], name=\"x\", dtype=np.float64)\n expected = DataFrame({\"range\": Series([], index=exp_index, dtype=\"int64\")})\n tm.assert_frame_equal(result, expected, by_blocks=True)\n\n\ndef test_groupby_unit64_float_conversion():\n # GH: 30859 groupby converts unit64 to floats sometimes\n df = DataFrame({\"first\": [1], \"second\": [1], \"value\": [16148277970000000000]})\n result = df.groupby([\"first\", \"second\"])[\"value\"].max()\n expected = Series(\n [16148277970000000000],\n MultiIndex.from_product([[1], [1]], names=[\"first\", \"second\"]),\n name=\"value\",\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_list_infer_array_like(df):\n result = df.groupby(list(df[\"A\"])).mean()\n expected = df.groupby(df[\"A\"]).mean()\n tm.assert_frame_equal(result, expected, check_names=False)\n\n with pytest.raises(KeyError, match=r\"^'foo'$\"):\n df.groupby(list(df[\"A\"][:-1]))\n\n # pathological case of ambiguity\n 
df = DataFrame({\"foo\": [0, 1], \"bar\": [3, 4], \"val\": np.random.randn(2)})\n\n result = df.groupby([\"foo\", \"bar\"]).mean()\n expected = df.groupby([df[\"foo\"], df[\"bar\"]]).mean()[[\"val\"]]\n\n\ndef test_groupby_keys_same_size_as_index():\n # GH 11185\n freq = \"s\"\n index = date_range(\n start=Timestamp(\"2015-09-29T11:34:44-0700\"), periods=2, freq=freq\n )\n df = DataFrame([[\"A\", 10], [\"B\", 15]], columns=[\"metric\", \"values\"], index=index)\n result = df.groupby([Grouper(level=0, freq=freq), \"metric\"]).mean()\n expected = df.set_index([df.index, \"metric\"]).astype(float)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_one_row():\n # GH 11741\n msg = r\"^'Z'$\"\n df1 = DataFrame(np.random.randn(1, 4), columns=list(\"ABCD\"))\n with pytest.raises(KeyError, match=msg):\n df1.groupby(\"Z\")\n df2 = DataFrame(np.random.randn(2, 4), columns=list(\"ABCD\"))\n with pytest.raises(KeyError, match=msg):\n df2.groupby(\"Z\")\n\n\ndef test_groupby_nat_exclude():\n # GH 6992\n df = DataFrame(\n {\n \"values\": np.random.randn(8),\n \"dt\": [\n np.nan,\n Timestamp(\"2013-01-01\"),\n np.nan,\n Timestamp(\"2013-02-01\"),\n np.nan,\n Timestamp(\"2013-02-01\"),\n np.nan,\n Timestamp(\"2013-01-01\"),\n ],\n \"str\": [np.nan, \"a\", np.nan, \"a\", np.nan, \"a\", np.nan, \"b\"],\n }\n )\n grouped = df.groupby(\"dt\")\n\n expected = [Index([1, 7]), Index([3, 5])]\n keys = sorted(grouped.groups.keys())\n assert len(keys) == 2\n for k, e in zip(keys, expected):\n # grouped.groups keys are np.datetime64 with system tz\n # not to be affected by tz, only compare values\n tm.assert_index_equal(grouped.groups[k], e)\n\n # confirm obj is not filtered\n tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)\n assert grouped.ngroups == 2\n\n expected = {\n Timestamp(\"2013-01-01 00:00:00\"): np.array([1, 7], dtype=np.intp),\n Timestamp(\"2013-02-01 00:00:00\"): np.array([3, 5], dtype=np.intp),\n }\n\n for k in grouped.indices:\n tm.assert_numpy_array_equal(grouped.indices[k], expected[k])\n\n tm.assert_frame_equal(grouped.get_group(Timestamp(\"2013-01-01\")), df.iloc[[1, 7]])\n tm.assert_frame_equal(grouped.get_group(Timestamp(\"2013-02-01\")), df.iloc[[3, 5]])\n\n with pytest.raises(KeyError, match=r\"^NaT$\"):\n grouped.get_group(pd.NaT)\n\n nan_df = DataFrame(\n {\"nan\": [np.nan, np.nan, np.nan], \"nat\": [pd.NaT, pd.NaT, pd.NaT]}\n )\n assert nan_df[\"nan\"].dtype == \"float64\"\n assert nan_df[\"nat\"].dtype == \"datetime64[ns]\"\n\n for key in [\"nan\", \"nat\"]:\n grouped = nan_df.groupby(key)\n assert grouped.groups == {}\n assert grouped.ngroups == 0\n assert grouped.indices == {}\n with pytest.raises(KeyError, match=r\"^nan$\"):\n grouped.get_group(np.nan)\n with pytest.raises(KeyError, match=r\"^NaT$\"):\n grouped.get_group(pd.NaT)\n\n\ndef test_groupby_two_group_keys_all_nan():\n # GH #36842: Grouping over two group keys shouldn't raise an error\n df = DataFrame({\"a\": [np.nan, np.nan], \"b\": [np.nan, np.nan], \"c\": [1, 2]})\n result = df.groupby([\"a\", \"b\"]).indices\n assert result == {}\n\n\ndef test_groupby_2d_malformed():\n d = DataFrame(index=range(2))\n d[\"group\"] = [\"g1\", \"g2\"]\n d[\"zeros\"] = [0, 0]\n d[\"ones\"] = [1, 1]\n d[\"label\"] = [\"l1\", \"l2\"]\n tmp = d.groupby([\"group\"]).mean()\n res_values = np.array([[0.0, 1.0], [0.0, 1.0]])\n tm.assert_index_equal(tmp.columns, Index([\"zeros\", \"ones\"]))\n tm.assert_numpy_array_equal(tmp.values, res_values)\n\n\ndef test_int32_overflow():\n B = np.concatenate((np.arange(10000), 
np.arange(10000), np.arange(5000)))\n A = np.arange(25000)\n df = DataFrame({\"A\": A, \"B\": B, \"C\": A, \"D\": B, \"E\": np.random.randn(25000)})\n\n left = df.groupby([\"A\", \"B\", \"C\", \"D\"]).sum()\n right = df.groupby([\"D\", \"C\", \"B\", \"A\"]).sum()\n assert len(left) == len(right)\n\n\ndef test_groupby_sort_multi():\n df = DataFrame(\n {\n \"a\": [\"foo\", \"bar\", \"baz\"],\n \"b\": [3, 2, 1],\n \"c\": [0, 1, 2],\n \"d\": np.random.randn(3),\n }\n )\n\n tups = [tuple(row) for row in df[[\"a\", \"b\", \"c\"]].values]\n tups = com.asarray_tuplesafe(tups)\n result = df.groupby([\"a\", \"b\", \"c\"], sort=True).sum()\n tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])\n\n tups = [tuple(row) for row in df[[\"c\", \"a\", \"b\"]].values]\n tups = com.asarray_tuplesafe(tups)\n result = df.groupby([\"c\", \"a\", \"b\"], sort=True).sum()\n tm.assert_numpy_array_equal(result.index.values, tups)\n\n tups = [tuple(x) for x in df[[\"b\", \"c\", \"a\"]].values]\n tups = com.asarray_tuplesafe(tups)\n result = df.groupby([\"b\", \"c\", \"a\"], sort=True).sum()\n tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])\n\n df = DataFrame(\n {\"a\": [0, 1, 2, 0, 1, 2], \"b\": [0, 0, 0, 1, 1, 1], \"d\": np.random.randn(6)}\n )\n grouped = df.groupby([\"a\", \"b\"])[\"d\"]\n result = grouped.sum()\n\n def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):\n tups = [tuple(row) for row in df[keys].values]\n tups = com.asarray_tuplesafe(tups)\n expected = f(df.groupby(tups)[field])\n for k, v in expected.items():\n assert result[k] == v\n\n _check_groupby(df, result, [\"a\", \"b\"], \"d\")\n\n\ndef test_dont_clobber_name_column():\n df = DataFrame(\n {\"key\": [\"a\", \"a\", \"a\", \"b\", \"b\", \"b\"], \"name\": [\"foo\", \"bar\", \"baz\"] * 2}\n )\n\n result = df.groupby(\"key\").apply(lambda x: x)\n tm.assert_frame_equal(result, df)\n\n\ndef test_skip_group_keys():\n\n tsf = tm.makeTimeDataFrame()\n\n grouped = tsf.groupby(lambda x: x.month, group_keys=False)\n result = grouped.apply(lambda x: x.sort_values(by=\"A\")[:3])\n\n pieces = [group.sort_values(by=\"A\")[:3] for key, group in grouped]\n\n expected = pd.concat(pieces)\n tm.assert_frame_equal(result, expected)\n\n grouped = tsf[\"A\"].groupby(lambda x: x.month, group_keys=False)\n result = grouped.apply(lambda x: x.sort_values()[:3])\n\n pieces = [group.sort_values()[:3] for key, group in grouped]\n\n expected = pd.concat(pieces)\n tm.assert_series_equal(result, expected)\n\n\ndef test_no_nonsense_name(float_frame):\n # GH #995\n s = float_frame[\"C\"].copy()\n s.name = None\n\n result = s.groupby(float_frame[\"A\"]).agg(np.sum)\n assert result.name is None\n\n\ndef test_multifunc_sum_bug():\n # GH #1065\n x = DataFrame(np.arange(9).reshape(3, 3))\n x[\"test\"] = 0\n x[\"fl\"] = [1.3, 1.5, 1.6]\n\n grouped = x.groupby(\"test\")\n result = grouped.agg({\"fl\": \"sum\", 2: \"size\"})\n assert result[\"fl\"].dtype == np.float64\n\n\ndef test_handle_dict_return_value(df):\n def f(group):\n return {\"max\": group.max(), \"min\": group.min()}\n\n def g(group):\n return Series({\"max\": group.max(), \"min\": group.min()})\n\n result = df.groupby(\"A\")[\"C\"].apply(f)\n expected = df.groupby(\"A\")[\"C\"].apply(g)\n\n assert isinstance(result, Series)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"grouper\", [\"A\", [\"A\", \"B\"]])\ndef test_set_group_name(df, grouper):\n def f(group):\n assert group.name is not None\n return group\n\n def freduce(group):\n assert group.name is not None\n 
return group.sum()\n\n def foo(x):\n return freduce(x)\n\n grouped = df.groupby(grouper)\n\n # make sure all these work\n grouped.apply(f)\n grouped.aggregate(freduce)\n grouped.aggregate({\"C\": freduce, \"D\": freduce})\n grouped.transform(f)\n\n grouped[\"C\"].apply(f)\n grouped[\"C\"].aggregate(freduce)\n grouped[\"C\"].aggregate([freduce, foo])\n grouped[\"C\"].transform(f)\n\n\ndef test_group_name_available_in_inference_pass():\n # gh-15062\n df = DataFrame({\"a\": [0, 0, 1, 1, 2, 2], \"b\": np.arange(6)})\n\n names = []\n\n def f(group):\n names.append(group.name)\n return group.copy()\n\n df.groupby(\"a\", sort=False, group_keys=False).apply(f)\n\n expected_names = [0, 1, 2]\n assert names == expected_names\n\n\ndef test_no_dummy_key_names(df):\n # see gh-1291\n result = df.groupby(df[\"A\"].values).sum()\n assert result.index.name is None\n\n result = df.groupby([df[\"A\"].values, df[\"B\"].values]).sum()\n assert result.index.names == (None, None)\n\n\ndef test_groupby_sort_multiindex_series():\n # series multiindex groupby sort argument was not being passed through\n # _compress_group_index\n # GH 9444\n index = MultiIndex(\n levels=[[1, 2], [1, 2]],\n codes=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],\n names=[\"a\", \"b\"],\n )\n mseries = Series([0, 1, 2, 3, 4, 5], index=index)\n index = MultiIndex(\n levels=[[1, 2], [1, 2]], codes=[[0, 0, 1], [1, 0, 0]], names=[\"a\", \"b\"]\n )\n mseries_result = Series([0, 2, 4], index=index)\n\n result = mseries.groupby(level=[\"a\", \"b\"], sort=False).first()\n tm.assert_series_equal(result, mseries_result)\n result = mseries.groupby(level=[\"a\", \"b\"], sort=True).first()\n tm.assert_series_equal(result, mseries_result.sort_index())\n\n\ndef test_groupby_reindex_inside_function():\n\n periods = 1000\n ind = date_range(start=\"2012/1/1\", freq=\"5min\", periods=periods)\n df = DataFrame({\"high\": np.arange(periods), \"low\": np.arange(periods)}, index=ind)\n\n def agg_before(func, fix=False):\n \"\"\"\n Run an aggregate func on the subset of data.\n \"\"\"\n\n def _func(data):\n d = data.loc[data.index.map(lambda x: x.hour < 11)].dropna()\n if fix:\n data[data.index[0]]\n if len(d) == 0:\n return None\n return func(d)\n\n return _func\n\n grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))\n closure_bad = grouped.agg({\"high\": agg_before(np.max)})\n closure_good = grouped.agg({\"high\": agg_before(np.max, True)})\n\n tm.assert_frame_equal(closure_bad, closure_good)\n\n\ndef test_groupby_multiindex_missing_pair():\n # GH9049\n df = DataFrame(\n {\n \"group1\": [\"a\", \"a\", \"a\", \"b\"],\n \"group2\": [\"c\", \"c\", \"d\", \"c\"],\n \"value\": [1, 1, 1, 5],\n }\n )\n df = df.set_index([\"group1\", \"group2\"])\n df_grouped = df.groupby(level=[\"group1\", \"group2\"], sort=True)\n\n res = df_grouped.agg(\"sum\")\n idx = MultiIndex.from_tuples(\n [(\"a\", \"c\"), (\"a\", \"d\"), (\"b\", \"c\")], names=[\"group1\", \"group2\"]\n )\n exp = DataFrame([[2], [1], [5]], index=idx, columns=[\"value\"])\n\n tm.assert_frame_equal(res, exp)\n\n\ndef test_groupby_multiindex_not_lexsorted():\n # GH 11640\n\n # define the lexsorted version\n lexsorted_mi = MultiIndex.from_tuples(\n [(\"a\", \"\"), (\"b1\", \"c1\"), (\"b2\", \"c2\")], names=[\"b\", \"c\"]\n )\n lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)\n assert lexsorted_df.columns._is_lexsorted()\n\n # define the non-lexsorted version\n not_lexsorted_df = DataFrame(\n columns=[\"a\", \"b\", \"c\", \"d\"], data=[[1, \"b1\", \"c1\", 3], [1, \"b2\", \"c2\", 4]]\n )\n 
not_lexsorted_df = not_lexsorted_df.pivot_table(\n index=\"a\", columns=[\"b\", \"c\"], values=\"d\"\n )\n not_lexsorted_df = not_lexsorted_df.reset_index()\n assert not not_lexsorted_df.columns._is_lexsorted()\n\n # compare the results\n tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)\n\n expected = lexsorted_df.groupby(\"a\").mean()\n with tm.assert_produces_warning(PerformanceWarning):\n result = not_lexsorted_df.groupby(\"a\").mean()\n tm.assert_frame_equal(expected, result)\n\n # a transforming function should work regardless of sort\n # GH 14776\n df = DataFrame(\n {\"x\": [\"a\", \"a\", \"b\", \"a\"], \"y\": [1, 1, 2, 2], \"z\": [1, 2, 3, 4]}\n ).set_index([\"x\", \"y\"])\n assert not df.index._is_lexsorted()\n\n for level in [0, 1, [0, 1]]:\n for sort in [False, True]:\n result = df.groupby(level=level, sort=sort).apply(DataFrame.drop_duplicates)\n expected = df\n tm.assert_frame_equal(expected, result)\n\n result = (\n df.sort_index()\n .groupby(level=level, sort=sort)\n .apply(DataFrame.drop_duplicates)\n )\n expected = df.sort_index()\n tm.assert_frame_equal(expected, result)\n\n\ndef test_index_label_overlaps_location():\n # checking we don't have any label/location confusion in the\n # wake of GH5375\n df = DataFrame(list(\"ABCDE\"), index=[2, 0, 2, 1, 1])\n g = df.groupby(list(\"ababb\"))\n actual = g.filter(lambda x: len(x) > 2)\n expected = df.iloc[[1, 3, 4]]\n tm.assert_frame_equal(actual, expected)\n\n ser = df[0]\n g = ser.groupby(list(\"ababb\"))\n actual = g.filter(lambda x: len(x) > 2)\n expected = ser.take([1, 3, 4])\n tm.assert_series_equal(actual, expected)\n\n # and again, with a generic Index of floats\n df.index = df.index.astype(float)\n g = df.groupby(list(\"ababb\"))\n actual = g.filter(lambda x: len(x) > 2)\n expected = df.iloc[[1, 3, 4]]\n tm.assert_frame_equal(actual, expected)\n\n ser = df[0]\n g = ser.groupby(list(\"ababb\"))\n actual = g.filter(lambda x: len(x) > 2)\n expected = ser.take([1, 3, 4])\n tm.assert_series_equal(actual, expected)\n\n\ndef test_transform_doesnt_clobber_ints():\n # GH 7972\n n = 6\n x = np.arange(n)\n df = DataFrame({\"a\": x // 2, \"b\": 2.0 * x, \"c\": 3.0 * x})\n df2 = DataFrame({\"a\": x // 2 * 1.0, \"b\": 2.0 * x, \"c\": 3.0 * x})\n\n gb = df.groupby(\"a\")\n result = gb.transform(\"mean\")\n\n gb2 = df2.groupby(\"a\")\n expected = gb2.transform(\"mean\")\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"sort_column\",\n [\"ints\", \"floats\", \"strings\", [\"ints\", \"floats\"], [\"ints\", \"strings\"]],\n)\[email protected](\n \"group_column\", [\"int_groups\", \"string_groups\", [\"int_groups\", \"string_groups\"]]\n)\ndef test_groupby_preserves_sort(sort_column, group_column):\n # Test to ensure that groupby always preserves sort order of original\n # object. 
Issue #8588 and #9651\n\n df = DataFrame(\n {\n \"int_groups\": [3, 1, 0, 1, 0, 3, 3, 3],\n \"string_groups\": [\"z\", \"a\", \"z\", \"a\", \"a\", \"g\", \"g\", \"g\"],\n \"ints\": [8, 7, 4, 5, 2, 9, 1, 1],\n \"floats\": [2.3, 5.3, 6.2, -2.4, 2.2, 1.1, 1.1, 5],\n \"strings\": [\"z\", \"d\", \"a\", \"e\", \"word\", \"word2\", \"42\", \"47\"],\n }\n )\n\n # Try sorting on different types and with different group types\n\n df = df.sort_values(by=sort_column)\n g = df.groupby(group_column)\n\n def test_sort(x):\n tm.assert_frame_equal(x, x.sort_values(by=sort_column))\n\n g.apply(test_sort)\n\n\ndef test_pivot_table_values_key_error():\n # This test is designed to replicate the error in issue #14938\n df = DataFrame(\n {\n \"eventDate\": date_range(datetime.today(), periods=20, freq=\"M\").tolist(),\n \"thename\": range(0, 20),\n }\n )\n\n df[\"year\"] = df.set_index(\"eventDate\").index.year\n df[\"month\"] = df.set_index(\"eventDate\").index.month\n\n with pytest.raises(KeyError, match=\"'badname'\"):\n df.reset_index().pivot_table(\n index=\"year\", columns=\"month\", values=\"badname\", aggfunc=\"count\"\n )\n\n\[email protected](\"columns\", [\"C\", [\"C\"]])\[email protected](\"keys\", [[\"A\"], [\"A\", \"B\"]])\[email protected](\n \"values\",\n [\n [True],\n [0],\n [0.0],\n [\"a\"],\n Categorical([0]),\n [to_datetime(0)],\n date_range(0, 1, 1, tz=\"US/Eastern\"),\n pd.array([0], dtype=\"Int64\"),\n pd.array([0], dtype=\"Float64\"),\n pd.array([False], dtype=\"boolean\"),\n ],\n)\[email protected](\"method\", [\"attr\", \"agg\", \"apply\"])\[email protected](\n \"op\", [\"idxmax\", \"idxmin\", \"mad\", \"min\", \"max\", \"sum\", \"prod\", \"skew\"]\n)\[email protected](\"ignore:Dropping invalid columns:FutureWarning\")\[email protected](\"ignore:.*Select only valid:FutureWarning\")\ndef test_empty_groupby(columns, keys, values, method, op, request, using_array_manager):\n # GH8093 & GH26411\n override_dtype = None\n\n if (\n isinstance(values, Categorical)\n and not isinstance(columns, list)\n and op in [\"sum\", \"prod\", \"skew\", \"mad\"]\n ):\n # handled below GH#41291\n\n if using_array_manager and op == \"mad\":\n right_msg = \"Cannot interpret 'CategoricalDtype.* as a data type\"\n msg = \"Regex pattern \\\"'Categorical' does not implement.*\" + right_msg\n mark = pytest.mark.xfail(raises=AssertionError, match=msg)\n request.node.add_marker(mark)\n\n elif (\n isinstance(values, Categorical)\n and len(keys) == 1\n and op in [\"idxmax\", \"idxmin\"]\n ):\n mark = pytest.mark.xfail(\n raises=ValueError, match=\"attempt to get arg(min|max) of an empty sequence\"\n )\n request.node.add_marker(mark)\n elif (\n isinstance(values, Categorical)\n and len(keys) == 1\n and not isinstance(columns, list)\n ):\n mark = pytest.mark.xfail(\n raises=TypeError, match=\"'Categorical' does not implement\"\n )\n request.node.add_marker(mark)\n elif isinstance(values, Categorical) and len(keys) == 1 and op in [\"sum\", \"prod\"]:\n mark = pytest.mark.xfail(\n raises=AssertionError, match=\"(DataFrame|Series) are different\"\n )\n request.node.add_marker(mark)\n elif (\n isinstance(values, Categorical)\n and len(keys) == 2\n and op in [\"min\", \"max\", \"sum\"]\n ):\n mark = pytest.mark.xfail(\n raises=AssertionError, match=\"(DataFrame|Series) are different\"\n )\n request.node.add_marker(mark)\n elif (\n isinstance(values, (IntegerArray, FloatingArray))\n and op == \"mad\"\n and isinstance(columns, list)\n ):\n mark = pytest.mark.xfail(\n raises=TypeError, match=\"can only perform ops with numeric 
values\"\n )\n request.node.add_marker(mark)\n\n elif (\n op == \"mad\"\n and not isinstance(columns, list)\n and isinstance(values, pd.DatetimeIndex)\n and values.tz is not None\n and using_array_manager\n ):\n mark = pytest.mark.xfail(\n raises=TypeError,\n match=r\"Cannot interpret 'datetime64\\[ns, US/Eastern\\]' as a data type\",\n )\n request.node.add_marker(mark)\n\n elif isinstance(values, BooleanArray) and op in [\"sum\", \"prod\"]:\n # We expect to get Int64 back for these\n override_dtype = \"Int64\"\n\n if isinstance(values[0], bool) and op in (\"prod\", \"sum\"):\n # sum/product of bools is an integer\n override_dtype = \"int64\"\n\n df = DataFrame({\"A\": values, \"B\": values, \"C\": values}, columns=list(\"ABC\"))\n\n if hasattr(values, \"dtype\"):\n # check that we did the construction right\n assert (df.dtypes == values.dtype).all()\n\n df = df.iloc[:0]\n\n gb = df.groupby(keys)[columns]\n\n def get_result():\n if method == \"attr\":\n return getattr(gb, op)()\n else:\n return getattr(gb, method)(op)\n\n if columns == \"C\":\n # i.e. SeriesGroupBy\n if op in [\"prod\", \"sum\", \"skew\"]:\n # ops that require more than just ordered-ness\n if df.dtypes[0].kind == \"M\":\n # GH#41291\n # datetime64 -> prod and sum are invalid\n if op == \"skew\":\n msg = \"does not support reduction 'skew'\"\n else:\n msg = \"datetime64 type does not support\"\n with pytest.raises(TypeError, match=msg):\n get_result()\n\n return\n if op in [\"prod\", \"sum\", \"skew\", \"mad\"]:\n if isinstance(values, Categorical):\n # GH#41291\n if op == \"mad\":\n # mad calls mean, which Categorical doesn't implement\n msg = \"does not support reduction 'mean'\"\n elif op == \"skew\":\n msg = f\"does not support reduction '{op}'\"\n else:\n msg = \"category type does not support\"\n with pytest.raises(TypeError, match=msg):\n get_result()\n\n return\n else:\n # ie. 
DataFrameGroupBy\n if op in [\"prod\", \"sum\"]:\n # ops that require more than just ordered-ness\n if df.dtypes[0].kind == \"M\":\n # GH#41291\n # datetime64 -> prod and sum are invalid\n result = get_result()\n\n # with numeric_only=True, these are dropped, and we get\n # an empty DataFrame back\n expected = df.set_index(keys)[[]]\n tm.assert_equal(result, expected)\n return\n\n elif isinstance(values, Categorical):\n # GH#41291\n # Categorical doesn't implement sum or prod\n result = get_result()\n\n # with numeric_only=True, these are dropped, and we get\n # an empty DataFrame back\n expected = df.set_index(keys)[[]]\n if len(keys) != 1 and op == \"prod\":\n # TODO: why just prod and not sum?\n # Categorical is special without 'observed=True'\n lev = Categorical([0], dtype=values.dtype)\n mi = MultiIndex.from_product([lev, lev], names=[\"A\", \"B\"])\n expected = DataFrame([], columns=[], index=mi)\n\n tm.assert_equal(result, expected)\n return\n\n elif df.dtypes[0] == object:\n # FIXME: the test is actually wrong here, xref #41341\n result = get_result()\n # In this case we have list-of-list, will raise TypeError,\n # and subsequently be dropped as nuisance columns\n expected = df.set_index(keys)[[]]\n tm.assert_equal(result, expected)\n return\n\n if (\n op in [\"mad\", \"min\", \"max\", \"skew\"]\n and isinstance(values, Categorical)\n and len(keys) == 1\n ):\n # Categorical doesn't implement, so with numeric_only=True\n # these are dropped and we get an empty DataFrame back\n result = get_result()\n expected = df.set_index(keys)[[]]\n\n # with numeric_only=True, these are dropped, and we get\n # an empty DataFrame back\n if len(keys) != 1:\n # Categorical is special without 'observed=True'\n lev = Categorical([0], dtype=values.dtype)\n mi = MultiIndex.from_product([lev, lev], names=keys)\n expected = DataFrame([], columns=[], index=mi)\n else:\n # all columns are dropped, but we end up with one row\n # Categorical is special without 'observed=True'\n lev = Categorical([0], dtype=values.dtype)\n ci = Index(lev, name=keys[0])\n expected = DataFrame([], columns=[], index=ci)\n # expected = df.set_index(keys)[columns]\n\n tm.assert_equal(result, expected)\n return\n\n result = get_result()\n expected = df.set_index(keys)[columns]\n if override_dtype is not None:\n expected = expected.astype(override_dtype)\n if len(keys) == 1:\n expected.index.name = keys[0]\n tm.assert_equal(result, expected)\n\n\ndef test_empty_groupby_apply_nonunique_columns():\n # GH#44417\n df = DataFrame(np.random.randn(0, 4))\n df[3] = df[3].astype(np.int64)\n df.columns = [0, 1, 2, 0]\n gb = df.groupby(df[1])\n res = gb.apply(lambda x: x)\n assert (res.dtypes == df.dtypes).all()\n\n\ndef test_tuple_as_grouping():\n # https://github.com/pandas-dev/pandas/issues/18314\n df = DataFrame(\n {\n (\"a\", \"b\"): [1, 1, 1, 1],\n \"a\": [2, 2, 2, 2],\n \"b\": [2, 2, 2, 2],\n \"c\": [1, 1, 1, 1],\n }\n )\n\n with pytest.raises(KeyError, match=r\"('a', 'b')\"):\n df[[\"a\", \"b\", \"c\"]].groupby((\"a\", \"b\"))\n\n result = df.groupby((\"a\", \"b\"))[\"c\"].sum()\n expected = Series([4], name=\"c\", index=Index([1], name=(\"a\", \"b\")))\n tm.assert_series_equal(result, expected)\n\n\ndef test_tuple_correct_keyerror():\n # https://github.com/pandas-dev/pandas/issues/18798\n df = DataFrame(1, index=range(3), columns=MultiIndex.from_product([[1, 2], [3, 4]]))\n with pytest.raises(KeyError, match=r\"^\\(7, 8\\)$\"):\n df.groupby((7, 8)).mean()\n\n\ndef test_groupby_agg_ohlc_non_first():\n # GH 21716\n df = DataFrame(\n 
[[1], [1]],\n columns=Index([\"foo\"], name=\"mycols\"),\n index=date_range(\"2018-01-01\", periods=2, freq=\"D\", name=\"dti\"),\n )\n\n expected = DataFrame(\n [[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]],\n columns=MultiIndex.from_tuples(\n (\n (\"foo\", \"sum\", \"foo\"),\n (\"foo\", \"ohlc\", \"open\"),\n (\"foo\", \"ohlc\", \"high\"),\n (\"foo\", \"ohlc\", \"low\"),\n (\"foo\", \"ohlc\", \"close\"),\n ),\n names=[\"mycols\", None, None],\n ),\n index=date_range(\"2018-01-01\", periods=2, freq=\"D\", name=\"dti\"),\n )\n\n result = df.groupby(Grouper(freq=\"D\")).agg([\"sum\", \"ohlc\"])\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_multiindex_nat():\n # GH 9236\n values = [\n (pd.NaT, \"a\"),\n (datetime(2012, 1, 2), \"a\"),\n (datetime(2012, 1, 2), \"b\"),\n (datetime(2012, 1, 3), \"a\"),\n ]\n mi = MultiIndex.from_tuples(values, names=[\"date\", None])\n ser = Series([3, 2, 2.5, 4], index=mi)\n\n result = ser.groupby(level=1).mean()\n expected = Series([3.0, 2.5], index=[\"a\", \"b\"])\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_empty_list_raises():\n # GH 5289\n values = zip(range(10), range(10))\n df = DataFrame(values, columns=[\"apple\", \"b\"])\n msg = \"Grouper and axis must be same length\"\n with pytest.raises(ValueError, match=msg):\n df.groupby([[]])\n\n\ndef test_groupby_multiindex_series_keys_len_equal_group_axis():\n # GH 25704\n index_array = [[\"x\", \"x\"], [\"a\", \"b\"], [\"k\", \"k\"]]\n index_names = [\"first\", \"second\", \"third\"]\n ri = MultiIndex.from_arrays(index_array, names=index_names)\n s = Series(data=[1, 2], index=ri)\n result = s.groupby([\"first\", \"third\"]).sum()\n\n index_array = [[\"x\"], [\"k\"]]\n index_names = [\"first\", \"third\"]\n ei = MultiIndex.from_arrays(index_array, names=index_names)\n expected = Series([3], index=ei)\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_groups_in_BaseGrouper():\n # GH 26326\n # Test if DataFrame grouped with a pandas.Grouper has correct groups\n mi = MultiIndex.from_product([[\"A\", \"B\"], [\"C\", \"D\"]], names=[\"alpha\", \"beta\"])\n df = DataFrame({\"foo\": [1, 2, 1, 2], \"bar\": [1, 2, 3, 4]}, index=mi)\n result = df.groupby([Grouper(level=\"alpha\"), \"beta\"])\n expected = df.groupby([\"alpha\", \"beta\"])\n assert result.groups == expected.groups\n\n result = df.groupby([\"beta\", Grouper(level=\"alpha\")])\n expected = df.groupby([\"beta\", \"alpha\"])\n assert result.groups == expected.groups\n\n\[email protected](\"group_name\", [\"x\", [\"x\"]])\ndef test_groupby_axis_1(group_name):\n # GH 27614\n df = DataFrame(\n np.arange(12).reshape(3, 4), index=[0, 1, 0], columns=[10, 20, 10, 20]\n )\n df.index.name = \"y\"\n df.columns.name = \"x\"\n\n results = df.groupby(group_name, axis=1).sum()\n expected = df.T.groupby(group_name).sum().T\n tm.assert_frame_equal(results, expected)\n\n # test on MI column\n iterables = [[\"bar\", \"baz\", \"foo\"], [\"one\", \"two\"]]\n mi = MultiIndex.from_product(iterables=iterables, names=[\"x\", \"x1\"])\n df = DataFrame(np.arange(18).reshape(3, 6), index=[0, 1, 0], columns=mi)\n results = df.groupby(group_name, axis=1).sum()\n expected = df.T.groupby(group_name).sum().T\n tm.assert_frame_equal(results, expected)\n\n\[email protected](\n \"op, expected\",\n [\n (\n \"shift\",\n {\n \"time\": [\n None,\n None,\n Timestamp(\"2019-01-01 12:00:00\"),\n Timestamp(\"2019-01-01 12:30:00\"),\n None,\n None,\n ]\n },\n ),\n (\n \"bfill\",\n {\n \"time\": [\n Timestamp(\"2019-01-01 12:00:00\"),\n Timestamp(\"2019-01-01 
12:30:00\"),\n Timestamp(\"2019-01-01 14:00:00\"),\n Timestamp(\"2019-01-01 14:30:00\"),\n Timestamp(\"2019-01-01 14:00:00\"),\n Timestamp(\"2019-01-01 14:30:00\"),\n ]\n },\n ),\n (\n \"ffill\",\n {\n \"time\": [\n Timestamp(\"2019-01-01 12:00:00\"),\n Timestamp(\"2019-01-01 12:30:00\"),\n Timestamp(\"2019-01-01 12:00:00\"),\n Timestamp(\"2019-01-01 12:30:00\"),\n Timestamp(\"2019-01-01 14:00:00\"),\n Timestamp(\"2019-01-01 14:30:00\"),\n ]\n },\n ),\n ],\n)\ndef test_shift_bfill_ffill_tz(tz_naive_fixture, op, expected):\n # GH19995, GH27992: Check that timezone does not drop in shift, bfill, and ffill\n tz = tz_naive_fixture\n data = {\n \"id\": [\"A\", \"B\", \"A\", \"B\", \"A\", \"B\"],\n \"time\": [\n Timestamp(\"2019-01-01 12:00:00\"),\n Timestamp(\"2019-01-01 12:30:00\"),\n None,\n None,\n Timestamp(\"2019-01-01 14:00:00\"),\n Timestamp(\"2019-01-01 14:30:00\"),\n ],\n }\n df = DataFrame(data).assign(time=lambda x: x.time.dt.tz_localize(tz))\n\n grouped = df.groupby(\"id\")\n result = getattr(grouped, op)()\n expected = DataFrame(expected).assign(time=lambda x: x.time.dt.tz_localize(tz))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_only_none_group():\n # see GH21624\n # this was crashing with \"ValueError: Length of passed values is 1, index implies 0\"\n df = DataFrame({\"g\": [None], \"x\": 1})\n actual = df.groupby(\"g\")[\"x\"].transform(\"sum\")\n expected = Series([np.nan], name=\"x\")\n\n tm.assert_series_equal(actual, expected)\n\n\ndef test_groupby_duplicate_index():\n # GH#29189 the groupby call here used to raise\n ser = Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])\n gb = ser.groupby(level=0)\n\n result = gb.mean()\n expected = Series([2, 5.5, 8], index=[2.0, 4.0, 5.0])\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"idx\",\n [\n Index([\"a\", \"a\"], name=\"foo\"),\n MultiIndex.from_tuples(((\"a\", \"a\"), (\"a\", \"a\")), names=[\"foo\", \"bar\"]),\n ],\n)\[email protected](\"ignore:tshift is deprecated:FutureWarning\")\ndef test_dup_labels_output_shape(groupby_func, idx):\n if groupby_func in {\"size\", \"ngroup\", \"cumcount\"}:\n pytest.skip(\"Not applicable\")\n # TODO(2.0) Remove after pad/backfill deprecation enforced\n groupby_func = maybe_normalize_deprecated_kernels(groupby_func)\n df = DataFrame([[1, 1]], columns=idx)\n grp_by = df.groupby([0])\n\n args = []\n if groupby_func in {\"fillna\", \"nth\"}:\n args.append(0)\n elif groupby_func == \"corrwith\":\n args.append(df)\n elif groupby_func == \"tshift\":\n df.index = [Timestamp(\"today\")]\n args.extend([1, \"D\"])\n\n result = getattr(grp_by, groupby_func)(*args)\n\n assert result.shape == (1, 2)\n tm.assert_index_equal(result.columns, idx)\n\n\ndef test_groupby_crash_on_nunique(axis):\n # Fix following 30253\n dti = date_range(\"2016-01-01\", periods=2, name=\"foo\")\n df = DataFrame({(\"A\", \"B\"): [1, 2], (\"A\", \"C\"): [1, 3], (\"D\", \"B\"): [0, 0]})\n df.columns.names = (\"bar\", \"baz\")\n df.index = dti\n\n axis_number = df._get_axis_number(axis)\n if not axis_number:\n df = df.T\n\n gb = df.groupby(axis=axis_number, level=0)\n result = gb.nunique()\n\n expected = DataFrame({\"A\": [1, 2], \"D\": [1, 1]}, index=dti)\n expected.columns.name = \"bar\"\n if not axis_number:\n expected = expected.T\n\n tm.assert_frame_equal(result, expected)\n\n if axis_number == 0:\n # same thing, but empty columns\n gb2 = df[[]].groupby(axis=axis_number, level=0)\n exp = expected[[]]\n else:\n # same thing, but empty rows\n gb2 = df.loc[[]].groupby(axis=axis_number, 
level=0)\n # default for empty when we can't infer a dtype is float64\n exp = expected.loc[[]].astype(np.float64)\n\n res = gb2.nunique()\n tm.assert_frame_equal(res, exp)\n\n\ndef test_groupby_list_level():\n # GH 9790\n expected = DataFrame(np.arange(0, 9).reshape(3, 3), dtype=float)\n result = expected.groupby(level=[0]).mean()\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"max_seq_items, expected\",\n [\n (5, \"{0: [0], 1: [1], 2: [2], 3: [3], 4: [4]}\"),\n (4, \"{0: [0], 1: [1], 2: [2], 3: [3], ...}\"),\n (1, \"{0: [0], ...}\"),\n ],\n)\ndef test_groups_repr_truncates(max_seq_items, expected):\n # GH 1135\n df = DataFrame(np.random.randn(5, 1))\n df[\"a\"] = df.index\n\n with pd.option_context(\"display.max_seq_items\", max_seq_items):\n result = df.groupby(\"a\").groups.__repr__()\n assert result == expected\n\n result = df.groupby(np.array(df.a)).groups.__repr__()\n assert result == expected\n\n\ndef test_group_on_two_row_multiindex_returns_one_tuple_key():\n # GH 18451\n df = DataFrame([{\"a\": 1, \"b\": 2, \"c\": 99}, {\"a\": 1, \"b\": 2, \"c\": 88}])\n df = df.set_index([\"a\", \"b\"])\n\n grp = df.groupby([\"a\", \"b\"])\n result = grp.indices\n expected = {(1, 2): np.array([0, 1], dtype=np.int64)}\n\n assert len(result) == 1\n key = (1, 2)\n assert (result[key] == expected[key]).all()\n\n\[email protected](\n \"klass, attr, value\",\n [\n (DataFrame, \"level\", \"a\"),\n (DataFrame, \"as_index\", False),\n (DataFrame, \"sort\", False),\n (DataFrame, \"group_keys\", False),\n (DataFrame, \"squeeze\", True),\n (DataFrame, \"observed\", True),\n (DataFrame, \"dropna\", False),\n pytest.param(\n Series,\n \"axis\",\n 1,\n marks=pytest.mark.xfail(\n reason=\"GH 35443: Attribute currently not passed on to series\"\n ),\n ),\n (Series, \"level\", \"a\"),\n (Series, \"as_index\", False),\n (Series, \"sort\", False),\n (Series, \"group_keys\", False),\n (Series, \"squeeze\", True),\n (Series, \"observed\", True),\n (Series, \"dropna\", False),\n ],\n)\[email protected](\n \"ignore:The `squeeze` parameter is deprecated:FutureWarning\"\n)\ndef test_subsetting_columns_keeps_attrs(klass, attr, value):\n # GH 9959 - When subsetting columns, don't drop attributes\n df = DataFrame({\"a\": [1], \"b\": [2], \"c\": [3]})\n if attr != \"axis\":\n df = df.set_index(\"a\")\n\n expected = df.groupby(\"a\", **{attr: value})\n result = expected[[\"b\"]] if klass is DataFrame else expected[\"b\"]\n assert getattr(result, attr) == getattr(expected, attr)\n\n\ndef test_subsetting_columns_axis_1():\n # GH 37725\n g = DataFrame({\"A\": [1], \"B\": [2], \"C\": [3]}).groupby([0, 0, 1], axis=1)\n match = \"Cannot subset columns when using axis=1\"\n with pytest.raises(ValueError, match=match):\n g[[\"A\", \"B\"]].sum()\n\n\[email protected](\"func\", [\"sum\", \"any\", \"shift\"])\ndef test_groupby_column_index_name_lost(func):\n # GH: 29764 groupby loses index sometimes\n expected = Index([\"a\"], name=\"idx\")\n df = DataFrame([[1]], columns=expected)\n df_grouped = df.groupby([1])\n result = getattr(df_grouped, func)().columns\n tm.assert_index_equal(result, expected)\n\n\ndef test_groupby_duplicate_columns():\n # GH: 31735\n df = DataFrame(\n {\"A\": [\"f\", \"e\", \"g\", \"h\"], \"B\": [\"a\", \"b\", \"c\", \"d\"], \"C\": [1, 2, 3, 4]}\n ).astype(object)\n df.columns = [\"A\", \"B\", \"B\"]\n result = df.groupby([0, 0, 0, 0]).min()\n expected = DataFrame([[\"e\", \"a\", 1]], columns=[\"A\", \"B\", \"B\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef 
test_groupby_series_with_tuple_name():\n # GH 37755\n ser = Series([1, 2, 3, 4], index=[1, 1, 2, 2], name=(\"a\", \"a\"))\n ser.index.name = (\"b\", \"b\")\n result = ser.groupby(level=0).last()\n expected = Series([2, 4], index=[1, 2], name=(\"a\", \"a\"))\n expected.index.name = (\"b\", \"b\")\n tm.assert_series_equal(result, expected)\n\n\[email protected](not IS64, reason=\"GH#38778: fail on 32-bit system\")\[email protected](\n \"func, values\", [(\"sum\", [97.0, 98.0]), (\"mean\", [24.25, 24.5])]\n)\ndef test_groupby_numerical_stability_sum_mean(func, values):\n # GH#38778\n data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]\n df = DataFrame({\"group\": [1, 2] * 4, \"a\": data, \"b\": data})\n result = getattr(df.groupby(\"group\"), func)()\n expected = DataFrame({\"a\": values, \"b\": values}, index=Index([1, 2], name=\"group\"))\n tm.assert_frame_equal(result, expected)\n\n\[email protected](not IS64, reason=\"GH#38778: fail on 32-bit system\")\ndef test_groupby_numerical_stability_cumsum():\n # GH#38934\n data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]\n df = DataFrame({\"group\": [1, 2] * 4, \"a\": data, \"b\": data})\n result = df.groupby(\"group\").cumsum()\n exp_data = (\n [1e16] * 2 + [1e16 + 96, 1e16 + 98] + [5e15 + 97, 5e15 + 98] + [97.0, 98.0]\n )\n expected = DataFrame({\"a\": exp_data, \"b\": exp_data})\n tm.assert_frame_equal(result, expected, check_exact=True)\n\n\ndef test_groupby_mean_duplicate_index(rand_series_with_duplicate_datetimeindex):\n dups = rand_series_with_duplicate_datetimeindex\n result = dups.groupby(level=0).mean()\n expected = dups.groupby(dups.index).mean()\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_all_nan_groups_drop():\n # GH 15036\n s = Series([1, 2, 3], [np.nan, np.nan, np.nan])\n result = s.groupby(s.index).sum()\n expected = Series([], index=Index([], dtype=np.float64), dtype=np.int64)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"numeric_only\", [True, False])\ndef test_groupby_empty_multi_column(as_index, numeric_only):\n # GH 15106 & GH 41998\n df = DataFrame(data=[], columns=[\"A\", \"B\", \"C\"])\n gb = df.groupby([\"A\", \"B\"], as_index=as_index)\n result = gb.sum(numeric_only=numeric_only)\n if as_index:\n index = MultiIndex([[], []], [[], []], names=[\"A\", \"B\"])\n columns = [\"C\"] if not numeric_only else []\n else:\n index = RangeIndex(0)\n columns = [\"A\", \"B\", \"C\"] if not numeric_only else [\"A\", \"B\"]\n expected = DataFrame([], columns=columns, index=index)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_aggregation_non_numeric_dtype():\n # GH #43108\n df = DataFrame(\n [[\"M\", [1]], [\"M\", [1]], [\"W\", [10]], [\"W\", [20]]], columns=[\"MW\", \"v\"]\n )\n\n expected = DataFrame(\n {\n \"v\": [[1, 1], [10, 20]],\n },\n index=Index([\"M\", \"W\"], dtype=\"object\", name=\"MW\"),\n )\n\n gb = df.groupby(by=[\"MW\"])\n result = gb.sum()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_aggregation_multi_non_numeric_dtype():\n # GH #42395\n df = DataFrame(\n {\n \"x\": [1, 0, 1, 1, 0],\n \"y\": [Timedelta(i, \"days\") for i in range(1, 6)],\n \"z\": [Timedelta(i * 10, \"days\") for i in range(1, 6)],\n }\n )\n\n expected = DataFrame(\n {\n \"y\": [Timedelta(i, \"days\") for i in range(7, 9)],\n \"z\": [Timedelta(i * 10, \"days\") for i in range(7, 9)],\n },\n index=Index([0, 1], dtype=\"int64\", name=\"x\"),\n )\n\n gb = df.groupby(by=[\"x\"])\n result = gb.sum()\n tm.assert_frame_equal(result, expected)\n\n\ndef 
test_groupby_aggregation_numeric_with_non_numeric_dtype():\n # GH #43108\n df = DataFrame(\n {\n \"x\": [1, 0, 1, 1, 0],\n \"y\": [Timedelta(i, \"days\") for i in range(1, 6)],\n \"z\": list(range(1, 6)),\n }\n )\n\n expected = DataFrame(\n {\"z\": [7, 8]},\n index=Index([0, 1], dtype=\"int64\", name=\"x\"),\n )\n\n gb = df.groupby(by=[\"x\"])\n result = gb.sum()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_filtered_df_std():\n # GH 16174\n dicts = [\n {\"filter_col\": False, \"groupby_col\": True, \"bool_col\": True, \"float_col\": 10.5},\n {\"filter_col\": True, \"groupby_col\": True, \"bool_col\": True, \"float_col\": 20.5},\n {\"filter_col\": True, \"groupby_col\": True, \"bool_col\": True, \"float_col\": 30.5},\n ]\n df = DataFrame(dicts)\n\n df_filter = df[df[\"filter_col\"] == True] # noqa:E712\n dfgb = df_filter.groupby(\"groupby_col\")\n result = dfgb.std()\n expected = DataFrame(\n [[0.0, 0.0, 7.071068]],\n columns=[\"filter_col\", \"bool_col\", \"float_col\"],\n index=Index([True], name=\"groupby_col\"),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_datetime_categorical_multikey_groupby_indices():\n # GH 26859\n df = DataFrame(\n {\n \"a\": Series(list(\"abc\")),\n \"b\": Series(\n to_datetime([\"2018-01-01\", \"2018-02-01\", \"2018-03-01\"]),\n dtype=\"category\",\n ),\n \"c\": Categorical.from_codes([-1, 0, 1], categories=[0, 1]),\n }\n )\n result = df.groupby([\"a\", \"b\"]).indices\n expected = {\n (\"a\", Timestamp(\"2018-01-01 00:00:00\")): np.array([0]),\n (\"b\", Timestamp(\"2018-02-01 00:00:00\")): np.array([1]),\n (\"c\", Timestamp(\"2018-03-01 00:00:00\")): np.array([2]),\n }\n assert result == expected\n\n\ndef test_rolling_wrong_param_min_period():\n # GH34037\n name_l = [\"Alice\"] * 5 + [\"Bob\"] * 5\n val_l = [np.nan, np.nan, 1, 2, 3] + [np.nan, 1, 2, 3, 4]\n test_df = DataFrame([name_l, val_l]).T\n test_df.columns = [\"name\", \"val\"]\n\n result_error_msg = r\"__init__\\(\\) got an unexpected keyword argument 'min_period'\"\n with pytest.raises(TypeError, match=result_error_msg):\n test_df.groupby(\"name\")[\"val\"].rolling(window=2, min_period=1).sum()\n\n\ndef test_pad_backfill_deprecation():\n # GH 33396\n s = Series([1, 2, 3])\n with tm.assert_produces_warning(FutureWarning, match=\"backfill\"):\n s.groupby(level=0).backfill()\n with tm.assert_produces_warning(FutureWarning, match=\"pad\"):\n s.groupby(level=0).pad()\n"
] | [
[
"pandas.Grouper",
"pandas._testing.rands_array",
"pandas.core.common.asarray_tuplesafe",
"pandas.Timestamp",
"pandas.concat",
"pandas._testing.assert_series_equal",
"numpy.random.random",
"numpy.empty",
"pandas.Timedelta",
"pandas.DataFrame",
"pandas._testing.makeTimeDataFrame",
"numpy.arange",
"numpy.random.randint",
"pandas.MultiIndex",
"pandas.to_datetime",
"numpy.array",
"numpy.percentile",
"pandas.array",
"pandas.MultiIndex.from_tuples",
"numpy.random.randn",
"numpy.random.shuffle",
"pandas._testing.assert_frame_equal",
"pandas.Categorical.from_codes",
"pandas.MultiIndex.from_arrays",
"numpy.std",
"pandas.MultiIndex.from_product",
"pandas._testing.assert_equal",
"pandas._testing.assert_index_equal",
"pandas.Index",
"pandas._testing.assert_produces_warning",
"numpy.sum",
"pandas.date_range",
"numpy.ones",
"pandas._testing.assert_almost_equal",
"pandas.RangeIndex",
"pandas.Categorical",
"pandas._testing.assert_numpy_array_equal",
"pandas.option_context",
"pandas.Series",
"pandas.core.groupby.base.maybe_normalize_deprecated_kernels"
]
] |
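
The stored file above is pandas' own groupby test module. As a pointer for readers skimming the dump, the minimal sketch below (not part of the stored file) isolates the pandas.Grouper aggregation pattern that test_groupby_agg_ohlc_non_first exercises; the frame contents are copied from that test and are otherwise arbitrary.

import pandas as pd

# Daily frame with a DatetimeIndex, as in test_groupby_agg_ohlc_non_first.
df = pd.DataFrame(
    [[1], [1]],
    columns=pd.Index(["foo"], name="mycols"),
    index=pd.date_range("2018-01-01", periods=2, freq="D", name="dti"),
)

# Group by calendar day and apply two aggregations; the result carries a
# three-level column MultiIndex such as ("foo", "ohlc", "open").
result = df.groupby(pd.Grouper(freq="D")).agg(["sum", "ohlc"])
print(result.columns.tolist())
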
Johannes-Sahlmann/uhelpers | [
"58f8e25ef8644ab5b24a5be76fd58a338a400912"
] | [
"uhelpers/plotting_helpers.py"
] | [
"\"\"\"Helper functions for recurring plotting tasks\n\nAuthors\n-------\n\n Johannes Sahlmann\n\nUse\n---\n\n\"\"\"\nimport os\nimport numpy as np\nimport pylab as pl\nfrom scipy.stats import norm\n\n\ndef histogram_with_gaussian_fit(omc, facecolors=None, labels=None, titles=None, linecolors=None, xlabel='value', normed=0,\n save_plot=0, out_dir='', name_seed='', separate_panels=False, show_fit=True, **kwargs):\n \"\"\"Plot one or several histograms and perform Gaussian fit(s).\n\n Parameters\n ----------\n omc\n facecolors\n labels\n titles\n linecolors\n xlabel\n normed\n save_plot\n out_dir\n name_seed\n separate_panels\n show_fit\n kwargs\n\n Returns\n -------\n\n \"\"\"\n if omc.ndim == 1:\n Nhist = 1\n omc = np.expand_dims(omc, axis=-1)\n else:\n Nhist = omc.shape[1]\n\n if facecolors is None:\n facecolors = ['grey'] * Nhist\n linecolors = ['k'] * Nhist\n if labels is None:\n labels = ['data %d' % j for j in np.arange(Nhist)]\n\n if separate_panels:\n fig = pl.figure(figsize=(12, 5), facecolor='w', edgecolor='k');\n pl.clf()\n alpha = 0.8\n linecolors = ['k'] * Nhist\n else:\n fig = pl.figure(figsize=(8, 6), facecolor='w', edgecolor='k');\n pl.clf()\n alpha = 0.5\n\n for i in np.arange(Nhist):\n if separate_panels:\n pl.subplot(1, 2, i + 1)\n\n data = omc[:, i]\n # from http://stackoverflow.com/questions/7805552/fitting-a-histogram-with-python\n (mu, sigma) = norm.fit(data)\n if show_fit == False:\n histlabel = labels[i]\n else:\n histlabel = None\n n, bins, patches = pl.hist(data, normed=normed, facecolor=facecolors[i], color=linecolors[i], alpha=alpha,\n histtype='stepfilled', label=histlabel, **kwargs)\n if normed:\n normFact = 1.\n ylabel = 'Probability'\n else:\n normFact = np.sum(n) * np.mean(np.diff(bins));\n ylabel = 'N'\n if show_fit:\n y = norm.pdf(bins, mu, sigma) # add a 'best fit' line\n l = pl.plot(bins, y * normFact, 'k-', linewidth=2, color=linecolors[i],\n label='{0:s}: $\\mu$={2:1.3f}$\\pm${1:1.3f}'.format(labels[i], sigma, mu))\n\n if titles is not None:\n if type(titles) == list:\n pl.title(titles[i])\n else:\n pl.title(titles)\n pl.xlabel(xlabel)\n pl.ylabel(ylabel) # pl.ylim((0,max(n)+1))\n pl.legend(loc='best')\n pl.show()\n\n fig.tight_layout(h_pad=0.0)\n if save_plot:\n figName = os.path.join(out_dir, '%s_distortionFit_residualWithFit.pdf' % name_seed)\n pl.savefig(figName, transparent=True, bbox_inches='tight', pad_inches=0)\n\n\ndef multiple_histograms(all_data, facecolors=None, labels=None, titles=None,\n linecolors=None, xlabel='value', normed=0, save_plot=0, out_dir='',\n name_seed='', separate_panels=False, show_fit=True, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n all_data\n facecolors\n labels\n titles\n linecolors\n xlabel\n normed\n save_plot\n out_dir\n name_seed\n separate_panels\n show_fit\n kwargs\n\n Returns\n -------\n\n \"\"\"\n Nhist = len(all_data)\n\n if facecolors is None:\n facecolors = ['grey'] * Nhist\n if linecolors is None:\n linecolors = ['k'] * Nhist\n if labels is None:\n labels = ['data %d' % j for j in np.arange(Nhist)]\n\n if separate_panels:\n fig = pl.figure(figsize=(12, 5), facecolor='w', edgecolor='k')\n pl.clf()\n alpha = 0.8\n linecolors = ['k'] * Nhist\n else:\n fig = pl.figure(figsize=(8, 4), facecolor='w', edgecolor='k')\n pl.clf()\n alpha = 0.5\n\n for i in np.arange(Nhist):\n if separate_panels:\n pl.subplot(1, 2, i + 1)\n\n data = all_data[i]\n histlabel = labels[i]\n n, bins, patches = pl.hist(data, normed=normed, facecolor=facecolors[i],\n color=linecolors[i], alpha=alpha, histtype='stepfilled',\n 
label=histlabel, **kwargs)\n if normed:\n normFact = 1.\n ylabel = 'Probability'\n else:\n normFact = np.sum(n) * np.mean(np.diff(bins))\n ylabel = 'N'\n\n if titles is not None:\n pl.title(titles[i])\n pl.xlabel(xlabel)\n pl.ylabel(ylabel) # pl.ylim((0,max(n)+1))\n pl.legend(loc=2)\n pl.show()\n\n fig.tight_layout(h_pad=0.0)\n"
] | [
[
"scipy.stats.norm.pdf",
"numpy.sum",
"numpy.diff",
"scipy.stats.norm.fit",
"numpy.arange",
"numpy.expand_dims"
]
] |
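
The helper above overlays a Gaussian fit on a histogram by rescaling the fitted pdf with the histogram area. A standalone sketch of that rescaling follows (not part of the stored file; the sample data is made up). Note that later matplotlib releases replaced the normed= keyword used by pylab.hist in this module with density=.

import numpy as np
from scipy.stats import norm

data = np.random.normal(loc=0.0, scale=2.0, size=1000)   # illustrative sample
mu, sigma = norm.fit(data)                                # ML estimates of the Gaussian

counts, bins = np.histogram(data, bins=30)
norm_factor = np.sum(counts) * np.mean(np.diff(bins))     # histogram area, as in the helper
fitted = norm.pdf(bins, mu, sigma) * norm_factor          # curve on the same scale as counts
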
Parasgupta44/py_holiday_calendar | [
"18ecc88b3638a1b126e159f96a31a88e517f45f1"
] | [
"py_holiday_calendar/py_holiday_calendar.py"
] | [
"import pandas as pd\nimport datetime\nfrom business_calendar import Calendar, MO, TU, WE, TH, FR\n\n\n# obj_cal = Calendar(workdays=[MO, TU, WE, TH, FR], holidays=[])\n\ndef _initialise_cal_obj(workdays, holidays=[]):\n \"\"\"Function to initialise custom calendar object.\n\n The return value must be the custom calendr object.\n\n Parameters\n ----------\n workdays\n List of custom workdays.\n holidays\n List of custom holidays.\n\n Returns\n -------\n Object\n Custom calendar object.\n\n \"\"\"\n obj_cal = Calendar(workdays=workdays, holidays=holidays)\n return obj_cal\n\n\ndef calc_diff_with_holidays(df, start, end, new_col, workdays=[MO, TU, WE, TH, FR], holidays=[]):\n \"\"\"Calculate difference between pandas df columns adjusting for custom bus and holidays. Start date is not included.\n\n The return type will be the pandas dataframe.\n\n Parameters\n ----------\n df\n The pandas dataframe to work on.\n start\n Start Date column in the df.\n end\n End Date column in the df.\n new_col\n New column to be generated containing the difference.\n workdays\n List of custom workdays.\n holidays\n List of custom holidays.\n\n Returns\n -------\n Dataframe\n Modified dataframe with new_col for difference between dates.\n\n \"\"\"\n # Cast to datetime if not already\n df[start] = pd.to_datetime(df[start])\n df[end] = pd.to_datetime(df[end])\n obj_cal = _initialise_cal_obj(workdays, holidays)\n df[new_col] = 0\n # iterate over the cols\n for i in range(df.shape[0]):\n df.at[i, new_col] = obj_cal.busdaycount(df[start][i], df[end][i])\n return df\n\n\ndef add_bus_days_with_holidays(df, col_op, col_res, days, workdays=[MO, TU, WE, TH, FR], holidays=[]):\n \"\"\"Add business days to a column in pandas dataframe (holidays can be considered).\n\n The return type must be the modified df containing a new column with the result after adding business days.\n\n Parameters\n ----------\n df\n The pandas dataframe to work on.\n col_op\n Column with dates to add bus days to.\n col_res\n New col with the modified dates.\n days\n Number of business days to add.\n workdays\n List of custom workdays.\n holidays\n List of custom holidays.\n\n Returns\n -------\n Dataframe\n Modified dataframe with new_col containg new business dates.\n\n \"\"\"\n # Cast to datetime if not already\n df[col_op] = pd.to_datetime(df[col_op])\n obj_cal = _initialise_cal_obj(workdays, holidays)\n df[col_res] = df[col_op].apply(lambda x: obj_cal.addbusdays(x, days))\n return df\n\n\ndef add_bus_days(df, col_op, col_res, days, workdays=[MO, TU, WE, TH, FR]):\n \"\"\"Add business days to a column in pandas dataframe (to consider holidays, use the method with holidays).\n\n The return type must be the modified df containing a new col with the results after adding provided business days.\n\n Parameters\n ----------\n df\n The pandas dataframe to work on.\n col_op\n Column with dates to add bus days to.\n col_res\n New col with the modified dates.\n days\n Number of business days to add.\n workdays\n List of custom workdays.\n\n Returns\n -------\n Dataframe\n Modified dataframe with new_col containg new business dates.\n\n \"\"\"\n # Cast to datetime if not already\n df[col_op] = pd.to_datetime(df[col_op])\n obj_cal = _initialise_cal_obj(workdays)\n df[col_res] = df[col_op].apply(lambda x: obj_cal.addworkdays(x, days))\n return df\n\n\ndef calc_workday_diff(df, start, end, new_col, workdays=[MO, TU, WE, TH, FR]):\n \"\"\"Calculate difference between(to consider holidays as well, use the other method). 
Start Date not included.\n\n The return type must be the modified df containing a new column with the diff result.\n\n Parameters\n ----------\n df\n The pandas dataframe to work on.\n start\n Start Date column in the df.\n end\n End Date column in the df.\n new_col\n New column containing the diff between the date cols provided.\n workdays\n List of custom workdays.\n\n Returns\n -------\n Dataframe\n Modified dataframe with new_col containing dif between business dates among provided cols.\n\n \"\"\"\n # Cast to datetime if not already\n df[start] = pd.to_datetime(df[start])\n df[end] = pd.to_datetime(df[end])\n obj_cal = _initialise_cal_obj(workdays)\n df[new_col] = 0\n # iterate over the cols\n for i in range(df.shape[0]):\n df.at[i, new_col] = obj_cal.workdaycount(df[start][i], df[end][i])\n return df\n"
] | [
[
"pandas.to_datetime"
]
] |
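
A minimal driver for the module above (not part of the stored file): it only calls calc_workday_diff with its default Monday-to-Friday workweek, and the column names, dates and import path are assumptions made for illustration.

import pandas as pd

# Import path assumes the package layout shown in the file_path column.
from py_holiday_calendar import py_holiday_calendar as phc

df = pd.DataFrame(
    {"opened": ["2021-01-04", "2021-01-05"], "closed": ["2021-01-11", "2021-01-08"]}
)

# Business-day difference between the two date columns using the default
# Monday-Friday workweek and no holidays; start date is not counted.
df = phc.calc_workday_diff(df, "opened", "closed", "bus_days")
print(df["bus_days"])
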
tgen/vcfMerger2 | [
"3371800eaf2f95077c47ea175f988570757d121b"
] | [
"prep_vcfs_somatic/strelka2/VCF.py"
] | [
"\"\"\"\nVCF.py\nKamil Slowikowski\nOctober 30, 2013\nhttps://gist.github.com/slowkow/6215557\n\nRead VCF files. Works with gzip compressed files and pandas.\n\nNote: This module ignores the genotype columns because\n I didn't need them at the time of writing.\n\nRead more about VCF:\n\n http://vcftools.sourceforge.net/specs.html\n\nUsage example:\n\n >>> import VCF\n >>> variants = VCF.lines('file.vcf.gz')\n >>> print variants.next()['CHROM']\n 1\n\nUse the generator to avoid loading the entire file into memory:\n\n >>> for v in VCF.lines('file.vcf.gz'):\n ... print v['REF'], v['ALT']\n ... break\n A T\n\nIf your file is not too large, read it directly into a DataFrame:\n\n >>> df = VCF.dataframe('file.vcf.gz')\n >>> df.columns\n Index([u'CHROM', u'POS', u'ID', u'REF', u'ALT', u'QUAL', u'FILTER',\n u'INFO'], dtype=object)\n\nIf your file is *very small* and you want to access INFO fields as columns:\n\n >>> df = VCF.dataframe('file.vcf.gz', large=False)\n >>> df.columns\n Index([u'CHROM', u'POS', u'ID', u'REF', u'ALT', u'QUAL', u'FILTER',\n u'GENE_NAME', u'GENE_ID', u'AA_POS', u'AA_CHANGE'], dtype=object)\n\nLICENSE\n\nThis is free and unencumbered software released into the public domain.\n\nAnyone is free to copy, modify, publish, use, compile, sell, or\ndistribute this software, either in source code form or as a compiled\nbinary, for any purpose, commercial or non-commercial, and by any\nmeans.\n\nIn jurisdictions that recognize copyright laws, the author or authors\nof this software dedicate any and all copyright interest in the\nsoftware to the public domain. We make this dedication for the benefit\nof the public at large and to the detriment of our heirs and\nsuccessors. We intend this dedication to be an overt act of\nrelinquishment in perpetuity of all present and future rights to this\nsoftware under copyright law.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\nOTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\nARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\nOTHER DEALINGS IN THE SOFTWARE.\n\nFor more information, please refer to <http://unlicense.org/>\n\"\"\"\n\n\nfrom collections import OrderedDict\nimport gzip\nimport pandas as pd\n\n\nVCF_HEADER = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO']\n\n\ndef dataframe(filename, large=True):\n \"\"\"Open an optionally gzipped VCF file and return a pandas.DataFrame with\n each INFO field included as a column in the dataframe.\n\n Note: Using large=False with large VCF files. It will be painfully slow.\n\n :param filename: An optionally gzipped VCF file.\n :param large: Use this with large VCF files to skip the ## lines and\n leave the INFO fields unseparated as a single column.\n \"\"\"\n if large:\n # Set the proper argument if the file is compressed.\n comp = 'gzip' if filename.endswith('.gz') else None\n # Count how many comment lines should be skipped.\n comments = _count_comments(filename)\n # Return a simple DataFrame without splitting the INFO column.\n return pd.read_table(filename, compression=comp, skiprows=comments,\n names=VCF_HEADER, usecols=range(8))\n\n # Each column is a list stored as a value in this dict. 
The keys for this\n # dict are the VCF column names and the keys in the INFO column.\n result = OrderedDict()\n # Parse each line in the VCF file into a dict.\n for i, line in enumerate(lines(filename)):\n for key in line.keys():\n # This key has not been seen yet, so set it to None for all\n # previous lines.\n if key not in result:\n result[key] = [None] * i\n # Ensure this row has some value for each column.\n for key in result.keys():\n result[key].append(line.get(key, None))\n\n return pd.DataFrame(result)\n\n\ndef lines(filename):\n \"\"\"Open an optionally gzipped VCF file and generate an OrderedDict for\n each line.\n \"\"\"\n fn_open = gzip.open if filename.endswith('.gz') else open\n\n with fn_open(filename, 'rt') as fh:\n for line in fh:\n if line.startswith('#'):\n continue\n else:\n yield parse(line)\n\n\ndef parse(line):\n \"\"\"Parse a single VCF line and return an OrderedDict.\n \"\"\"\n result = OrderedDict()\n\n fields = line.rstrip().split('\\t')\n\n # Read the values in the first seven columns.\n for i, col in enumerate(VCF_HEADER[:7]):\n result[col] = _get_value(fields[i])\n\n # INFO field consists of \"key1=value;key2=value;...\".\n infos = fields[7].split(';')\n\n for i, info in enumerate(infos, 1):\n # info should be \"key=value\".\n try:\n key, value = info.split('=')\n # But sometimes it is just \"value\", so we'll make our own key.\n except ValueError:\n key = 'INFO{}'.format(i)\n value = info\n # Set the value to None if there is no value.\n result[key] = _get_value(value)\n\n return result\n\n\ndef _get_value(value):\n \"\"\"Interpret null values and return ``None``. Return a list if the value\n contains a comma.\n \"\"\"\n if not value or value in ['', '.', 'NA']:\n return None\n if ',' in value:\n return value.split(',')\n return value\n\n\ndef _count_comments(filename):\n \"\"\"Count comment lines (those that start with \"#\") in an optionally\n gzipped file.\n\n :param filename: An optionally gzipped file.\n \"\"\"\n comments = 0\n fn_open = gzip.open if filename.endswith('.gz') else open\n with fn_open(filename) as fh:\n for line in fh:\n if line.startswith('#'):\n comments += 1\n else:\n break\n return comments\n"
] | [
[
"pandas.DataFrame"
]
] |
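
The usage examples in the module docstring above are written for Python 2 (print statement, .next()). A Python 3 equivalent is sketched below with a placeholder filename; it is not part of the stored file. One caveat for Python 3: _count_comments opens .gz input without a text mode, so its startswith('#') check would compare str against bytes, whereas lines() already opens with 'rt'.

import VCF  # the module above (prep_vcfs_somatic/strelka2/VCF.py)

# Stream variants one OrderedDict at a time without loading the whole file.
for variant in VCF.lines("calls.vcf.gz"):      # placeholder filename
    print(variant["CHROM"], variant["POS"], variant["REF"], variant["ALT"])
    break

# Or load the eight fixed VCF columns into a DataFrame, leaving INFO unsplit.
df = VCF.dataframe("calls.vcf.gz", large=True)
print(df.columns)
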
fperez/sympy | [
"7d8d096215c8f65ba1d4a9c09af78ec0c3844518"
] | [
"sympy/test_external/test_numpy.py"
] | [
"# This testfile tests SymPy <-> NumPy compatibility\n\n# Don't test any SymPy features here. Just pure interaction with NumPy.\n# Always write regular SymPy tests for anything, that can be tested in pure\n# Python (without numpy). Here we test everything, that a user may need when\n# using SymPy with NumPy\n\ntry:\n from numpy import array, matrix, ndarray\n import numpy\nexcept ImportError:\n #py.test will not execute any tests now\n disabled = True\n\n\nfrom sympy import Rational, Symbol, list2numpy, sin, Real, Matrix, lambdify\nimport sympy\n\nfrom sympy import mpmath\nmpmath.mp.dps = 16\nsin02 = mpmath.mpf(\"0.198669330795061215459412627\")\n\n# first, systematically check, that all operations are implemented and don't\n# raise and exception\n\ndef test_systematic_basic():\n def s(sympy_object, numpy_array):\n x = sympy_object + numpy_array\n x = numpy_array + sympy_object\n x = sympy_object - numpy_array\n x = numpy_array - sympy_object\n x = sympy_object * numpy_array\n x = numpy_array * sympy_object\n x = sympy_object / numpy_array\n x = numpy_array / sympy_object\n x = sympy_object ** numpy_array\n x = numpy_array ** sympy_object\n x = Symbol(\"x\")\n y = Symbol(\"y\")\n sympy_objs = [\n Rational(2),\n Real(\"1.3\"),\n x,\n y,\n pow(x,y)*y,\n 5,\n 5.5,\n ]\n numpy_objs = [\n array([1]),\n array([3, 8, -1]),\n array([x, x**2, Rational(5)]),\n array([x/y*sin(y), 5, Rational(5)]),\n ]\n for x in sympy_objs:\n for y in numpy_objs:\n s(x,y)\n\n\n# now some random tests, that test particular problems and that also\n# check that the results of the operations are correct\n\ndef test_basics():\n one = Rational(1)\n zero = Rational(0)\n x = Symbol(\"x\")\n assert array(1) == array(one)\n assert array([one]) == array([one])\n assert array([x]) == array([x])\n assert array(x) == array(Symbol(\"x\"))\n assert array(one+x) == array(1+x)\n\n X = array([one, zero, zero])\n assert (X == array([one, zero, zero])).all()\n assert (X == array([one, 0, 0])).all()\n\ndef test_arrays():\n one = Rational(1)\n zero = Rational(0)\n X = array([one, zero, zero])\n Y = one*X\n X = array([Symbol(\"a\")+Rational(1,2)])\n Y = X+X\n assert Y == array([1+2*Symbol(\"a\")])\n Y = Y + 1\n assert Y == array([2+2*Symbol(\"a\")])\n Y = X-X\n assert Y == array([0])\n\ndef test_conversion1():\n x = Symbol(\"x\")\n a = list2numpy([x**2, x])\n #looks like an array?\n assert isinstance(a, ndarray)\n assert a[0] == x**2\n assert a[1] == x\n assert len(a) == 2\n #yes, it's the array\n\ndef test_conversion2():\n x = Symbol(\"x\")\n a = 2*list2numpy([x**2, x])\n b = list2numpy([2*x**2, 2*x])\n assert (a == b).all()\n\n one = Rational(1)\n zero = Rational(0)\n X = list2numpy([one, zero, zero])\n Y = one*X\n X = list2numpy([Symbol(\"a\")+Rational(1,2)])\n Y = X+X\n assert Y == array([1+2*Symbol(\"a\")])\n Y = Y + 1\n assert Y == array([2+2*Symbol(\"a\")])\n Y = X-X\n assert Y == array([0])\n\ndef test_list2numpy():\n x = Symbol(\"x\")\n assert (array([x**2, x]) == list2numpy([x**2, x])).all()\n\ndef test_Matrix1():\n x = Symbol(\"x\")\n m = Matrix([[x, x**2], [5, 2/x]])\n assert (array(m.subs(x, 2)) == array([[2, 4],[5, 1]])).all()\n m = Matrix([[sin(x), x**2], [5, 2/x]])\n assert (array(m.subs(x, 2)) == array([[sin(2), 4],[5, 1]])).all()\n\ndef test_Matrix2():\n x = Symbol(\"x\")\n m = Matrix([[x, x**2], [5, 2/x]])\n assert (matrix(m.subs(x, 2)) == matrix([[2, 4],[5, 1]])).all()\n m = Matrix([[sin(x), x**2], [5, 2/x]])\n assert (matrix(m.subs(x, 2)) == matrix([[sin(2), 4],[5, 1]])).all()\n\ndef test_Matrix3():\n x = 
Symbol(\"x\")\n a = array([[2, 4],[5, 1]])\n assert Matrix(a) == Matrix([[2, 4], [5, 1]])\n assert Matrix(a) != Matrix([[2, 4], [5, 2]])\n a = array([[sin(2), 4], [5, 1]])\n assert Matrix(a) == Matrix([[sin(2), 4],[5, 1]])\n assert Matrix(a) != Matrix([[sin(0), 4],[5, 1]])\n\ndef test_Matrix4():\n x = Symbol(\"x\")\n a = matrix([[2, 4],[5, 1]])\n assert Matrix(a) == Matrix([[2, 4], [5, 1]])\n assert Matrix(a) != Matrix([[2, 4], [5, 2]])\n a = matrix([[sin(2), 4], [5, 1]])\n assert Matrix(a) == Matrix([[sin(2), 4],[5, 1]])\n assert Matrix(a) != Matrix([[sin(0), 4],[5, 1]])\n\ndef test_Matrix_sum():\n x, y, z = Symbol('x'), Symbol('y'), Symbol('z')\n M = Matrix([[1,2,3],[x,y,x],[2*y,-50,z*x]])\n m = matrix([[2,3,4],[x,5,6],[x,y,z**2]])\n assert M+m == Matrix([[3,5,7],[2*x,y+5,x+6],[2*y+x,y-50,z*x+z**2]])\n assert m+M == Matrix([[3,5,7],[2*x,y+5,x+6],[2*y+x,y-50,z*x+z**2]])\n assert M+m == M.add(m)\n\ndef test_Matrix_mul():\n x, y, z = Symbol('x'), Symbol('y'), Symbol('z')\n M = Matrix([[1,2,3],[x,y,x]])\n m = matrix([[2,4],[x,6],[x,z**2]])\n assert M*m == Matrix([\n [ 2 + 5*x, 16 + 3*z**2],\n [2*x + x*y + x**2, 4*x + 6*y + x*z**2],\n ])\n\n assert m*M == Matrix([\n [ 2 + 4*x, 4 + 4*y, 6 + 4*x],\n [ 7*x, 2*x + 6*y, 9*x],\n [x + x*z**2, 2*x + y*z**2, 3*x + x*z**2],\n ])\n a = array([2])\n assert a[0] * M == 2 * M\n assert M * a[0] == 2 * M\n\ndef test_Matrix_array():\n class matarray(object):\n def __array__(self):\n from numpy import array\n return array([[1,2,3],[4,5,6],[7,8,9]])\n matarr = matarray()\n assert Matrix(matarr) == Matrix([[1,2,3],[4,5,6],[7,8,9]])\n\ndef test_issue629():\n x = Symbol(\"x\")\n assert (Rational(1,2)*array([2*x, 0]) == array([x, 0])).all()\n assert (Rational(1,2)+array([2*x, 0]) == array([2*x+Rational(1,2), Rational(1,2)])).all()\n assert (Real(\"0.5\")*array([2*x, 0]) == array([Real(\"1.0\")*x, 0])).all()\n assert (Real(\"0.5\")+array([2*x, 0]) == array([2*x+Real(\"0.5\"), Real(\"0.5\")])).all()\n\ndef test_lambdify():\n x = Symbol(\"x\")\n f = lambdify(x, sin(x), \"numpy\")\n prec = 1e-15\n assert -prec < f(0.2) - sin02 < prec\n try:\n f(x) # if this succeeds, it can't be a numpy function\n assert False\n except AttributeError:\n pass\n\ndef test_lambdify_matrix():\n x = Symbol(\"x\")\n f = lambdify(x, Matrix([[x, 2*x],[1, 2]]), \"numpy\")\n assert (f(1) == matrix([[1,2],[1,2]])).all()\n\ndef test_lambdify_matrix_multi_input():\n x,y,z=sympy.symbols('x,y,z')\n M=sympy.Matrix([[x**2, x*y, x*z],\n [y*x, y**2, y*z],\n [z*x, z*y, z**2]])\n f = lambdify((x,y,z), M, \"numpy\")\n\n xh,yh,zh = 1.0, 2.0, 3.0\n expected = matrix([[xh**2, xh*yh, xh*zh],\n [yh*xh, yh**2, yh*zh],\n [zh*xh, zh*yh, zh**2]])\n actual = f(xh,yh,zh)\n assert numpy.allclose(actual,expected)\n\ndef test_lambdify_matrix_vec_input():\n X=sympy.DeferredVector('X')\n M=Matrix([[X[0]**2, X[0]*X[1], X[0]*X[2]],\n [X[1]*X[0], X[1]**2, X[1]*X[2]],\n [X[2]*X[0], X[2]*X[1], X[2]**2]])\n f = lambdify(X, M, \"numpy\")\n\n Xh = array([1.0, 2.0, 3.0])\n expected = matrix([[Xh[0]**2, Xh[0]*Xh[1], Xh[0]*Xh[2]],\n [Xh[1]*Xh[0], Xh[1]**2, Xh[1]*Xh[2]],\n [Xh[2]*Xh[0], Xh[2]*Xh[1], Xh[2]**2]])\n actual = f(Xh)\n assert numpy.allclose(actual,expected)\n\ndef test_lambdify_transl():\n from sympy.utilities.lambdify import NUMPY_TRANSLATIONS\n for sym, mat in NUMPY_TRANSLATIONS.iteritems():\n assert sym in sympy.functions.__dict__ or sym in (\"Matrix\", )\n assert mat in numpy.__dict__\n"
] | [
[
"numpy.allclose",
"numpy.matrix",
"numpy.array"
]
] |
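
The test module above targets Python 2-era SymPy and NumPy (Real, iteritems, numpy.matrix). The lambdify pattern it exercises looks like this in current spellings; the sketch is illustrative, not part of the stored file, and the tolerance mirrors test_lambdify.

import numpy
import sympy

x = sympy.Symbol("x")

# Scalar expression compiled to a NumPy-backed callable.
f = sympy.lambdify(x, sympy.sin(x), "numpy")
assert abs(f(0.2) - numpy.sin(0.2)) < 1e-15

# Matrix expressions can be lambdified the same way (cf. test_lambdify_matrix).
g = sympy.lambdify(x, sympy.Matrix([[x, 2 * x], [1, 2]]), "numpy")
print(g(1.0))
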
MarinBallu/regularized-wasserstein-estimator | [
"aeb21778180a5f7b88789ac9640bf0aa90a07552"
] | [
"regularized-wasserstein-estimator/computations.py"
] | [
"import numpy as np\n\n### INTERMEDIATE COMPUTATIONS FOR THE UPDATES\n\ndef dual_to_target(b, reg2, beta):\n ''' compute the target given the dual variable '''\n target = b * np.exp( - beta / reg2)\n target = target / target.sum()\n return target\n\ndef partial_target_meas(b, beta, reg2, S):\n ''' Compute one coefficient of the current target measure with one coefficient of the current dual variable, O(1)'''\n nu = b * np.exp(- beta / reg2) / S\n return nu\n\ndef partial_grad_dual(b, target, M, reg1, alpha, beta):\n ''' Compute one coefficient of the gradient for the dual variables, O(1) '''\n D = np.exp((alpha + beta - M) / reg1)\n grad_alpha = 1 - D\n grad_beta = target / b - D\n return grad_alpha, grad_beta\n\ndef semi_grad_dual(b, target, M, reg1, alpha, beta):\n ''' Compute the stochastic gradients for the dual variable alpha and full gradient for beta '''\n D = np.exp((alpha + beta - M) / reg1) * b\n grad_alpha = 1 - D.sum()\n grad_beta = target - D\n return grad_alpha, grad_beta\n\ndef sgd_update(b, reg2, alpha, beta, cur_S, grad_alpha, grad_beta, stepsize):\n ''' Update the dual variables as well as the latent memory-conserved variable '''\n cur_S -= b * np.exp(- beta / reg2)\n alpha += stepsize * grad_alpha\n beta += stepsize * grad_beta\n cur_S += b * np.exp(- beta / reg2)\n return alpha, beta, cur_S\n\ndef bgd_update(b, reg2, alpha, beta, cur_S, grad_alpha, grad_beta, batch_a, batch_b, stepsize):\n ''' Update the dual variables as well as the latent memory-conserved variable in the batch case '''\n batch_b_unique = list(np.unique(batch_b))\n cur_S -= (b[batch_b_unique] * np.exp(- beta[batch_b_unique] / reg2)).sum()\n for k in range(len(batch_a)):\n alpha[batch_a[k]] += stepsize * grad_alpha[k]\n for k in range(len(batch_b)):\n beta[batch_b[k]] += stepsize * grad_beta[k]\n cur_S += (b[batch_b_unique] * np.exp(- beta[batch_b_unique] / reg2)).sum()\n return alpha, beta, cur_S"
] | [
[
"numpy.exp",
"numpy.unique"
]
] |
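
A toy sanity check for dual_to_target above (not part of the stored file): with a zero dual variable the exponential reweighting is uniform, so the returned target must equal the reference measure b. The measure, regularisation strength and import path are arbitrary choices for illustration.

import numpy as np

from computations import dual_to_target  # assuming computations.py above is on the path

b = np.ones(4) / 4.0      # uniform reference measure
beta = np.zeros(4)        # dual variable
reg2 = 0.1                # second regularisation strength

target = dual_to_target(b, reg2, beta)
assert np.allclose(target, b)   # exp(0) reweighting leaves b unchanged
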
BUSS-DeeCamp/Det3D | [
"c8f4d59af8a0721b22ffcfed8be3805d4b9bd824"
] | [
"tools/train.py"
] | [
"import argparse\nimport json\nimport os\nimport sys\n\nimport numpy as np\nimport torch\nimport yaml\nfrom det3d import __version__\nfrom det3d.datasets import build_dataset\nfrom det3d.models import build_detector\nfrom det3d.torchie import Config\nfrom det3d.torchie.apis import (\n build_optimizer,\n get_root_logger,\n init_dist,\n set_random_seed,\n train_detector,\n)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Train a detector\")\n parser.add_argument(\"config\", help=\"train config file path\")\n parser.add_argument(\"--work_dir\", help=\"the dir to save logs and models\")\n parser.add_argument(\"--resume_from\", help=\"the checkpoint file to resume from\")\n parser.add_argument(\n \"--validate\",\n action=\"store_true\",\n help=\"whether to evaluate the checkpoint during training\",\n )\n parser.add_argument(\n \"--gpus\",\n type=int,\n default=1,\n help=\"number of gpus to use \" \"(only applicable to non-distributed training)\",\n )\n parser.add_argument(\"--seed\", type=int, default=None, help=\"random seed\")\n parser.add_argument(\n \"--launcher\",\n choices=[\"none\", \"pytorch\", \"slurm\", \"mpi\"],\n default=\"none\",\n help=\"job launcher\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=0)\n parser.add_argument(\n \"--autoscale-lr\",\n action=\"store_true\",\n help=\"automatically scale lr with the number of gpus\",\n )\n args = parser.parse_args()\n if \"LOCAL_RANK\" not in os.environ:\n os.environ[\"LOCAL_RANK\"] = str(args.local_rank)\n\n return args\n\n\ndef main():\n\n # torch.manual_seed(0)\n # torch.backends.cudnn.deterministic = True\n # torch.backends.cudnn.benchmark = False\n # np.random.seed(0)\n\n args = parse_args()\n\n cfg = Config.fromfile(args.config)\n cfg.local_rank = args.local_rank\n\n # update configs according to CLI args\n if args.work_dir is not None:\n cfg.work_dir = args.work_dir\n if args.resume_from is not None:\n cfg.resume_from = args.resume_from\n\n distributed = False\n if \"WORLD_SIZE\" in os.environ:\n distributed = int(os.environ[\"WORLD_SIZE\"]) > 1\n\n if distributed:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\", init_method=\"env://\")\n\n cfg.gpus = torch.distributed.get_world_size()\n\n if args.autoscale_lr:\n cfg.lr_config.lr_max = cfg.lr_config.lr_max * cfg.gpus\n\n # init logger before other steps\n logger = get_root_logger(cfg.log_level)\n logger.info(\"Distributed training: {}\".format(distributed))\n logger.info(f\"torch.backends.cudnn.benchmark: {torch.backends.cudnn.benchmark}\")\n\n# if args.local_rank == 0:\n# # copy important files to backup\n# backup_dir = os.path.join(cfg.work_dir, \"det3d\")\n# os.makedirs(backup_dir, exist_ok=True)\n# os.system(\"cp -r * %s/\" % backup_dir)\n# logger.info(f\"Backup source files to {cfg.work_dir}/det3d\")\n\n # set random seeds\n if args.seed is not None:\n logger.info(\"Set random seed to {}\".format(args.seed))\n set_random_seed(args.seed)\n\n model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)\n\n datasets = [build_dataset(cfg.data.train)]\n\n if len(cfg.workflow) == 2:\n datasets.append(build_dataset(cfg.data.val))\n\n if cfg.checkpoint_config is not None:\n # save det3d version, config file content and class names in\n # checkpoints as meta data\n cfg.checkpoint_config.meta = dict(\n det3d_version=__version__, config=cfg.text, CLASSES=datasets[0].CLASSES\n )\n\n # add an attribute for visualization convenience\n model.CLASSES = datasets[0].CLASSES\n 
train_detector(\n model,\n datasets,\n cfg,\n distributed=distributed,\n validate=args.validate,\n logger=logger,\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.distributed.init_process_group",
"torch.cuda.set_device",
"torch.distributed.get_world_size"
]
] |
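
main() above switches between single-GPU and distributed training based on environment variables set by the launcher. The condensed sketch below (not part of the stored file) shows just that hand-off and assumes the process was started by torchrun or torch.distributed.launch whenever WORLD_SIZE is present.

import os

import torch

local_rank = int(os.environ.get("LOCAL_RANK", "0"))
distributed = int(os.environ.get("WORLD_SIZE", "1")) > 1

if distributed:
    # Same sequence as in main(): pin the device, join the NCCL process
    # group, then read back the world size (used there for LR scaling).
    torch.cuda.set_device(local_rank)
    torch.distributed.init_process_group(backend="nccl", init_method="env://")
    world_size = torch.distributed.get_world_size()
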
prateekchandan/Flight-Delay-Prediction | [
"4330d32657c197fae47cd799b07cd2690154a4f3"
] | [
"train.py"
] | [
"import pandas as pd\nimport numpy as np\nimport csv\nimport sys\nfrom sklearn import linear_model, svm, ensemble\nimport cPickle\n\nfrom sklearn import tree\nfrom sklearn import cross_validation\n\n\n# Class bcolors\nclass bcolors:\n '''\n Class bcolor used for printing pretty messages\n '''\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n# Check for arg correctoness\nif len(sys.argv) < 2:\n message = bcolors.BOLD + \"Usage: python train.py <train_data>\" + bcolors.ENDC\n sys.exit(message)\n\ntry:\n df = pd.read_csv(sys.argv[1],header=0)\nexcept:\n message = bcolors.FAIL + \" file \" + sys.argv[1] + \" does not exist\" + bcolors.ENDC\n sys.exit(message)\n\n\ny1 = df['DepDelay'].values\ny2 = df['ArrDelay'].values\n\ndf = df.drop(['DepDelay','ArrDelay'], axis=1)\nX = df.values\n\n#X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y1,test_size=0.3,random_state=0)\n\nclf = linear_model.LinearRegression()\n#scores = cross_validation.cross_val_score(clf, X, y1, scoring = 'mean_squared_error', cv=5)\nclf.fit(X,y1)\n#print \"Linear regression: \" + str(scores)\n\nwith open('linear_regression.pkl', 'wb') as fid:\n cPickle.dump(clf, fid)\n\n\"\"\"\n\nclf = linear_model.Ridge (alpha = .5)\nscores = cross_validation.cross_val_score(clf, X, y1, scoring = 'mean_squared_error', cv=5)\nprint \"Ridge regression: \" + str(scores)\n\n\nparams = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,\n 'learning_rate': 0.01, 'loss': 'ls'}\nclf = ensemble.GradientBoostingRegressor(**params)\nscores = cross_validation.cross_val_score(clf, X, y1, scoring = 'mean_squared_error', cv=5)\nprint \"gradient boosting: \" + str(scores)\n\"\"\"\n"
] | [
[
"pandas.read_csv",
"sklearn.linear_model.LinearRegression"
]
] |
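
train.py above is Python 2 code (cPickle, the commented-out print lines) and only writes linear_regression.pkl. A plausible Python 3 counterpart that loads the model back and predicts delays is sketched below; it is not part of the repository, and 'test.csv' is a placeholder that must carry the same feature columns as the training file.

import pickle  # Python 3 replacement for cPickle

import pandas as pd

with open("linear_regression.pkl", "rb") as fid:
    clf = pickle.load(fid)

df = pd.read_csv("test.csv", header=0)                   # placeholder file
X = df.drop(["DepDelay", "ArrDelay"], axis=1).values     # same features as in training
predicted_dep_delay = clf.predict(X)
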
VictorAtPL/Tensorflow-2_Distribution-Strategies_Playground | [
"5f8affd77c07b6df62bf71f6eb07770c2db1f608"
] | [
"common.py"
] | [
"from enum import Enum\n\nimport tensorflow as tf\nfrom tensorflow.keras import models, layers\n\nBUFFER_SIZE = 10000\n\n\nclass ModelArchitecture(Enum):\n SA_MIRI = 1\n RESNET101 = 2\n MOBILENET = 3\n\n def __str__(self):\n return self.name.lower()\n\n def __repr__(self):\n return str(self)\n\n @staticmethod\n def argparse(s):\n try:\n return ModelArchitecture[s.upper()]\n except KeyError:\n return s\n\n\ndef get_model(show_summary=True, architecture=ModelArchitecture.SA_MIRI):\n model = None\n\n if architecture == ModelArchitecture.SA_MIRI:\n model = models.Sequential(name=\"samiri2019\")\n model.add(layers.Conv2D(32, (5, 5), activation='relu', input_shape=(28, 28, 1)))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (5, 5), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Flatten())\n model.add(layers.Dense(10, activation='softmax'))\n elif architecture == ModelArchitecture.MOBILENET:\n model = tf.keras.applications.MobileNetV2(classes=10, weights=None)\n elif architecture == ModelArchitecture.RESNET101:\n model = tf.keras.applications.ResNet101V2(classes=10, weights=None)\n\n if model and show_summary:\n model.summary()\n\n return model\n\n\ndef make_datasets_unbatched(datasets, set_name='train', architecture=ModelArchitecture.SA_MIRI):\n # Scaling MNIST data from (0, 255] to (0., 1.]\n def scale(image, label):\n image = tf.cast(image, tf.float32)\n\n if architecture is not ModelArchitecture.SA_MIRI:\n image = tf.image.resize(image, [224, 224], tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n\n image /= 255\n\n label = tf.one_hot(label, depth=10)\n return image, label\n\n if 'train' in set_name:\n return datasets['train'].map(scale, num_parallel_calls=tf.data.experimental.AUTOTUNE).\\\n cache().repeat().shuffle(BUFFER_SIZE)\n else:\n return datasets['test'].map(scale, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n"
] | [
[
"tensorflow.image.resize",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.applications.MobileNetV2",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.applications.ResNet101V2",
"tensorflow.one_hot",
"tensorflow.cast"
]
] |
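
common.py above expects a datasets dict with 'train' and 'test' splits of (image, label) pairs but does not show where that dict comes from. One plausible driver is tensorflow_datasets, sketched below (not part of the stored file). Because make_datasets_unbatched repeats the training split, steps_per_epoch must be passed explicitly.

import tensorflow_datasets as tfds

from common import ModelArchitecture, get_model, make_datasets_unbatched

datasets = tfds.load("mnist", as_supervised=True)        # {'train': ..., 'test': ...}
train_ds = make_datasets_unbatched(datasets, "train").batch(64)

model = get_model(show_summary=False, architecture=ModelArchitecture.SA_MIRI)
model.compile(optimizer="adam",
              loss="categorical_crossentropy",           # labels are one-hot encoded
              metrics=["accuracy"])
model.fit(train_ds, epochs=1, steps_per_epoch=100)       # repeated dataset needs a step count
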
tianjixuetu/ray | [
"65297e65f02e52472c114f52797c2ea18cc3fc3e"
] | [
"python/ray/tests/test_advanced_2.py"
] | [
"# coding: utf-8\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport sys\nimport time\n\nimport numpy as np\nimport pytest\n\nimport ray\nimport ray.cluster_utils\nimport ray.test_utils\n\nfrom ray.test_utils import RayTestTimeoutException\n\nlogger = logging.getLogger(__name__)\n\n\ndef test_resource_constraints(shutdown_only):\n num_workers = 20\n ray.init(num_cpus=10, num_gpus=2)\n\n @ray.remote(num_cpus=0)\n def get_worker_id():\n time.sleep(0.1)\n return os.getpid()\n\n # Attempt to wait for all of the workers to start up.\n while True:\n if len(\n set(\n ray.get([\n get_worker_id.remote() for _ in range(num_workers)\n ]))) == num_workers:\n break\n\n time_buffer = 2\n\n # At most 10 copies of this can run at once.\n @ray.remote(num_cpus=1)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(10)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(11)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n @ray.remote(num_cpus=3)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(3)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(4)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n @ray.remote(num_gpus=1)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(2)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(3)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(4)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n\ndef test_multi_resource_constraints(shutdown_only):\n num_workers = 20\n ray.init(num_cpus=10, num_gpus=10)\n\n @ray.remote(num_cpus=0)\n def get_worker_id():\n time.sleep(0.1)\n return os.getpid()\n\n # Attempt to wait for all of the workers to start up.\n while True:\n if len(\n set(\n ray.get([\n get_worker_id.remote() for _ in range(num_workers)\n ]))) == num_workers:\n break\n\n @ray.remote(num_cpus=1, num_gpus=9)\n def f(n):\n time.sleep(n)\n\n @ray.remote(num_cpus=9, num_gpus=1)\n def g(n):\n time.sleep(n)\n\n time_buffer = 2\n\n start_time = time.time()\n ray.get([f.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5), f.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([g.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n\ndef test_gpu_ids(shutdown_only):\n num_gpus = 3\n ray.init(num_cpus=num_gpus, num_gpus=num_gpus)\n\n def 
get_gpu_ids(num_gpus_per_worker):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == num_gpus_per_worker\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n for gpu_id in gpu_ids:\n assert gpu_id in range(num_gpus)\n return gpu_ids\n\n f0 = ray.remote(num_gpus=0)(lambda: get_gpu_ids(0))\n f1 = ray.remote(num_gpus=1)(lambda: get_gpu_ids(1))\n f2 = ray.remote(num_gpus=2)(lambda: get_gpu_ids(2))\n\n # Wait for all workers to start up.\n @ray.remote\n def f():\n time.sleep(0.2)\n return os.getpid()\n\n start_time = time.time()\n while True:\n num_workers_started = len(\n set(ray.get([f.remote() for _ in range(num_gpus)])))\n if num_workers_started == num_gpus:\n break\n if time.time() > start_time + 10:\n raise RayTestTimeoutException(\n \"Timed out while waiting for workers to start \"\n \"up.\")\n\n list_of_ids = ray.get([f0.remote() for _ in range(10)])\n assert list_of_ids == 10 * [[]]\n ray.get([f1.remote() for _ in range(10)])\n ray.get([f2.remote() for _ in range(10)])\n\n # Test that actors have CUDA_VISIBLE_DEVICES set properly.\n\n @ray.remote\n class Actor0(object):\n def __init__(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 0\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n # Set self.x to make sure that we got here.\n self.x = 1\n\n def test(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 0\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n return self.x\n\n @ray.remote(num_gpus=1)\n class Actor1(object):\n def __init__(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n # Set self.x to make sure that we got here.\n self.x = 1\n\n def test(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n return self.x\n\n a0 = Actor0.remote()\n ray.get(a0.test.remote())\n\n a1 = Actor1.remote()\n ray.get(a1.test.remote())\n\n\ndef test_zero_cpus(shutdown_only):\n ray.init(num_cpus=0)\n\n # We should be able to execute a task that requires 0 CPU resources.\n @ray.remote(num_cpus=0)\n def f():\n return 1\n\n ray.get(f.remote())\n\n # We should be able to create an actor that requires 0 CPU resources.\n @ray.remote(num_cpus=0)\n class Actor(object):\n def method(self):\n pass\n\n a = Actor.remote()\n x = a.method.remote()\n ray.get(x)\n\n\ndef test_zero_cpus_actor(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=0)\n cluster.add_node(num_cpus=2)\n ray.init(address=cluster.address)\n\n node_id = ray.worker.global_worker.node.unique_id\n\n @ray.remote\n class Foo(object):\n def method(self):\n return ray.worker.global_worker.node.unique_id\n\n # Make sure tasks and actors run on the remote raylet.\n a = Foo.remote()\n assert ray.get(a.method.remote()) != node_id\n\n\ndef test_fractional_resources(shutdown_only):\n ray.init(num_cpus=6, num_gpus=3, resources={\"Custom\": 1})\n\n @ray.remote(num_gpus=0.5)\n class Foo1(object):\n def method(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n return gpu_ids[0]\n\n foos = [Foo1.remote() for _ in range(6)]\n gpu_ids = ray.get([f.method.remote() for f in foos])\n for i in range(3):\n assert gpu_ids.count(i) == 2\n del foos\n\n @ray.remote\n class Foo2(object):\n def method(self):\n pass\n\n # Create an actor that requires 0.7 of the custom 
resource.\n f1 = Foo2._remote([], {}, resources={\"Custom\": 0.7})\n ray.get(f1.method.remote())\n # Make sure that we cannot create an actor that requires 0.7 of the\n # custom resource. TODO(rkn): Re-enable this once ray.wait is\n # implemented.\n f2 = Foo2._remote([], {}, resources={\"Custom\": 0.7})\n ready, _ = ray.wait([f2.method.remote()], timeout=0.5)\n assert len(ready) == 0\n # Make sure we can start an actor that requries only 0.3 of the custom\n # resource.\n f3 = Foo2._remote([], {}, resources={\"Custom\": 0.3})\n ray.get(f3.method.remote())\n\n del f1, f3\n\n # Make sure that we get exceptions if we submit tasks that require a\n # fractional number of resources greater than 1.\n\n @ray.remote(num_cpus=1.5)\n def test():\n pass\n\n with pytest.raises(ValueError):\n test.remote()\n\n with pytest.raises(ValueError):\n Foo2._remote([], {}, resources={\"Custom\": 1.5})\n\n\ndef test_multiple_raylets(ray_start_cluster):\n # This test will define a bunch of tasks that can only be assigned to\n # specific raylets, and we will check that they are assigned\n # to the correct raylets.\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=11, num_gpus=0)\n cluster.add_node(num_cpus=5, num_gpus=5)\n cluster.add_node(num_cpus=10, num_gpus=1)\n ray.init(address=cluster.address)\n cluster.wait_for_nodes()\n\n # Define a bunch of remote functions that all return the socket name of\n # the plasma store. Since there is a one-to-one correspondence between\n # plasma stores and raylets (at least right now), this can be\n # used to identify which raylet the task was assigned to.\n\n # This must be run on the zeroth raylet.\n @ray.remote(num_cpus=11)\n def run_on_0():\n return ray.worker.global_worker.node.plasma_store_socket_name\n\n # This must be run on the first raylet.\n @ray.remote(num_gpus=2)\n def run_on_1():\n return ray.worker.global_worker.node.plasma_store_socket_name\n\n # This must be run on the second raylet.\n @ray.remote(num_cpus=6, num_gpus=1)\n def run_on_2():\n return ray.worker.global_worker.node.plasma_store_socket_name\n\n # This can be run anywhere.\n @ray.remote(num_cpus=0, num_gpus=0)\n def run_on_0_1_2():\n return ray.worker.global_worker.node.plasma_store_socket_name\n\n # This must be run on the first or second raylet.\n @ray.remote(num_gpus=1)\n def run_on_1_2():\n return ray.worker.global_worker.node.plasma_store_socket_name\n\n # This must be run on the zeroth or second raylet.\n @ray.remote(num_cpus=8)\n def run_on_0_2():\n return ray.worker.global_worker.node.plasma_store_socket_name\n\n def run_lots_of_tasks():\n names = []\n results = []\n for i in range(100):\n index = np.random.randint(6)\n if index == 0:\n names.append(\"run_on_0\")\n results.append(run_on_0.remote())\n elif index == 1:\n names.append(\"run_on_1\")\n results.append(run_on_1.remote())\n elif index == 2:\n names.append(\"run_on_2\")\n results.append(run_on_2.remote())\n elif index == 3:\n names.append(\"run_on_0_1_2\")\n results.append(run_on_0_1_2.remote())\n elif index == 4:\n names.append(\"run_on_1_2\")\n results.append(run_on_1_2.remote())\n elif index == 5:\n names.append(\"run_on_0_2\")\n results.append(run_on_0_2.remote())\n return names, results\n\n client_table = ray.nodes()\n store_names = []\n store_names += [\n client[\"ObjectStoreSocketName\"] for client in client_table\n if client[\"Resources\"].get(\"GPU\", 0) == 0\n ]\n store_names += [\n client[\"ObjectStoreSocketName\"] for client in client_table\n if client[\"Resources\"].get(\"GPU\", 0) == 5\n ]\n store_names += [\n 
client[\"ObjectStoreSocketName\"] for client in client_table\n if client[\"Resources\"].get(\"GPU\", 0) == 1\n ]\n assert len(store_names) == 3\n\n def validate_names_and_results(names, results):\n for name, result in zip(names, ray.get(results)):\n if name == \"run_on_0\":\n assert result in [store_names[0]]\n elif name == \"run_on_1\":\n assert result in [store_names[1]]\n elif name == \"run_on_2\":\n assert result in [store_names[2]]\n elif name == \"run_on_0_1_2\":\n assert (result in [\n store_names[0], store_names[1], store_names[2]\n ])\n elif name == \"run_on_1_2\":\n assert result in [store_names[1], store_names[2]]\n elif name == \"run_on_0_2\":\n assert result in [store_names[0], store_names[2]]\n else:\n raise Exception(\"This should be unreachable.\")\n assert set(ray.get(results)) == set(store_names)\n\n names, results = run_lots_of_tasks()\n validate_names_and_results(names, results)\n\n # Make sure the same thing works when this is nested inside of a task.\n\n @ray.remote\n def run_nested1():\n names, results = run_lots_of_tasks()\n return names, results\n\n @ray.remote\n def run_nested2():\n names, results = ray.get(run_nested1.remote())\n return names, results\n\n names, results = ray.get(run_nested2.remote())\n validate_names_and_results(names, results)\n\n\ndef test_custom_resources(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=3, resources={\"CustomResource\": 0})\n cluster.add_node(num_cpus=3, resources={\"CustomResource\": 1})\n ray.init(address=cluster.address)\n\n @ray.remote\n def f():\n time.sleep(0.001)\n return ray.worker.global_worker.node.unique_id\n\n @ray.remote(resources={\"CustomResource\": 1})\n def g():\n time.sleep(0.001)\n return ray.worker.global_worker.node.unique_id\n\n @ray.remote(resources={\"CustomResource\": 1})\n def h():\n ray.get([f.remote() for _ in range(5)])\n return ray.worker.global_worker.node.unique_id\n\n # The f tasks should be scheduled on both raylets.\n assert len(set(ray.get([f.remote() for _ in range(500)]))) == 2\n\n node_id = ray.worker.global_worker.node.unique_id\n\n # The g tasks should be scheduled only on the second raylet.\n raylet_ids = set(ray.get([g.remote() for _ in range(50)]))\n assert len(raylet_ids) == 1\n assert list(raylet_ids)[0] != node_id\n\n # Make sure that resource bookkeeping works when a task that uses a\n # custom resources gets blocked.\n ray.get([h.remote() for _ in range(5)])\n\n\ndef test_node_id_resource(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=3)\n cluster.add_node(num_cpus=3)\n ray.init(address=cluster.address)\n\n local_node = ray.state.current_node_id()\n\n # Note that these will have the same IP in the test cluster\n assert len(ray.state.node_ids()) == 2\n assert local_node in ray.state.node_ids()\n\n @ray.remote(resources={local_node: 1})\n def f():\n return ray.state.current_node_id()\n\n # Check the node id resource is automatically usable for scheduling.\n assert ray.get(f.remote()) == ray.state.current_node_id()\n\n\ndef test_two_custom_resources(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(\n num_cpus=3, resources={\n \"CustomResource1\": 1,\n \"CustomResource2\": 2\n })\n cluster.add_node(\n num_cpus=3, resources={\n \"CustomResource1\": 3,\n \"CustomResource2\": 4\n })\n ray.init(address=cluster.address)\n\n @ray.remote(resources={\"CustomResource1\": 1})\n def f():\n time.sleep(0.001)\n return ray.worker.global_worker.node.unique_id\n\n @ray.remote(resources={\"CustomResource2\": 1})\n def 
g():\n time.sleep(0.001)\n return ray.worker.global_worker.node.unique_id\n\n @ray.remote(resources={\"CustomResource1\": 1, \"CustomResource2\": 3})\n def h():\n time.sleep(0.001)\n return ray.worker.global_worker.node.unique_id\n\n @ray.remote(resources={\"CustomResource1\": 4})\n def j():\n time.sleep(0.001)\n return ray.worker.global_worker.node.unique_id\n\n @ray.remote(resources={\"CustomResource3\": 1})\n def k():\n time.sleep(0.001)\n return ray.worker.global_worker.node.unique_id\n\n # The f and g tasks should be scheduled on both raylets.\n assert len(set(ray.get([f.remote() for _ in range(500)]))) == 2\n assert len(set(ray.get([g.remote() for _ in range(500)]))) == 2\n\n node_id = ray.worker.global_worker.node.unique_id\n\n # The h tasks should be scheduled only on the second raylet.\n raylet_ids = set(ray.get([h.remote() for _ in range(50)]))\n assert len(raylet_ids) == 1\n assert list(raylet_ids)[0] != node_id\n\n # Make sure that tasks with unsatisfied custom resource requirements do\n # not get scheduled.\n ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)\n assert ready_ids == []\n\n\ndef test_many_custom_resources(shutdown_only):\n num_custom_resources = 10000\n total_resources = {\n str(i): np.random.randint(1, 7)\n for i in range(num_custom_resources)\n }\n ray.init(num_cpus=5, resources=total_resources)\n\n def f():\n return 1\n\n remote_functions = []\n for _ in range(20):\n num_resources = np.random.randint(0, num_custom_resources + 1)\n permuted_resources = np.random.permutation(\n num_custom_resources)[:num_resources]\n random_resources = {\n str(i): total_resources[str(i)]\n for i in permuted_resources\n }\n remote_function = ray.remote(resources=random_resources)(f)\n remote_functions.append(remote_function)\n\n remote_functions.append(ray.remote(f))\n remote_functions.append(ray.remote(resources=total_resources)(f))\n\n results = []\n for remote_function in remote_functions:\n results.append(remote_function.remote())\n results.append(remote_function.remote())\n results.append(remote_function.remote())\n\n ray.get(results)\n\n\n# TODO: 5 retry attempts may be too little for Travis and we may need to\n# increase it if this test begins to be flaky on Travis.\ndef test_zero_capacity_deletion_semantics(shutdown_only):\n ray.init(num_cpus=2, num_gpus=1, resources={\"test_resource\": 1})\n\n def test():\n resources = ray.available_resources()\n MAX_RETRY_ATTEMPTS = 5\n retry_count = 0\n\n del resources[\"memory\"]\n del resources[\"object_store_memory\"]\n for key in list(resources.keys()):\n if key.startswith(\"node:\"):\n del resources[key]\n\n while resources and retry_count < MAX_RETRY_ATTEMPTS:\n time.sleep(0.1)\n resources = ray.available_resources()\n retry_count += 1\n\n if retry_count >= MAX_RETRY_ATTEMPTS:\n raise RuntimeError(\n \"Resources were available even after five retries.\", resources)\n\n return resources\n\n function = ray.remote(\n num_cpus=2, num_gpus=1, resources={\"test_resource\": 1})(test)\n cluster_resources = ray.get(function.remote())\n\n # All cluster resources should be utilized and\n # cluster_resources must be empty\n assert cluster_resources == {}\n\n\[email protected]\ndef save_gpu_ids_shutdown_only():\n # Record the curent value of this environment variable so that we can\n # reset it after the test.\n original_gpu_ids = os.environ.get(\"CUDA_VISIBLE_DEVICES\", None)\n\n yield None\n\n # The code after the yield will run as teardown code.\n ray.shutdown()\n # Reset the environment variable.\n if 
original_gpu_ids is not None:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = original_gpu_ids\n else:\n del os.environ[\"CUDA_VISIBLE_DEVICES\"]\n\n\ndef test_specific_gpus(save_gpu_ids_shutdown_only):\n allowed_gpu_ids = [4, 5, 6]\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join(\n [str(i) for i in allowed_gpu_ids])\n ray.init(num_gpus=3)\n\n @ray.remote(num_gpus=1)\n def f():\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert gpu_ids[0] in allowed_gpu_ids\n\n @ray.remote(num_gpus=2)\n def g():\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 2\n assert gpu_ids[0] in allowed_gpu_ids\n assert gpu_ids[1] in allowed_gpu_ids\n\n ray.get([f.remote() for _ in range(100)])\n ray.get([g.remote() for _ in range(100)])\n\n\ndef test_blocking_tasks(ray_start_regular):\n @ray.remote\n def f(i, j):\n return (i, j)\n\n @ray.remote\n def g(i):\n # Each instance of g submits and blocks on the result of another\n # remote task.\n object_ids = [f.remote(i, j) for j in range(2)]\n return ray.get(object_ids)\n\n @ray.remote\n def h(i):\n # Each instance of g submits and blocks on the result of another\n # remote task using ray.wait.\n object_ids = [f.remote(i, j) for j in range(2)]\n return ray.wait(object_ids, num_returns=len(object_ids))\n\n ray.get([h.remote(i) for i in range(4)])\n\n @ray.remote\n def _sleep(i):\n time.sleep(0.01)\n return (i)\n\n @ray.remote\n def sleep():\n # Each instance of sleep submits and blocks on the result of\n # another remote task, which takes some time to execute.\n ray.get([_sleep.remote(i) for i in range(10)])\n\n ray.get(sleep.remote())\n\n\ndef test_max_call_tasks(ray_start_regular):\n @ray.remote(max_calls=1)\n def f():\n return os.getpid()\n\n pid = ray.get(f.remote())\n ray.test_utils.wait_for_pid_to_exit(pid)\n\n @ray.remote(max_calls=2)\n def f():\n return os.getpid()\n\n pid1 = ray.get(f.remote())\n pid2 = ray.get(f.remote())\n assert pid1 == pid2\n ray.test_utils.wait_for_pid_to_exit(pid1)\n\n\nif __name__ == \"__main__\":\n import pytest\n sys.exit(pytest.main([\"-v\", __file__]))\n"
] | [
[
"numpy.random.permutation",
"numpy.random.randint"
]
] |
ahyansaputra/T1002 | [
"462c66d973e2a509cce7f90b57fbe2912c41a924"
] | [
"app.py"
] | [
"from flask import Flask, render_template, request\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom keras.models import load_model\n\n\napp = Flask(__name__)\n\n\nmodel = load_model('Indonesian Abusive and Hate Speech Twitter Text/model.h5')\n\n\[email protected](\"/\", methods=[\"GET\", \"POST\"])\ndef main():\n return render_template(\"home.html\")\n\ndef get_encode(teks):\n global padded\n tokenizer = Tokenizer(num_words= 15, oov_token='x')\n tokenizer.fit_on_texts(teks)\n sekuens = tokenizer.texts_to_sequences(teks)\n padded = pad_sequences(sekuens, \n maxlen=20,\n padding='post',\n truncating='post')\n return padded\n\ndef get_classes(classes):\n if max(classes[0]) == classes[0][0] :\n return(\"Bukan Ujaran Kebencian\")\n elif max(classes[0]) == classes[0][1] :\n return(\"Ujaran Kebencian Agama\")\n elif max(classes[0]) == classes[0][2] :\n return(\"Ujaran Kebencian Ras\")\n elif max(classes[0]) == classes[0][3] :\n return(\"Ujaran Kebencian Fisik\")\n elif max(classes[0]) == classes[0][4] :\n return(\"Ujaran Kebencian Gender\")\n elif max(classes[0]) == classes[0][5] :\n return(\"Ujaran Kebencian Lain-lain\")\n \n\[email protected](\"/predict\", methods=[\"GET\", \"POST\"])\ndef home():\n global classes\n if request.method == \"POST\":\n teks = request.form['form-input']\n encode_text = get_encode(teks)\n classes = model.predict(encode_text)\n\n return render_template(\"index.html\", classes=get_classes(classes))\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)"
] | [
[
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"tensorflow.keras.preprocessing.text.Tokenizer"
]
] |
Chris-cbc/SGRAF | [
"785535168ad417dda523888f2f047359231fcbf7"
] | [
"data.py"
] | [
"\"\"\"Data provider\"\"\"\n\nimport torch\nimport torch.utils.data as data\n\nimport os\nimport nltk\nimport numpy as np\n\n\nclass PrecompDataset(data.Dataset):\n \"\"\"\n Load precomputed captions and image features\n Possible options: f30k_precomp, coco_precomp\n \"\"\"\n\n def __init__(self, data_path, data_split, vocab):\n self.vocab = vocab\n loc = data_path + '/'\n\n # load the raw captions\n self.captions = []\n with open(loc+'%s_caps.txt' % data_split, 'rb') as f:\n for line in f:\n self.captions.append(line.strip())\n\n # load the image features\n self.images = np.load(loc+'%s_ims.npy' % data_split)\n self.length = len(self.captions)\n\n # rkiros data has redundancy in images, we divide by 5\n if self.images.shape[0] != self.length:\n self.im_div = 5\n else:\n self.im_div = 1\n\n # the development set for coco is large and so validation would be slow\n if data_split == 'dev':\n self.length = 5000\n\n def __getitem__(self, index):\n # handle the image redundancy\n img_id = index/self.im_div\n image = torch.Tensor(self.images[img_id])\n caption = self.captions[index]\n vocab = self.vocab\n\n # convert caption (string) to word ids.\n tokens = nltk.tokenize.word_tokenize(\n str(caption).lower().decode('utf-8'))\n caption = []\n caption.append(vocab('<start>'))\n caption.extend([vocab(token) for token in tokens])\n caption.append(vocab('<end>'))\n target = torch.Tensor(caption)\n\n return image, target, index, img_id\n\n def __len__(self):\n return self.length\n\n\ndef collate_fn(data):\n \"\"\"\n Build mini-batch tensors from a list of (image, caption, index, img_id) tuples.\n Args:\n data: list of (image, target, index, img_id) tuple.\n - image: torch tensor of shape (36, 2048).\n - target: torch tensor of shape (?) variable length.\n Returns:\n - images: torch tensor of shape (batch_size, 36, 2048).\n - targets: torch tensor of shape (batch_size, padded_length).\n - lengths: list; valid length for each padded caption.\n \"\"\"\n # Sort a data list by caption length\n data.sort(key=lambda x: len(x[1]), reverse=True)\n images, captions, ids, img_ids = zip(*data)\n\n # Merge images (convert tuple of 2D tensor to 3D tensor)\n images = torch.stack(images, 0)\n\n # Merget captions (convert tuple of 1D tensor to 2D tensor)\n lengths = [len(cap) for cap in captions]\n targets = torch.zeros(len(captions), max(lengths)).long()\n for i, cap in enumerate(captions):\n end = lengths[i]\n targets[i, :end] = cap[:end]\n\n return images, targets, lengths, ids\n\n\ndef get_precomp_loader(data_path, data_split, vocab, opt, batch_size=100,\n shuffle=True, num_workers=2):\n dset = PrecompDataset(data_path, data_split, vocab)\n\n data_loader = torch.utils.data.DataLoader(dataset=dset,\n batch_size=batch_size,\n shuffle=shuffle,\n pin_memory=True,\n collate_fn=collate_fn)\n return data_loader\n\n\ndef get_loaders(data_name, vocab, batch_size, workers, opt):\n # get the data path\n dpath = os.path.join(opt.data_path, data_name)\n\n # get the train_loader\n train_loader = get_precomp_loader(dpath, 'train', vocab, opt,\n batch_size, True, workers)\n # get the val_loader\n val_loader = get_precomp_loader(dpath, 'dev', vocab, opt,\n 100, False, workers)\n return train_loader, val_loader\n\n\ndef get_test_loader(split_name, data_name, vocab, batch_size, workers, opt):\n # get the data path\n dpath = os.path.join(opt.data_path, data_name)\n\n # get the test_loader\n test_loader = get_precomp_loader(dpath, split_name, vocab, opt,\n 100, False, workers)\n return test_loader\n"
] | [
[
"torch.Tensor",
"numpy.load",
"torch.stack",
"torch.utils.data.DataLoader"
]
] |
And1210/Cartoonization | [
"8b05040d574e64b565cdfbff98dbebbab4c7a9d2"
] | [
"losses/VariationLoss.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\n\nclass VariationLoss(nn.Module):\n def __init__(self, k_size: int) -> None:\n super().__init__()\n self.k_size = k_size\n\n def forward(self, image: torch.Tensor):\n b, c, h, w = image.shape\n tv_h = torch.mean((image[:, :, self.k_size:, :] - image[:, :, : -self.k_size, :])**2)\n tv_w = torch.mean((image[:, :, :, self.k_size:] - image[:, :, :, : -self.k_size])**2)\n tv_loss = (tv_h + tv_w) / (3 * h * w)\n return tv_loss\n"
] | [
[
"torch.mean"
]
] |
gbaasch/Gnu-RL | [
"04621c3cd299eb0fa361d303699676d662aa147d"
] | [
"agent/simulate.py"
] | [
"import gym\nimport eplus_env\n\nimport pandas as pd\nimport pickle\nimport numpy as np\n\nfrom utils import make_dict\n\n# Create Environment. Follow the documentation of 'Gym-Eplus' to set up additional EnergyPlus simulation environment.\nenv = gym.make('5Zone-sim_TMY2-v0');\n#env = gym.make('5Zone-sim_TMY3-v0');\n\n# Modify here: Outputs from EnergyPlus; Match the variables.cfg file.\nobs_name = [\"Outdoor Temp.\", \"Outdoor RH\", \"Wind Speed\", \"Wind Direction\", \"Diff. Solar Rad.\", \"Direct Solar Rad.\", \"Htg SP\", \"Clg SP\", \"Indoor Temp.\", \"Indoor Temp. Setpoint\", \"PPD\", \"Occupancy Flag\", \"Coil Power\", \"HVAC Power\", \"Sys In Temp.\", \"Sys In Mdot\", \"OA Temp.\", \"OA Mdot\", \"MA Temp.\", \"MA Mdot\", \"Sys Out Temp.\", \"Sys Out Mdot\"]\ndist_name = [\"Outdoor Temp.\", \"Outdoor RH\", \"Wind Speed\", \"Wind Direction\", \"Diff. Solar Rad.\", \"Direct Solar Rad.\", \"Indoor Temp. Setpoint\", \"Occupancy Flag\"]\n\n# Reset the env (creat the EnergyPlus subprocess)\ntimeStep, obs, isTerminal = env.reset();\nobs_dict = make_dict(obs_name, obs)\nstart_time = pd.datetime(year = env.start_year, month = env.start_mon, day = env.start_day)\nprint(start_time)\n\ntimeStamp = [start_time]\nobservations = [obs]\nactions = []\n\nfor i in range(91*96):\n # Using EnergyPlus default control strategy;\n action = ()\n timeStep, obs, isTerminal = env.step(action)\n obs_dict = make_dict(obs_name, obs)\n cur_time = start_time + pd.Timedelta(seconds = timeStep)\n \n print(\"{}: Sys Out: {:.2f}({:.2f})-> OA: {:.2f}({:.2f})-> MA: {:.2f}({:.2f})-> Sys Out: {:.2f}({:.2f})-> Zone Temp: {:.2f}\".format(cur_time,\n obs_dict[\"Sys In Temp.\"], obs_dict[\"Sys In Mdot\"],obs_dict[\"OA Temp.\"], obs_dict[\"OA Mdot\"],\n obs_dict[\"MA Temp.\"], obs_dict[\"MA Mdot\"], obs_dict[\"Sys Out Temp.\"], obs_dict[\"Sys Out Mdot\"],\n obs_dict[\"Indoor Temp.\"]))\n\n timeStamp.append(cur_time)\n observations.append(obs)\n #actions.append(action)\n\n# Save Observations\nobs_df = pd.DataFrame(np.array(observations), index = np.array(timeStamp), columns = obs_name)\ndist_df = obs_df[dist_name]\nobs_df.to_pickle(\"results/Sim-TMY2.pkl\")\n#obs_df.to_pickle(\"results/Sim-TMY3.pkl\")\ndist_df.to_pickle(\"results/Dist-TMY2.pkl\")\n#dist_df.to_pickle(\"results/Dist-TMY3.pkl\")\nprint(\"Saved!\")\n\nenv.end_env() # Safe termination of the environment after use.\n"
] | [
[
"numpy.array",
"pandas.datetime",
"pandas.Timedelta"
]
] |
prabhnoor0212/Empathy-in-Mental-Health-Support | [
"18ca5b2f274487f32eb5f22e793c8e3aa49d1f11"
] | [
"TEST/test.py"
] | [
"import numpy as np\nfrom src.data_utils.data_loader import DataReaderUtility\nimport unittest\nimport pandas as pd\nimport torch\nfrom src.models.epitome import EPITOME\nfrom transformers import AdamW\nfrom config import _EPS, _LR, _LAMBDA_EI, _LAMBDA_RE, _BATCH_SIZE, _max_tokenizer_len\nimport logging\nlogging.getLogger().setLevel(logging.INFO)\n\nif torch.cuda.is_available():\n device = torch.device(\"cuda\")\nelse:\n logging.info('No GPU! Sad Day!')\n device = torch.device(\"cpu\")\n\nclass Test_Suite(unittest.TestCase):\n \"\"\"Class for unit test\n \"\"\"\n @classmethod\n def setUpClass(cls):\n file_paths = [\"datasets/emotional-reactions-reddit.csv\", \"datasets/explorations-reddit.csv\", \"datasets/interpretations-reddit.csv\"]\n cls.data = []\n cls.out_paths = []\n\n ### data loaders\n for file_path in file_paths:\n file_name = file_path.split(\"/\")[-1].split(\".\")[0]\n out_path = \"TEST/data/\"+file_name+\"_model.csv\"\n cls.out_paths.append(out_path)\n DataReaderUtility().prepare_model_csv(file_path,out_path)\n train, val, test = DataReaderUtility().prepare_inputs(data_path=out_path)\n cls.data.append([train,val, test])\n\n ### model\n cls.model = EPITOME()\n cls.model = cls.model.to(device)\n for p in cls.model.seeker_encoder.parameters():\n p.requires_grad = False\n\n\n cls.optimizer = AdamW(cls.model.parameters(),lr = _LR, eps = _EPS)\n \n def test_data_loading(self):\n \"\"\"Test for data splits check\n \"\"\"\n self.assertEqual(len(Test_Suite.data), 3)\n for empathy_data in Test_Suite.data:\n self.assertEqual(len(empathy_data), 3)\n \n def test_dimemsions(self):\n \"\"\"Test for checking the dimensions of the pre-processed files.\n \"\"\"\n original_data = []\n for file_path in Test_Suite.out_paths:\n original_data.append(pd.read_csv(file_path))\n\n for idx, empathy_data in enumerate(Test_Suite.data):\n N = 0\n for idx, split in enumerate(empathy_data):\n n_batches = len(split)\n n_rows_in_split = len(split.dataset)\n N += n_rows_in_split\n\n self.assertEqual(n_batches, np.ceil(n_rows_in_split/_BATCH_SIZE))\n\n n_cols = len(split.dataset[0])\n self.assertEqual(n_cols, 7)\n self.assertEqual(N, original_data[idx].shape[0])\n\n def test_dtype_sanity(self):\n \"\"\"Test for data types of the processed files.\n \"\"\"\n for empathy_data in Test_Suite.data:\n for split in empathy_data:\n for row in split.dataset:\n self.assertEqual(row[0].shape[0], _max_tokenizer_len)\n self.assertEqual(row[0].dtype, torch.int64)\n self.assertEqual(row[1].shape[0], _max_tokenizer_len)\n self.assertEqual(row[1].dtype, torch.int64)\n self.assertEqual(row[2].shape[0], _max_tokenizer_len)\n self.assertEqual(row[2].dtype, torch.int64)\n self.assertEqual(row[3].shape[0], _max_tokenizer_len)\n self.assertEqual(row[3].dtype, torch.int64)\n self.assertEqual(row[4].numel(), 1)\n self.assertEqual(row[4].dtype, torch.int64)\n self.assertEqual(row[5].shape[0], _max_tokenizer_len)\n self.assertEqual(row[5].dtype, torch.int64)\n self.assertEqual(row[6].numel(), 1)\n self.assertEqual(row[6].dtype, torch.int64)\n\n\n def test_training(self):\n \"\"\"Test for checking the training. 
(Basically, checks if the model weights are getting updated after first iteration)\n \"\"\"\n Test_Suite.model.train()\n Test_Suite.model.zero_grad()\n row = Test_Suite.data[0][0].dataset[0:1]\n loss, empathy_loss, rationale_loss, logits_empathy, logits_rationale = Test_Suite.model(seeker_input = row[0].to(device),\n responder_input = row[2].to(device), \n seeker_attn_mask=row[1].to(device),\n responder_attn_mask=row[3].to(device), \n class_label=row[4].to(device),\n rationale=row[5].to(device),\n len_rationale=None,\n lambda_EI=_LAMBDA_EI,\n lambda_RE=_LAMBDA_RE)\n\n loss.backward()\n Test_Suite.optimizer.step()\n\n Test_Suite.model.zero_grad()\n n_loss, n_empathy_loss, n_rationale_loss, n_logits_empathy, n_logits_rationale = Test_Suite.model(seeker_input = row[0].to(device),\n responder_input = row[2].to(device), \n seeker_attn_mask=row[1].to(device),\n responder_attn_mask=row[3].to(device), \n class_label=row[4].to(device),\n rationale=row[5].to(device),\n len_rationale=None,\n lambda_EI=_LAMBDA_EI,\n lambda_RE=_LAMBDA_RE)\n \n self.assertEqual(n_loss.item()!=0, True)\n self.assertEqual(n_empathy_loss.item()!=0, True)\n self.assertEqual(n_rationale_loss.item()!=0, True)\n self.assertEqual((n_logits_empathy.cpu().detach().numpy() != logits_empathy.cpu().detach().numpy()).all(), True)\n self.assertEqual((n_logits_rationale.cpu().detach().numpy() != logits_rationale.cpu().detach().numpy()).all(), True)\n\nif __name__ == \"__main__\":\n Test_Suite.setUpClass()\n logging.info(\"Data Loaded! Started Tests\")\n Test_Suite().test_data_loading()\n Test_Suite().test_dimemsions()\n Test_Suite().test_dtype_sanity()\n Test_Suite().test_training()\n logging.info(\"All Tests Passed! :) \")"
] | [
[
"torch.device",
"pandas.read_csv",
"torch.cuda.is_available",
"numpy.ceil"
]
] |
VCL3D/SingleShotCuboids | [
"586a13bef9f75eb89f1a04a79c57df162f67db08"
] | [
"ssc/cuboid_fitting.py"
] | [
"import torch\nimport numpy as np\nimport functools\nimport kornia\n\nclass CuboidFitting(torch.nn.Module):\n def __init__(self,\n mode: str='joint', # one of ['joint', 'floor', 'ceil', 'avg']\n floor_distance: float=-1.6,\n ):\n super(CuboidFitting, self).__init__() \n self._mode = mode\n self._floor_distance = floor_distance\n self._set_func(mode, floor_distance)\n cuboid_axes = torch.Tensor([[\n [-1, 1],\n [-1, -1],\n [1, -1],\n [1, 1], \n ]]).float()\n self.register_buffer(\"cuboid_axes\", cuboid_axes)\n\n def _set_func(self, mode, floor_distance):\n self.homography_func = functools.partial(\n self._homography_floor_svd,\n floor_z=floor_distance)\\\n if mode == 'floor' else (\n functools.partial(\n self._homography_ceil_svd,\n ceil_z=-floor_distance\n ) if mode == 'ceil' else (functools.partial(\n self._homography_avg_svd,\n floor_z=floor_distance,\n ceil_z=-floor_distance\n ) if mode == 'avg' else functools.partial(\n self._homography_joint_svd,\n floor_z=floor_distance,\n ceil_z=-floor_distance\n )\n )\n )\n\n @property\n def mode(self):\n return self._mode\n\n @mode.setter\n def mode(self, value): \n self._mode = value\n self._set_func(self.mode, self.floor_distance)\n\n @property\n def floor_distance(self):\n return self._floor_distance\n\n @floor_distance.setter\n def floor_distance(self, value): \n self._floor_distance = value\n self._set_func(self.mode, self.floor_distance)\n\n def _get_scale_all(self, coords: torch.Tensor, eps: float=1e-12) -> torch.Tensor:\n a_x1 = torch.linalg.norm(coords[:, 0, :] - coords[:, 1, :], ord=2, dim=1)\n a_y1 = torch.linalg.norm(coords[:, 1, :] - coords[:, 2, :], ord=2, dim=1)\n a_x2 = torch.linalg.norm(coords[:, 2, :] - coords[:, 3, :], ord=2, dim=1)\n a_y2 = torch.linalg.norm(coords[:, 3, :] - coords[:, 0, :], ord=2, dim=1)\n a_x = 0.5 * (a_x1 + a_x2)\n a_y = 0.5 * (a_y1 + a_y2)\n return torch.stack([a_y, a_x], dim=1)\n\n def _svd(self,\n points1: torch.Tensor,\n points2: torch.Tensor\n ) -> torch.Tensor:\n '''\n Computes a similarity transform (sR, t) that takes\n a set of 3D points S1 (3 x N) closest to a set of 3D points S2,\n where R is an 3x3 rotation matrix, t 3x1 translation, s scale.\n i.e. solves the orthogonal Procrutes problem.\n '''\n #NOTE: adapted from https://gist.github.com/mkocabas/54ea2ff3b03260e3fedf8ad22536f427\n b, _, c = points1.shape\n # 1. Remove mean.\n points1 = torch.transpose(points1, -2, -1)\n points2 = torch.transpose(points2, -2, -1)\n centroid1 = points1.mean(dim=-1, keepdims=True)\n centroid2 = points1.mean(dim=-1, keepdims=True)\n centered1 = points1 - centroid1\n centered2 = points2 - centroid2\n # 2. Compute variance of X1 used for scale.\n variance = torch.sum(centered1 ** 2, dim=[1, 2])\n # 3. The outer product of X1 and X2. \n K = centered1 @ torch.transpose(centered2, -2, -1)\n # 4. Solution that Maximizes trace(R'K) is R=U*V', where U, V are singular vectors of K.\n U, s, V = torch.svd(K)\n # Construct Z that fixes the orientation of R to get det(R)=1.\n Z = torch.eye(c).to(U).unsqueeze(0).repeat(b, 1, 1)\n Z[:,-1, -1] *= torch.sign(torch.det(U @ torch.transpose(V, -2, -1)))\n # Construct R.\n rotation = V @ (Z @ torch.transpose(U, -2, -1))\n # 5. Recover scale.\n scale = torch.cat([torch.trace(x).unsqueeze(0) for x in (rotation @ K)]) / variance\n # 6. 
Recover translation.\n scale = scale.unsqueeze(-1).unsqueeze(-1)\n translation = centroid2 - (scale * (rotation @ centroid1))\n return rotation, translation, scale\n\n def _transform_points(self,\n points: torch.Tensor,\n rotation: torch.Tensor,\n translation: torch.Tensor,\n scale: torch.Tensor,\n ) -> torch.Tensor:\n xformed = scale * (rotation @ torch.transpose(points, -2, -1)) + translation\n return torch.transpose(xformed, -2, -1)\n\n def _homography_floor_svd(self,\n top_corners: torch.Tensor, # in [-1, 1]\n bottom_corners: torch.Tensor, # in [-1, 1]\n floor_z: float=-1.6,\n ):\n b, N, _ = top_corners.size()\n u = bottom_corners[:, :, 0] * np.pi\n v = bottom_corners[:, :, 1] * (-0.5 * np.pi)\n c = floor_z / torch.tan(v)\n x = c * torch.sin(u)\n y = -c * torch.cos(u)\n floor_xy = torch.stack([x, y], dim=-1)\n scale = self._get_scale_all(floor_xy)\n scale = scale / 2.0\n centroid = floor_xy.mean(dim=1)\n c = torch.linalg.norm(floor_xy, ord=2, dim=-1)\n v = top_corners[:, :, 1] * (-0.5 * np.pi)\n ceil_z = (c * torch.tan(v)).mean(dim=1, keepdim=True)\n ceil_z = ceil_z.unsqueeze(1).expand(b, 4, 1).contiguous()\n floor_xy = floor_xy - centroid.unsqueeze(1)\n inds = torch.sort(torch.atan2(floor_xy[..., 0], floor_xy[..., 1] + 1e-12))[1]\n axes = self.cuboid_axes[:, inds.squeeze(), :]\n homography = kornia.get_perspective_transform(floor_xy, axes) \n homogeneous = torch.cat([floor_xy, torch.ones_like(floor_xy[..., -1:])], dim=2)\n xformed = (homography @ homogeneous.transpose(1, 2)).transpose(1, 2)\n xformed = xformed[:, :, :2] / xformed[:, :, 2].unsqueeze(-1)\n rect_floor_xy = xformed * scale.unsqueeze(1) + centroid.unsqueeze(1)\n original_xy = floor_xy + centroid.unsqueeze(1)\n R, t, s = self._svd(rect_floor_xy, original_xy[:, inds.squeeze(), :])\n rect_floor_xy = self._transform_points(rect_floor_xy, R, t, s)\n bottom_points = torch.cat([rect_floor_xy, floor_z * torch.ones_like(c.unsqueeze(-1))], dim=-1)\n top_points = torch.cat([rect_floor_xy, ceil_z], dim=-1)\n return top_points, bottom_points\n\n def _homography_joint_svd(self,\n top_corners: torch.Tensor, # in [-1, 1]\n bottom_corners: torch.Tensor, # in [-1, 1]\n floor_z: float=-1.6,\n ceil_z: float=1.6,\n ):\n b, N, _ = top_corners.size()\n floor_u = bottom_corners[:, :, 0] * np.pi\n floor_v = bottom_corners[:, :, 1] * (-0.5 * np.pi)\n floor_c = floor_z / torch.tan(floor_v)\n floor_x = floor_c * torch.sin(floor_u)\n floor_y = -floor_c * torch.cos(floor_u)\n floor_xy = torch.stack([floor_x, floor_y], dim=-1)\n floor_scale = self._get_scale_all(floor_xy)\n floor_scale = floor_scale / 2.0 \n floor_ceil_c = torch.linalg.norm(floor_xy, ord=2, dim=-1)\n floor_ceil_v = top_corners[:, :, 1] * (-0.5 * np.pi)\n floor_ceil_z = (floor_ceil_c * torch.tan(floor_ceil_v)).mean(dim=1, keepdim=True)\n floor_ceil_z = floor_ceil_z.unsqueeze(1).expand(b, 4, 1).contiguous()\n ceil_u_t = top_corners[:, :, 0] * np.pi\n ceil_v_t = top_corners[:, :, 1] * (-0.5 * np.pi)\n ceil_c = ceil_z / torch.tan(ceil_v_t)\n ceil_x = ceil_c * torch.sin(ceil_u_t)\n ceil_y = -ceil_c * torch.cos(ceil_u_t)\n ceil_xy = torch.stack([ceil_x, ceil_y], dim=-1)\n ceil_floor_c = torch.linalg.norm(ceil_xy, ord=2, dim=-1)\n ceil_v_b = bottom_corners[:, :, 1] * (-0.5 * np.pi)\n ceil_floor_z = (ceil_floor_c * torch.tan(ceil_v_b)).mean(dim=1, keepdim=True)\n fix_ceil = -ceil_z / ceil_floor_z\n ceil_z_fix = ceil_z * fix_ceil\n ceil_z_fix = ceil_z_fix.unsqueeze(1).expand(b, 4, 1).contiguous()\n ceil_floor_fixed_c = ceil_z_fix.squeeze(-1) / torch.tan(ceil_v_t)\n ceil_x = ceil_floor_fixed_c * 
torch.sin(ceil_u_t)\n ceil_y = -ceil_floor_fixed_c * torch.cos(ceil_u_t)\n ceil_xy = torch.stack([ceil_x, ceil_y], dim=-1)\n ceil_scale = self._get_scale_all(ceil_xy)\n ceil_scale = ceil_scale / 2.0\n joint_xy = 0.5 * (floor_xy + ceil_xy)\n joint_scale = 0.5 * (floor_scale + ceil_scale) \n joint_centroid = joint_xy.mean(dim=1)\n joint_xy = joint_xy - joint_centroid.unsqueeze(1)\n inds = torch.sort(torch.atan2(joint_xy[..., 0], joint_xy[..., 1] + 1e-12))[1]\n axes = self.cuboid_axes[:, inds.squeeze(), :]\n homography = kornia.get_perspective_transform(joint_xy, axes) \n homogeneous = torch.cat([joint_xy, torch.ones_like(joint_xy[..., -1:])], dim=2)\n xformed = (homography @ homogeneous.transpose(1, 2)).transpose(1, 2)\n xformed = xformed[:, :, :2] / xformed[:, :, 2].unsqueeze(-1)\n rect_joint_xy = xformed * joint_scale.unsqueeze(1) + joint_centroid.unsqueeze(1)\n original_xy = joint_xy + joint_centroid.unsqueeze(1)\n R, t, s = self._svd(rect_joint_xy, original_xy[:, inds.squeeze(), :])\n rect_joint_xy = self._transform_points(rect_joint_xy, R, t, s)\n bottom_points = torch.cat([rect_joint_xy, floor_z * torch.ones_like(floor_c.unsqueeze(-1))], dim=-1)\n top_points = torch.cat([rect_joint_xy, ceil_z_fix], dim=-1)\n return top_points, bottom_points\n\n def _homography_ceil_svd(self,\n top_corners: torch.Tensor, # in [-1, 1]\n bottom_corners: torch.Tensor, # in [-1, 1]\n ceil_z: float=1.6,\n ):\n b, N, _ = top_corners.size()\n u_t = top_corners[:, :, 0] * np.pi\n v_t = top_corners[:, :, 1] * (-0.5 * np.pi)\n c = ceil_z / torch.tan(v_t)\n x = c * torch.sin(u_t)\n y = -c * torch.cos(u_t)\n ceil_xy = torch.stack([x, y], dim=-1)\n c = torch.linalg.norm(ceil_xy, ord=2, dim=-1)\n v_b = bottom_corners[:, :, 1] * (-0.5 * np.pi)\n floor_z = (c * torch.tan(v_b)).mean(dim=1, keepdim=True)\n fix_ceil = -ceil_z / floor_z\n floor_z = -ceil_z\n ceil_z_fix = ceil_z * fix_ceil\n ceil_z_fix = ceil_z_fix.unsqueeze(1).expand(b, 4, 1).contiguous()\n c = ceil_z_fix.squeeze(-1) / torch.tan(v_t)\n x = c * torch.sin(u_t)\n y = -c * torch.cos(u_t)\n ceil_xy = torch.stack([x, y], dim=-1)\n scale = self._get_scale_all(ceil_xy)\n scale = scale / 2.0\n centroid = ceil_xy.mean(dim=1)\n ceil_xy = ceil_xy - centroid.unsqueeze(1)\n inds = torch.sort(torch.atan2(ceil_xy[..., 0], ceil_xy[..., 1] + 1e-12))[1]\n axes = self.cuboid_axes[:, inds.squeeze(), :]\n homography = kornia.get_perspective_transform(ceil_xy, axes)\n homogeneous = torch.cat([ceil_xy, torch.ones_like(ceil_xy[..., -1:])], dim=2)\n xformed = (homography @ homogeneous.transpose(1, 2)).transpose(1, 2)\n xformed = xformed[:, :, :2] / xformed[:, :, 2].unsqueeze(-1)\n rect_ceil_xy = xformed * scale.unsqueeze(1) + centroid.unsqueeze(1)\n original_xy = ceil_xy + centroid.unsqueeze(1)\n R, t, s = self._svd(rect_ceil_xy, original_xy[:, inds.squeeze(), :])\n rect_ceil_xy = self._transform_points(rect_ceil_xy, R, t, s)\n bottom_points = torch.cat([rect_ceil_xy, floor_z * torch.ones_like(c.unsqueeze(-1))], dim=-1)\n top_points = torch.cat([rect_ceil_xy, ceil_z_fix], dim=-1)\n return top_points, bottom_points\n\n def _homography_avg_svd(self,\n top_corners: torch.Tensor, # in [-1, 1]\n bottom_corners: torch.Tensor, # in [-1, 1]\n floor_z: float=-1.6,\n ceil_z: float=1.6,\n ):\n top_ceil, bottom_ceil = self._homography_ceil_svd(top_corners, bottom_corners, ceil_z)\n top_floor, bottom_floor = self._homography_floor_svd(top_corners, bottom_corners, floor_z)\n return (top_ceil + top_floor) * 0.5, (bottom_ceil + bottom_floor) * 0.5\n\n def _project_points(self,\n points3d: 
torch.Tensor,\n epsilon: float=1e-12,\n ):\n phi = torch.atan2(points3d[:, :, 0], -1.0 * points3d[:, :, 1] + epsilon) # [-pi, pi]\n xy_dist = torch.linalg.norm(points3d[:, :, :2], ord=2, dim=-1)\n theta = -1.0 * torch.atan2(points3d[:, :, 2], xy_dist + epsilon) # [-pi / 2.0, pi / 2.0]\n u = phi / np.pi\n v = theta / (0.5 * np.pi)\n return torch.stack([u, v], dim=-1)\n\n def forward(self, corners: torch.Tensor) -> torch.Tensor:\n top, bottom = torch.chunk(corners, 2, dim=1)\n b = top.shape[0]\n aligned = []\n for i in range(b):\n t = top[i, ...].unsqueeze(0)\n b = bottom[i, ...].unsqueeze(0)\n try:\n t_xyz, b_xyz = self.homography_func(t, b)\n t_uv, b_uv = self._project_points(t_xyz), self._project_points(b_xyz)\n t_uv = t_uv[:, torch.argsort(t_uv[0, :, 0]), :]\n b_uv = b_uv[:, torch.argsort(b_uv[0, :, 0]), :]\n aligned_corners = torch.cat([t_uv, b_uv], dim=1).squeeze(0)\n aligned.append(aligned_corners)\n except RuntimeError as ex:\n aligned.append(corners[i, ...])\n return torch.stack(aligned, dim=0)\n\n\nif __name__ == \"__main__\":\n from cuboid_test_utils import *\n from cuboid_tests import *\n import sys\n\n selected_test ='15' if len(sys.argv) < 2 else str(sys.argv[1])\n selected_mode ='floor' if len(sys.argv) < 3 else str(sys.argv[2])\n modes = ['floor', 'ceil', 'joint', 'avg']\n for name, test in get_tests():\n if selected_test not in name:\n continue\n for mode in modes:\n if selected_mode not in mode:\n continue\n alignment = CuboidFitting(mode=mode)\n \n top, bottom = test()\n if torch.cuda.is_available():\n top = top.cuda()\n bottom = bottom.cuda()\n alignment = alignment.cuda()\n\n corners = torch.cat([top, bottom], dim=1)\n aligned = alignment.forward(corners)\n images = np.zeros([1, 256, 512, 3], dtype=np.uint8)\n top_pts2d, bottom_pts2d = torch.chunk(aligned, 2, dim=-2)\n draw_points(top_pts2d, images, [255, 0, 0])\n draw_points(bottom_pts2d, images, [255, 0, 0])\n\n top_pts2d, bottom_pts2d = torch.chunk(corners, 2, dim=-2)\n draw_points(top_pts2d, images, [0, 255, 0])\n draw_points(bottom_pts2d, images, [0, 255, 0])\n show_frozen(f\"{mode} {name}\", images[0])\n # show_playback(f\"{mode} {name}\", images[0]) "
] | [
[
"torch.cos",
"torch.cat",
"torch.stack",
"torch.trace",
"torch.tan",
"torch.sin",
"numpy.zeros",
"torch.argsort",
"torch.sum",
"torch.cuda.is_available",
"torch.eye",
"torch.ones_like",
"torch.linalg.norm",
"torch.transpose",
"torch.atan2",
"torch.Tensor",
"torch.chunk",
"torch.svd"
]
] |
adjs/qclib | [
"998a98b33a059c59452a50389084a9a747426ea8"
] | [
"qclib/state_preparation/util/state_tree_preparation.py"
] | [
"# Copyright 2021 qclib project.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nhttps://arxiv.org/abs/2108.10182\n\"\"\"\n\nfrom dataclasses import dataclass\nfrom typing import NamedTuple\nimport numpy as np\n\n\nclass Amplitude(NamedTuple):\n \"\"\"\n Named tuple for amplitudes\n \"\"\"\n index: int\n amplitude: float\n\n def __str__(self):\n return str(self.index) + ':' + \\\n '{0:.2g}'.format(self.amplitude)\n\n@dataclass\nclass Node:\n \"\"\"\n Binary tree node used in state_decomposition function\n \"\"\"\n\n index: int\n level: int\n amplitude: float\n left: 'Node'\n right: 'Node'\n\n def __str__(self):\n return str(self.level) + '_' + \\\n str(self.index) + '\\n' + \\\n '{0:.2g}'.format(self.amplitude)\n\ndef state_decomposition(nqubits, data):\n \"\"\"\n :param nqubits: number of qubits required to generate a\n state with the same length as the data vector (2^nqubits)\n :param data: list with exactly 2^nqubits pairs (index, amplitude)\n :return: root of the state tree\n \"\"\"\n new_nodes = []\n\n # leafs\n for k in data:\n new_nodes.append(Node(k.index, nqubits, k.amplitude, None, None))\n\n # build state tree\n while nqubits > 0:\n nodes = new_nodes\n new_nodes = []\n nqubits = nqubits - 1\n k = 0\n n_nodes = len(nodes)\n while k < n_nodes:\n mag = np.sqrt(np.abs(nodes[k].amplitude) ** 2 + np.abs(nodes[k + 1].amplitude) ** 2)\n arg = (np.angle(nodes[k].amplitude) + np.angle(nodes[k + 1].amplitude)) / 2\n\n amp = mag * np.exp(1j*arg)\n\n new_nodes.append(Node(nodes[k].index // 2, nqubits,\n amp,\n nodes[k],\n nodes[k + 1]))\n k = k + 2\n\n tree_root = new_nodes[0]\n return tree_root\n"
] | [
[
"numpy.angle",
"numpy.exp",
"numpy.abs"
]
] |
xiongyixiaoyang/QUANTAXIS | [
"08441ce711e55385e2b01f80df17d34e7e89f564"
] | [
"test_backtest/MACD_JCSC.py"
] | [
"# -*- coding: utf-8 -*-\n# Demo: MACD strategy\n# src: ./test_backtest/MACD_JCSC.py\n# jupyter: ./test_backtest/QUANTAXIS回测分析全过程讲解.ipynb\n# paper: ./test_backtest/QUANTAXIS回测分析全过程讲解.md\n\nimport QUANTAXIS as QA\nimport numpy as np\nimport pandas as pd\n\n\n# define the MACD strategy\ndef MACD_JCSC(dataframe, SHORT=12, LONG=26, M=9):\n \"\"\"\n 1.DIF向上突破DEA,买入信号参考。\n 2.DIF向下跌破DEA,卖出信号参考。\n \"\"\"\n CLOSE = dataframe.close\n DIFF = QA.EMA(CLOSE, SHORT) - QA.EMA(CLOSE, LONG)\n DEA = QA.EMA(DIFF, M)\n MACD = 2*(DIFF-DEA)\n\n CROSS_JC = QA.CROSS(DIFF, DEA)\n CROSS_SC = QA.CROSS(DEA, DIFF)\n ZERO = 0\n return pd.DataFrame({'DIFF': DIFF, 'DEA': DEA, 'MACD': MACD, 'CROSS_JC': CROSS_JC, 'CROSS_SC': CROSS_SC, 'ZERO': ZERO})\n\n\n# create account\nAccount = QA.QA_Account()\nBroker = QA.QA_BacktestBroker()\n\nAccount.reset_assets(1000000)\nAccount.account_cookie = 'user_admin_macd'\n\n# get data from mongodb\ndata = QA.QA_fetch_stock_day_adv(\n ['000001', '000002', '000004', '600000'], '2017-09-01', '2018-05-20')\ndata = data.to_qfq()\n\n# add indicator\nind = data.add_func(MACD_JCSC)\n# ind.xs('000001',level=1)['2018-01'].plot()\n\ndata_forbacktest = data.select_time('2018-01-01', '2018-05-20')\n\nfor items in data_forbacktest.panel_gen:\n for item in items.security_gen:\n daily_ind = ind.loc[item.index]\n if daily_ind.CROSS_JC.iloc[0] > 0:\n order = Account.send_order(\n code=item.data.code[0],\n time=item.data.date[0],\n amount=1000,\n towards=QA.ORDER_DIRECTION.BUY,\n price=0,\n order_model=QA.ORDER_MODEL.CLOSE,\n amount_model=QA.AMOUNT_MODEL.BY_AMOUNT\n )\n Account.receive_deal(Broker.receive_order(\n QA.QA_Event(order=order, market_data=item)))\n elif daily_ind.CROSS_SC.iloc[0] > 0:\n if Account.sell_available.get(item.code[0], 0) > 0:\n order = Account.send_order(\n code=item.data.code[0],\n time=item.data.date[0],\n amount=Account.sell_available.get(item.code[0], 0),\n towards=QA.ORDER_DIRECTION.SELL,\n price=0,\n order_model=QA.ORDER_MODEL.MARKET,\n amount_model=QA.AMOUNT_MODEL.BY_AMOUNT\n )\n Account.receive_deal(Broker.receive_order(\n QA.QA_Event(order=order, market_data=item)))\n Account.settle()\n\nprint(Account.history)\nprint(Account.history_table)\nprint(Account.daily_hold)\n\n# create Risk analysis\nRisk = QA.QA_Risk(Account)\nprint(Risk.message)\nprint(Risk.assets)\nRisk.plot_assets_curve()\nRisk.plot_dailyhold()\nRisk.plot_signal()\n# Risk.assets.plot()\n# Risk.benchmark_assets.plot()\n\n# save result\nAccount.save()\nRisk.save()\n\naccount_info = QA.QA_fetch_account({'account_cookie': 'user_admin_macd'})\naccount = QA.QA_Account().from_message(account_info[0])\nprint(account)\n"
] | [
[
"pandas.DataFrame"
]
] |
dchealth/covid-mil | [
"b0d6501923dec161a7235167cdee7a90109bf4ed"
] | [
"misc.py"
] | [
"# %%\nimport numpy as np\n\n# %%\ndef get_bbox_pad(data_size, patch_size, center):\n \"\"\"\n Calculate the bbox and needed pad according to patch center.\n \"\"\"\n # bbox_low = center - np.array(patch_size) // 2\n # bbox_high = center + patch_size\n # pad_low = np.abs(np.minimum(bbox_low - 0, 0))\n # pad_high = np.abs(np.maximum(bbox_high - data_size, 0))\n # bbox_low = np.maximum(bbox_low, 0)\n # bbox_high = np.minimum(bbox_high, data_size)\n # bbox = tuple(slice(*b) for b in np.stack((bbox_low, bbox_high), 1))\n # pad = np.stack((pad_low, pad_high), 1)\n coord = np.array(center)\n size = np.array(data_size)\n patch = np.array(patch_size)\n\n req_stt = coord - patch//2\n req_end = req_stt + patch\n src_stt = np.maximum(0, req_stt)\n src_end = np.minimum(size, req_end)\n\n # print(f'req: {[(s,e) for s,e in zip(req_stt,req_end)]}')\n # print(f'src: {[(s,e) for s,e in zip(src_stt,src_end)]}')\n\n # print(f'tile shape: {tile.shape}')\n\n pad_low = np.abs(np.minimum(req_stt, 0))\n pad_high = np.abs(np.maximum(req_end - size, 0))\n pad = np.stack((pad_low, pad_high), 1)\n bbox = tuple(slice(*b) for b in np.stack((src_stt, src_end), 1))\n\n return bbox, pad\n"
] | [
[
"numpy.minimum",
"numpy.array",
"numpy.stack",
"numpy.maximum"
]
] |
mkenworthy/pds110 | [
"47a6dc85265e5a6d6d03bf3690ac535331796bde"
] | [
"code/plot_photometry.py"
] | [
"'''\nCopyright 2017, Matthew A. Kenworthy\n\nplot_photometry.py - pretty plots for the PDS 110 Monitoring slack channel\n\n'''\n\nimport numpy as np\nfrom astropy.io import ascii\nimport matplotlib\nmatplotlib.use('qt5agg')\nimport matplotlib.pyplot as plt\n#import seaborn as sns \n\naavso_file = '../data/aavsodata.txt'\n\nta = ascii.read(aavso_file)\n\nfrom astropy.time import Time\n\n# read in JD into a Time object for easy conversion\n\nta['MJD'] = ta['JD'] - 2400000.5\n\ntimes = Time(ta['JD'], format='jd')\n\nfrom datetime import datetime\n\nnow = Time(datetime.utcnow(),format='datetime')\nprint('current MJD is {}'.format(now.mjd))\n\n# get a list of the unique bandpasses\nta_by_band = ta.group_by('Band')\n\n# the number of different bands\nn_bands = len(ta_by_band.groups)\n\nconv = {'HKEB':'s',\n 'HMB':'o',\n 'MGW':'v',\n 'DKS':'<',\n 'HJW':'.',\n 'PCC':'^',\n 'LCLC':'*',\n 'MGAB':'>'}\n\nobscol = {'HKEB':'r',\n 'HMB':'g',\n 'LCLC':'darkblue',\n 'PCC':'brown',\n 'MGW':'b',\n 'DKS':'y',\n 'HJW':'purple',\n 'MGAB':'m'}\n\n\ntmax = 57998.\n\nmybands = ('B','V','R','I')\nn_bands = len(mybands)\n\nfig, axes = plt.subplots(n_bands, 1, figsize=(8, 11), sharex=True, sharey=True)\n\nfig.subplots_adjust(hspace=0.05, wspace=0.05)\n\n# add expected location of eclipse\nfrom astropy.modeling.models import Gaussian1D\n# .. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}\n# 0.5 = exp-x*x / (2*s*s)\n# -2 s s ln 0.5 = 1\n# s = -1/(2 ln 2) \n# HJD=2458015.5 ± 10 (Sept 9-30 2017\nhjd_mid = 2458015.5 - 2400000.5\necl_fwhm = 7.\necl_sig = ecl_fwhm / np.sqrt((2*np.log(2)))\nf = Gaussian1D(0.26, hjd_mid, ecl_sig)\nf_late = Gaussian1D(0.26, hjd_mid+10., ecl_sig)\n\nt_ecl = np.arange(hjd_mid - 50, hjd_mid + 50, 1)\n\n\nfor (ax, band) in zip(axes, mybands):\n#for (ax, band) in zip(axes, ta_by_band.groups.keys):\n mask = ta_by_band.groups.keys['Band'] == band[0]\n\n # loop over each band and estimate out of transit flux\n tb = ta_by_band.groups[mask]\n tb_by_obs = tb.group_by('Observer Code')\n n_obs = len(tb_by_obs.groups)\n# print('{} observers in filter {}'.format(n_obs,band[0]))\n for nob in tb_by_obs.groups.keys:\n mask2 = tb_by_obs.groups.keys['Observer Code'] == nob[0] \n tc = tb_by_obs.groups[mask2]\n n_points = tc['JD'].size\n# print('In band {} observer {} has {} observations'.format(band[0],nob[0],n_points))\n\n t_noecl = (tc['MJD'] < tmax)\n\n # make an out of eclipse average\n t_out = tc[t_noecl]\n# print(t_out)\n mean_mag = np.array(t_out['Magnitude']).mean()\n# print('mean magnitude is {}'.format(mean_mag))\n\n # mag to intensity\n tc['dmag'] = tc['Magnitude'] - mean_mag\n tc['I'] = np.power(10, tc['dmag'] / -2.5)\n\n # photometry with errorbars\n ax.errorbar(tc['MJD'], tc['I'], tc['Uncertainty'], fmt=conv.get(nob[0],\"*\"), color=obscol.get(nob[0],'black'), label=nob[0])\n\n # photometric band label\n ax.text(0.2, 0.2, band[0], ha='center', va='center', fontsize=24, transform=ax.transAxes)\n\n # today's date\n ax.vlines(now.mjd, 0.0, 1.1,linestyle='dashed')\n\n # out of eclipse flux level\n ax.hlines(1.0, 0, 60000, linestyle='dotted')\n\n # expected location of eclipse\n ax.plot(t_ecl, 1 - f(t_ecl),color='red')\n\n # expected location of eclipse\n ax.plot(t_ecl, 1 - f_late(t_ecl), color='yellow', alpha=0.5)\n \n ax.legend(loc=\"lower right\", fontsize=8, ncol=3, fancybox=True)\n \nax.set_ylim(0.50,1.08)\nax.set_xlim(now.mjd-20, now.mjd+40)\n\naxes[-1].set_xlabel('Time [MJD]')\nfig.suptitle('PDS 110 Photometry', fontsize='large')\n\nfout = 
datetime.today().strftime('pds110_intens_aavso_%Y%m%d_%H%M.png')\nplt.savefig(fout)\nplt.draw()\nplt.show()\n"
] | [
[
"matplotlib.use",
"numpy.array",
"numpy.log",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.draw",
"numpy.power",
"matplotlib.pyplot.show"
]
] |
pmc-tables/pmc-tables | [
"3f4adbfff353b83a5dc660010f058192948a8833"
] | [
"src/pmc_tables/fixers/extra_headers_and_footers.py"
] | [
"import logging\nfrom collections import Counter\nfrom typing import Tuple\n\nimport pandas as pd\n\nfrom ._errors import _FixDoesNotApplyError\n\nlogger = logging.getLogger(__name__)\n\n\ndef fix_extra_headers_and_footers(df: pd.DataFrame, info: dict) -> pd.DataFrame:\n \"\"\"Fix cases where the first / last row(s) in a DataFrame are actually headers / footers.\n\n Args:\n df: DataFrame to be fixed.\n info: Dictionary containing the error message encountered when trying to save `df`.\n\n Returns:\n Fixed DataFrame.\n\n Raises:\n _FixDoesNotApplyError\n \"\"\"\n error_message = info.get('error_message', '')\n if not error_message.startswith(\"Cannot serialize the column\"):\n raise _FixDoesNotApplyError(f\"Unsupported error message: {error_message}.\")\n # Get a mask for mixed columns\n is_number_mask = _get_is_number_mask(df)\n # Make sure at least 90% of mixed columns are numbers\n _check_mostly_numbers(is_number_mask)\n # Extract the header and / or the footer\n header_range, footer_range = _find_header_and_footer(is_number_mask)\n if header_range:\n df = _add_rows_to_header(df, header_range)\n if footer_range:\n df = df.drop(df.index[footer_range], axis=0)\n return df\n\n\ndef _get_is_number_mask(df: pd.DataFrame) -> pd.Series:\n \"\"\"Return a boolean Series indicating whether all mixed columns are numbers.\"\"\"\n object_columns = df.dtypes[df.dtypes == object].index.tolist()\n # Mixed columns are all columns taht are not entirely string or null\n is_string_df = pd.DataFrame(\n {c: [(isinstance(v, str) or pd.isnull(v)) for v in df[c].values]\n for c in object_columns},\n columns=object_columns)\n is_string_s = is_string_df.all(axis=0)\n # Number mask indicates whether all mixed columns in a given row are numbers\n mixed_columns = is_string_s[~is_string_s].index.tolist()\n is_number_df = pd.DataFrame(\n {c: [isinstance(v, (int, float)) for v in df[c].values]\n for c in object_columns},\n columns=object_columns)\n is_number_mask = is_number_df[mixed_columns].all(axis=1)\n return is_number_mask\n\n\ndef _check_mostly_numbers(is_number_mask: pd.Series, cutoff: float = 0.9) -> None:\n is_number_count = Counter(is_number_mask.values)\n frac_number = is_number_count[True] / (is_number_count[True] + is_number_count[False])\n cutoff = min(cutoff, 1 - 1 / len(is_number_count))\n if frac_number < cutoff:\n raise _FixDoesNotApplyError(\n f\"The fraction of numbers in mixed columns is too low ({frac_number}).\")\n\n\ndef _find_header_and_footer(is_number_s: pd.Series) -> Tuple[range, range]:\n header_start = 0\n header_stop = None\n footer_start = None\n footer_stop = len(is_number_s)\n for i in range(len(is_number_s)):\n if header_stop is None and is_number_s[i]:\n header_stop = i\n elif header_stop is not None and footer_start is None and not is_number_s[i]:\n footer_start = i\n max_permissible_offset = min(5, len(is_number_s) // 20 + 1)\n if header_stop is None or (header_stop - header_start) > max_permissible_offset:\n header_stop = 0\n if footer_start is None or (footer_stop - footer_start) > max_permissible_offset:\n footer_start = len(is_number_s)\n return range(header_start, header_stop), range(footer_start, footer_stop)\n\n\ndef _add_rows_to_header(df, header_range=range(0, 1)):\n columns = list(df.columns)\n assert all(isinstance(c, str) for c in columns)\n columns = [(c,) for c in columns]\n for r_idx in header_range:\n for c_idx in range(len(columns)):\n columns[c_idx] = columns[c_idx] + (df.iloc[r_idx, c_idx],)\n df = df.drop(df.index[header_range], axis=0)\n df.columns = columns\n 
return df\n"
] | [
[
"pandas.isnull"
]
] |
SimlaBurcu/newhbfp | [
"cbafee4e68f42556b0eef098f6b5d657f73b3a8c"
] | [
"cnn/models/wideresnet.py"
] | [
"# Copyright (c) 2021, Parallel Systems Architecture Laboratory (PARSA), EPFL & \n# Machine Learning and Optimization Laboratory (MLO), EPFL. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the PARSA, EPFL & MLO, EPFL\n# nor the names of its contributors may be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n# -*- coding: utf-8 -*-\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom bfp.bfp_ops import BFPLinear, BFPConv2d, unpack_bfp_args\n\n\n__all__ = ['wideresnet']\n\n\nclass BasicBlock(nn.Module):\n def __init__(self, in_planes, out_planes, stride, drop_rate=0.0, bfp_args={}):\n super(BasicBlock, self).__init__()\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv1 = BFPConv2d(in_planes, out_planes, kernel_size=3,\n stride=stride, padding=1, bias=False,\n **bfp_args)\n self.bn2 = nn.BatchNorm2d(out_planes)\n self.relu2 = nn.ReLU(inplace=True)\n self.conv2 = BFPConv2d(out_planes, out_planes, kernel_size=3,\n stride=1, padding=1, bias=False,\n **bfp_args)\n self.droprate = drop_rate\n self.equal_in_out = (in_planes == out_planes)\n self.conv_shortcut = (not self.equal_in_out) and BFPConv2d(\n in_planes, out_planes, kernel_size=1, stride=stride,\n padding=0, bias=False, **bfp_args) or None\n\n def forward(self, x):\n if not self.equal_in_out:\n x = self.relu1(self.bn1(x))\n else:\n out = self.relu1(self.bn1(x))\n out = self.relu2(self.bn2(self.conv1(out if self.equal_in_out else x)))\n if self.droprate > 0:\n out = F.dropout(out, p=self.droprate, training=self.training)\n out = self.conv2(out)\n return torch.add(\n x if self.equal_in_out else self.conv_shortcut(x), out)\n\n\nclass NetworkBlock(nn.Module):\n def __init__(\n self, nb_layers, in_planes, out_planes, block, stride,\n drop_rate=0.0, bfp_args={}):\n super(NetworkBlock, self).__init__()\n self.layer = self._make_layer(\n block, in_planes, out_planes, nb_layers, stride, drop_rate,\n bfp_args)\n\n def _make_layer(\n self, block, in_planes, out_planes,\n nb_layers, stride, drop_rate, bfp_args):\n layers = []\n for i in range(nb_layers):\n layers.append(\n block(i == 0 and in_planes or out_planes, out_planes,\n i == 0 and 
stride or 1,\n drop_rate, bfp_args)\n )\n return nn.Sequential(*layers)\n\n def forward(self, x):\n return self.layer(x)\n\n\nclass WideResNet(nn.Module):\n def __init__(self, args, net_depth):\n super(WideResNet, self).__init__()\n bfp_args = unpack_bfp_args(dict(vars(args)))\n # define fundamental parameters.\n self.args = args\n widen_factor = self.args.wideresnet_widen_factor\n drop_rate = self.args.drop_rate\n\n assert((net_depth - 4) % 6 == 0)\n num_channels = [16, 16 * widen_factor,\n 32 * widen_factor, 64 * widen_factor]\n num_blocks = (net_depth - 4) // 6\n block = BasicBlock\n num_classes = self._decide_num_classes()\n\n # 1st conv before any network block\n self.conv1 = BFPConv2d(3, num_channels[0], kernel_size=3, stride=1,\n padding=1, bias=False, **bfp_args)\n # 1st block\n self.block1 = NetworkBlock(num_blocks,\n num_channels[0], num_channels[1],\n block, 1, drop_rate, bfp_args)\n # 2nd block\n self.block2 = NetworkBlock(num_blocks,\n num_channels[1], num_channels[2],\n block, 2, drop_rate, bfp_args)\n # 3rd block\n self.block3 = NetworkBlock(num_blocks,\n num_channels[2], num_channels[3],\n block, 2, drop_rate, bfp_args)\n\n # global average pooling and classifier\n self.bn1 = nn.BatchNorm2d(num_channels[3])\n self.relu = nn.ReLU(inplace=True)\n self.num_channels = num_channels[3]\n self.fc = BFPLinear(num_channels[3], num_classes, **bfp_args)\n\n self._weight_initialization()\n\n def _decide_num_classes(self):\n if self.args.data == 'cifar10' or self.args.data == 'svhn':\n return 10\n elif self.args.data == 'cifar100':\n return 100\n elif self.args.data == 'imagenet':\n return 1000\n\n def _weight_initialization(self):\n for m in self.modules():\n if isinstance(m, BFPConv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, BFPLinear):\n m.bias.data.zero_()\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.block1(out)\n out = self.block2(out)\n out = self.block3(out)\n out = self.relu(self.bn1(out))\n out = F.avg_pool2d(out, 8)\n out = out.view(-1, self.num_channels)\n return self.fc(out)\n\n\ndef wideresnet(args):\n net_depth = int(args.arch.replace('wideresnet', ''))\n model = WideResNet(args, net_depth)\n return model\n"
] | [
[
"torch.nn.functional.avg_pool2d",
"torch.nn.Sequential",
"torch.nn.functional.dropout",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
mesjou/rl-playground | [
"31b5f88106a96d33cece25e1155bb82eb652b5f2"
] | [
"ppo/utils.py"
] | [
"import collections\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\n\n\ndef make_env(gym_id, seed):\n def thunk():\n env = gym.make(gym_id)\n env = gym.wrappers.RecordEpisodeStatistics(env)\n env.seed(seed)\n env.action_space.seed(seed)\n env.observation_space.seed(seed)\n return env\n\n return thunk\n\n\ndef normc_initializer(std=1.0):\n \"\"\"Custom kernel initalizer copied from OpenAI baselines\"\"\"\n\n def _initializer(shape, dtype=None, partition_info=None):\n out = np.random.randn(*shape).astype(np.float32)\n out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))\n return tf.constant(out)\n\n return _initializer\n\n\nPPOLossInfo = collections.namedtuple(\n \"LossInfo\", (\"total_loss\", \"value_loss\", \"policy_loss\", \"entropy_loss\", \"approx_kl\", \"clip_fracs\",)\n)\n"
] | [
[
"numpy.square",
"tensorflow.constant",
"numpy.random.randn"
]
] |
alonsoir/spark-deep-learning | [
"3f668d9b4a0aa2ef6fe05df5bf5c1d705cd2530d"
] | [
"python/sparkdl/image/imageIO.py"
] | [
"# Copyright 2017 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom io import BytesIO\nfrom collections import namedtuple\nfrom warnings import warn\n\n# 3rd party\nimport numpy as np\nfrom PIL import Image\n\n# pyspark\nfrom pyspark import Row\nfrom pyspark import SparkContext\nfrom pyspark.sql.types import (BinaryType, IntegerType, StringType, StructField, StructType)\nfrom pyspark.sql.functions import udf\n\n\nimageSchema = StructType([StructField(\"mode\", StringType(), False),\n StructField(\"height\", IntegerType(), False),\n StructField(\"width\", IntegerType(), False),\n StructField(\"nChannels\", IntegerType(), False),\n StructField(\"data\", BinaryType(), False)])\n\n\n# ImageType class for holding metadata about images stored in DataFrames.\n# fields:\n# nChannels - number of channels in the image\n# dtype - data type of the image's \"data\" Column, sorted as a numpy compatible string.\n# channelContent - info about the contents of each channel currently only \"I\" (intensity) and\n# \"RGB\" are supported for 1 and 3 channel data respectively.\n# pilMode - The mode that should be used to convert to a PIL image.\n# sparkMode - Unique identifier string used in spark image representation.\nImageType = namedtuple(\"ImageType\", [\"nChannels\",\n \"dtype\",\n \"channelContent\",\n \"pilMode\",\n \"sparkMode\",\n ])\nclass SparkMode(object):\n RGB = \"RGB\"\n FLOAT32 = \"float32\"\n RGB_FLOAT32 = \"RGB-float32\"\n\nsupportedImageTypes = [\n ImageType(3, \"uint8\", \"RGB\", \"RGB\", SparkMode.RGB),\n ImageType(1, \"float32\", \"I\", \"F\", SparkMode.FLOAT32),\n ImageType(3, \"float32\", \"RGB\", None, SparkMode.RGB_FLOAT32),\n]\npilModeLookup = {t.pilMode: t for t in supportedImageTypes\n if t.pilMode is not None}\nsparkModeLookup = {t.sparkMode: t for t in supportedImageTypes}\n\n\ndef imageArrayToStruct(imgArray, sparkMode=None):\n \"\"\"\n Create a row representation of an image from an image array and (optional) imageType.\n\n to_image_udf = udf(arrayToImageRow, imageSchema)\n df.withColumn(\"output_img\", to_image_udf(df[\"np_arr_col\"])\n\n :param imgArray: ndarray, image data.\n :param sparkMode: spark mode, type information for the image, will be inferred from array if\n the mode is not provide. See SparkMode for valid modes.\n :return: Row, image as a DataFrame Row.\n \"\"\"\n # Sometimes tensors have a leading \"batch-size\" dimension. 
Assume to be 1 if it exists.\n if len(imgArray.shape) == 4:\n if imgArray.shape[0] != 1:\n raise ValueError(\"The first dimension of a 4-d image array is expected to be 1.\")\n imgArray = imgArray.reshape(imgArray.shape[1:])\n\n if sparkMode is None:\n sparkMode = _arrayToSparkMode(imgArray)\n imageType = sparkModeLookup[sparkMode]\n\n height, width, nChannels = imgArray.shape\n if imageType.nChannels != nChannels:\n msg = \"Image of type {} should have {} channels, but array has {} channels.\"\n raise ValueError(msg.format(sparkMode, imageType.nChannels, nChannels))\n\n # Convert the array to match the image type.\n if not np.can_cast(imgArray, imageType.dtype, 'same_kind'):\n msg = \"Array of type {} cannot safely be cast to image type {}.\"\n raise ValueError(msg.format(imgArray.dtype, imageType.dtype))\n imgArray = np.array(imgArray, dtype=imageType.dtype, copy=False)\n\n data = bytearray(imgArray.tobytes())\n return Row(mode=sparkMode, height=height, width=width, nChannels=nChannels, data=data)\n\n\ndef imageType(imageRow):\n \"\"\"\n Get type information about the image.\n\n :param imageRow: spark image row.\n :return: ImageType\n \"\"\"\n return sparkModeLookup[imageRow.mode]\n\n\ndef imageStructToArray(imageRow):\n \"\"\"\n Convert an image to a numpy array.\n\n :param imageRow: Row, must use imageSchema.\n :return: ndarray, image data.\n \"\"\"\n imType = imageType(imageRow)\n shape = (imageRow.height, imageRow.width, imageRow.nChannels)\n return np.ndarray(shape, imType.dtype, imageRow.data)\n\n\ndef _arrayToSparkMode(arr):\n assert len(arr.shape) == 3, \"Array should have 3 dimensions but has shape {}\".format(arr.shape)\n num_channels = arr.shape[2]\n if num_channels == 1:\n if arr.dtype not in [np.float16, np.float32, np.float64]:\n raise ValueError(\"incompatible dtype (%s) for numpy array for float32 mode\" %\n arr.dtype.string)\n return SparkMode.FLOAT32\n elif num_channels != 3:\n raise ValueError(\"number of channels of the input array (%d) is not supported\" %\n num_channels)\n elif arr.dtype == np.uint8:\n return SparkMode.RGB\n elif arr.dtype in [np.float16, np.float32, np.float64]:\n return SparkMode.RGB_FLOAT32\n else:\n raise ValueError(\"did not find a sparkMode for the given array with num_channels = %d \" +\n \"and dtype %s\" % (num_channels, arr.dtype.string))\n\n\ndef _resizeFunction(size):\n \"\"\" Creates a resize function.\n \n :param size: tuple, size of new image: (height, width). \n :return: function: image => image, a function that converts an input image to an image with \n of `size`.\n \"\"\"\n\n if len(size) != 2:\n raise ValueError(\"New image size should have for [hight, width] but got {}\".format(size))\n\n def resizeImageAsRow(imgAsRow):\n imgAsArray = imageStructToArray(imgAsRow)\n imgType = imageType(imgAsRow)\n imgAsPil = Image.fromarray(imgAsArray, imgType.pilMode)\n imgAsPil = imgAsPil.resize(size[::-1])\n imgAsArray = np.array(imgAsPil)\n return imageArrayToStruct(imgAsArray, imgType.sparkMode)\n\n return resizeImageAsRow\n\n\ndef resizeImage(size):\n \"\"\" Create a udf for resizing image.\n \n Example usage:\n dataFrame.select(resizeImage((height, width))('imageColumn'))\n \n :param size: tuple, target size of new image in the form (height, width). 
\n :return: udf, a udf for resizing an image column to `size`.\n \"\"\"\n return udf(_resizeFunction(size), imageSchema)\n\n\ndef _decodeImage(imageData):\n \"\"\"\n Decode compressed image data into a DataFrame image row.\n\n :param imageData: (bytes, bytearray) compressed image data in PIL compatible format.\n :return: Row, decoded image.\n \"\"\"\n try:\n img = Image.open(BytesIO(imageData))\n except IOError:\n return None\n\n if img.mode in pilModeLookup:\n mode = pilModeLookup[img.mode]\n else:\n msg = \"We don't currently support images with mode: {mode}\"\n warn(msg.format(mode=img.mode))\n return None\n imgArray = np.asarray(img)\n image = imageArrayToStruct(imgArray, mode.sparkMode)\n return image\n\n# Creating a UDF on import can cause SparkContext issues sometimes.\n# decodeImage = udf(_decodeImage, imageSchema)\n\ndef filesToDF(sc, path, numPartitions=None):\n \"\"\"\n Read files from a directory to a DataFrame.\n\n :param sc: SparkContext.\n :param path: str, path to files.\n :param numPartition: int, number or partitions to use for reading files.\n :return: DataFrame, with columns: (filePath: str, fileData: BinaryType)\n \"\"\"\n numPartitions = numPartitions or sc.defaultParallelism\n schema = StructType([StructField(\"filePath\", StringType(), False),\n StructField(\"fileData\", BinaryType(), False)])\n rdd = sc.binaryFiles(path, minPartitions=numPartitions).repartition(numPartitions)\n rdd = rdd.map(lambda x: (x[0], bytearray(x[1])))\n return rdd.toDF(schema)\n\n\ndef readImages(imageDirectory, numPartition=None):\n \"\"\"\n Read a directory of images (or a single image) into a DataFrame.\n\n :param sc: spark context\n :param imageDirectory: str, file path.\n :param numPartition: int, number or partitions to use for reading files.\n :return: DataFrame, with columns: (filepath: str, image: imageSchema).\n \"\"\"\n return _readImages(imageDirectory, numPartition, SparkContext.getOrCreate())\n\n\ndef _readImages(imageDirectory, numPartition, sc):\n decodeImage = udf(_decodeImage, imageSchema)\n imageData = filesToDF(sc, imageDirectory, numPartitions=numPartition)\n return imageData.select(\"filePath\", decodeImage(\"fileData\").alias(\"image\"))\n"
] | [
[
"numpy.ndarray",
"numpy.array",
"numpy.can_cast",
"numpy.asarray"
]
] |
Pherokung/VIRTUON | [
"987cf4e37a72b214f02f0f7fbda68c0cc74e6de4"
] | [
"model_deployment/model/grapy/dataloaders/custom_transforms.py"
] | [
"import torch\nimport math\nimport numbers\nimport random\nimport numpy as np\n\nfrom PIL import Image, ImageOps\nfrom torchvision import transforms\n\nclass RandomCrop(object):\n def __init__(self, size, padding=0):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size # h, w\n self.padding = padding\n\n def __call__(self, sample):\n img, mask = sample['image'], sample['label']\n\n if self.padding > 0:\n img = ImageOps.expand(img, border=self.padding, fill=0)\n mask = ImageOps.expand(mask, border=self.padding, fill=0)\n\n assert img.size == mask.size\n w, h = img.size\n th, tw = self.size # target size\n if w == tw and h == th:\n return {'image': img,\n 'label': mask}\n if w < tw or h < th:\n img = img.resize((tw, th), Image.BILINEAR)\n mask = mask.resize((tw, th), Image.NEAREST)\n return {'image': img,\n 'label': mask}\n\n x1 = random.randint(0, w - tw)\n y1 = random.randint(0, h - th)\n img = img.crop((x1, y1, x1 + tw, y1 + th))\n mask = mask.crop((x1, y1, x1 + tw, y1 + th))\n\n return {'image': img,\n 'label': mask}\n\nclass RandomCrop_new(object):\n def __init__(self, size, padding=0):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size # h, w\n self.padding = padding\n\n def __call__(self, sample):\n img, mask = sample['image'], sample['label']\n\n if self.padding > 0:\n img = ImageOps.expand(img, border=self.padding, fill=0)\n mask = ImageOps.expand(mask, border=self.padding, fill=0)\n\n assert img.size == mask.size\n w, h = img.size\n th, tw = self.size # target size\n if w == tw and h == th:\n return {'image': img,\n 'label': mask}\n\n new_img = Image.new('RGB',(tw,th),'black') # size is w x h; and 'white' is 255\n new_mask = Image.new('L',(tw,th),'white') # same above\n\n # if w > tw or h > th\n x1 = y1 = 0\n if w > tw:\n x1 = random.randint(0,w - tw)\n if h > th:\n y1 = random.randint(0,h - th)\n # crop\n img = img.crop((x1,y1, x1 + tw, y1 + th))\n mask = mask.crop((x1,y1, x1 + tw, y1 + th))\n new_img.paste(img,(0,0))\n new_mask.paste(mask,(0,0))\n\n # x1 = random.randint(0, w - tw)\n # y1 = random.randint(0, h - th)\n # img = img.crop((x1, y1, x1 + tw, y1 + th))\n # mask = mask.crop((x1, y1, x1 + tw, y1 + th))\n\n return {'image': new_img,\n 'label': new_mask}\n\nclass Paste(object):\n def __init__(self, size,):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size # h, w\n\n def __call__(self, sample):\n img, mask = sample['image'], sample['label']\n\n assert img.size == mask.size\n w, h = img.size\n th, tw = self.size # target size\n assert (w <=tw) and (h <= th)\n if w == tw and h == th:\n return {'image': img,\n 'label': mask}\n\n new_img = Image.new('RGB',(tw,th),'black') # size is w x h; and 'white' is 255\n new_mask = Image.new('L',(tw,th),'white') # same above\n\n new_img.paste(img,(0,0))\n new_mask.paste(mask,(0,0))\n\n return {'image': new_img,\n 'label': new_mask}\n\nclass CenterCrop(object):\n def __init__(self, size):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n assert img.size == mask.size\n w, h = img.size\n th, tw = self.size\n x1 = int(round((w - tw) / 2.))\n y1 = int(round((h - th) / 2.))\n img = img.crop((x1, y1, x1 + tw, y1 + th))\n mask = mask.crop((x1, y1, x1 + tw, y1 + th))\n\n return {'image': img,\n 'label': mask}\n\nclass RandomHorizontalFlip(object):\n def 
__call__(self, sample):\n img = sample['image']\n mask = sample['label']\n if random.random() < 0.5:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n mask = mask.transpose(Image.FLIP_LEFT_RIGHT)\n\n return {'image': img,\n 'label': mask}\n\nclass HorizontalFlip(object):\n def __call__(self, sample):\n img = sample['image']\n # mask = sample['label']\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n # mask = mask.transpose(Image.FLIP_LEFT_RIGHT)\n\n return {'image': img}\n\nclass HorizontalFlip_only_img(object):\n def __call__(self, sample):\n img = sample['image']\n # mask = sample['label']\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n # mask = mask.transpose(Image.FLIP_LEFT_RIGHT)\n\n return {'image': img}\n\nclass RandomHorizontalFlip_cihp(object):\n def __call__(self, sample):\n img = sample['image']\n # mask = sample['label']\n if random.random() < 0.5:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n # mask = Image.open()\n\n return {'image': img}\n # 'label': mask}\n\nclass Normalize(object):\n \"\"\"Normalize a tensor image with mean and standard deviation.\n Args:\n mean (tuple): means for each channel.\n std (tuple): standard deviations for each channel.\n \"\"\"\n def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)):\n self.mean = mean\n self.std = std\n\n def __call__(self, sample):\n img = np.array(sample['image']).astype(np.float32)\n # mask = np.array(sample['label']).astype(np.float32)\n img /= 255.0\n img -= self.mean\n img /= self.std\n\n return {'image': img}\n\nclass Normalize_255(object):\n \"\"\"Normalize a tensor image with mean and standard deviation. tf use 255.\n Args:\n mean (tuple): means for each channel.\n std (tuple): standard deviations for each channel.\n \"\"\"\n def __init__(self, mean=(123.15, 115.90, 103.06), std=(1., 1., 1.)):\n self.mean = mean\n self.std = std\n\n def __call__(self, sample):\n img = np.array(sample['image']).astype(np.float32)\n # mask = np.array(sample['label']).astype(np.float32)\n # img = 255.0\n img -= self.mean\n img /= self.std\n img = img\n img = img[[0,3,2,1],...]\n return {'image': img}\n\nclass Normalize_xception_tf(object):\n # def __init__(self):\n # self.rgb2bgr =\n\n def __call__(self, sample):\n img = np.array(sample['image']).astype(np.float32)\n # mask = np.array(sample['label']).astype(np.float32)\n img = (img*2.0)/255.0 - 1\n # print(img.shape)\n # img = img[[0,3,2,1],...]\n return {'image': img}\n\nclass Normalize_xception_tf_only_img(object):\n # def __init__(self):\n # self.rgb2bgr =\n\n def __call__(self, sample):\n img = np.array(sample['image']).astype(np.float32)\n # mask = np.array(sample['label']).astype(np.float32)\n img = (img*2.0)/255.0 - 1\n # print(img.shape)\n # img = img[[0,3,2,1],...]\n return {'image': img,\n 'label': sample['label']}\n\nclass Normalize_cityscapes(object):\n \"\"\"Normalize a tensor image with mean and standard deviation.\n Args:\n mean (tuple): means for each channel.\n std (tuple): standard deviations for each channel.\n \"\"\"\n def __init__(self, mean=(0., 0., 0.)):\n self.mean = mean\n\n def __call__(self, sample):\n img = np.array(sample['image']).astype(np.float32)\n # mask = np.array(sample['label']).astype(np.float32)\n img -= self.mean\n img /= 255.0\n\n return {'image': img,}\n # 'label': mask}\n\nclass ToTensor_(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n def __init__(self):\n self.rgb2bgr = transforms.Lambda(lambda x:x[[2,1,0],...])\n\n def __call__(self, sample):\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n img = 
np.array(sample['image']).astype(np.float32).transpose((2, 0, 1))\n # mask = np.expand_dims(np.array(sample['label']).astype(np.float32), -1).transpose((2, 0, 1))\n # mask[mask == 255] = 0\n\n img = torch.from_numpy(img).float()\n img = self.rgb2bgr(img)\n # mask = torch.from_numpy(mask).float()\n\n return {'image': img}\n\nclass ToTensor_only_img(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n def __init__(self):\n self.rgb2bgr = transforms.Lambda(lambda x:x[[2,1,0],...])\n\n def __call__(self, sample):\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n img = np.array(sample['image']).astype(np.float32).transpose((2, 0, 1))\n # mask = np.expand_dims(np.array(sample['label']).astype(np.float32), -1).transpose((2, 0, 1))\n # mask[mask == 255] = 0\n\n img = torch.from_numpy(img).float()\n img = self.rgb2bgr(img)\n # mask = torch.from_numpy(mask).float()\n\n\n return {'image': img,}\n # 'label': sample['label']}\n\nclass FixedResize(object):\n def __init__(self, size):\n self.size = tuple(reversed(size)) # size: (h, w)\n\n def __call__(self, sample):\n img = sample['image']\n # mask = sample['label']\n\n # assert img.size == mask.size\n\n img = img.resize(self.size, Image.BILINEAR)\n # mask = mask.resize(self.size, Image.NEAREST)\n\n return {'image': img}\n # 'label': mask}\n\nclass Keep_origin_size_Resize(object):\n def __init__(self, max_size, scale=1.0):\n self.size = tuple(reversed(max_size)) # size: (h, w)\n self.scale = scale\n self.paste = Paste(int(max_size[0]*scale))\n\n def __call__(self, sample):\n img = sample['image']\n # mask = sample['label']\n\n assert img.size == mask.size\n h, w = self.size\n h = int(h*self.scale)\n w = int(w*self.scale)\n img = img.resize((h, w), Image.BILINEAR)\n # mask = mask.resize((h, w), Image.NEAREST)\n\n return self.paste({'image': img})\n # 'label': mask})\n\nclass Scale(object):\n def __init__(self, size):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n\n def __call__(self, sample):\n img = sample['image']\n # mask = sample['label']\n # assert img.size == mask.size\n w, h = img.size\n\n if (w >= h and w == self.size[1]) or (h >= w and h == self.size[0]):\n return {'image': img}\n # 'label': mask}\n oh, ow = self.size\n img = img.resize((ow, oh), Image.BILINEAR)\n # mask = mask.resize((ow, oh), Image.NEAREST)\n\n return {'image': img}\n # 'label': mask}\n\nclass Scale_(object):\n def __init__(self, scale):\n self.scale = scale\n\n def __call__(self, sample):\n img = sample['image']\n # mask = sample['label']\n # assert img.size == mask.size\n w, h = img.size\n ow = int(w*self.scale)\n oh = int(h*self.scale)\n img = img.resize((ow, oh), Image.BILINEAR)\n # mask = mask.resize((ow, oh), Image.NEAREST)\n\n return {'image': img}\n\nclass Scale_only_img(object):\n def __init__(self, scale):\n self.scale = scale\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n # assert img.size == mask.size\n w, h = img.size\n ow = int(w*self.scale)\n oh = int(h*self.scale)\n img = img.resize((ow, oh), Image.BILINEAR)\n # mask = mask.resize((ow, oh), Image.NEAREST)\n\n return {'image': img}\n # 'label': mask}\n\nclass RandomSizedCrop(object):\n def __init__(self, size):\n self.size = size\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n assert img.size == mask.size\n for attempt in range(10):\n area = img.size[0] * img.size[1]\n target_area = random.uniform(0.45, 1.0) * area\n aspect_ratio = 
random.uniform(0.5, 2)\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if random.random() < 0.5:\n w, h = h, w\n\n if w <= img.size[0] and h <= img.size[1]:\n x1 = random.randint(0, img.size[0] - w)\n y1 = random.randint(0, img.size[1] - h)\n\n img = img.crop((x1, y1, x1 + w, y1 + h))\n mask = mask.crop((x1, y1, x1 + w, y1 + h))\n assert (img.size == (w, h))\n\n img = img.resize((self.size, self.size), Image.BILINEAR)\n mask = mask.resize((self.size, self.size), Image.NEAREST)\n\n return {'image': img,\n 'label': mask}\n\n # Fallback\n scale = Scale(self.size)\n crop = CenterCrop(self.size)\n sample = crop(scale(sample))\n return sample\n\nclass RandomRotate(object):\n def __init__(self, degree):\n self.degree = degree\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n rotate_degree = random.random() * 2 * self.degree - self.degree\n img = img.rotate(rotate_degree, Image.BILINEAR)\n mask = mask.rotate(rotate_degree, Image.NEAREST)\n\n return {'image': img,\n 'label': mask}\n\nclass RandomSized_new(object):\n '''what we use is this class to aug'''\n def __init__(self, size,scale1=0.5,scale2=2):\n self.size = size\n # self.scale = Scale(self.size)\n self.crop = RandomCrop_new(self.size)\n self.small_scale = scale1\n self.big_scale = scale2\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n assert img.size == mask.size\n\n w = int(random.uniform(self.small_scale, self.big_scale) * img.size[0])\n h = int(random.uniform(self.small_scale, self.big_scale) * img.size[1])\n\n img, mask = img.resize((w, h), Image.BILINEAR), mask.resize((w, h), Image.NEAREST)\n sample = {'image': img, 'label': mask}\n # finish resize\n return self.crop(sample)\n# class Random\n\nclass RandomScale(object):\n def __init__(self, limit):\n self.limit = limit\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n assert img.size == mask.size\n\n scale = random.uniform(self.limit[0], self.limit[1])\n w = int(scale * img.size[0])\n h = int(scale * img.size[1])\n\n img, mask = img.resize((w, h), Image.BILINEAR), mask.resize((w, h), Image.NEAREST)\n\n return {'image': img, 'label': mask}"
] | [
[
"numpy.array",
"torch.from_numpy"
]
] |
minhongqi/federated | [
"52ba53dba2f0f171b34a616179436772ff18883e"
] | [
"utils/datasets/infinite_emnist_test.py"
] | [
"# Copyright 2021, Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\nfrom utils.datasets import infinite_emnist\n\n\ndef _compute_dataset_length(dataset):\n return dataset.reduce(0, lambda x, _: x + 1)\n\n\nclass InfiniteEmnistTest(tf.test.TestCase):\n\n def test_get_infinite_creates_transforming_client_data(self):\n raw_client_data = tff.simulation.datasets.emnist.get_synthetic()\n inf_client_data = infinite_emnist.get_infinite(\n raw_client_data, num_pseudo_clients=2)\n self.assertIsInstance(inf_client_data,\n tff.simulation.datasets.TransformingClientData)\n\n def test_get_infinite_preserves_element_type_structure(self):\n raw_client_data = tff.simulation.datasets.emnist.get_synthetic()\n inf_client_data = infinite_emnist.get_infinite(\n raw_client_data, num_pseudo_clients=5)\n self.assertEqual(raw_client_data.element_type_structure,\n inf_client_data.element_type_structure)\n\n def test_get_infinite_creates_pseudo_clients(self):\n raw_client_data = tff.simulation.datasets.emnist.get_synthetic()\n self.assertLen(raw_client_data.client_ids, 1)\n inf_client_data = infinite_emnist.get_infinite(\n raw_client_data, num_pseudo_clients=10)\n self.assertLen(inf_client_data.client_ids, 10)\n\n def test_get_infinite_preserves_original_client(self):\n raw_client_data = tff.simulation.datasets.emnist.get_synthetic()\n self.assertLen(raw_client_data.client_ids, 1)\n raw_dataset = raw_client_data.create_tf_dataset_for_client(\n raw_client_data.client_ids[0])\n inf_client_data = infinite_emnist.get_infinite(\n raw_client_data, num_pseudo_clients=1)\n self.assertLen(inf_client_data.client_ids, 1)\n inf_dataset = inf_client_data.create_tf_dataset_for_client(\n inf_client_data.client_ids[0])\n length1 = _compute_dataset_length(raw_dataset)\n length2 = _compute_dataset_length(inf_dataset)\n self.assertEqual(length1, length2)\n raw_dataset_iter = iter(raw_dataset)\n inf_dataset_iter = iter(inf_dataset)\n for _ in range(int(length1)):\n raw_batch = next(raw_dataset_iter)\n inf_batch = next(inf_dataset_iter)\n self.assertAllClose(raw_batch, inf_batch)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.test.main"
]
] |
coco-cao-jinglu/coco-linkedin-easyapply | [
"6b8f55e7666c7f6c123f89cbd21de4d9cb109069"
] | [
"easyapplybot.py"
] | [
"import time, random, os, csv, platform\nimport logging\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport pyautogui\n\nfrom urllib.request import urlopen\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport re\nimport yaml\nfrom datetime import datetime, timedelta\n\nlog = logging.getLogger(__name__)\ndriver = webdriver.Chrome(ChromeDriverManager().install())\n\n\ndef setupLogger():\n dt = datetime.strftime(datetime.now(), \"%m_%d_%y %H_%M_%S \")\n\n if not os.path.isdir('./logs'):\n os.mkdir('./logs')\n\n # TODO need to check if there is a log dir available or not\n logging.basicConfig(filename=('./logs/' + str(dt) + 'applyJobs.log'), filemode='w',\n format='%(asctime)s::%(name)s::%(levelname)s::%(message)s', datefmt='./logs/%d-%b-%y %H:%M:%S')\n log.setLevel(logging.DEBUG)\n c_handler = logging.StreamHandler()\n c_handler.setLevel(logging.DEBUG)\n c_format = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', '%H:%M:%S')\n c_handler.setFormatter(c_format)\n log.addHandler(c_handler)\n\n\nclass EasyApplyBot:\n setupLogger()\n # MAX_SEARCH_TIME is 10 hours by default, feel free to modify it\n MAX_SEARCH_TIME = 10 * 60 * 60\n\n def __init__(self,\n username,\n password,\n uploads={},\n filename='output.csv',\n blacklist=[],\n blackListTitles=[]):\n\n log.info(\"Welcome to Easy Apply Bot\")\n dirpath = os.getcwd()\n log.info(\"current directory is : \" + dirpath)\n\n self.uploads = uploads\n past_ids = self.get_appliedIDs(filename)\n self.appliedJobIDs = past_ids if past_ids != None else []\n self.filename = filename\n self.options = self.browser_options()\n self.browser = driver\n self.wait = WebDriverWait(self.browser, 30)\n self.blacklist = blacklist\n self.blackListTitles = blackListTitles\n self.start_linkedin(username, password)\n\n def get_appliedIDs(self, filename):\n try:\n df = pd.read_csv(filename,\n header=None,\n names=['timestamp', 'jobID', 'job', 'company', 'attempted', 'result'],\n lineterminator='\\n',\n encoding='utf-8')\n\n df['timestamp'] = pd.to_datetime(df['timestamp'], format=\"%Y-%m-%d %H:%M:%S\")\n df = df[df['timestamp'] > (datetime.now() - timedelta(days=2))]\n jobIDs = list(df.jobID)\n log.info(f\"{len(jobIDs)} jobIDs found\")\n return jobIDs\n except Exception as e:\n log.info(str(e) + \" jobIDs could not be loaded from CSV {}\".format(filename))\n return None\n\n def browser_options(self):\n options = Options()\n options.add_argument(\"--start-maximized\")\n options.add_argument(\"--ignore-certificate-errors\")\n options.add_argument('--no-sandbox')\n options.add_argument(\"--disable-extensions\")\n\n # Disable webdriver flags or you will be easily detectable\n options.add_argument(\"--disable-blink-features\")\n options.add_argument(\"--disable-blink-features=AutomationControlled\")\n return options\n\n def start_linkedin(self, username, password):\n log.info(\"Logging in.....Please wait :) \")\n self.browser.get(\"https://www.linkedin.com/login?trk=guest_homepage-basic_nav-header-signin\")\n try:\n user_field = self.browser.find_element_by_id(\"username\")\n pw_field = 
self.browser.find_element_by_id(\"password\")\n login_button = self.browser.find_element_by_css_selector(\".btn__primary--large\")\n user_field.send_keys(username)\n user_field.send_keys(Keys.TAB)\n time.sleep(2)\n pw_field.send_keys(password)\n time.sleep(2)\n login_button.click()\n time.sleep(3)\n except TimeoutException:\n log.info(\"TimeoutException! Username/password field or login button not found\")\n\n def fill_data(self):\n self.browser.set_window_size(0, 0)\n self.browser.set_window_position(2000, 2000)\n\n def start_apply(self, positions, locations):\n start = time.time()\n self.fill_data()\n\n combos = []\n while len(combos) < len(positions) * len(locations):\n position = positions[random.randint(0, len(positions) - 1)]\n location = locations[random.randint(0, len(locations) - 1)]\n combo = (position, location)\n if combo not in combos:\n combos.append(combo)\n log.info(f\"Applying to {position}: {location}\")\n location = \"&location=\" + location\n self.applications_loop(position, location)\n if len(combos) > 500:\n break\n\n # self.finish_apply() --> this does seem to cause more harm than good, since it closes the browser which we usually don't want, other conditions will stop the loop and just break out\n\n def applications_loop(self, position, location):\n\n count_application = 0\n count_job = 0\n jobs_per_page = 0\n start_time = time.time()\n\n log.info(\"Looking for jobs.. Please wait..\")\n\n self.browser.set_window_position(0, 0)\n self.browser.maximize_window()\n self.browser, _ = self.next_jobs_page(position, location, jobs_per_page)\n log.info(\"Looking for jobs.. Please wait..\")\n\n while time.time() - start_time < self.MAX_SEARCH_TIME:\n try:\n log.info(f\"{(self.MAX_SEARCH_TIME - (time.time() - start_time)) // 60} minutes left in this search\")\n\n # sleep to make sure everything loads, add random to make us look human.\n randoTime = random.uniform(3.5, 4.9)\n log.debug(f\"Sleeping for {round(randoTime, 1)}\")\n time.sleep(randoTime)\n self.load_page(sleep=1)\n\n # LinkedIn displays the search results in a scrollable <div> on the left side, we have to scroll to its bottom\n\n scrollresults = self.browser.find_element_by_class_name(\n \"jobs-search-results\"\n )\n # Selenium only detects visible elements; if we scroll to the bottom too fast, only 8-9 results will be loaded into IDs list\n for i in range(300, 3000, 100):\n self.browser.execute_script(\"arguments[0].scrollTo(0, {})\".format(i), scrollresults)\n\n time.sleep(1)\n\n # get job links\n links = self.browser.find_elements_by_xpath(\n '//div[@data-job-id]'\n )\n\n if len(links) == 0:\n break\n\n # get job ID of each job link\n IDs = []\n for link in links:\n children = link.find_elements_by_xpath(\n './/a[@data-control-name]'\n )\n for child in children:\n if child.text not in self.blacklist:\n temp = link.get_attribute(\"data-job-id\")\n jobID = temp.split(\":\")[-1]\n IDs.append(int(jobID))\n IDs = set(IDs)\n\n # remove already applied jobs\n before = len(IDs)\n jobIDs = [x for x in IDs if x not in self.appliedJobIDs]\n after = len(jobIDs)\n\n # it assumed that 25 jobs are listed in the results window\n if len(jobIDs) == 0 and len(IDs) > 23:\n jobs_per_page = jobs_per_page + 25\n count_job = 0\n self.avoid_lock()\n self.browser, jobs_per_page = self.next_jobs_page(position,\n location,\n jobs_per_page)\n # loop over IDs to apply\n for i, jobID in enumerate(jobIDs):\n count_job += 1\n self.get_job_page(jobID)\n\n # get easy apply button\n button = self.get_easy_apply_button()\n # word filter to skip 
positions not wanted\n\n if button is not False:\n if any(word in self.browser.title for word in blackListTitles):\n log.info('skipping this application, a blacklisted keyword was found in the job position')\n string_easy = \"* Contains blacklisted keyword\"\n result = False\n else:\n string_easy = \"* has Easy Apply Button\"\n log.info(\"Clicking the EASY apply button\")\n button.click()\n time.sleep(3)\n result = self.send_resume()\n count_application += 1\n else:\n log.info(\"The button does not exist.\")\n string_easy = \"* Doesn't have Easy Apply Button\"\n result = False\n\n position_number = str(count_job + jobs_per_page)\n log.info(f\"\\nPosition {position_number}:\\n {self.browser.title} \\n {string_easy} \\n\")\n\n self.write_to_file(button, jobID, self.browser.title, result)\n\n # sleep every 20 applications\n if count_application != 0 and count_application % 20 == 0:\n sleepTime = random.randint(500, 900)\n log.info(f\"\"\"********count_application: {count_application}************\\n\\n\n Time for a nap - see you in:{int(sleepTime / 60)} min\n ****************************************\\n\\n\"\"\")\n time.sleep(sleepTime)\n\n # go to new page if all jobs are done\n if count_job == len(jobIDs):\n jobs_per_page = jobs_per_page + 25\n count_job = 0\n log.info(\"\"\"****************************************\\n\\n\n Going to next jobs page, YEAAAHHH!!\n ****************************************\\n\\n\"\"\")\n self.avoid_lock()\n self.browser, jobs_per_page = self.next_jobs_page(position,\n location,\n jobs_per_page)\n except Exception as e:\n print(e)\n\n def write_to_file(self, button, jobID, browserTitle, result):\n def re_extract(text, pattern):\n target = re.search(pattern, text)\n if target:\n target = target.group(1)\n return target\n\n timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n attempted = False if button == False else True\n job = re_extract(browserTitle.split(' | ')[0], r\"\\(?\\d?\\)?\\s?(\\w.*)\")\n company = re_extract(browserTitle.split(' | ')[1], r\"(\\w.*)\")\n\n toWrite = [timestamp, jobID, job, company, attempted, result]\n with open(self.filename, 'a') as f:\n writer = csv.writer(f)\n writer.writerow(toWrite)\n\n def get_job_page(self, jobID):\n\n job = 'https://www.linkedin.com/jobs/view/' + str(jobID)\n self.browser.get(job)\n self.job_page = self.load_page(sleep=0.5)\n return self.job_page\n\n def get_easy_apply_button(self):\n try:\n button = self.browser.find_elements_by_xpath(\n '//button[contains(@class, \"jobs-apply\")]/span[1]'\n )\n\n EasyApplyButton = button[0]\n except:\n EasyApplyButton = False\n\n return EasyApplyButton\n\n def send_resume(self):\n def is_present(button_locator):\n return len(self.browser.find_elements(button_locator[0],\n button_locator[1])) > 0\n\n try:\n time.sleep(random.uniform(1.5, 2.5))\n next_locater = (By.CSS_SELECTOR,\n \"button[aria-label='Continue to next step']\")\n review_locater = (By.CSS_SELECTOR,\n \"button[aria-label='Review your application']\")\n submit_locater = (By.CSS_SELECTOR,\n \"button[aria-label='Submit application']\")\n submit_application_locator = (By.CSS_SELECTOR,\n \"button[aria-label='Submit application']\")\n error_locator = (By.CSS_SELECTOR,\n \"p[data-test-form-element-error-message='true']\")\n upload_locator = (By.CSS_SELECTOR, \"input[name='file']\")\n follow_locator = (By.CSS_SELECTOR, \"label[for='follow-company-checkbox']\")\n\n submitted = False\n while True:\n\n # Upload Cover Letter if possible\n if is_present(upload_locator):\n\n input_buttons = 
self.browser.find_elements(upload_locator[0],\n upload_locator[1])\n for input_button in input_buttons:\n parent = input_button.find_element(By.XPATH, \"..\")\n sibling = parent.find_element(By.XPATH, \"preceding-sibling::*\")\n grandparent = sibling.find_element(By.XPATH, \"..\")\n for key in self.uploads.keys():\n sibling_text = sibling.text\n gparent_text = grandparent.text\n if key.lower() in sibling_text.lower() or key in gparent_text.lower():\n input_button.send_keys(self.uploads[key])\n\n # input_button[0].send_keys(self.cover_letter_loctn)\n time.sleep(random.uniform(4.5, 6.5))\n\n # Click Next or submitt button if possible\n button = None\n buttons = [next_locater, review_locater, follow_locator,\n submit_locater, submit_application_locator]\n for i, button_locator in enumerate(buttons):\n if is_present(button_locator):\n button = self.wait.until(EC.element_to_be_clickable(button_locator))\n\n if is_present(error_locator):\n for element in self.browser.find_elements(error_locator[0],\n error_locator[1]):\n text = element.text\n if \"Please enter a valid answer\" in text:\n button = None\n break\n if button:\n button.click()\n time.sleep(random.uniform(1.5, 2.5))\n if i in (3, 4):\n submitted = True\n if i != 2:\n break\n if button == None:\n log.info(\"Could not complete submission\")\n break\n elif submitted:\n log.info(\"Application Submitted\")\n break\n\n time.sleep(random.uniform(1.5, 2.5))\n\n\n except Exception as e:\n log.info(e)\n log.info(\"cannot apply to this job\")\n raise (e)\n\n return submitted\n\n def load_page(self, sleep=1):\n scroll_page = 0\n while scroll_page < 4000:\n self.browser.execute_script(\"window.scrollTo(0,\" + str(scroll_page) + \" );\")\n scroll_page += 200\n time.sleep(sleep)\n\n if sleep != 1:\n self.browser.execute_script(\"window.scrollTo(0,0);\")\n time.sleep(sleep * 3)\n\n page = BeautifulSoup(self.browser.page_source, \"lxml\")\n return page\n\n def avoid_lock(self):\n x, _ = pyautogui.position()\n pyautogui.moveTo(x + 200, pyautogui.position().y, duration=1.0)\n pyautogui.moveTo(x, pyautogui.position().y, duration=0.5)\n pyautogui.keyDown('ctrl')\n pyautogui.press('esc')\n pyautogui.keyUp('ctrl')\n time.sleep(0.5)\n pyautogui.press('esc')\n\n def next_jobs_page(self, position, location, jobs_per_page):\n self.browser.get(\n \"https://www.linkedin.com/jobs/search/?f_LF=f_AL&keywords=\" +\n position + location + \"&start=\" + str(jobs_per_page))\n self.avoid_lock()\n log.info(\"Lock avoided.\")\n self.load_page()\n return (self.browser, jobs_per_page)\n\n def finish_apply(self):\n self.browser.close()\n\n\nif __name__ == '__main__':\n\n with open(\"config.yaml\", 'r') as stream:\n try:\n parameters = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n raise exc\n\n assert len(parameters['positions']) > 0\n assert len(parameters['locations']) > 0\n assert parameters['username'] is not None\n assert parameters['password'] is not None\n\n if 'uploads' in parameters.keys() and type(parameters['uploads']) == list:\n raise Exception(\"uploads read from the config file appear to be in list format\" +\n \" while should be dict. 
Try removing '-' from line containing\" +\n \" filename & path\")\n\n log.info({k: parameters[k] for k in parameters.keys() if k not in ['username', 'password']})\n\n output_filename = [f for f in parameters.get('output_filename', ['output.csv']) if f != None]\n output_filename = output_filename[0] if len(output_filename) > 0 else 'output.csv'\n blacklist = parameters.get('blacklist', [])\n blackListTitles = parameters.get('blackListTitles', [])\n\n uploads = {} if parameters.get('uploads', {}) == None else parameters.get('uploads', {})\n for key in uploads.keys():\n assert uploads[key] != None\n\n bot = EasyApplyBot(parameters['username'],\n parameters['password'],\n uploads=uploads,\n filename=output_filename,\n blacklist=blacklist,\n blackListTitles=blackListTitles\n )\n\n locations = [l for l in parameters['locations'] if l != None]\n positions = [p for p in parameters['positions'] if p != None]\n bot.start_apply(positions, locations)\n"
] | [
[
"pandas.to_datetime",
"pandas.read_csv"
]
] |
empiricalstateofmind/netrd | [
"30652431a050033436232c925844e32ed6c9acca"
] | [
"netrd/distance/frobenius.py"
] | [
"\"\"\"\nfrobenius.py\n------------\n\nFrobenius norm between two adjacency matrices.\n\n\"\"\"\n\nimport numpy as np\nimport networkx as nx\nfrom .base import BaseDistance\n\n\nclass Frobenius(BaseDistance):\n \"\"\"The Frobenius distance between their adjacency matrices.\"\"\"\n\n def dist(self, G1, G2):\n \"\"\"Frobenius distance between two graphs.\n\n If :math:`a_{ij}` and :math:`b_{ij}` are the two adjacency matrices\n we define\n\n .. math::\n d(G1, G2) = \\sqrt{\\sum_{i,j} |a_{ij} - b_{ij}|^2}\n\n\n The results dictionary also stores a 2-tuple of the underlying\n adjacency matrices in the key `'adjacency_matrices'`.\n\n Parameters\n ----------\n G1, G2 (nx.Graph)\n two graphs to compare\n\n Returns\n -------\n float\n the distance between `G1` and `G2`\n\n Notes\n -----\n\n The graphs must have the same number of nodes.\n\n \"\"\"\n adj1 = nx.to_numpy_array(G1)\n adj2 = nx.to_numpy_array(G2)\n dist = np.linalg.norm((adj1 - adj2))\n self.results['dist'] = dist\n self.results['adjacency_matrices'] = adj1, adj2\n return dist\n"
] | [
[
"numpy.linalg.norm"
]
] |
ladyteam/phonopy | [
"455ef61dfa15c01fb6b516461b52f15aefbf92b3"
] | [
"phonopy/gruneisen/core.py"
] | [
"\"\"\"Mode Grueneisen parameter calculation.\"\"\"\n# Copyright (C) 2012 Atsushi Togo\n# All rights reserved.\n#\n# This file is part of phonopy.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n#\n# * Neither the name of the phonopy project nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom typing import Union\n\nimport numpy as np\n\nfrom phonopy.harmonic.dynamical_matrix import DynamicalMatrix, DynamicalMatrixNAC\nfrom phonopy.phonon.band_structure import estimate_band_connection\nfrom phonopy.phonon.degeneracy import rotate_eigenvectors\n\n\nclass GruneisenBase:\n \"\"\"Base class of mode Grueneisen parameter calculation classes.\"\"\"\n\n def __init__(\n self,\n dynmat: Union[DynamicalMatrix, DynamicalMatrixNAC],\n dynmat_plus: Union[DynamicalMatrix, DynamicalMatrixNAC],\n dynmat_minus: Union[DynamicalMatrix, DynamicalMatrixNAC],\n delta_strain=None,\n qpoints=None,\n is_band_connection=False,\n ):\n \"\"\"Init method.\"\"\"\n self._dynmat = dynmat\n self._dynmat_plus = dynmat_plus\n self._dynmat_minus = dynmat_minus\n if delta_strain is None:\n volume = dynmat.primitive.volume\n volume_plus = dynmat_plus.primitive.volume\n volume_minus = dynmat_minus.primitive.volume\n dV = volume_plus - volume_minus\n self._delta_strain = dV / volume\n else:\n self._delta_strain = delta_strain\n self._is_band_connection = is_band_connection\n self._qpoints = qpoints\n\n self._gruneisen = None\n self._eigenvalues = None\n if qpoints is not None:\n self._set_gruneisen()\n\n def set_qpoints(self, qpoints):\n \"\"\"Set q-points.\"\"\"\n self._qpoints = qpoints\n self._set_gruneisen()\n\n def get_gruneisen(self):\n \"\"\"Return mode Grueneisen parameters.\"\"\"\n return self._gruneisen\n\n def get_eigenvalues(self):\n \"\"\"Return eigenvalues.\"\"\"\n return self._eigenvalues\n\n def get_eigenvectors(self):\n \"\"\"Return eigenvectors.\"\"\"\n return self._eigenvectors\n\n def _set_gruneisen(self):\n if self._is_band_connection:\n self._q_direction = self._qpoints[0] - self._qpoints[-1]\n band_order = range(len(self._dynmat.primitive) * 3)\n prev_eigvecs = None\n\n edDe = [] # <e|dD|e>\n eigvals = []\n eigvecs = []\n for i, q in enumerate(self._qpoints):\n if 
self._is_band_connection and self._dynmat.is_nac():\n self._dynmat.run(q, q_direction=self._q_direction)\n else:\n self._dynmat.run(q)\n\n dm = self._dynmat.dynamical_matrix\n evals, evecs = np.linalg.eigh(dm)\n evals_at_q = evals.real\n dD = self._get_dD(q, self._dynmat_minus, self._dynmat_plus)\n evecs_at_q, edDe_at_q = rotate_eigenvectors(evals_at_q, evecs, dD)\n\n if self._is_band_connection:\n if prev_eigvecs is not None:\n band_order = estimate_band_connection(\n prev_eigvecs, evecs_at_q, band_order\n )\n eigvals.append(evals_at_q[band_order])\n eigvecs.append(evecs_at_q[:, band_order])\n edDe.append(edDe_at_q[band_order])\n prev_eigvecs = evecs_at_q\n else:\n eigvals.append(evals_at_q)\n eigvecs.append(evecs_at_q)\n edDe.append(edDe_at_q)\n\n edDe = np.array(edDe, dtype=\"double\", order=\"C\")\n self._eigenvalues = np.array(eigvals, dtype=\"double\", order=\"C\")\n itemsize = self._eigenvalues.itemsize\n self._eigenvectors = np.array(\n eigvecs, dtype=(\"c%d\" % (itemsize * 2)), order=\"C\"\n )\n self._gruneisen = -edDe / self._delta_strain / self._eigenvalues / 2\n\n def _get_dD(\n self,\n q,\n d_a: Union[DynamicalMatrix, DynamicalMatrixNAC],\n d_b: Union[DynamicalMatrix, DynamicalMatrixNAC],\n ):\n if self._is_band_connection and d_a.is_nac() and d_b.is_nac():\n d_a.run(q, q_direction=self._q_direction)\n d_b.run(q, q_direction=self._q_direction)\n else:\n d_a.run(q)\n d_b.run(q)\n dm_a = d_a.dynamical_matrix\n dm_b = d_b.dynamical_matrix\n return dm_b - dm_a\n"
] | [
[
"numpy.linalg.eigh",
"numpy.array"
]
] |
connycode89/Scratchy | [
"1463ec71093bcbb3b89d085893a85deb0a221942"
] | [
"kMeans Clustering.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 13 15:39:56 2017\n\n@author: cdonovan\n\"\"\"\n\n# python 3.5\n\nimport numpy as np\n\n# input data here in the form of a 2D numpy array\n# the array should be entirely numeric & have rows as observations & columns as features\n# data = ?\ndata = np.array([[1,1,1],[1,1,2],[8,8,9],[7,8,9]])\n\ndef euclidean(pt1, pt2):\n \"\"\" Euclidean distance between 2 points, pt1 and pt2\n \"\"\"\n diff = pt1-pt2\n square = diff**2\n sqrt = np.sum(square)**0.5\n return sqrt\n\ndef random_centroids(data, k):\n \"\"\" picking random initial centroids\n \"\"\"\n # get the min and max values for each feature in data\n mins = np.min(data,axis=0)\n maxes = np.max(data,axis=0)\n zipper = list(zip(mins,maxes))\n k_centroids = np.array([list(map(lambda x:np.random.uniform(x[0],x[1]),zipper)) for num in range(k)])\n return k_centroids\n \ndef dists_to_centroids(data, centroids):\n \"\"\" generates an array of distances between the data and the k centroids\n A row for each data observation and a column for each centroid\n \"\"\"\n list2 = []\n for num in range(len(centroids)):\n list1 = list(map(lambda x:euclidean(x,centroids[num]),data))\n list2.append(list1)\n dists = np.array(list2).T\n return dists\n\ndef iterate_once(data, prev_clusts, cents):\n list_cents = []\n for num in range(len(cents)):\n clust_num = data[prev_clusts==num]\n if len(clust_num)!=0:\n cent2 = np.mean(clust_num,axis=0)\n list_cents.append(list(cent2))\n else:\n list_cents.append(list(random_centroids(data,1)[0]))\n new_cents = np.array(list_cents)\n return new_cents\n\ndef algorithm(data,k):\n centroids = random_centroids(data,k) \n dists = dists_to_centroids(data,centroids)\n clusts = np.argmin(dists, axis=1)\n while True: \n centroids = iterate_once(data, clusts, centroids)\n dists = dists_to_centroids(data,centroids)\n clusts2 = np.argmin(dists, axis=1)\n if np.all(clusts==clusts2):\n break\n wss = np.sum(np.min(dists_to_centroids(data,centroids), axis=1))\n return clusts2, wss\n\nk = 2\nalgorithm(data,k)\n\n# to add: rest of algorithm\n# plotting\n"
] | [
[
"numpy.max",
"numpy.array",
"numpy.argmin",
"numpy.sum",
"numpy.min",
"numpy.mean",
"numpy.random.uniform",
"numpy.all"
]
] |
cardin-higley-lab/CBASS | [
"0d0b58497313027388351feffc79766f815b47b5"
] | [
"python/Pipeline/.ipynb_checkpoints/CLAMS_L1_GetTrough-checkpoint.py"
] | [
"import time\nimport numpy as np\nfrom scipy import fftpack\n\n# The wavelet functions\nimport pywt\nfrom scipy import signal\nfrom scipy.signal import butter, lfilter, hilbert, filtfilt, sosfiltfilt\n\ndef GetTrough(db2LFP, inSampleRate, db1FilterBand, inRefChan, chLabel=None, chDataFormat=None, sOPTION=None):\n '''\n L1 of the bout pipeline: Identifies events in the activity band defined\n by db1FilterBand. Events correspond to the trougths of oscillatory\n activity at the band of interest in a reference channel inRefChan. They\n are represented as the Hilbert transform of the band filtered activity of\n each channel. The representation can either use complex or polar\n coordinates depending on the value of the optional variable chDataFormat.\n\n Input -------------------------------------------------------------------\n\n db2LFP: a (channel x time sample) matrix containing the signal\n of interest - originaly meant to be a recording of the\n Local Field Potential(LFP) but it can be any\n multichannel time series.\n inSampleRate: a positive number describing the sample rate of the\n signal\n db1FilterBand: a (1 x 2) array describing the frequency band of\n interest i.e. [30 80] for 30 to 80 Hz.\n inRefChan: a number specifying a reference channel. Events will be\n aligned to the trought of the band specific activity in\n that channel for trough identification. Default is the\n last channel of db2LFP.\n chLabel (optional) a charactaer array describing the band of\n interest - (i.e. 'gamma' for [30 80Hz])\n chDataFormat: (optional) a character array specifying the format of\n the hilbert transforms output. Can be 'complex' or\n 'polar'. Default is 'complex'.\n\n Output ------------------------------------------------------------------\n\n sTROUGH: a structure containing the following fields:\n -.db1FilterBand an (1 x 2) array describing the frequency\n band of interest i.e. [30 80] for 30 to 80 Hz.\n -.db2Trough a (2 * channel x trough) matrix containing the\n hilbert transform of each channel of sREC.db2LFP filtered\n in the band defined in sTROUGH.db1FilterBand at the trough\n of the filtered signal in the reference channel inRefChan\n -.in1Index the indices of the trough in sREC.db2LFP\n\n checks for the proper number of arguments\n '''\n \n verbose = sOPTION.blVerbose\n \n # Creates a Label if not provided\n if chLabel==None: chLabel = str(db1FilterBand[0]) + '-' + str(db1FilterBand[1]) + 'Hz' \n if chDataFormat==None or not chDataFormat in ['polar', 'complex'] :\n pritn('{} is not a valid method set to UMAP'.format(chDataFormat))\n chDataFormat = 'complex'\n\n\n # start = time.time()\n # Filtering with Butterworth\n [b,a] = butter(2, 2*db1FilterBand / inSampleRate, 'bandpass') #Nth-order digital or analog Butterworth filter and return the filter coefficients.\n # if verbose: \n # print('Window: ', 2 * db1FilterBand / inSampleRate)\n # print('Coefficients: b:{}, \\na:{}'.format(b,a))\n\n # Filter\n if not sOPTION.TransformMETHOD == 'wavelet': # If I am using wavelet, don't do the filtering\n start_filtering = time.time()\n print('Using ', sOPTION.FilterMETHOD)\n if sOPTION.FilterMETHOD == 'lfilter':\n db2LFP_filt = lfilter(b, a, db2LFP).T # 1D filter. 
This matches: filter(B, A, db2LFP');\n elif sOPTION.FilterMETHOD == 'filtfilt':\n # db2LFP_filt = filtfilt(b, a, db2LFP, method=\"gust\").T # Apply a digital filter forward and backward to a signal.\n db2LFP_filt = 2*filtfilt(b, a, db2LFP.T, padlen=1,axis=0) # 2x to match matlab's output\n if verbose: print('--Time for fitering: {}'.format(time.time()-start_filtering))\n\n # if verbose:\n # print('db2LFP_filt.shape: {}'.format(db2LFP_filt.shape))\n # print(db2LFP_filt[:5,:5])\n \n\n ## Hilbert transform, amplitude and phase\n print('Using ', sOPTION.TransformMETHOD)\n start_transform = time.time()\n if sOPTION.TransformMETHOD == 'hilbert':\n # db2_Hilbert = hilbert(db2LFP_filt, axis=0) #hilbert(db2LFP_filt.T).T # Old (original), try: hilbert(db2LFP_filt, axis=0)\n FastHilbert = lambda x: hilbert(x, fftpack.next_fast_len(len(x)), axis=0)[:len(x)]\n db2_Hilbert = FastHilbert(db2LFP_filt) # Old (original)\n # print('db2_Hilbert.shape: ',db2_Hilbert.shape)\n # print('db2_Hilbert[:10,:10]: ',db2_Hilbert[:10,:10])\n # FastHilbert = lambda x: signal.hilbert(x, fftpack.next_fast_len(len(x)))[:len(x)]\n # db2_Hilbert = FastHilbert(db2LFP_filt.T).T # Old (original)\n # Had to transpose since the Hilbert is computed along the last axis (ie, (15, 5249484)). The second transpose puts it back in the right orientation ((5249484, 15))\n\n elif sOPTION.TransformMETHOD == 'fft2': # Not functional yet \n # print('len(db2LFP_filt): ',len(db2LFP_filt))\n db2_Hilbert = 2*np.fft.fftshift(np.fft.fft2(db2LFP_filt))/len(db2LFP_filt)\n\n elif sOPTION.TransformMETHOD == 'fft': # Not functional yet \n # print('len(db2LFP_filt): ',len(db2LFP_filt))\n db2_Hilbert = 2 * np.fft.fftshift(np.fft.fft(db2LFP_filt))/len(db2LFP_filt)\n\n elif sOPTION.TransformMETHOD == 'wavelet':\n wav = pywt.ContinuousWavelet('cmor1.5-1.0') # Define the wavelet\n # if verbose:\n # print('Central freq: ',pywt.central_frequency(wav, precision=10))\n # print('In Hz: {}Hz '.format(pywt.scale2frequency(wav, 45, precision=8) * inSampleRate))\n\n #Compute the transform\n db2_Hilbert = []\n for idx in range(db2LFP.shape[0]):\n arr, _=pywt.cwt(db2LFP[idx,:],45,wav, sampling_period = 1/inSampleRate)\n db2_Hilbert.append(arr)\n db2_Hilbert = np.concatenate(db2_Hilbert,axis=0).T # Gets it in the usual format\n \n # Compute the amplitude and phase of the transformed signal\n db1_Amp = np.abs(db2_Hilbert)\n db1_Phase = np.angle(db2_Hilbert)\n if verbose: print('--Time for transformation: {}'.format(time.time()-start_transform))\n \n # if verbose:\n # print('db2LFP_filt.T.shape: ',db2LFP_filt.T.shape)\n # print('db2_Hilbert.shape: ',db2_Hilbert.shape)\n # # print('db2_Hilbert[:10,:10]: ',db2_Hilbert[:10,:10])\n # print('db1_Amp[:10,:10]: ',db1_Amp[:10,:10])\n # print('db1_Phase[:10,:10]: ',db1_Phase[:10,:10])\n # print('db1_Amp.shape: ',db1_Amp.shape)\n # print('db1_Phase.shape: ',db1_Phase.shape)\n \n \n ## Finds the indices of troughs on the reference channels\n in1Index = np.where((db1_Phase[:-1, sOPTION.inRefChan] > 0) & (db1_Phase[1:, sOPTION.inRefChan] < 0))[0]\n # if verbose:\n # print('in1Index: ',in1Index)\n # print(len(in1Index))\n \n '''\n Format the troughs so that each row is a motif and col 1:15 is the\n real part and col 16:30 the imaginary part of the hilbert transform\n of the filtered signal\n '''\n # if verbose:\n # print('np.real(db2_Hilbert[bl1RefTrough, :]).shape: ',np.real(db2_Hilbert[in1Index, :]).shape)\n # print('np.imag(db2_Hilbert[bl1RefTrough, :]).shape: ',np.imag(db2_Hilbert[in1Index, :]).shape)\n # 
print('np.real(db2_Hilbert[bl1RefTrough, :]): ',np.real(db2_Hilbert[in1Index, :]))\n # print('np.imag(db2_Hilbert[bl1RefTrough, :]): ',np.imag(db2_Hilbert[in1Index, :]))\n \n if sOPTION.chDataFormat=='complex':\n db2Trough = np.concatenate((np.real(db2_Hilbert[in1Index, :]), np.imag(db2_Hilbert[in1Index, :])),axis=1);\n else:\n db2Trough = np.concatenate((db1_Amp[in1Index, :], db1_Phase[in1Index, :]), axis=1)\n # if verbose: \n # print('db2Trough.shape: ',db2Trough.shape)\n # print(db2Trough[:10,:10])\n \n\n class sTROUGH:\n pass\n sTROUGH.chLabel = chLabel;\n sTROUGH.db1FilterBand = db1FilterBand;\n sTROUGH.db2Trough = db2Trough;\n sTROUGH.in1Index = in1Index;\n \n # print('Time to compute troughs: {}'.format(time.time()-start))\n \n return sTROUGH"
] | [
[
"numpy.concatenate",
"numpy.angle",
"numpy.fft.fft2",
"scipy.signal.butter",
"numpy.real",
"numpy.where",
"scipy.signal.filtfilt",
"numpy.fft.fft",
"scipy.signal.lfilter",
"numpy.abs",
"numpy.imag"
]
] |
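Once the option handling is stripped away, the GetTrough routine above reduces to three steps: band-pass filter, Hilbert transform, and a search for samples where the instantaneous phase wraps from +pi to -pi (the oscillation trough). A self-contained sketch of just those steps on a synthetic signal; the sample rate, band edges and noise level are made-up values, not taken from the pipeline.

import numpy as np
from scipy.signal import butter, filtfilt, hilbert

fs = 1000.0                                   # Hz, illustrative
t = np.arange(0, 2.0, 1.0 / fs)
sig = np.sin(2 * np.pi * 50 * t) + 0.3 * np.random.randn(t.size)   # noisy 50 Hz oscillation

# Band-pass around the band of interest (30-80 Hz here), zero-phase
b, a = butter(2, [30 / (fs / 2), 80 / (fs / 2)], btype="bandpass")
filt = filtfilt(b, a, sig)

# Instantaneous phase from the analytic signal; troughs sit where the phase wraps from +pi to -pi
phase = np.angle(hilbert(filt))
trough_idx = np.where((phase[:-1] > 0) & (phase[1:] < 0))[0]
print(trough_idx.size, "troughs; first few indices:", trough_idx[:5])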
arash94sh/tabnet | [
"c97f4b4e365e2a582caa29136eb7306c1bfe5ab8"
] | [
"pytorch_tabnet/pretraining_utils.py"
] | [
"from torch.utils.data import DataLoader\nfrom pytorch_tabnet.utils import (\n create_sampler,\n PredictDataset,\n)\nfrom sklearn.utils import check_array\n\n\ndef create_dataloaders(\n X_train, eval_set, weights, batch_size, num_workers, drop_last, pin_memory\n):\n \"\"\"\n Create dataloaders with or without subsampling depending on weights and balanced.\n\n Parameters\n ----------\n X_train : np.ndarray\n Training data\n eval_set : list of np.array\n List of eval sets\n weights : either 0, 1, dict or iterable\n if 0 (default) : no weights will be applied\n if 1 : classification only, will balanced class with inverse frequency\n if dict : keys are corresponding class values are sample weights\n if iterable : list or np array must be of length equal to nb elements\n in the training set\n batch_size : int\n how many samples per batch to load\n num_workers : int\n how many subprocesses to use for data loading. 0 means that the data\n will be loaded in the main process\n drop_last : bool\n set to True to drop the last incomplete batch, if the dataset size is not\n divisible by the batch size. If False and the size of dataset is not\n divisible by the batch size, then the last batch will be smaller\n pin_memory : bool\n Whether to pin GPU memory during training\n\n Returns\n -------\n train_dataloader, valid_dataloader : torch.DataLoader, torch.DataLoader\n Training and validation dataloaders\n \"\"\"\n need_shuffle, sampler = create_sampler(weights, X_train)\n\n train_dataloader = DataLoader(\n PredictDataset(X_train),\n batch_size=batch_size,\n sampler=sampler,\n shuffle=need_shuffle,\n num_workers=num_workers,\n drop_last=drop_last,\n pin_memory=pin_memory,\n )\n\n valid_dataloaders = []\n for X in eval_set:\n valid_dataloaders.append(\n DataLoader(\n PredictDataset(X),\n batch_size=batch_size,\n sampler=sampler,\n shuffle=need_shuffle,\n num_workers=num_workers,\n drop_last=drop_last,\n pin_memory=pin_memory,\n )\n )\n\n return train_dataloader, valid_dataloaders\n\n\ndef validate_eval_set(eval_set, eval_name, X_train):\n \"\"\"Check if the shapes of eval_set are compatible with X_train.\n\n Parameters\n ----------\n eval_set : List of numpy array\n The list evaluation set.\n The last one is used for early stopping\n X_train : np.ndarray\n Train owned products\n\n Returns\n -------\n eval_names : list of str\n Validated list of eval_names.\n\n \"\"\"\n eval_names = eval_name or [f\"val_{i}\" for i in range(len(eval_set))]\n assert len(eval_set) == len(\n eval_names\n ), \"eval_set and eval_name have not the same length\"\n\n for set_nb, X in enumerate(eval_set):\n check_array(X)\n msg = (\n f\"Number of columns is different between eval set {set_nb}\"\n + f\"({X.shape[1]}) and X_train ({X_train.shape[1]})\"\n )\n assert X.shape[1] == X_train.shape[1], msg\n return eval_names\n"
] | [
[
"sklearn.utils.check_array"
]
] |
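create_dataloaders above leans on pytorch_tabnet's own create_sampler and PredictDataset helpers; a stripped-down sketch of the same idea with stock PyTorch pieces only (per-sample weights fed to a WeightedRandomSampler for training, unshuffled loaders for the eval sets). The tensors, sizes and uniform weights are placeholders.

import torch
from torch.utils.data import DataLoader, TensorDataset, WeightedRandomSampler

X_train = torch.randn(256, 10)     # placeholder training matrix
X_valid = torch.randn(64, 10)      # placeholder eval set

weights = torch.ones(len(X_train))             # e.g. inverse class frequency in the weighted case
sampler = WeightedRandomSampler(weights, num_samples=len(X_train), replacement=True)

train_loader = DataLoader(TensorDataset(X_train), batch_size=32,
                          sampler=sampler, drop_last=False, pin_memory=False)
valid_loader = DataLoader(TensorDataset(X_valid), batch_size=32, shuffle=False)

(batch,) = next(iter(train_loader))
print(batch.shape)                             # torch.Size([32, 10])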
TobiasRoeding/advent-of-code-2021 | [
"3db16d52ad9f4f04ac7f43087f6f504dca41cc43"
] | [
"src/day11.py"
] | [
"import numpy as np\n\n\nclass Day11:\n def __init__(self, input=\"src/input/day11.txt\"):\n self.INPUT = input\n\n def read_input(self):\n with open(self.INPUT, \"r\") as fp:\n lines = fp.readlines()\n lines = [list(line.strip()) for line in lines]\n return np.array(lines).astype(int)\n\n def has_flashable(self, grid):\n return np.any(grid > 9)\n\n def flash(self, grid, row, col):\n adjacent_positions = [\n (row - 1, col - 1), # top left\n (row - 1, col), # top middle\n (row - 1, col + 1), # top right\n (row, col - 1), # left\n (row, col + 1), # right\n (row + 1, col - 1), # bottom left\n (row + 1, col), # bottom middle\n (row + 1, col + 1), # bottom right\n ]\n\n for row, col in adjacent_positions:\n if row >= 0 and row < len(grid) and col >= 0 and col < len(grid[0]):\n if grid[row][col] != -1:\n grid[row][col] += 1\n\n return grid\n\n def step(self, grid):\n flashes = 0\n\n # add 1 to each number\n grid += 1\n\n # let numbers greater than 9 flash\n while self.has_flashable(grid):\n for row in range(len(grid)):\n for col in range(len(grid[0])):\n if grid[row][col] > 9:\n self.flash(grid, row, col)\n flashes += 1\n grid[row][col] = -1\n\n # reset numbers from -1 to 0\n grid = np.where(grid == -1, 0, grid)\n\n return grid, flashes\n\n def part1(self):\n grid = self.read_input()\n\n total_flashes = 0\n for i in range(100):\n grid, flashes = self.step(grid)\n total_flashes += flashes\n\n return total_flashes\n\n def part2(self):\n grid = self.read_input()\n\n step = 0\n flashes = 0\n while flashes != 100:\n grid, flashes = self.step(grid)\n step += 1\n return step\n\n def execute(self):\n print(f\"Solution for part 1: {self.part1()}\")\n print(f\"Solution for part 2: {self.part2()}\")\n\n\nif __name__ == \"__main__\":\n Day11().execute()\n"
] | [
[
"numpy.any",
"numpy.array",
"numpy.where"
]
] |
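Day11 above reads its grid from the file path passed to the constructor, so a quick way to exercise step() on a hand-made grid is to write a temporary input file. The 3x3 grid and the src.day11 import path are assumptions; copy the class into the same script if it is not importable that way.

import tempfile
from src.day11 import Day11      # assumed import path

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as fp:
    fp.write("118\n999\n811\n")  # arbitrary 3x3 grid
    path = fp.name

day = Day11(input=path)
grid = day.read_input()
grid, flashes = day.step(grid)
print(flashes)
print(grid)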
Yindong-Zhang/myGAT | [
"f69132f21785d3a6bf1ec014890adeb124c89e8d"
] | [
"myDataset.py"
] | [
"from torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.dataloader import default_collate\nfrom collections import deque\nfrom dataset.make_dataset import get_dataset\nimport torch\nimport numpy as np\n\nimport random\n\ndef bfs(start, adj, distance):\n \"\"\"\n\n :param start:\n :param adj: a sparse adjacent matrix\n :param distance:\n :return:\n \"\"\"\n num_nodes = adj.shape[0]\n visited = [False, ] * num_nodes\n q = deque()\n q.append((start, 0))\n visited[start] = True\n node_list = [start, ]\n while(len(q) != 0):\n cur_node, cur_dist = q.pop()\n node_list.append(cur_node)\n if(cur_dist + 1 > distance):\n break\n for next_node in adj[cur_node].nonzero()[1]:\n if not visited[next_node]:\n q.append((next_node, cur_dist + 1))\n visited[next_node] = True\n\n while(len(q) != 0):\n node, dist = q.pop()\n node_list.append(node)\n\n return node_list\n\ndef bfs_sample(start, adj, distance, sample_num):\n \"\"\"\n\n :param start:\n :param adj:\n :param distance:\n :param sample_numbers: should be a list specific number of support node sampled at each distance\n :return:\n \"\"\"\n assert distance == len(sample_num), \"BFS distance should equal to length of sample_nums.\"\n num_nodes = adj.shape[0]\n visited = [False, ] * num_nodes\n nodelist = [start, ]\n curlist = [start, ]\n visited[start] = True\n for i in range(distance):\n nextlist = []\n for cur_node in curlist:\n downstream = []\n next_nodes = adj[cur_node].nonzero()[1]\n for node in next_nodes:\n if not visited[node]:\n downstream.append(node)\n if len(downstream) > sample_num[i]:\n random.shuffle(downstream)\n downstream = downstream[:sample_num[i]]\n\n for node in downstream:\n visited[node] = True\n nextlist.extend(downstream)\n\n\n\n nodelist.extend(nextlist)\n curlist = nextlist\n\n return nodelist\n\n\n\nclass SubGraph(Dataset):\n def __init__(self, adj, features, labels, idx, num_samples):\n \"\"\"\n\n :param adj: suppose adj a sparse adjacent matrix\n :param features: a numpy array in shape (num_nodes, num_features)\n :param labels: a numpy array in shape (num_nodes, 1) if not multi label task.\n \"\"\"\n self.adj = adj\n self.features = features\n self.num_samples = num_samples\n self.num_layers = len(num_samples)\n self.labels = labels\n self.idx = idx\n self.num_nodes = len(idx)\n\n def __getitem__(self, item):\n node = self.idx[item]\n nodelist = bfs_sample(node, self.adj, self.num_layers, self.num_samples)\n min_adj = self.adj[nodelist][:, nodelist]\n min_features = self.features[nodelist]\n min_label = self.labels[node:node + 1]\n\n return (min_adj, min_features), min_label\n\n def __len__(self):\n return self.num_nodes\n\ndef custom_collate(batch):\n \"\"\"\n\n :param batch: a list of tuple ((adj, features), label)\n :return:\n \"\"\"\n max_nodes = max(map(lambda x: x[0][0].shape[0], batch))\n batch = [align(data, max_nodes) for data in batch]\n return default_collate(batch)\n\ndef align(data_tuple, max_nodes):\n \"\"\"\n consider adj as sparse matrix and features as dense ndarray.\n :param data_tuple:\n :param max_nodes:\n :return:\n \"\"\"\n (adj, features), label = data_tuple\n adj.resize(max_nodes, max_nodes)\n adj_fill = adj.toarray()\n features_fill = np.zeros((max_nodes, features.shape[1]), )\n features_fill[:features.shape[0]] = features\n\n # convert numpy/scipy to torch tensor\n adj_fill = torch.FloatTensor(adj_fill)\n features_fill = torch.FloatTensor(features_fill)\n label = torch.LongTensor(label)\n return (adj_fill, features_fill), label\n\nif __name__ == \"__main__\":\n # data_name = 
\"citeseer\"\n # adj, features, labels = get_dataset(data_name, './data/npz/{}.npz'.format(data_name), standardize=True,\n # train_examples_per_class=40, val_examples_per_class=100)\n # features = features.toarray()\n from utils import load_reddit\n adj, features, labels, idx_train, idx_val, idx_test = load_reddit()\n\n # %%\n subgraph = SubGraph(adj, features, labels, np.arange(adj.shape[0]), 2, [25, 10])\n dataloader = DataLoader(subgraph, batch_size = 2, num_workers= 4, collate_fn= custom_collate)\n # %%\n for i, data in enumerate(dataloader):\n (min_adj, min_feat), label = data\n print(min_adj.shape, min_feat.shape, label.shape)\n if i > 20:\n break"
] | [
[
"numpy.zeros",
"torch.FloatTensor",
"torch.LongTensor",
"torch.utils.data.DataLoader",
"numpy.arange",
"torch.utils.data.dataloader.default_collate"
]
] |
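The custom_collate/align pair above pads every sampled subgraph in a batch to the size of the largest one before stacking; a self-contained sketch of that padding step on two dummy sparse adjacencies (pad_sample, the shapes and the random values are all invented for illustration).

import numpy as np
import torch
import scipy.sparse as sp
from torch.utils.data.dataloader import default_collate

def pad_sample(adj, feats, label, max_nodes):
    # zero-pad one (adj, features) pair to max_nodes x max_nodes / max_nodes x n_features
    adj = adj.copy()
    adj.resize(max_nodes, max_nodes)               # in-place resize of the sparse matrix
    feats_pad = np.zeros((max_nodes, feats.shape[1]), dtype=np.float32)
    feats_pad[:feats.shape[0]] = feats
    return (torch.from_numpy(adj.toarray().astype(np.float32)),
            torch.from_numpy(feats_pad)), torch.tensor(label)

samples = [
    (sp.random(3, 3, density=0.5, format="csr"), np.random.rand(3, 4), [0]),
    (sp.random(5, 5, density=0.5, format="csr"), np.random.rand(5, 4), [1]),
]
max_nodes = max(a.shape[0] for a, _, _ in samples)
(adj_b, feat_b), label_b = default_collate([pad_sample(a, f, y, max_nodes) for a, f, y in samples])
print(adj_b.shape, feat_b.shape, label_b.shape)    # every sample padded to 5 nodes, stacked on a batch dim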
Soooyeon-Kim/Python | [
"e9e7e94e4a5a4ac94ff55347201cb4d24a5bb768"
] | [
"crawling/selenium_naver_movie_review.py"
] | [
"import time, re, csv\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.by import By\r\n\r\ndriver = webdriver.Chrome('C:/Users/sooyeon/Downloads/chromedriver.exe')\r\ndriver.get(\r\n \"https://movie.naver.com/movie/bi/mi/point.naver?code=99702\"\r\n)\r\ntime.sleep(2)\r\n\r\n# 영화 제목\r\ndriver.find_element_by_css_selector(\"#content > div.article > div.mv_info_area > div.mv_info > h3 > a\").text\r\n\r\n# 출연진\r\ndriver.find_element_by_css_selector(\"#content > div.article > div.mv_info_area > div.mv_info > dl > dd:nth-child(6)\").text\r\n\r\ndriver.switch_to.frame(driver.find_elements_by_tag_name(\"iframe\")[4])\r\nresult = []\r\n\r\n# pagination \r\nfor i in range(1,11):\r\n\r\n boxes = driver.find_elements_by_css_selector(\"body > div > div > div.score_result > ul > li\")\r\n\r\n scores = []\r\n reviews= []\r\n ids = []\r\n dates = []\r\n for box in boxes:\r\n scores.append(box.find_element_by_css_selector(\"div.star_score > em\").text)\r\n reviews.append(box.find_element_by_css_selector(\"div.score_reple > p\").text)\r\n ids.append(box.find_element_by_css_selector(\"div.score_reple > dl > dt > em:nth-child(1)\").text)\r\n dates.append(box.find_element_by_css_selector(\"div.score_reple > dl > dt > em:nth-child(2)\").text)\r\n\r\n import pandas as pd\r\n\r\n df = pd.DataFrame({'id':ids,'score':scores,'date':dates,'review':reviews})\r\n # 영화 제목 입력\r\n df['movie_name'] = '007 노 타임 투 다이'\r\n result.append(df)\r\n driver.find_element_by_xpath('''//*[@id=\"pagerTagAnchor{}\"]'''.format(str(i))).click()\r\n\r\n# 데이터 프레임\r\nnaver_m = pd.concat(result).reset_index(drop=True)\r\n# csv 변환\r\nnaver_m.to_csv('tmp_naver_movie.csv',index=False)\r\n\r\n"
] | [
[
"pandas.DataFrame",
"pandas.concat"
]
] |
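The scraping loop above builds one DataFrame per review page and merges them at the end; a minimal offline sketch of that aggregation step with pandas (the two dummy pages and their values stand in for what selenium would return).

import pandas as pd

pages = [
    [{"id": "user1", "score": "10", "date": "2021.10.01", "review": "good"}],
    [{"id": "user2", "score": "8", "date": "2021.10.02", "review": "ok"}],
]

frames = []
for rows in pages:
    df = pd.DataFrame(rows)
    df["movie_name"] = "007 노 타임 투 다이"
    frames.append(df)

naver_m = pd.concat(frames).reset_index(drop=True)
naver_m.to_csv("tmp_naver_movie.csv", index=False)
print(naver_m)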
berkeley-stat159/project-zeta-2 | [
"7c35423fbc1407751e1aea6aac99d5d02a82dfdc"
] | [
"code/correlation_analysis_scripts_sub4.py"
] | [
"import numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport copy\n\n# maximally responded area, percentile setting:\npercent = 80\n\n# object_list\nobject_list = [\"bottle\", \"cat\", \"chair\", \"face\", \"house\", \"scissors\", \"scrambledpix\", \"shoe\"]\n\n# important path:\nbase_path = os.path.abspath(os.path.dirname(__file__))\nbase_path = os.path.join(base_path, \"..\")\nfigure_path = os.path.join(base_path, \"code\", \"images\", \"\")\nfile_path = os.path.join(base_path, \"code\", \"txt\", \"\")\n\n# color display\nnice_cmap_values = np.loadtxt(file_path + 'actc.txt')\nnice_cmap = colors.ListedColormap(nice_cmap_values, 'actc')\n\n# generalized analysis, choose which subject to focus on:\nsubid = \"sub004\"\n\n# generate list for odd and even run values:\nodd_runs = [\"%s_odd_%s\" % (subid, i) for i in object_list]\neven_runs = [\"%s_even_%s\" % (subid, i) for i in object_list]\n\n# separator:\nseparator = \"-\" * 80\n\n\n############################ Start 2D analysis #############################\nprint (\"Advanced correlation analysis:\")\nprint (separator)\nprint (\"\")\n\n# load even and odd run results\nall_runs = {}\nfor i in odd_runs:\n all_runs[i] = np.loadtxt(file_path + i + \".txt\")\nfor i in even_runs:\n all_runs[i] = np.loadtxt(file_path + i + \".txt\")\n\n# reshape to 3d images\nall_3d = {}\nfor key, txt in all_runs.items():\n all_3d[key] = np.reshape(txt, (-1, 25, 1))\n\n\n# make a copy of the images for making figures:\nall_3d_fig = copy.deepcopy(all_3d)\n\n# save each 3d image as figure\nfor key, fig in all_3d_fig.items():\n fig[fig == 0] = np.nan\n plt.imshow(fig[:, :, 0], interpolation=\"nearest\", cmap=nice_cmap)\n plt.title(\"%s\" % key)\n plt.savefig(figure_path + \"%s.png\" % key)\n plt.clf()\nplt.close()\n\n# save all 3d images as one compiled figure\nfig = plt.figure(figsize=[8.0, 5])\ni = 1\n# plot odd run results\nfor item in object_list:\n plt.subplot(2, 8, i, xticks=[], yticks=[])\n plt.imshow(all_3d_fig[\"%s_odd_%s\" % (subid, item)][:, :, 0], interpolation=\"nearest\", cmap=nice_cmap)\n plt.title(\"%s\" % item, fontsize=8, weight='bold')\n i += 1\n# plot even run results\nfor item in object_list:\n plt.subplot(2, 8, i, xticks=[], yticks=[])\n plt.imshow(all_3d_fig[\"%s_even_%s\" % (subid, item)][:, :, 0], interpolation=\"nearest\", cmap=nice_cmap)\n i += 1\nplt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.1, bottom=0.05, top=0.835)\n# label the figure:\nfig.text(0.03, 0.625, 'Odd runs', ha='left', weight='bold')\nfig.text(0.03, 0.225, 'Even runs', ha='left', weight='bold')\nfig.text(0.05, 0.93, 'Average brain images for odd runs / even runs of %s' % subid, fontsize=16, weight='bold')\n# save figure\nplt.savefig(figure_path + \"odd_even_compile_%s.png\" % subid)\n# close pyplot window\nplt.close()\n\n# report\nprint (\"Average odd run and even run results are saved as images\")\nprint (separator)\n\n\n# Run correlation:\nall_results = []\nprint (\"correlation analysis:\")\nfor i in odd_runs:\n result = []\n for j in even_runs:\n corr = np.corrcoef(all_runs[i], all_runs[j])\n result.append(corr[0, 1])\n print (\"%s vs %s: %.4f\" % (i, j, corr[0, 1]))\n all_results.append(result)\ntable_result = np.array(all_results)\nnp.savetxt(file_path + \"correlation_value_%s.txt\" % subid, np.ravel(table_result))\n\n\n# make table to display the correlation:\nfig = plt.figure(figsize=(8, 4))\nplt.subplot(111, frameon=False, xticks=[], yticks=[])\ntable = plt.table(cellText=table_result.round(4), colLabels=object_list, 
rowLabels=object_list, loc='center', cellLoc='center')\nplt.subplots_adjust(left=0.3, bottom=0, top=0.95)\nfig.text(0.55, 0.75, 'Odd runs', ha='left', fontsize=12)\nfig.text(0.05, 0.52, 'Even runs', ha='left', rotation=90, fontsize=12)\nfig.text(0.2, 0.85, \"Correlation between odd runs and even runs for %s\" % subid, weight='bold')\ntable.scale(1.2, 1.2)\nplt.savefig(figure_path + \"correlation_table_%s.png\" % subid)\n\n\n###############################################################################\n# remove the maximally responded area and perform the correlation once again:\n\n# create a copy of data to work on this analysis:\nnew_all_runs = copy.deepcopy(all_runs)\n\n# remove data that is >= 80 percentile of all data\nfor key, result in new_all_runs.items():\n thresh = np.percentile(result, q=80)\n nparray = np.array(result)\n nparray[nparray >= thresh] = 0\n new_all_runs[key] = nparray\n\n# reshape the new_all_runs:\nnew_all_3d = {}\nfor key, txt in new_all_runs.items():\n new_all_3d[key] = np.reshape(txt, (-1, 25, 1))\n\n# make a copy of the images for making figures:\nnew_all_3d_fig = copy.deepcopy(new_all_3d)\n\n# clear the background\nfor key, fig in new_all_3d_fig.items():\n fig[fig == 0] = np.nan\n\n# save all 3d images as one compiled figure\nfig = plt.figure(figsize=[8.0, 5])\ni = 1\nfor item in object_list:\n plt.subplot(2, 8, i, xticks=[], yticks=[])\n plt.imshow(new_all_3d_fig[\"%s_odd_%s\" % (subid, item)][:, :, 0], interpolation=\"nearest\", cmap=nice_cmap)\n plt.title(\"%s\" % item, fontsize=8, weight='bold')\n i += 1\nfor item in object_list:\n plt.subplot(2, 8, i, xticks=[], yticks=[])\n plt.imshow(new_all_3d_fig[\"%s_even_%s\" % (subid, item)][:, :, 0], interpolation=\"nearest\", cmap=nice_cmap)\n i += 1\nplt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.1, bottom=0.05, top=0.835)\n# label the figure:\nfig.text(0.03, 0.625, 'Odd runs', ha='left', weight='bold')\nfig.text(0.03, 0.225, 'Even runs', ha='left', weight='bold')\nfig.text(0.08, 0.93, \"Average brain images after removing 80%% max for %s\" % subid, fontsize=14, weight='bold')\nplt.savefig(figure_path + \"non_max_odd_even_compile_%s.png\" % subid)\nplt.close()\n\n\n# Run correlation:\nnon_max_all_results = []\nprint (\"correlation analysis of non-maximal results:\")\nfor i in odd_runs:\n result = []\n for j in even_runs:\n corr = np.corrcoef(new_all_runs[i], new_all_runs[j])\n result.append(corr[0, 1])\n print (\"%s vs %s: %.4f\" % (i, j, corr[0, 1]))\n non_max_all_results.append(result)\n\nnon_max_table_result = np.array(non_max_all_results)\nnp.savetxt(file_path + \"non_max_correlation_value_%s.txt\" % subid, np.ravel(non_max_table_result))\n# make table to display the correlation:\n\nfig = plt.figure(figsize=(8, 4))\nplt.subplot(111, frameon=False, xticks=[], yticks=[])\ntable = plt.table(cellText=non_max_table_result.round(4), colLabels=object_list, rowLabels=object_list, loc='center', cellLoc='center')\nplt.subplots_adjust(left=0.3, bottom=0, top=0.95)\nfig.text(0.55, 0.75, 'Odd runs', ha='left', fontsize=12)\nfig.text(0.05, 0.52, 'Even runs', ha='left', rotation=90, fontsize=12)\nfig.text(1.0, 0.85, \"Correlation of non_maximal responded brain of %s\" % subid, weight='bold')\ntable.scale(1.2, 1.2)\nplt.savefig(figure_path + \"non_max_correlation_table_%s.png\" % subid)\nplt.close()\n\n\n# generate bar plot\nind = np.arange(8)\nwidth = 0.35\nfig = plt.figure(figsize=(10, 24))\nfor plot_num in range(8):\n i = plot_num -1\n ax = plt.subplot(8, 1, plot_num, frameon=False)\n bar_plot1 = ax.bar(ind, 
table_result[i, :], width, color='royalblue')\n bar_plot2 = ax.bar(ind+width, non_max_table_result[i, :], width, color='deepskyblue')\n # add some label:\n ax.set_ylabel(\"Correlation\")\n ax.set_title(\"%s\" % object_list[i])\n ax.set_xticks(ind+width)\n ax.set_xticklabels(object_list)\n ax.set_yticks([-0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0])\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.axhline(0, color='black', linewidth=2)\n ax.axvline(0, color='black', linewidth=2)\nplt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.5, bottom=0.05, top=0.95)\nplt.savefig(figure_path + \"%s_2d_total_correlation_bar_both.png\" % subid)\n\n\n# generate individual bar plot\nind = np.arange(8)\nwidth = 0.35\nfig = plt.figure(figsize=(12, 5))\nfor i in range(8):\n ax = plt.subplot(111, frameon=False)\n bar_plot1 = ax.bar(ind, table_result[i, :], width, color='royalblue')\n bar_plot2 = ax.bar(ind+width, non_max_table_result[i, :], width, color='deepskyblue')\n # add some label:\n ax.set_ylabel(\"Correlation\")\n ax.set_title(\"%s\" % object_list[i])\n ax.set_xticks(ind+width)\n ax.set_xticklabels(object_list)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.set_yticks([-0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0])\n ax.axhline(0, color='black', linewidth=2)\n ax.axvline(0, color='black', linewidth=2)\n ax.legend((bar_plot1[0], bar_plot2[0]), ('All responding voxels', 'Excluding max responded voxels'), bbox_to_anchor=(0.7, 1.06), loc=2, borderaxespad=0., fontsize=12)\n plt.savefig(figure_path + \"%s_2d_%s_total_correlation_bar_both.png\" % (subid, object_list[i]))\n plt.clf()\nplt.close()\n\n##################################################################\n\n# subtract the mean and try the correlation:\n\n# create a copy of data to work on this analysis:\nsubtrmean_all_runs = copy.deepcopy(all_runs)\n\n# performing subtraction:\n# even result - odd_scramblepix\n# odd result - even_scramblepix\n\ntotal_mean = np.zeros_like(subtrmean_all_runs[\"%s_odd_face\" % subid])\ntotal_num = 0\nfor key, result in subtrmean_all_runs.items():\n nparray = np.array(result)\n total_mean += nparray\n total_num += 1\ntotal_mean = total_mean/total_num\n\nsubtract_mean_result = {}\nfor key, result in subtrmean_all_runs.items():\n nparray = np.array(result)\n subtract_mean_result[key] = nparray - total_mean\n\n# reshape the subtract_mean_result:\nsubtract_mean_all_3d = {}\nfor key, txt in subtract_mean_result.items():\n subtract_mean_all_3d[key] = np.reshape(txt, (-1, 25, 1))\n\n# make a copy of the images for making figures:\nsubtract_mean_all_3d_fig = copy.deepcopy(subtract_mean_all_3d)\n\n# clear the background\nfor key, fig in subtract_mean_all_3d_fig.items():\n fig[fig == 0] = np.nan\n\n# save all 3d images as one compiled figure\nfig = plt.figure(figsize=[8.0, 5])\ni = 1\nfor item in object_list:\n plt.subplot(2, 8, i, xticks=[], yticks=[])\n plt.imshow(subtract_mean_all_3d_fig[\"%s_odd_%s\" % (subid, item)][:, :, 0], interpolation=\"nearest\", cmap=nice_cmap)\n plt.title(\"%s\" % item, fontsize=8, weight='bold')\n i += 1\nfor item in object_list:\n plt.subplot(2, 8, i, xticks=[], yticks=[])\n plt.imshow(subtract_mean_all_3d_fig[\"%s_even_%s\" % (subid, item)][:, :, 0], interpolation=\"nearest\", cmap=nice_cmap)\n i += 1\nplt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.2, bottom=0.05, top=0.9)\n# label the figure:\nfig.text(0.03, 0.625, 'Odd runs', ha='left', weight='bold')\nfig.text(0.03, 0.225, 'Even runs', ha='left', weight='bold')\nfig.text(0.16, 0.93, 
'Brain images after subtracting mean for %s' % subid, fontsize=16, weight='bold')\nplt.savefig(figure_path + \"subtract_mean_odd_even_compile_%s.png\" % subid)\nplt.close()\n\n\n# Run correlation:\nsubtract_mean_all_results = []\nprint (\"correlation analysis of subtracted results:\")\nfor i in odd_runs:\n result = []\n for j in even_runs:\n corr = np.corrcoef(subtract_mean_result[i], subtract_mean_result[j])\n result.append(corr[0, 1])\n print (\"%s vs %s: %.4f\" % (i, j, corr[0, 1]))\n subtract_mean_all_results.append(result)\n\nsubtract_mean_table_result = np.array(subtract_mean_all_results)\n\n# make table to display the correlation:\n\nfig = plt.figure(figsize=(8, 4))\nplt.subplot(111, frameon=False, xticks=[], yticks=[])\ntable = plt.table(cellText=subtract_mean_table_result.round(4), colLabels=object_list, rowLabels=object_list, loc='center', cellLoc='center')\nplt.subplots_adjust(left=0.3, bottom=0, top=0.95)\nfig.text(0.55, 0.75, 'Odd runs', ha='left', fontsize=12)\nfig.text(0.05, 0.52, 'Even runs', ha='left', rotation=90, fontsize=12)\nfig.text(0.3, 0.85, \"Correlation of mean subtracted brain images of %s\" % subid, weight='bold')\ntable.scale(1.2, 1.2)\nplt.savefig(figure_path + \"subtract_mean_correlation_table_%s.png\" % subid)\nplt.close()\n\nprint (separator)\n################################## Start 3D analysis ######################################\nprint (\"Advanced correlation analysis with 3D results:\")\nprint (separator)\nprint (\"\")\n# load even and odd run results\nall_runs_3d = {}\nfor i in odd_runs:\n all_runs_3d[i] = np.loadtxt(file_path + i + \"_3d.txt\")\nfor i in even_runs:\n all_runs_3d[i] = np.loadtxt(file_path + i + \"_3d.txt\")\n\n# reshape to 3d images\nall_3d_for3d = {}\nfor key, txt in all_runs_3d.items():\n all_3d_for3d[key] = np.reshape(txt, (-1, 25, 5))\n\n# make a copy of the images for making figures:\nall_3d_for3d_fig = copy.deepcopy(all_3d_for3d)\n\n# clear background\nfor key, fig in all_3d_for3d_fig.items():\n fig[fig == 0] = np.nan\n\n# save all 3d images as one compiled figure for each z\nfig = plt.figure(figsize=[8.0, 20])\ni = 1\n\nfor item in object_list:\n for index in range(5):\n plt.subplot(16, 5, i, xticks=[], yticks=[])\n plt.imshow(all_3d_for3d_fig[\"%s_odd_%s\" % (subid, item)][:, :, index], interpolation=\"nearest\", cmap=nice_cmap)\n if index == 2:\n plt.title(\"Odd Run %s\" % item, fontsize=8, weight='bold')\n i += 1\n for index in range(5):\n plt.subplot(16, 5, i, xticks=[], yticks=[])\n plt.imshow(all_3d_for3d_fig[\"%s_even_%s\" % (subid, item)][:, :, index], interpolation=\"nearest\", cmap=nice_cmap)\n if index == 2:\n plt.title(\"Even Run %s\" % item, fontsize=8, weight='bold')\n i += 1\nplt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.5, bottom=0.05, top=0.835)\n# label the figure:\nfig.text(0.25, 0.85, 'Average brain 3D images for odd runs / even runs of %s' % subid, fontsize=16, weight='bold')\nplt.savefig(figure_path + \"3d_odd_even_compile_%s.png\" % subid)\nplt.close()\n\n# Run correlation:\nall_results_3d = []\nprint (\"3D correlation analysis:\")\nfor i in odd_runs:\n result = []\n for j in even_runs:\n corr = np.corrcoef(np.ravel(all_runs_3d[i]), np.ravel(all_runs_3d[j]))\n result.append(corr[0, 1])\n print (\"%s vs %s: %.4f\" % (i, j, corr[0, 1]))\n all_results_3d.append(result)\n\ntable_result_3d = np.array(all_results_3d)\nnp.savetxt(file_path + \"3d_correlation_value_%s.txt\" % subid, np.ravel(table_result_3d))\n\n# make table to display the correlation:\n\nfig = plt.figure(figsize=(8, 4))\nplt.subplot(111, 
frameon=False, xticks=[], yticks=[])\ntable = plt.table(cellText=table_result_3d.round(4), colLabels=object_list, rowLabels=object_list, loc='center', cellLoc='center')\nplt.subplots_adjust(left=0.3, bottom=0, top=0.95)\nfig.text(0.55, 0.75, 'Odd runs', ha='left', fontsize=12)\nfig.text(0.05, 0.52, 'Even runs', ha='left', rotation=90, fontsize=12)\nfig.text(0.2, 0.85, \"Correlation between 3D odd runs and even runs for %s\" % subid, weight='bold')\ntable.scale(1.2, 1.2)\nplt.savefig(figure_path + \"3d_correlation_table_%s.png\" % subid)\n\n\n#########################################################################################\n\n# remove the maximally responded 3D area and perform the correlation once again:\n\n# create a copy of data to work on this analysis:\nnew_all_runs_for3d = copy.deepcopy(all_runs_3d)\n\n# remove data that is >= 80 percentile of all data\nfor key, result in new_all_runs_for3d.items():\n thresh = np.percentile(result, q=90)\n nparray = np.array(result)\n nparray[nparray >= thresh] = 0\n new_all_runs_for3d[key] = nparray\n\n# reshape the new_all_runs:\nnew_all_3d_for3d = {}\nfor key, txt in new_all_runs_for3d.items():\n new_all_3d_for3d[key] = np.reshape(txt, (-1, 25, 5))\n\n# make a copy of the images for making figures:\nnew_all_3d_fig_for3d = copy.deepcopy(new_all_3d_for3d)\n\n# clear the background\nfor key, fig in new_all_3d_fig_for3d.items():\n fig[fig == 0] = np.nan\n\n# save all 3d images as one compiled figure\nfig = plt.figure(figsize=[8.0, 12.0])\ni = 1\nfor item in object_list:\n for index in range(5):\n plt.subplot(16, 5, i, xticks=[], yticks=[])\n plt.imshow(new_all_3d_fig_for3d[\"%s_odd_%s\" % (subid, item)][:, :, index], interpolation=\"nearest\", cmap=nice_cmap)\n if index == 2:\n plt.title(\"%s\" % item, fontsize=8, weight='bold')\n i += 1\nfor item in object_list:\n for index in range(5):\n plt.subplot(16, 5, i, xticks=[], yticks=[])\n plt.imshow(new_all_3d_fig_for3d[\"%s_even_%s\" % (subid, item)][:, :, index], interpolation=\"nearest\", cmap=nice_cmap)\n if index == 2:\n plt.title(\"%s\" % item, fontsize=8, weight='bold')\n i += 1\nplt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.1, bottom=0.05, top=0.835)\n# label the figure:\nfig.text(0.2, 0.85, \"Average 3D brain images after removing 80%% max for %s\" % subid, weight='bold')\nplt.savefig(figure_path + \"3d_non_max_odd_even_compile_%s.png\" % subid)\nplt.close()\n\n\n# Run correlation:\nnon_max_all_results_for3d = []\nprint (\"correlation analysis of 3D non-maximal results:\")\nfor i in odd_runs:\n result = []\n for j in even_runs:\n corr = np.corrcoef(new_all_runs_for3d[i], new_all_runs_for3d[j])\n result.append(corr[0, 1])\n print (\"%s vs %s: %.4f\" % (i, j, corr[0, 1]))\n non_max_all_results_for3d.append(result)\n\nnon_max_table_result_for3d = np.array(non_max_all_results_for3d)\nnp.savetxt(file_path + \"3d_non_max_correlation_value_%s.txt\" % subid, np.ravel(non_max_table_result_for3d))\n\n# make table to display the correlation:\n\nfig = plt.figure(figsize=(8, 4))\nplt.subplot(111, frameon=False, xticks=[], yticks=[])\ntable = plt.table(cellText=non_max_table_result_for3d.round(4), colLabels=object_list, rowLabels=object_list, loc='center', cellLoc='center')\nplt.subplots_adjust(left=0.3, bottom=0, top=0.95)\nfig.text(0.55, 0.75, 'Odd runs', ha='left', fontsize=12)\nfig.text(0.05, 0.52, 'Even runs', ha='left', rotation=90, fontsize=12)\nfig.text(0.16, 0.85, \"3D Correlation of non_maximal responded brain of %s\" % subid, weight='bold')\ntable.scale(1.2, 
1.2)\nplt.savefig(figure_path + \"3d_non_max_correlation_table_%s.png\" % subid)\nplt.close()\n\n# generate bar plot\nind = np.arange(8)\nwidth = 0.35\nfig = plt.figure(figsize=(10, 24))\nfor plot_num in range(8):\n i = plot_num -1\n ax = plt.subplot(8, 1, plot_num, frameon=False)\n bar_plot1 = ax.bar(ind, table_result_3d[i, :], width, color='darkgoldenrod')\n bar_plot2 = ax.bar(ind+width, non_max_table_result_for3d[i, :], width, color='tan')\n # add some label:\n ax.set_ylabel(\"Correlation\")\n ax.set_title(\"%s\" % object_list[i])\n ax.set_xticks(ind+width)\n ax.set_xticklabels(object_list)\n ax.set_yticks([-0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0])\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.axhline(0, color='black', linewidth=2)\n ax.axvline(0, color='black', linewidth=2)\nplt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.5, bottom=0.05, top=0.95)\nplt.savefig(figure_path + \"%s_3d_total_correlation_bar_both.png\" % subid)\n\n# generate individual bar plot\nind = np.arange(8)\nwidth = 0.35\nfig = plt.figure(figsize=(12, 5))\nfor i in range(8):\n ax = plt.subplot(111, frameon=False)\n bar_plot1 = ax.bar(ind, table_result_3d[i, :], width, color='darkgoldenrod')\n bar_plot2 = ax.bar(ind+width, non_max_table_result_for3d[i, :], width, color='tan')\n # add some label:\n ax.set_ylabel(\"Correlation\")\n ax.set_title(\"%s\" % object_list[i])\n ax.set_xticks(ind+width)\n ax.set_xticklabels(object_list)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.set_yticks([-0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0])\n ax.axhline(0, color='black', linewidth=2)\n ax.axvline(0, color='black', linewidth=2)\n ax.legend((bar_plot1[0], bar_plot2[0]), ('All responding voxels', 'Excluding max responded voxels'), bbox_to_anchor=(0.7, 1.06), loc=2, borderaxespad=0., fontsize=12)\n plt.savefig(figure_path + \"%s_3d_%s_total_correlation_bar_both.png\" % (subid, object_list[i]))\n plt.clf()\nplt.close()\n\n\n############################# subtract mean and do the correlation for 3D ##################################\n# subtract the mean and try the correlation:\n\n# create a copy of data to work on this analysis:\nsubtrmean_all_runs_3d = copy.deepcopy(all_runs_3d)\n\n# performing subtraction:\n# even result - odd_scramblepix\n# odd result - even_scramblepix\n\ntotal_mean_3d = np.zeros_like(subtrmean_all_runs_3d[\"%s_odd_face\" % subid])\ntotal_num_3d = 0\nfor key, result in subtrmean_all_runs_3d.items():\n nparray = np.array(result)\n total_mean_3d += nparray\n total_num_3d += 1\ntotal_mean_3d = total_mean_3d/total_num_3d\n\nsubtract_mean_result_for3d = {}\nfor key, result in subtrmean_all_runs_3d.items():\n nparray = np.array(result)\n subtract_mean_result_for3d[key] = nparray - total_mean_3d\n\n# reshape the subtract_mean_result:\nsubtract_mean_all_3d_for3d = {}\nfor key, txt in subtract_mean_result_for3d.items():\n subtract_mean_all_3d_for3d[key] = np.reshape(txt, (-1, 25, 5))\n\n# make a copy of the images for making figures:\nsubtract_mean_all_3d_fig_for3d = copy.deepcopy(subtract_mean_all_3d_for3d)\n\n# clear the background\nfor key, fig in subtract_mean_all_3d_fig_for3d.items():\n fig[fig == 0] = np.nan\n\n\n# save all 3d images as one compiled figure\nfig = plt.figure(figsize=[8.0, 12.0])\ni = 1\nfor item in object_list:\n for index in range(5):\n plt.subplot(16, 5, i, xticks=[], yticks=[])\n plt.imshow(subtract_mean_all_3d_fig_for3d[\"%s_odd_%s\" % (subid, item)][:, :, index], interpolation=\"nearest\", cmap=nice_cmap)\n if index == 2:\n 
plt.title(\"%s\" % item, fontsize=8, weight='bold')\n i += 1\nfor item in object_list:\n for index in range (5):\n plt.subplot(16, 5, i, xticks=[], yticks=[])\n plt.imshow(subtract_mean_all_3d_fig_for3d[\"%s_even_%s\" % (subid, item)][:, :, index], interpolation=\"nearest\", cmap=nice_cmap)\n if index == 2:\n plt.title(\"%s\" % item, fontsize=8, weight='bold')\n i += 1\nplt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.1, bottom=0.05, top=0.835)\n# label the figure:\n# fig.text(0.03, 0.625, 'Odd runs', ha='left', weight='bold')\n# fig.text(0.03, 0.225, 'Even runs', ha='left', weight='bold')\nfig.text(0.16, 0.93, '3D Brain images after subtracting mean for %s' % subid, fontsize=16, weight='bold')\nplt.savefig(figure_path + \"3d_subtract_mean_odd_even_compile_%s.png\" % subid)\nplt.close()\n\n\n# Run correlation:\nsubtract_mean_all_results_for3d = []\nprint (\"correlation analysis of subtracted results:\")\nfor i in odd_runs:\n result = []\n for j in even_runs:\n corr = np.corrcoef(subtract_mean_result_for3d[i], subtract_mean_result_for3d[j])\n result.append(corr[0, 1])\n print (\"%s vs %s: %.4f\" % (i, j, corr[0, 1]))\n subtract_mean_all_results_for3d.append(result)\n\nsubtract_mean_table_result_for3d = np.array(subtract_mean_all_results_for3d)\n\n# make table to display the correlation:\n\nfig = plt.figure(figsize=(8, 4))\nplt.subplot(111, frameon=False, xticks=[], yticks=[])\ntable = plt.table(cellText=subtract_mean_table_result_for3d.round(4), colLabels=object_list, rowLabels=object_list, loc='center', cellLoc='center')\nplt.subplots_adjust(left=0.3, bottom=0, top=0.95)\nfig.text(0.55, 0.75, 'Odd runs', ha='left', fontsize=12)\nfig.text(0.05, 0.52, 'Even runs', ha='left', rotation=90, fontsize=12)\nfig.text(0.3, 0.85, \"3D Correlation of mean subtracted brain images of %s\" % subid, weight='bold')\ntable.scale(1.2, 1.2)\nplt.savefig(figure_path + \"3d_subtract_mean_correlation_table_%s.png\" % subid)\nplt.close()\n\nprint (separator)\n\nprint (\"Complete!!!\")"
] | [
[
"numpy.array",
"numpy.zeros_like",
"numpy.reshape",
"matplotlib.pyplot.savefig",
"numpy.percentile",
"matplotlib.pyplot.title",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"numpy.arange",
"numpy.ravel",
"numpy.corrcoef",
"matplotlib.pyplot.clf",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.subplot"
]
] |
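The correlation script above repeats one pattern many times: flatten the odd-run and even-run activation maps per category, fill an 8x8 np.corrcoef table, then redo it after zeroing the voxels above a percentile threshold. A compact sketch of those two steps on random stand-in data; the category subset, array length and noise level are invented.

import numpy as np

rng = np.random.default_rng(0)
categories = ["bottle", "cat", "face", "house"]                      # subset, for brevity
odd = {c: rng.normal(size=400) for c in categories}                  # fake flattened voxel patterns
even = {c: odd[c] + 0.5 * rng.normal(size=400) for c in categories}

def corr_table(a, b):
    return np.array([[np.corrcoef(a[i], b[j])[0, 1] for j in categories] for i in categories])

def drop_max(pattern, q=80):
    # zero out the most responsive voxels (everything at or above the q-th percentile)
    out = pattern.copy()
    out[out >= np.percentile(out, q)] = 0
    return out

full = corr_table(odd, even)
trimmed = corr_table({c: drop_max(v) for c, v in odd.items()},
                     {c: drop_max(v) for c, v in even.items()})
print(np.round(full, 3))
print(np.round(trimmed, 3))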
jjuraska/slug2slug | [
"3a1629a05ad6204aac07c24c6250b06311bc28b2"
] | [
"data_loader.py"
] | [
"import os\nimport io\nimport random\nimport string\nimport re\nimport json\nimport pandas as pd\nimport numpy as np\nfrom collections import OrderedDict\nimport nltk\nfrom nltk import FreqDist\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem.wordnet import WordNetLemmatizer\n\nimport config\n\n\nEMPH_TOKEN = config.EMPH_TOKEN\nCONTRAST_TOKEN = config.CONTRAST_TOKEN\nCONCESSION_TOKEN = config.CONCESSION_TOKEN\n\n\n# TODO: redesign the data loading so as to be object-oriented\ndef load_training_data(data_trainset, data_devset, input_concat=False, generate_vocab=False, skip_if_exist=True):\n \"\"\"Generate source and target files in the required input format for the model training.\n \"\"\"\n\n training_source_file = os.path.join(config.DATA_DIR, 'training_source.txt')\n training_target_file = os.path.join(config.DATA_DIR, 'training_target.txt')\n dev_source_file = os.path.join(config.DATA_DIR, 'dev_source.txt')\n dev_target_file = os.path.join(config.DATA_DIR, 'dev_target.txt')\n\n if skip_if_exist:\n # If there is an existing source and target file, skip their generation\n if os.path.isfile(training_source_file) and \\\n os.path.isfile(training_target_file) and \\\n os.path.isfile(dev_source_file) and \\\n os.path.isfile(dev_target_file):\n print('Found existing input files. Skipping their generation.')\n return\n\n dataset = init_training_data(data_trainset, data_devset)\n dataset_name = dataset['dataset_name']\n x_train, y_train, x_dev, y_dev = dataset['data']\n _, _, slot_sep, val_sep, val_sep_end = dataset['separators']\n\n # Preprocess the MRs and the utterances\n x_train = [preprocess_mr(x, dataset['separators']) for x in x_train]\n x_dev = [preprocess_mr(x, dataset['separators']) for x in x_dev]\n y_train = [preprocess_utterance(y) for y in y_train]\n y_dev = [preprocess_utterance(y) for y in y_dev]\n\n # Produce sequences of extracted words from the meaning representations (MRs) in the trainset\n x_train_seq = []\n for i, mr in enumerate(x_train):\n slot_ctr = 0\n emph_idxs = set()\n # contrast_idxs = set()\n # concession_idxs = set()\n mr_dict = OrderedDict()\n\n # Extract the slot-value pairs into a dictionary\n for slot_value in mr.split(slot_sep):\n slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)\n\n if slot == EMPH_TOKEN:\n emph_idxs.add(slot_ctr)\n # elif slot == CONTRAST_TOKEN:\n # contrast_idxs.add(slot_ctr)\n # elif slot == CONCESSION_TOKEN:\n # concession_idxs.add(slot_ctr)\n else:\n mr_dict[slot] = value\n slot_ctr += 1\n\n # Delexicalize the MR and the utterance\n y_train[i] = delex_sample(mr_dict, y_train[i], dataset=dataset_name, input_concat=input_concat)\n\n slot_ctr = 0\n\n # Convert the dictionary to a list\n x_train_seq.append([])\n for key, val in mr_dict.items():\n # Insert the emphasis token where appropriate\n if slot_ctr in emph_idxs:\n x_train_seq[i].append(EMPH_TOKEN)\n # Insert the contrast token where appropriate\n # if slot_ctr in contrast_idxs:\n # x_train_seq[i].append(CONTRAST_TOKEN)\n # # Insert the concession token where appropriate\n # if slot_ctr in concession_idxs:\n # x_train_seq[i].append(CONCESSION_TOKEN)\n\n if len(val) > 0:\n x_train_seq[i].extend([key] + val.split())\n else:\n x_train_seq[i].append(key)\n\n slot_ctr += 1\n\n if input_concat:\n # Append a sequence-end token to be paired up with seq2seq's sequence-end token when concatenating\n x_train_seq[i].append('<STOP>')\n\n # Produce sequences of extracted words from the meaning representations (MRs) in the devset\n x_dev_seq = []\n for i, mr in 
enumerate(x_dev):\n slot_ctr = 0\n emph_idxs = set()\n # contrast_idxs = set()\n # concession_idxs = set()\n mr_dict = OrderedDict()\n\n # Extract the slot-value pairs into a dictionary\n for slot_value in mr.split(slot_sep):\n slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)\n\n if slot == EMPH_TOKEN:\n emph_idxs.add(slot_ctr)\n # elif slot == CONTRAST_TOKEN:\n # contrast_idxs.add(slot_ctr)\n # elif slot == CONCESSION_TOKEN:\n # concession_idxs.add(slot_ctr)\n else:\n mr_dict[slot] = value\n slot_ctr += 1\n\n # Delexicalize the MR and the utterance\n y_dev[i] = delex_sample(mr_dict, y_dev[i], dataset=dataset_name, input_concat=input_concat)\n\n slot_ctr = 0\n\n # Convert the dictionary to a list\n x_dev_seq.append([])\n for key, val in mr_dict.items():\n # Insert the emphasis token where appropriate\n if slot_ctr in emph_idxs:\n x_dev_seq[i].append(EMPH_TOKEN)\n # Insert the contrast token where appropriate\n # if slot_ctr in contrast_idxs:\n # x_dev_seq[i].append(CONTRAST_TOKEN)\n # # Insert the concession token where appropriate\n # if slot_ctr in concession_idxs:\n # x_dev_seq[i].append(CONCESSION_TOKEN)\n\n if len(val) > 0:\n x_dev_seq[i].extend([key] + val.split())\n else:\n x_dev_seq[i].append(key)\n\n slot_ctr += 1\n\n if input_concat:\n # Append a sequence-end token to be paired up with seq2seq's sequence-end token when concatenating\n x_dev_seq[i].append('<STOP>')\n\n y_train_seq = [word_tokenize(y) for y in y_train]\n y_dev_seq = [word_tokenize(y) for y in y_dev]\n\n # Generate a vocabulary file if necessary\n if generate_vocab:\n generate_vocab_file(np.concatenate(x_train_seq + x_dev_seq + y_train_seq + y_dev_seq),\n vocab_filename='vocab.lang_gen.tokens')\n # generate_vocab_file(np.concatenate(x_train_seq + x_dev_seq),\n # vocab_filename='vocab.lang_gen_multi_vocab.source')\n # generate_vocab_file(np.concatenate(y_train_seq + y_dev_seq),\n # vocab_filename='vocab.lang_gen_multi_vocab.target')\n\n with io.open(training_source_file, 'w', encoding='utf8') as f_x_train:\n for line in x_train_seq:\n f_x_train.write('{}\\n'.format(' '.join(line)))\n\n with io.open(training_target_file, 'w', encoding='utf8') as f_y_train:\n for line in y_train:\n f_y_train.write(line + '\\n')\n\n with io.open(dev_source_file, 'w', encoding='utf8') as f_x_dev:\n for line in x_dev_seq:\n f_x_dev.write('{}\\n'.format(' '.join(line)))\n\n with io.open(dev_target_file, 'w', encoding='utf8') as f_y_dev:\n for line in y_dev:\n f_y_dev.write(line + '\\n')\n\n return np.concatenate(x_train_seq + x_dev_seq + y_train_seq + y_dev_seq).flatten()\n\n\ndef load_test_data(data_testset, input_concat=False):\n \"\"\"Generate source and target files in the required input format for the model testing.\n \"\"\"\n\n test_source_file = os.path.join(config.DATA_DIR, 'test_source.txt')\n test_source_dict_file = os.path.join(config.DATA_DIR, 'test_source_dict.json')\n test_target_file = os.path.join(config.DATA_DIR, 'test_target.txt')\n test_reference_file = os.path.join(config.METRICS_DIR, 'test_references.txt')\n\n dataset = init_test_data(data_testset)\n dataset_name = dataset['dataset_name']\n x_test, y_test = dataset['data']\n _, _, slot_sep, val_sep, val_sep_end = dataset['separators']\n\n # Preprocess the MRs\n x_test = [preprocess_mr(x, dataset['separators']) for x in x_test]\n\n # Produce sequences of extracted words from the meaning representations (MRs) in the testset\n x_test_seq = []\n x_test_dict = []\n for i, mr in enumerate(x_test):\n slot_ctr = 0\n emph_idxs = set()\n # contrast_idxs 
= set()\n # concession_idxs = set()\n mr_dict = OrderedDict()\n mr_dict_cased = OrderedDict()\n\n # Extract the slot-value pairs into a dictionary\n for slot_value in mr.split(slot_sep):\n slot, value, _, value_orig = parse_slot_and_value(slot_value, val_sep, val_sep_end)\n\n if slot == EMPH_TOKEN:\n emph_idxs.add(slot_ctr)\n # elif slot == CONTRAST_TOKEN:\n # contrast_idxs.add(slot_ctr)\n # elif slot == CONCESSION_TOKEN:\n # concession_idxs.add(slot_ctr)\n else:\n mr_dict[slot] = value\n mr_dict_cased[slot] = value_orig\n slot_ctr += 1\n\n # Build an MR dictionary with original values\n x_test_dict.append(mr_dict_cased)\n\n # Delexicalize the MR\n delex_sample(mr_dict, dataset=dataset_name, mr_only=True, input_concat=input_concat)\n\n slot_ctr = 0\n\n # Convert the dictionary to a list\n x_test_seq.append([])\n for key, val in mr_dict.items():\n # Insert the emphasis token where appropriate\n if slot_ctr in emph_idxs:\n x_test_seq[i].append(EMPH_TOKEN)\n # Insert the contrast token where appropriate\n # if slot_ctr in contrast_idxs:\n # x_test_seq[i].append(CONTRAST_TOKEN)\n # # Insert the concession token where appropriate\n # if slot_ctr in concession_idxs:\n # x_test_seq[i].append(CONCESSION_TOKEN)\n\n if len(val) > 0:\n x_test_seq[i].extend([key] + val.split())\n else:\n x_test_seq[i].append(key)\n\n slot_ctr += 1\n\n if input_concat:\n # Append a sequence-end token to be paired up with seq2seq's sequence-end token when concatenating\n x_test_seq[i].append('<STOP>')\n\n with io.open(test_source_file, 'w', encoding='utf8') as f_x_test:\n for line in x_test_seq:\n f_x_test.write('{}\\n'.format(' '.join(line)))\n\n with io.open(test_source_dict_file, 'w', encoding='utf8') as f_x_test_dict:\n json.dump(x_test_dict, f_x_test_dict)\n\n if len(y_test) > 0:\n with io.open(test_target_file, 'w', encoding='utf8') as f_y_test:\n for line in y_test:\n f_y_test.write(line + '\\n')\n\n # Reference file for calculating metrics for test predictions\n with io.open(test_reference_file, 'w', encoding='utf8') as f_y_test:\n for i, line in enumerate(y_test):\n if i > 0 and x_test[i] != x_test[i - 1]:\n f_y_test.write('\\n')\n f_y_test.write(line + '\\n')\n\n\ndef generate_vocab_file(token_sequences, vocab_filename, vocab_size=10000):\n vocab_file = os.path.join(config.DATA_DIR, vocab_filename)\n\n distr = FreqDist(token_sequences)\n vocab = distr.most_common(min(len(distr), vocab_size - 3)) # cap the vocabulary size\n\n vocab_with_reserved_tokens = ['<pad>', '<EOS>'] + list(map(lambda tup: tup[0], vocab)) + ['UNK']\n\n with io.open(vocab_file, 'w', encoding='utf8') as f_vocab:\n for token in vocab_with_reserved_tokens:\n f_vocab.write('{}\\n'.format(token))\n\n\ndef get_vocabulary(token_sequences, vocab_size=10000):\n distr = FreqDist(token_sequences)\n vocab = distr.most_common(min(len(distr), vocab_size)) # cap the vocabulary size\n\n vocab_set = set(map(lambda tup: tup[0], vocab))\n\n return vocab_set\n\n\n# TODO: generalize and utilize in the loading functions\ndef tokenize_mr(mr):\n \"\"\"Produces a (delexicalized) sequence of tokens from the input MR.\n Method used in the client to preprocess a single MR that is sent to the service for utterance generation.\n \"\"\"\n\n slot_sep = ','\n val_sep = '['\n val_sep_end = ']'\n \n mr_seq = []\n slot_ctr = 0\n emph_idxs = set()\n mr_dict = OrderedDict()\n mr_dict_cased = OrderedDict()\n\n # Extract the slot-value pairs into a dictionary\n for slot_value in mr.split(slot_sep):\n slot, value, _, value_orig = parse_slot_and_value(slot_value, val_sep, 
val_sep_end)\n\n if slot == EMPH_TOKEN:\n emph_idxs.add(slot_ctr)\n else:\n mr_dict[slot] = value\n mr_dict_cased[slot] = value_orig\n slot_ctr += 1\n\n # Delexicalize the MR\n delex_sample(mr_dict, mr_only=True)\n\n slot_ctr = 0\n\n # Convert the dictionary to a list\n for key, val in mr_dict.items():\n # Insert the emphasis token where appropriate\n if slot_ctr in emph_idxs:\n mr_seq.append(EMPH_TOKEN)\n\n if len(val) > 0:\n mr_seq.extend([key] + val.split())\n else:\n mr_seq.append(key)\n\n slot_ctr += 1\n\n return mr_seq, mr_dict_cased\n\n\ndef load_training_data_for_eval(data_trainset, data_model_outputs_train, vocab_size, max_input_seq_len, max_output_seq_len, delex=False):\n dataset_name = ''\n slot_sep = ''\n val_sep = ''\n val_sep_end = None\n\n if '/rest_e2e/' in data_trainset or '\\\\rest_e2e\\\\' in data_trainset:\n x_train, y_train_1 = read_rest_e2e_dataset_train(data_trainset)\n y_train_2 = read_predictions(data_model_outputs_train)\n dataset_name = 'rest_e2e'\n slot_sep = ','\n val_sep = '['\n val_sep_end = ']'\n elif '/tv/' in data_trainset or '\\\\tv\\\\' in data_trainset:\n x_train, y_train_1, y_train_2 = read_tv_dataset_train(data_trainset)\n if data_model_outputs_train is not None:\n y_train_2 = read_predictions(data_model_outputs_train)\n dataset_name = 'tv'\n slot_sep = ';'\n val_sep = '='\n elif '/laptop/' in data_trainset or '\\\\laptop\\\\' in data_trainset:\n x_train, y_train_1, y_train_2 = read_laptop_dataset_train(data_trainset)\n if data_model_outputs_train is not None:\n y_train_2 = read_predictions(data_model_outputs_train)\n dataset_name = 'laptop'\n slot_sep = ';'\n val_sep = '='\n else:\n raise FileNotFoundError\n\n # parse the utterances into lists of words\n y_train_1 = [preprocess_utterance(y) for y in y_train_1]\n y_train_2 = [preprocess_utterance(y) for y in y_train_2]\n \n\n # produce sequences of extracted words from the meaning representations (MRs) in the trainset\n x_train_seq = []\n for i, mr in enumerate(x_train):\n mr_dict = OrderedDict()\n for slot_value in mr.split(slot_sep):\n slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)\n mr_dict[slot] = value\n\n if delex == True:\n # delexicalize the MR and the utterance\n y_train_1[i] = delex_sample(mr_dict, y_train_1[i], dataset=dataset_name, utterance_only=True)\n y_train_2[i] = delex_sample(mr_dict, y_train_2[i], dataset=dataset_name)\n\n # convert the dictionary to a list\n x_train_seq.append([])\n for key, val in mr_dict.items():\n if len(val) > 0:\n x_train_seq[i].extend([key, val])\n else:\n x_train_seq[i].append(key)\n\n\n # create source vocabulary\n if os.path.isfile('data/eval_vocab_source.json'):\n with io.open('data/eval_vocab_source.json', 'r', encoding='utf8') as f_x_vocab:\n x_vocab = json.load(f_x_vocab)\n else:\n x_distr = FreqDist([x_token for x in x_train_seq for x_token in x])\n x_vocab = x_distr.most_common(min(len(x_distr), vocab_size - 2)) # cap the vocabulary size\n with io.open('data/eval_vocab_source.json', 'w', encoding='utf8') as f_x_vocab:\n json.dump(x_vocab, f_x_vocab, ensure_ascii=False)\n\n x_idx2word = [word[0] for word in x_vocab]\n x_idx2word.insert(0, '<PADDING>')\n x_idx2word.append('<NA>')\n x_word2idx = {word: idx for idx, word in enumerate(x_idx2word)}\n\n # create target vocabulary\n if os.path.isfile('data/eval_vocab_target.json'):\n with io.open('data/eval_vocab_target.json', 'r', encoding='utf8') as f_y_vocab:\n y_vocab = json.load(f_y_vocab)\n else:\n y_distr = FreqDist([y_token for y in y_train_1 for y_token in y] + [y_token 
for y in y_train_2 for y_token in y])\n y_vocab = y_distr.most_common(min(len(y_distr), vocab_size - 2)) # cap the vocabulary size\n with io.open('data/eval_vocab_target.json', 'w', encoding='utf8') as f_y_vocab:\n json.dump(y_vocab, f_y_vocab, ensure_ascii=False)\n\n y_idx2word = [word[0] for word in y_vocab]\n y_idx2word.insert(0, '<PADDING>')\n y_idx2word.append('<NA>')\n y_word2idx = {token: idx for idx, token in enumerate(y_idx2word)}\n\n\n # produce sequences of indexes from the MRs in the training set\n x_train_enc = token_seq_to_idx_seq(x_train_seq, x_word2idx, max_input_seq_len)\n\n # produce sequences of indexes from the utterances in the training set\n y_train_1_enc = token_seq_to_idx_seq(y_train_1, y_word2idx, max_output_seq_len)\n\n # produce sequences of indexes from the utterances in the training set\n y_train_2_enc = token_seq_to_idx_seq(y_train_2, y_word2idx, max_output_seq_len)\n\n # produce the list of the target labels in the training set\n labels_train = np.concatenate((np.ones(len(y_train_1_enc)), np.zeros(len(y_train_2_enc))))\n\n\n return (np.concatenate((np.array(x_train_enc), np.array(x_train_enc))),\n np.concatenate((np.array(y_train_1_enc), np.array(y_train_2_enc))),\n labels_train)\n\n\ndef load_dev_data_for_eval(data_devset, data_model_outputs_dev, vocab_size, max_input_seq_len, max_output_seq_len, delex=True):\n dataset_name = ''\n slot_sep = ''\n val_sep = ''\n val_sep_end = None\n\n if '/rest_e2e/' in data_devset or '\\\\rest_e2e\\\\' in data_devset:\n x_dev, y_dev_1 = read_rest_e2e_dataset_dev(data_devset)\n y_dev_2 = read_predictions(data_model_outputs_dev)\n dataset_name = 'rest_e2e'\n slot_sep = ','\n val_sep = '['\n val_sep_end = ']'\n elif '/tv/' in data_devset or '\\\\tv\\\\' in data_devset:\n x_dev, y_dev_1, y_dev_2 = read_tv_dataset_dev(data_devset)\n if data_model_outputs_dev is not None:\n y_dev_2 = read_predictions(data_model_outputs_dev)\n dataset_name = 'tv'\n slot_sep = ';'\n val_sep = '='\n elif '/laptop/' in data_devset or '\\\\laptop\\\\' in data_devset:\n x_dev, y_dev_1, y_dev_2 = read_laptop_dataset_dev(data_devset)\n if data_model_outputs_dev is not None:\n y_dev_2 = read_predictions(data_model_outputs_dev)\n dataset_name = 'laptop'\n slot_sep = ';'\n val_sep = '='\n else:\n raise FileNotFoundError\n\n # parse the utterances into lists of words\n y_dev_1 = [preprocess_utterance(y) for y in y_dev_1]\n y_dev_2 = [preprocess_utterance(y) for y in y_dev_2]\n \n\n # produce sequences of extracted words from the meaning representations (MRs) in the devset\n x_dev_seq = []\n for i, mr in enumerate(x_dev):\n mr_dict = OrderedDict()\n for slot_value in mr.split(slot_sep):\n slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)\n mr_dict[slot] = value\n \n if delex == True:\n # delexicalize the MR and the utterance\n y_dev_1[i] = delex_sample(mr_dict, y_dev_1[i], dataset=dataset_name, utterance_only=True)\n y_dev_2[i] = delex_sample(mr_dict, y_dev_2[i], dataset=dataset_name)\n\n # convert the dictionary to a list\n x_dev_seq.append([])\n for key, val in mr_dict.items():\n if len(val) > 0:\n x_dev_seq[i].extend([key, val])\n else:\n x_dev_seq[i].append(key)\n\n\n # load the source vocabulary\n with io.open('data/eval_vocab_source.json', 'r', encoding='utf8') as f_x_vocab:\n x_vocab = json.load(f_x_vocab)\n\n x_idx2word = [word[0] for word in x_vocab]\n x_idx2word.insert(0, '<PADDING>')\n x_idx2word.append('<NA>')\n x_word2idx = {word: idx for idx, word in enumerate(x_idx2word)}\n\n # load the target vocabulary\n with 
io.open('data/eval_vocab_target.json', 'r', encoding='utf8') as f_y_vocab:\n y_vocab = json.load(f_y_vocab)\n\n y_idx2word = [word[0] for word in y_vocab]\n y_idx2word.insert(0, '<PADDING>')\n y_idx2word.append('<NA>')\n y_word2idx = {token: idx for idx, token in enumerate(y_idx2word)}\n \n\n # produce sequences of indexes from the MRs in the devset\n x_dev_enc = token_seq_to_idx_seq(x_dev_seq, x_word2idx, max_input_seq_len)\n\n # produce sequences of indexes from the utterances in the devset\n y_dev_1_enc = token_seq_to_idx_seq(y_dev_1, y_word2idx, max_output_seq_len)\n\n # produce sequences of indexes from the utterances in the devset\n y_dev_2_enc = token_seq_to_idx_seq(y_dev_2, y_word2idx, max_output_seq_len)\n\n # produce the list of the target labels in the devset\n labels_dev = np.concatenate((np.ones(len(y_dev_1_enc)), np.zeros(len(y_dev_2_enc))))\n\n\n return (np.concatenate((np.array(x_dev_enc), np.array(x_dev_enc))),\n np.concatenate((np.array(y_dev_1_enc), np.array(y_dev_2_enc))),\n labels_dev)\n\n\ndef load_test_data_for_eval(data_testset, data_model_outputs_test, vocab_size, max_input_seq_len, max_output_seq_len, delex=False):\n dataset_name = ''\n slot_sep = ''\n val_sep = ''\n val_sep_end = None\n\n if '/rest_e2e/' in data_testset or '\\\\rest_e2e\\\\' in data_testset:\n x_test, _ = read_rest_e2e_dataset_test(data_testset)\n y_test = read_predictions(data_model_outputs_test)\n dataset_name = 'rest_e2e'\n slot_sep = ','\n val_sep = '['\n val_sep_end = ']'\n elif '/tv/' in data_testset or '\\\\tv\\\\' in data_testset:\n x_test, _, y_test = read_tv_dataset_test(data_testset)\n if data_model_outputs_test is not None:\n y_test = read_predictions(data_model_outputs_test)\n dataset_name = 'tv'\n slot_sep = ';'\n val_sep = '='\n elif '/laptop/' in data_testset or '\\\\laptop\\\\' in data_testset:\n x_test, _, y_test = read_laptop_dataset_test(data_testset)\n if data_model_outputs_test is not None:\n y_test = read_predictions(data_model_outputs_test)\n dataset_name = 'laptop'\n slot_sep = ';'\n val_sep = '='\n else:\n raise FileNotFoundError\n\n # parse the utterances into lists of words\n y_test = [preprocess_utterance(y) for y in y_test]\n #y_test_1 = [preprocess_utterance(y) for y in y_test_1]\n #y_test_2 = [preprocess_utterance(y) for y in y_test_2]\n \n\n # produce sequences of extracted words from the meaning representations (MRs) in the testset\n x_test_seq = []\n for i, mr in enumerate(x_test):\n mr_dict = OrderedDict()\n for slot_value in mr.split(slot_sep):\n slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)\n mr_dict[slot] = value\n\n if delex == True:\n # delexicalize the MR and the utterance\n y_test[i] = delex_sample(mr_dict, y_test[i], dataset=dataset_name)\n #y_test_1[i] = delex_sample(mr_dict, y_test_1[i], dataset=dataset_name, utterance_only=True)\n #y_test_2[i] = delex_sample(mr_dict, y_test_2[i], dataset=dataset_name)\n\n # convert the dictionary to a list\n x_test_seq.append([])\n for key, val in mr_dict.items():\n if len(val) > 0:\n x_test_seq[i].extend([key, val])\n else:\n x_test_seq[i].append(key)\n\n\n # load the source vocabulary\n with io.open('data/eval_vocab_source.json', 'r', encoding='utf8') as f_x_vocab:\n x_vocab = json.load(f_x_vocab)\n\n x_idx2word = [word[0] for word in x_vocab]\n x_idx2word.insert(0, '<PADDING>')\n x_idx2word.append('<NA>')\n x_word2idx = {word: idx for idx, word in enumerate(x_idx2word)}\n\n # load the target vocabulary\n with io.open('data/eval_vocab_target.json', 'r', encoding='utf8') as f_y_vocab:\n 
y_vocab = json.load(f_y_vocab)\n\n y_idx2word = [word[0] for word in y_vocab]\n y_idx2word.insert(0, '<PADDING>')\n y_idx2word.append('<NA>')\n y_word2idx = {token: idx for idx, token in enumerate(y_idx2word)}\n\n\n # produce sequences of indexes from the MRs in the test set\n x_test_enc = token_seq_to_idx_seq(x_test_seq, x_word2idx, max_input_seq_len)\n\n # produce sequences of indexes from the utterances in the test set\n y_test_enc = token_seq_to_idx_seq(y_test, y_word2idx, max_output_seq_len)\n #y_test_1_enc = token_seq_to_idx_seq(y_test_1, y_word2idx, max_output_seq_len)\n #y_test_2_enc = token_seq_to_idx_seq(y_test_2, y_word2idx, max_output_seq_len)\n\n # produce the list of the target labels in the test set\n labels_test = np.ones(len(y_test_enc))\n #labels_test = np.concatenate((np.ones(len(y_test_1_enc)), np.zeros(len(y_test_2_enc))))\n\n\n return (np.array(x_test_enc),\n np.array(y_test_enc),\n labels_test,\n x_idx2word,\n y_idx2word)\n\n #return (np.concatenate((np.array(x_test_enc), np.array(x_test_enc))),\n # np.concatenate((np.array(y_test_1_enc), np.array(y_test_2_enc))),\n # labels_test,\n # x_idx2word,\n # y_idx2word)\n\n\n# ---- AUXILIARY FUNCTIONS ----\n\n\ndef init_training_data(data_trainset, data_devset):\n if 'rest_e2e' in data_trainset and 'rest_e2e' in data_devset:\n x_train, y_train = read_rest_e2e_dataset_train(data_trainset)\n x_dev, y_dev = read_rest_e2e_dataset_dev(data_devset)\n dataset_name = 'rest_e2e'\n da_sep = '('\n da_sep_end = ')'\n slot_sep = ', '\n val_sep = '['\n val_sep_end = ']'\n elif 'video_game' in data_trainset and 'video_game' in data_devset:\n x_train, y_train = read_video_game_dataset_train(data_trainset)\n x_dev, y_dev = read_video_game_dataset_dev(data_devset)\n dataset_name = 'video_game'\n da_sep = '('\n da_sep_end = ')'\n slot_sep = ', '\n val_sep = '['\n val_sep_end = ']'\n elif 'tv' in data_trainset and 'tv' in data_devset:\n x_train, y_train, _ = read_tv_dataset_train(data_trainset)\n x_dev, y_dev, _ = read_tv_dataset_dev(data_devset)\n dataset_name = 'tv'\n da_sep = '('\n da_sep_end = ')'\n slot_sep = ';'\n val_sep = '='\n val_sep_end = None\n elif 'laptop' in data_trainset and 'laptop' in data_devset:\n x_train, y_train, _ = read_laptop_dataset_train(data_trainset)\n x_dev, y_dev, _ = read_laptop_dataset_dev(data_devset)\n dataset_name = 'laptop'\n da_sep = '('\n da_sep_end = ')'\n slot_sep = ';'\n val_sep = '='\n val_sep_end = None\n elif 'hotel' in data_trainset and 'hotel' in data_devset:\n x_train, y_train, _ = read_hotel_dataset_train(data_trainset)\n x_dev, y_dev, _ = read_hotel_dataset_dev(data_devset)\n dataset_name = 'hotel'\n da_sep = '('\n da_sep_end = ')'\n slot_sep = ';'\n val_sep = '='\n val_sep_end = None\n else:\n raise ValueError('Unexpected file name or path: {0}, {1}'.format(data_trainset, data_devset))\n\n return {\n 'dataset_name': dataset_name,\n 'data': (x_train, y_train, x_dev, y_dev),\n 'separators': (da_sep, da_sep_end, slot_sep, val_sep, val_sep_end)\n }\n\n\ndef init_test_data(data_testset):\n if 'rest_e2e' in data_testset:\n x_test, y_test = read_rest_e2e_dataset_test(data_testset)\n dataset_name = 'rest_e2e'\n da_sep = '('\n da_sep_end = ')'\n slot_sep = ', '\n val_sep = '['\n val_sep_end = ']'\n elif 'video_game' in data_testset:\n x_test, y_test = read_video_game_dataset_test(data_testset)\n dataset_name = 'video_game'\n da_sep = '('\n da_sep_end = ')'\n slot_sep = ', '\n val_sep = '['\n val_sep_end = ']'\n elif 'tv' in data_testset:\n x_test, y_test, _ = read_tv_dataset_test(data_testset)\n 
dataset_name = 'tv'\n da_sep = '('\n da_sep_end = ')'\n slot_sep = ';'\n val_sep = '='\n val_sep_end = None\n elif 'laptop' in data_testset:\n x_test, y_test, _ = read_laptop_dataset_test(data_testset)\n dataset_name = 'laptop'\n da_sep = '('\n da_sep_end = ')'\n slot_sep = ';'\n val_sep = '='\n val_sep_end = None\n elif 'hotel' in data_testset:\n x_test, y_test, _ = read_hotel_dataset_test(data_testset)\n dataset_name = 'hotel'\n da_sep = '('\n da_sep_end = ')'\n slot_sep = ';'\n val_sep = '='\n val_sep_end = None\n else:\n raise ValueError('Unexpected file name or path: {0}'.format(data_testset))\n\n return {\n 'dataset_name': dataset_name,\n 'data': (x_test, y_test),\n 'separators': (da_sep, da_sep_end, slot_sep, val_sep, val_sep_end)\n }\n\n\ndef read_rest_e2e_dataset_train(data_trainset):\n # read the training data from file\n df_train = pd.read_csv(data_trainset, header=0, encoding='utf8') # names=['mr', 'ref']\n x_train = df_train.mr.tolist()\n y_train = df_train.ref.tolist()\n\n return x_train, y_train\n\n\ndef read_rest_e2e_dataset_dev(data_devset):\n # read the development data from file\n df_dev = pd.read_csv(data_devset, header=0, encoding='utf8') # names=['mr', 'ref']\n x_dev = df_dev.mr.tolist()\n y_dev = df_dev.ref.tolist()\n\n return x_dev, y_dev\n\n\ndef read_rest_e2e_dataset_test(data_testset):\n # read the test data from file\n df_test = pd.read_csv(data_testset, header=0, encoding='utf8') # names=['mr', 'ref']\n x_test = df_test.iloc[:, 0].tolist()\n y_test = []\n if df_test.shape[1] > 1:\n y_test = df_test.iloc[:, 1].tolist()\n\n return x_test, y_test\n\n\ndef read_video_game_dataset_train(data_trainset):\n # read the training data from file\n df_train = pd.read_csv(data_trainset, header=0, encoding='utf8') # names=['mr', 'ref']\n x_train = df_train.mr.tolist()\n y_train = df_train.ref.tolist()\n\n return x_train, y_train\n\n\ndef read_video_game_dataset_dev(data_devset):\n # read the development data from file\n df_dev = pd.read_csv(data_devset, header=0, encoding='utf8') # names=['mr', 'ref']\n x_dev = df_dev.mr.tolist()\n y_dev = df_dev.ref.tolist()\n\n return x_dev, y_dev\n\n\ndef read_video_game_dataset_test(data_testset):\n # read the test data from file\n df_test = pd.read_csv(data_testset, header=0, encoding='utf8') # names=['mr', 'ref']\n x_test = df_test.iloc[:, 0].tolist()\n y_test = []\n if df_test.shape[1] > 1:\n y_test = df_test.iloc[:, 1].tolist()\n\n return x_test, y_test\n\n\ndef read_tv_dataset_train(path_to_trainset):\n with io.open(path_to_trainset, encoding='utf8') as f_trainset:\n # Skip the comment block at the beginning of the file\n f_trainset, _ = skip_comment_block(f_trainset, '#')\n\n # read the training data from file\n df_train = pd.read_json(f_trainset, encoding='utf8')\n\n x_train = df_train.iloc[:, 0].tolist()\n y_train = df_train.iloc[:, 1].tolist()\n y_train_alt = df_train.iloc[:, 2].tolist()\n\n # TODO: remove from here and use the universal DA extraction instead\n # transform the MR to contain the DA type as the first slot\n for i, mr in enumerate(x_train):\n x_train[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')\n \n # convert plural nouns to \"[noun] -s\" or \"[noun] -es\" form\n for i, utt in enumerate(y_train):\n y_train[i] = replace_plural_nouns(utt)\n for i, utt in enumerate(y_train_alt):\n y_train_alt[i] = replace_plural_nouns(utt)\n \n return x_train, y_train, y_train_alt\n\n\ndef read_tv_dataset_dev(path_to_devset):\n with io.open(path_to_devset, encoding='utf8') as f_devset:\n # Skip the comment block at the beginning 
of the file\n f_devset, _ = skip_comment_block(f_devset, '#')\n\n # read the development data from file\n df_dev = pd.read_json(f_devset, encoding='utf8')\n\n x_dev = df_dev.iloc[:, 0].tolist()\n y_dev = df_dev.iloc[:, 1].tolist()\n y_dev_alt = df_dev.iloc[:, 2].tolist()\n\n # TODO: remove from here and use the universal DA extraction instead\n # transform the MR to contain the DA type as the first slot\n for i, mr in enumerate(x_dev):\n x_dev[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')\n \n # convert plural nouns to \"[noun] -s\" or \"[noun] -es\" form\n for i, utt in enumerate(y_dev):\n y_dev[i] = replace_plural_nouns(utt)\n for i, utt in enumerate(y_dev_alt):\n y_dev_alt[i] = replace_plural_nouns(utt)\n\n return x_dev, y_dev, y_dev_alt\n\n\ndef read_tv_dataset_test(path_to_testset):\n with io.open(path_to_testset, encoding='utf8') as f_testset:\n # Skip the comment block at the beginning of the file\n f_testset, _ = skip_comment_block(f_testset, '#')\n\n # read the test data from file\n df_test = pd.read_json(f_testset, encoding='utf8')\n\n x_test = df_test.iloc[:, 0].tolist()\n y_test = df_test.iloc[:, 1].tolist()\n y_test_alt = df_test.iloc[:, 2].tolist()\n\n # TODO: remove from here and use the universal DA extraction instead\n # transform the MR to contain the DA type as the first slot\n for i, mr in enumerate(x_test):\n x_test[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')\n\n return x_test, y_test, y_test_alt\n\n\ndef read_laptop_dataset_train(path_to_trainset):\n with io.open(path_to_trainset, encoding='utf8') as f_trainset:\n # Skip the comment block at the beginning of the file\n f_trainset, _ = skip_comment_block(f_trainset, '#')\n\n # read the training data from file\n df_train = pd.read_json(f_trainset, encoding='utf8')\n\n x_train = df_train.iloc[:, 0].tolist()\n y_train = df_train.iloc[:, 1].tolist()\n y_train_alt = df_train.iloc[:, 2].tolist()\n\n # TODO: remove from here and use the universal DA extraction instead\n # transform the MR to contain the DA type as the first slot\n for i, mr in enumerate(x_train):\n x_train[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')\n\n return x_train, y_train, y_train_alt\n\n\ndef read_laptop_dataset_dev(path_to_devset):\n with io.open(path_to_devset, encoding='utf8') as f_devset:\n # Skip the comment block at the beginning of the file\n f_devset, _ = skip_comment_block(f_devset, '#')\n\n # read the development data from file\n df_dev = pd.read_json(f_devset, encoding='utf8')\n\n x_dev = df_dev.iloc[:, 0].tolist()\n y_dev = df_dev.iloc[:, 1].tolist()\n y_dev_alt = df_dev.iloc[:, 2].tolist()\n\n # TODO: remove from here and use the universal DA extraction instead\n # transform the MR to contain the DA type as the first slot\n for i, mr in enumerate(x_dev):\n x_dev[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')\n\n return x_dev, y_dev, y_dev_alt\n\n\ndef read_laptop_dataset_test(path_to_testset):\n with io.open(path_to_testset, encoding='utf8') as f_testset:\n # Skip the comment block at the beginning of the file\n f_testset, _ = skip_comment_block(f_testset, '#')\n\n # read the test data from file\n df_test = pd.read_json(f_testset, encoding='utf8')\n\n x_test = df_test.iloc[:, 0].tolist()\n y_test = df_test.iloc[:, 1].tolist()\n y_test_alt = df_test.iloc[:, 2].tolist()\n\n # TODO: remove from here and use the universal DA extraction instead\n # transform the MR to contain the DA type as the first slot\n for i, mr in enumerate(x_test):\n x_test[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')\n\n return x_test, 
y_test, y_test_alt\n\n\ndef read_hotel_dataset_train(path_to_trainset):\n with io.open(path_to_trainset, encoding='utf8') as f_trainset:\n # Skip the comment block at the beginning of the file\n f_trainset, _ = skip_comment_block(f_trainset, '#')\n\n # read the training data from file\n df_train = pd.read_json(f_trainset, encoding='utf8')\n\n x_train = df_train.iloc[:, 0].tolist()\n y_train = df_train.iloc[:, 1].tolist()\n y_train_alt = df_train.iloc[:, 2].tolist()\n\n # TODO: remove from here and use the universal DA extraction instead\n # transform the MR to contain the DA type as the first slot\n for i, mr in enumerate(x_train):\n x_train[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')\n\n return x_train, y_train, y_train_alt\n\n\ndef read_hotel_dataset_dev(path_to_devset):\n with io.open(path_to_devset, encoding='utf8') as f_devset:\n # Skip the comment block at the beginning of the file\n f_devset, _ = skip_comment_block(f_devset, '#')\n\n # read the development data from file\n df_dev = pd.read_json(f_devset, encoding='utf8')\n\n x_dev = df_dev.iloc[:, 0].tolist()\n y_dev = df_dev.iloc[:, 1].tolist()\n y_dev_alt = df_dev.iloc[:, 2].tolist()\n\n # TODO: remove from here and use the universal DA extraction instead\n # transform the MR to contain the DA type as the first slot\n for i, mr in enumerate(x_dev):\n x_dev[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')\n\n return x_dev, y_dev, y_dev_alt\n\n\ndef read_hotel_dataset_test(path_to_testset):\n with io.open(path_to_testset, encoding='utf8') as f_testset:\n # Skip the comment block at the beginning of the file\n f_testset, _ = skip_comment_block(f_testset, '#')\n\n # read the test data from file\n df_test = pd.read_json(f_testset, encoding='utf8')\n\n x_test = df_test.iloc[:, 0].tolist()\n y_test = df_test.iloc[:, 1].tolist()\n y_test_alt = df_test.iloc[:, 2].tolist()\n\n # TODO: remove from here and use the universal DA extraction instead\n # transform the MR to contain the DA type as the first slot\n for i, mr in enumerate(x_test):\n x_test[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')\n\n return x_test, y_test, y_test_alt\n\n\ndef read_predictions(path_to_predictions):\n # read the test data from file\n with io.open(path_to_predictions, encoding='utf8') as f_predictions:\n y_pred = f_predictions.readlines()\n\n return y_pred\n\n\ndef skip_comment_block(fd, comment_symbol):\n \"\"\"Reads the initial lines of the file (represented by the file descriptor) corresponding to a comment block.\n All consecutive lines starting with the given symbol are considered to be part of the comment block.\n \"\"\"\n\n comment_block = ''\n\n line_beg = fd.tell()\n line = fd.readline()\n while line != '':\n if not line.startswith(comment_symbol):\n fd.seek(line_beg)\n break\n\n comment_block += line\n line_beg = fd.tell()\n line = fd.readline()\n\n return fd, comment_block\n\n\ndef replace_plural_nouns(utt):\n stemmer = WordNetLemmatizer()\n\n pos_tags = nltk.pos_tag(nltk.word_tokenize(utt))\n tokens_to_replace = []\n tokens_new = []\n\n for token, tag in pos_tags:\n #if tag == 'NNS':\n if token in ['inches', 'watts']:\n tokens_to_replace.append(token)\n tokens_new.append(split_plural_noun(token, stemmer))\n \n for token_to_replace, token_new in zip(tokens_to_replace, tokens_new):\n utt = utt.replace(token_to_replace, token_new)\n\n return utt\n\n\ndef split_plural_noun(word, stemmer):\n stem = stemmer.lemmatize(word)\n if stem not in word or stem == word:\n return word\n\n suffix = word.replace(stem, '')\n\n return stem + ' -' + 
suffix\n\n\ndef replace_commas_in_mr_values(mr, val_sep, val_sep_end):\n mr_new = ''\n val_beg_cnt = 0\n val_end_cnt = 0\n\n for c in mr:\n # If comma inside a value, replace the comma with placeholder\n if c == ',' and val_beg_cnt > val_end_cnt:\n mr_new += config.COMMA_PLACEHOLDER\n continue\n\n # Keep track of value beginning and end\n if c == val_sep:\n val_beg_cnt += 1\n elif c == val_sep_end:\n val_end_cnt += 1\n\n mr_new += c\n\n return mr_new\n\n\ndef put_back_commas_in_mr_values(mrs):\n return [mr.replace(config.COMMA_PLACEHOLDER, ',') for mr in mrs]\n\n\ndef preprocess_da_in_mr(mr, separators):\n # Unpack separators\n da_sep, da_sep_end, slot_sep, val_sep, val_sep_end = separators\n\n # If no DA indication is expected in the data, return the MR unchanged\n if da_sep is None:\n return mr\n\n # Verify if DA type is indicated at the beginning of the MR\n da_sep_idx = mr.find(da_sep)\n slot_sep_idx = mr.find(slot_sep)\n val_sep_idx = mr.find(val_sep)\n if da_sep_idx < 0 or 0 <= slot_sep_idx < da_sep_idx or 0 <= val_sep_idx < da_sep_idx:\n return mr\n\n # Extract the DA type from the beginning of the MR\n da_type = mr[:da_sep_idx].lstrip('?') # Strip the '?' symbol present in Laptop and TV datasets\n slot_value_pairs = mr[da_sep_idx + 1:]\n if da_sep_end is not None:\n slot_value_pairs = slot_value_pairs.rstrip(da_sep_end)\n\n # Convert the extracted DA to the slot-value form and prepend it to the remainder of the MR\n mr_new = 'da' + val_sep + da_type\n if val_sep_end is not None:\n mr_new += val_sep_end\n if len(slot_value_pairs) > 0:\n mr_new += slot_sep + slot_value_pairs\n\n return mr_new\n\n\n# TODO: merge with the above function\ndef preprocess_mr_for_tv_laptop(mr, da_sep, slot_sep, val_sep):\n sep_idx = mr.find(da_sep)\n da_type = mr[:sep_idx].lstrip('?')\n slot_value_pairs = mr[sep_idx:].strip('()')\n\n mr_new = 'da=' + da_type\n if len(slot_value_pairs) > 0:\n mr_new += slot_sep + slot_value_pairs\n\n mr_modified = ''\n for slot_value in mr_new.split(slot_sep):\n slot, _, _, value_orig = parse_slot_and_value(slot_value, val_sep)\n # If the value is enclosed in apostrophes, remove them\n if value_orig.startswith('\\'') and value_orig.endswith('\\''):\n value_orig = value_orig[1:-1]\n\n mr_modified += slot + val_sep + value_orig + slot_sep\n\n mr_new = mr_modified[:-1]\n\n if da_type in ['compare', 'suggest']:\n slot_counts = {}\n mr_modified = ''\n for slot_value in mr_new.split(slot_sep):\n slot, _, _, value_orig = parse_slot_and_value(slot_value, val_sep)\n if slot in ['da', 'position']:\n mr_modified += slot\n else:\n slot_counts[slot] = slot_counts.get(slot, 0) + 1\n mr_modified += slot + str(slot_counts[slot])\n\n mr_modified += val_sep + value_orig + slot_sep\n\n mr_new = mr_modified[:-1]\n\n return mr_new\n\n\ndef preprocess_mr(mr, separators):\n # Transform the MR to list the DA type as the first slot, if its indication is present in the MR\n mr_new = preprocess_da_in_mr(mr, separators)\n\n # Replace commas in values if comma is the slot separator\n if separators[2].strip() == ',' and separators[4] is not None:\n mr_new = replace_commas_in_mr_values(mr_new, separators[3], separators[4])\n\n return mr_new\n\n\ndef preprocess_utterance(utt):\n return ' '.join(word_tokenize(utt.lower()))\n\n\ndef parse_slot_and_value(slot_value, val_sep, val_sep_end=None):\n sep_idx = slot_value.find(val_sep)\n if sep_idx > -1:\n # Parse the slot\n slot = slot_value[:sep_idx].strip()\n # Parse the value\n if val_sep_end is not None:\n value = slot_value[sep_idx + 1:-1].strip()\n else:\n 
value = slot_value[sep_idx + 1:].strip()\n else:\n # Parse the slot\n if val_sep_end is not None:\n slot = slot_value[:-1].strip()\n else:\n slot = slot_value.strip()\n # Set the value to the empty string\n value = ''\n\n slot_processed = slot.replace(' ', '').lower()\n if not slot_processed.startswith('__'):\n slot_processed = slot_processed.replace('_', '')\n\n value = value.replace(config.COMMA_PLACEHOLDER, ',')\n # TODO: fix the cases where a period is in the value\n # TODO: (e.g., the suggest DA file (2 slots) or verify_attribute DA file (4 slots) in the video game dataset)\n value_processed = ' '.join(word_tokenize(value.lower()))\n\n return slot_processed, value_processed, slot, value\n\n\ndef delex_sample(mr, utterance=None, dataset=None, slots_to_delex=None, mr_only=False, input_concat=False, utterance_only=False):\n \"\"\"Delexicalizes a single sample (MR and the corresponding utterance).\n By default, the slots 'name', 'near' and 'food' are delexicalized (for the E2E dataset).\n\n All fields (E2E): name, near, area, food, customer rating, familyFriendly, eatType, priceRange\n \"\"\"\n\n if not mr_only and utterance is None:\n raise ValueError('the \\'utterance\\' argument must be provided when \\'mr_only\\' is False.')\n\n if slots_to_delex is not None:\n delex_slots = slots_to_delex\n else:\n if dataset == 'rest_e2e':\n delex_slots = ['name', 'near', 'food']\n # delex_slots = ['name', 'releaseyear', 'expreleasedate', 'developer'] # counterfeit video_game\n elif dataset == 'video_game':\n delex_slots = ['name', 'releaseyear', 'expreleasedate', 'developer']\n elif dataset == 'tv':\n delex_slots = ['name', 'family', 'hdmiport', 'screensize', 'price', 'audio', 'resolution', 'powerconsumption', 'color', 'count']\n elif dataset == 'laptop':\n delex_slots = ['name', 'family', 'processor', 'memory', 'drive', 'battery', 'weight', 'dimension', 'design', 'platform', 'warranty', 'count']\n elif dataset == 'hotel':\n delex_slots = ['name', 'address', 'postcode', 'area', 'near', 'phone', 'count']\n else:\n # By default, assume the dataset is 'rest_e2e'\n delex_slots = ['name', 'near', 'food']\n\n # Sort the slots to be delexed in a descending order of their value's length (to avoid delexing of a value that is\n # a substring of another value to be delexed)\n delex_slots_sorted = [(s, v) for s, v in mr.items()\n if s.rstrip(string.digits) in delex_slots and v not in ['dontcare', 'none', '']]\n delex_slots_sorted = [s for s, v in sorted(delex_slots_sorted, key=lambda x: len(x[1]), reverse=True)]\n\n mr_update = {}\n\n # for slot, value in mr.items():\n for slot in delex_slots_sorted:\n value = mr[slot]\n if value not in ['dontcare', 'none', '']:\n # Assemble a placeholder token for the value\n placeholder = create_placeholder(slot, value)\n\n values_alt = [value]\n # Specify special rules for individual slots, including alternative representations of the values\n if slot == 'address':\n if 'street' in value:\n values_alt.append(re.sub(r'\\b{}\\b'.format('street'), 'st', value))\n elif 'avenue' in value:\n values_alt.append(re.sub(r'\\b{}\\b'.format('avenue'), 'ave', value))\n elif slot == 'name':\n # If name is contained in the developer slot value, delexicalize the developer slot first\n if not mr_only and 'developer' in mr and value in mr['developer']:\n dev_placeholder = create_placeholder('developer', mr['developer'])\n dev_val_preproc = ' '.join(word_tokenize(mr['developer']))\n utterance = re.sub(r'\\b{}\\b'.format(dev_val_preproc), dev_placeholder, utterance)\n mr_update['developer'] = 
dev_placeholder\n elif slot in ['developer', 'expreleasedate']:\n values_alt = [value.replace(';', ',')]\n\n utterance_delexed = utterance\n if not mr_only:\n for val in values_alt:\n # Replace the value (whole-word matches only) with the placeholder\n utterance_delexed = re.sub(r'\\b{}\\b'.format(val), placeholder, utterance)\n if utterance_delexed != utterance:\n break\n\n # Do not replace value with a placeholder token unless there is an exact match in the utterance\n if slot not in mr_update and (mr_only or utterance_delexed != utterance or slot == 'name'):\n mr_update[slot] = placeholder\n utterance = utterance_delexed\n else:\n if input_concat:\n mr_update[slot] = value.replace(' ', '_')\n\n if not utterance_only:\n for slot, new_value in mr_update.items():\n mr[slot] = new_value\n\n if not mr_only:\n # Tokenize punctuation missed by tokenizer (such as after years and numbers in titles) before delexicalization\n utterance = utterance.replace(config.DELEX_SUFFIX + ',', config.DELEX_SUFFIX + ' ,')\n utterance = utterance.replace(config.DELEX_SUFFIX + '.', config.DELEX_SUFFIX + ' .')\n\n return utterance\n\n\ndef counterfeit_sample(mr, utt, target_dataset=None, slots_to_replace=None, slot_value_dict=None):\n \"\"\"Counterfeits a single E2E sample (MR and the corresponding utterance).\n \"\"\"\n\n mr_counterfeit = {}\n utt_counterfeit = utt\n\n if slots_to_replace is None:\n if target_dataset == 'rest_e2e':\n slots_to_replace = ['name', 'near', 'food']\n elif target_dataset == 'video_game':\n slots_to_replace = ['name', 'releaseyear', 'expreleasedate', 'developer']\n elif target_dataset == 'tv':\n slots_to_replace = ['name', 'family', 'hdmiport', 'screensize', 'price', 'audio', 'resolution', 'powerconsumption', 'color', 'count']\n elif target_dataset == 'laptop':\n slots_to_replace = ['name', 'family', 'processor', 'memory', 'drive', 'battery', 'weight', 'dimension', 'design', 'platform', 'warranty', 'count']\n elif target_dataset == 'hotel':\n slots_to_replace = ['name', 'address', 'postcode', 'area', 'near', 'phone', 'count']\n else:\n slots_to_replace = []\n\n if target_dataset == 'video_game':\n for slot_orig, value_orig in mr.items():\n slot_counterfeit = slot_orig\n value_counterfeit = value_orig\n\n if slot_orig.rstrip(string.digits) in slots_to_replace:\n # Substitute the slot with the corresponding slot from the target domain\n slot_counterfeit = e2e_slot_to_video_game_slot(slot_orig)\n while slot_counterfeit in mr_counterfeit:\n slot_counterfeit = e2e_slot_to_video_game_slot(slot_orig)\n\n if slot_orig == 'food':\n # If value mentioned in the MR verbatim, replace with a sampled value from the target domain\n if value_orig in utt_counterfeit:\n value_counterfeit = random.choice(slot_value_dict[slot_counterfeit])\n value_realization = value_counterfeit\n utt_counterfeit = re.sub(value_orig, value_realization, utt_counterfeit)\n\n # Replace related keywords/phrases with alternatives matching the target domain\n if slot_counterfeit == 'releaseyear':\n phrase_counterfeit1 = random.choice(['was released in', 'came out in'])\n phrase_counterfeit2 = random.choice(['released in', 'from'])\n elif slot_counterfeit == 'expreleasedate':\n phrase_counterfeit1 = random.choice(['will be released on', 'is expected to come out', 'is coming out on'])\n phrase_counterfeit2 = random.choice(['to be released on', 'expected to be released on', 'slated for release on'])\n else:\n phrase_counterfeit1 = ''\n phrase_counterfeit2 = ''\n\n utt_counterfeit = re.sub(r'\\bserves\\b', phrase_counterfeit1, 
utt_counterfeit)\n utt_counterfeit = re.sub(r'\\bserving\\b', phrase_counterfeit2, utt_counterfeit)\n utt_counterfeit = re.sub(r'\\bprovides\\b', phrase_counterfeit1, utt_counterfeit)\n utt_counterfeit = re.sub(r'\\bproviding\\b', phrase_counterfeit2, utt_counterfeit)\n utt_counterfeit = re.sub(r'\\bfood\\b', '', utt_counterfeit)\n elif slot_orig == 'customerrating':\n # If value mentioned in the MR verbatim, replace with a sampled value from the target domain\n if value_orig in utt_counterfeit:\n value_counterfeit = random.choice(slot_value_dict[slot_counterfeit])\n value_realization = value_counterfeit\n utt_counterfeit = re.sub(value_orig, value_realization, utt_counterfeit)\n\n # Replace related keywords/phrases with alternatives matching the target domain\n if slot_counterfeit == 'rating':\n phrase_counterfeit = 'rating'\n elif slot_counterfeit == 'esrb':\n phrase_counterfeit = 'esrb rating'\n else:\n phrase_counterfeit = ''\n\n for w in ['customer ratings', 'customer rating', 'ratings', 'rating']:\n utt_counterfeit_sub = re.sub(r'\\b{}\\b'.format(w), phrase_counterfeit, utt_counterfeit)\n if utt_counterfeit_sub != utt_counterfeit:\n utt_counterfeit = utt_counterfeit_sub\n break\n elif slot_orig == 'pricerange':\n # If value mentioned in the MR verbatim, replace with a sampled value from the target domain\n if value_orig in utt_counterfeit:\n value_counterfeit = random.choice(slot_value_dict[slot_counterfeit])\n if ',' in value_counterfeit:\n value_items = [val.strip() for val in value_counterfeit.split(',')]\n value_items_shuffled = random.sample(value_items, len(value_items))\n value_realization = ', '.join(value_items_shuffled[:-1]) + ' and ' + value_items_shuffled[-1]\n else:\n value_realization = value_counterfeit\n utt_counterfeit = re.sub(value_orig, value_realization, utt_counterfeit)\n\n # Replace related keywords/phrases with alternatives matching the target domain\n if slot_counterfeit == 'playerperspective':\n phrase_counterfeit = 'perspective'\n else:\n phrase_counterfeit = ''\n\n for w in ['price range', 'priced', 'prices', 'price']:\n utt_counterfeit_sub = re.sub(r'\\b{}\\b'.format(w), phrase_counterfeit, utt_counterfeit)\n if utt_counterfeit_sub != utt_counterfeit:\n utt_counterfeit = utt_counterfeit_sub\n break\n elif slot_orig == 'familyfriendly':\n if slot_counterfeit == 'hasmultiplayer':\n phrase_counterfeit = 'multiplayer'\n elif slot_counterfeit == 'availableonsteam':\n phrase_counterfeit = 'steam'\n elif slot_counterfeit == 'haslinuxrelease':\n phrase_counterfeit = 'linux'\n elif slot_counterfeit == 'hasmacrelease':\n phrase_counterfeit = 'mac'\n else:\n phrase_counterfeit = ''\n\n for w in ['families', 'children', 'kids', 'family', 'child', 'kid']:\n utt_counterfeit_sub = re.sub(r'\\b{}\\b'.format(w), phrase_counterfeit, utt_counterfeit)\n if utt_counterfeit_sub != utt_counterfeit:\n utt_counterfeit = utt_counterfeit_sub\n break\n\n for w in ['-friendly', ' friendly']:\n utt_counterfeit = re.sub(r'\\b{}\\b'.format(w), ' supporting', utt_counterfeit)\n\n utt_counterfeit = re.sub(r'\\ballow', 'offer', utt_counterfeit)\n elif slot_orig == 'area':\n # If value mentioned in the MR verbatim, replace with a sampled value from the target domain\n if value_orig in utt_counterfeit:\n value_counterfeit = random.choice(slot_value_dict[slot_counterfeit])\n if ',' in value_counterfeit:\n value_items = [val.strip() for val in value_counterfeit.split(',')]\n value_items_shuffled = random.sample(value_items, len(value_items))\n value_realization = ', 
'.join(value_items_shuffled[:-1]) + ' and ' + value_items_shuffled[-1]\n else:\n value_realization = value_counterfeit\n utt_counterfeit = re.sub(value_orig, value_realization, utt_counterfeit)\n\n # Replace related keywords/phrases with alternatives matching the target domain\n if slot_counterfeit == 'platforms':\n phrase_counterfeit = random.choice(['available for', 'available on', 'released for', 'released on'])\n else:\n phrase_counterfeit = ''\n\n for w in ['located in']:\n utt_counterfeit_sub = re.sub(r'\\b{}\\b'.format(w), phrase_counterfeit, utt_counterfeit)\n if utt_counterfeit_sub != utt_counterfeit:\n utt_counterfeit = utt_counterfeit_sub\n break\n\n for w in ['area']:\n phrase_counterfeit = 'platform' + ('s' if ',' in value_counterfeit else '')\n utt_counterfeit = re.sub(r'\\b{}\\b'.format(w), phrase_counterfeit, utt_counterfeit)\n elif slot_orig == 'eattype':\n # If value mentioned in the MR verbatim, replace with a sampled value from the target domain\n if value_orig in utt_counterfeit:\n value_counterfeit = random.choice(slot_value_dict[slot_counterfeit])\n if ',' in value_counterfeit:\n value_items = [val.strip() for val in value_counterfeit.split(',')]\n value_items_shuffled = random.sample(value_items, len(value_items))\n value_realization = ' '.join(value_items_shuffled) + ' game'\n else:\n value_realization = value_counterfeit + ' game'\n utt_counterfeit = re.sub(value_orig, value_realization, utt_counterfeit)\n elif slot_orig == 'near':\n if slot_counterfeit == 'developer':\n phrase_counterfeit = random.choice(['developed by', 'made by'])\n else:\n phrase_counterfeit = ''\n\n for w in ['located near', 'situated by']:\n utt_counterfeit_sub = re.sub(r'\\b{}\\b'.format(w), phrase_counterfeit, utt_counterfeit)\n if utt_counterfeit_sub != utt_counterfeit:\n utt_counterfeit = utt_counterfeit_sub\n break\n\n utt_counterfeit = re.sub(r'\\bnear\\b', random.choice(['by', 'from']), utt_counterfeit)\n\n mr_counterfeit[slot_counterfeit] = value_counterfeit\n\n # Replace general keywords/phrases with alternatives matching the target domain\n for w in ['place', 'venue', 'establishment', 'eatery', 'restaurant']:\n utt_counterfeit = re.sub(r'\\b{}\\b'.format(w), 'game', utt_counterfeit)\n utt_counterfeit = re.sub(r'\\bnear\\b'.format(w), 'for', utt_counterfeit)\n elif target_dataset == 'hotel':\n for slot_orig, value_orig in mr.items():\n slot_counterfeit = slot_orig\n value_counterfeit = value_orig\n\n if slot_orig.rstrip(string.digits) in slots_to_replace:\n # Substitute the slot with the corresponding slot from the target domain\n slot_counterfeit = e2e_slot_to_hotel_slot(slot_orig)\n while slot_counterfeit in mr_counterfeit:\n slot_counterfeit = e2e_slot_to_hotel_slot(slot_orig)\n\n if slot_orig == 'familyfriendly':\n if slot_counterfeit == 'acceptscreditcards':\n phrase_counterfeit = 'credit card'\n elif slot_counterfeit == 'dogsallowed':\n phrase_counterfeit = 'dog'\n elif slot_counterfeit == 'hasinternet':\n phrase_counterfeit = 'internet'\n else:\n phrase_counterfeit = ''\n\n for w in ['families', 'children', 'kids']:\n utt_counterfeit = re.sub(r'\\b{}\\b'.format(w),\n phrase_counterfeit + 's' if phrase_counterfeit != 'internet' else phrase_counterfeit,\n utt)\n if utt_counterfeit != utt:\n break\n if utt_counterfeit == utt:\n for w in ['family', 'child', 'kid']:\n utt_counterfeit_sub = re.sub(r'\\b{}\\b'.format(w), phrase_counterfeit, utt_counterfeit)\n if utt_counterfeit_sub != utt_counterfeit:\n utt_counterfeit = utt_counterfeit_sub\n break\n elif slot_orig == 
'customerrating' or slot_orig == 'food':\n if slot_counterfeit == 'address':\n phrase_counterfeit = 'address'\n elif slot_counterfeit == 'phone':\n phrase_counterfeit = 'phone number'\n elif slot_counterfeit == 'postcode':\n phrase_counterfeit = 'postcode'\n else:\n phrase_counterfeit = ''\n\n if slot_orig == 'customerrating':\n for w in ['customer rating of', 'customer ratings', 'customer rating', 'ratings', 'rating']:\n utt_counterfeit_sub = re.sub(r'\\b{}\\b'.format(w), phrase_counterfeit, utt_counterfeit)\n if utt_counterfeit_sub != utt_counterfeit:\n utt_counterfeit = utt_counterfeit_sub\n break\n elif slot_orig == 'food':\n utt_counterfeit = re.sub(r'\\b{}\\b'.format('food'), phrase_counterfeit, utt_counterfeit)\n else:\n raise AttributeError('provided domain does not exist')\n\n mr_counterfeit[slot_counterfeit] = value_counterfeit\n\n return mr_counterfeit, utt_counterfeit\n\n\ndef create_placeholder(slot, value):\n \"\"\"Assemble a placeholder token for the given slot value.\"\"\"\n\n vowels = 'aeiou'\n\n placeholder = config.DELEX_PREFIX\n value = value.lower()\n\n if value[0] in vowels:\n placeholder += 'vow_'\n else:\n placeholder += 'con_'\n\n if slot in ['name', 'developer']:\n if value.startswith(('the ', 'a ', 'an ')):\n placeholder += 'det_'\n elif slot == 'food':\n if 'food' not in value:\n placeholder += 'cuisine_'\n\n placeholder += (slot + config.DELEX_SUFFIX)\n\n return placeholder\n\n\ndef e2e_slot_to_hotel_slot(slot):\n \"\"\"Map an E2E slot onto a slot in the Hotel domain. If there are multiple tokens in the corresponding category\n in the Hotel domain, randomly pick one from that category.\n \"\"\"\n\n slot_map = {\n 'food': ['address', 'phone', 'postcode'],\n 'customerrating': ['address', 'phone', 'postcode'],\n 'familyfriendly': ['acceptscreditcards', 'dogsallowed', 'hasinternet'],\n 'eattype': ['type']\n }\n\n if slot in slot_map:\n if len(slot_map[slot]) == 1:\n return slot_map[slot][0]\n else:\n return random.choice(slot_map[slot])\n else:\n return slot\n\n\ndef e2e_slot_to_video_game_slot(slot):\n \"\"\"Map an E2E slot onto a slot in the Video Game domain. If there are multiple tokens in the corresponding category\n in the Video Game domain, randomly pick one from that category.\n \"\"\"\n\n slot_map = {\n 'food': ['releaseyear', 'expreleasedate'], # delexed\n 'customerrating': ['rating', 'esrb'],\n 'pricerange': ['playerperspective'],\n 'familyfriendly': ['hasmultiplayer', 'availableonsteam', 'haslinuxrelease', 'hasmacrelease'], # boolean\n 'area': ['platforms'],\n 'eattype': ['genres'],\n 'near': ['developer'] # delexed\n }\n\n if slot in slot_map:\n if len(slot_map[slot]) == 1:\n return slot_map[slot][0]\n else:\n return random.choice(slot_map[slot])\n else:\n return slot\n\n\ndef token_seq_to_idx_seq(token_seqences, token2idx, max_output_seq_len):\n # produce sequences of indexes from the utterances in the training set\n idx_sequences = np.zeros((len(token_seqences), max_output_seq_len), dtype=np.int32) # padding implicitly present, as the index of the padding token is 0\n for i, token_seq in enumerate(token_seqences):\n for j, token in enumerate(token_seq):\n # truncate long utterances\n if j >= max_output_seq_len:\n break\n\n # represent each token with the corresponding index\n if token in token2idx:\n idx_sequences[i][j] = token2idx[token]\n else:\n idx_sequences[i][j] = token2idx['<NA>']\n\n return idx_sequences\n\n\n# ---- SCRIPTS ----\n\ndef count_unique_mrs(dataset, filename):\n \"\"\"Counts unique MRs in the datasets and prints the statistics. 
(Requires the initial comment blocks in\n the TV and Laptop data files to be manually removed first.)\n \"\"\"\n\n if filename.lower().endswith('.csv'):\n df = pd.read_csv(os.path.join(config.DATA_DIR, dataset, filename), header=0, encoding='utf8')\n elif filename.lower().endswith('.json'):\n df = pd.read_json(os.path.join(config.DATA_DIR, dataset, filename), encoding='utf8')\n else:\n raise ValueError('Unexpected file type. Please provide a CSV or a JSON file as input.')\n\n # Remove delexicalized placeholders, if present\n df.iloc[:, 0] = df.iloc[:, 0].replace(r'__.*?__', '', regex=True)\n\n print('Unique MRs (' + dataset + ' -> ' + filename + '): ', end='')\n print(len(df.iloc[:, 0].unique()), '/', len(df.iloc[:, 0]))\n\n\ndef count_mr_overlap(dataset, filename1, filename2):\n \"\"\"Counts unique MRs in the datasets and prints the statistics. (Requires the initial comment blocks in\n the TV and Laptop data files to be manually removed first.)\n \"\"\"\n\n if filename1.lower().endswith('.csv') and filename2.lower().endswith('.csv'):\n df1 = pd.read_csv(os.path.join(config.DATA_DIR, dataset, filename1), header=0, encoding='utf8')\n df2 = pd.read_csv(os.path.join(config.DATA_DIR, dataset, filename2), header=0, encoding='utf8')\n elif filename1.lower().endswith('.json') and filename2.lower().endswith('.json'):\n df1 = pd.read_json(os.path.join(config.DATA_DIR, dataset, filename1), encoding='utf8')\n df2 = pd.read_json(os.path.join(config.DATA_DIR, dataset, filename2), encoding='utf8')\n else:\n raise ValueError('Unexpected file type. Please provide two CSV or two JSON files as input.')\n\n # Remove delexicalized placeholders, if present\n df1.iloc[:, 0] = df1.iloc[:, 0].replace(r'__.*?__', '', regex=True)\n df2.iloc[:, 0] = df2.iloc[:, 0].replace(r'__.*?__', '', regex=True)\n\n # Identify the samples whose MR matches one in the other file\n df1_overlap = df1[df1.mr.isin(df2.mr)]\n df2_overlap = df2[df2.mr.isin(df1.mr)]\n\n print('Overlapping MRs (' + dataset + '):')\n print('-> ' + filename1 + ':\\t' + str(len(df1_overlap)) + ' out of ' + str(len(df1)))\n print('-> ' + filename2 + ':\\t' + str(len(df2_overlap)) + ' out of ' + str(len(df2)))\n print()\n\n\ndef verify_slot_order(dataset, filename):\n \"\"\"Verifies whether the slot order in all MRs corresponds to the desired order.\n \"\"\"\n\n slots_ordered = ['name', 'eattype', 'food', 'pricerange', 'customerrating', 'area', 'familyfriendly', 'near']\n mrs_dicts = []\n\n # Read in the data\n data_cont = init_test_data(os.path.join(config.DATA_DIR, dataset, filename))\n mrs, utterances = data_cont['data']\n _, _, slot_sep, val_sep, val_sep_end = data_cont['separators']\n\n for i, mr in enumerate(mrs):\n mr_dict = OrderedDict()\n\n # Extract the slot-value pairs into a dictionary\n for slot_value in mr.split(slot_sep):\n slot, _, _, value_orig = parse_slot_and_value(slot_value, val_sep, val_sep_end)\n mr_dict[slot] = value_orig\n\n mrs_dicts.append(mr_dict)\n\n for mr_dict in mrs_dicts:\n slots = list(mr_dict.keys())\n cur_idx = 0\n\n for slot in slots:\n if slot in slots_ordered:\n slot_idx = slots.index(slot)\n rightmost_idx = slots_ordered.index(slot)\n\n if slot_idx <= rightmost_idx and rightmost_idx >= cur_idx:\n cur_idx = rightmost_idx\n else:\n print('TEST FAILED: {0} has index {1} in the MR, but the order requires index {2}.'.format(\n slot, slot_idx, slots_ordered.index(slot)))\n\n\ndef filter_samples_by_da_type_csv(dataset, filename, das_to_keep):\n \"\"\"Create a new CSV data file by filtering only those samples in the given 
dataset that contain an MR\n with one of the desired DA types.\n \"\"\"\n\n if not filename.lower().endswith('.csv'):\n raise ValueError('Unexpected file type. Please provide a CSV file as input.')\n\n data_filtered = []\n\n # Read in the data\n data_cont = init_test_data(os.path.join(config.DATA_DIR, dataset, filename))\n mrs, utterances = data_cont['data']\n _, _, slot_sep, val_sep, val_sep_end = data_cont['separators']\n\n # Append the opening parenthesis to the DA names, so as to avoid matching DAs whose names have these as prefixes\n das_to_keep = tuple(da + '(' for da in das_to_keep)\n\n # Filter MRs with the desired DA types only\n for mr, utt in zip(mrs, utterances):\n if mr.startswith(das_to_keep):\n data_filtered.append([mr, utt])\n\n # Save the filtered dataset to a new file\n filename_out = os.path.splitext(filename)[0] + ' [filtered].csv'\n pd.DataFrame(data_filtered).to_csv(os.path.join(config.DATA_DIR, dataset, filename_out),\n header=['mr', 'ref'],\n index=False,\n encoding='utf8')\n\n\ndef filter_samples_by_da_type_json(dataset, filename, das_to_keep):\n \"\"\"Create a new JSON data file by filtering only those samples in the given dataset that contain an MR\n with one of the desired DA types.\n \"\"\"\n\n if not filename.lower().endswith('.json'):\n raise ValueError('Unexpected file type. Please provide a JSON file as input.')\n\n data_filtered = []\n\n with io.open(os.path.join(config.DATA_DIR, dataset, filename), encoding='utf8') as f_dataset:\n # Skip and store the comment at the beginning of the file\n f_dataset, comment_block = skip_comment_block(f_dataset, '#')\n\n # Read the dataset from file\n data = json.load(f_dataset, encoding='utf8')\n\n # Append the opening parenthesis to the DA names, so as to avoid matching DAs whose names have these as prefixes\n das_to_keep = tuple(da + '(' for da in das_to_keep)\n\n # Filter MRs with the desired DA types only\n for sample in data:\n mr = sample[0]\n if mr.startswith(das_to_keep):\n data_filtered.append(sample)\n\n # Save the filtered dataset to a new file\n filename_out = os.path.splitext(filename)[0] + ' [filtered].json'\n with io.open(os.path.join(config.DATA_DIR, dataset, filename_out), 'w', encoding='utf8') as f_dataset_filtered:\n f_dataset_filtered.write(comment_block)\n json.dump(data_filtered, f_dataset_filtered, indent=4, ensure_ascii=False)\n\n\ndef filter_samples_by_slot_count_csv(dataset, filename, min_count=None, max_count=None, eliminate_position_slot=True):\n \"\"\"Create a new CSV data file by filtering only those samples in the given dataset that contain an MR\n with the number of slots in the desired range.\n \"\"\"\n\n if not filename.lower().endswith('.csv'):\n raise ValueError('Unexpected file type. 
Please provide a CSV file as input.')\n\n data_filtered = []\n\n # Read in the data\n data_cont = init_test_data(os.path.join(config.DATA_DIR, dataset, filename))\n mrs, utterances = data_cont['data']\n _, _, slot_sep, val_sep, val_sep_end = data_cont['separators']\n\n for mr, utt in zip(mrs, utterances):\n mr_dict = OrderedDict()\n cur_min_count = min_count or 0\n cur_max_count = max_count or 20\n\n # Extract the slot-value pairs into a dictionary\n for slot_value in mr.split(slot_sep):\n _, _, slot_orig, value_orig = parse_slot_and_value(slot_value, val_sep, val_sep_end)\n mr_dict[slot_orig] = value_orig\n\n if 'da' in mr_dict:\n cur_min_count += 1\n cur_max_count += 1\n if 'position' in mr_dict:\n if eliminate_position_slot:\n if mr_dict['position'] == 'inner':\n continue\n elif mr_dict['position'] == 'outer':\n mr = mr.replace(', position[outer]', '')\n cur_min_count += 1\n cur_max_count += 1\n\n if min_count is not None and len(mr_dict) < cur_min_count or \\\n max_count is not None and len(mr_dict) > cur_max_count:\n continue\n\n data_filtered.append([mr, utt])\n\n # Save the filtered dataset to a new file\n filename_out = ''.join(filename.split('.')[:-1])\n if min_count is not None:\n filename_out += '_min{}'.format(min_count)\n if max_count is not None:\n filename_out += '_max{}'.format(max_count)\n filename_out += '_slots.csv'\n\n pd.DataFrame(data_filtered).to_csv(os.path.join(config.DATA_DIR, dataset, filename_out),\n header=['mr', 'ref'],\n index=False,\n encoding='utf8')\n\n\ndef filter_samples_by_slot_count_json(dataset, filename, min_count=None, max_count=None, eliminate_position_slot=True):\n \"\"\"Create a new JSON data file by filtering only those samples in the given dataset that contain an MR\n with the number of slots in the desired range.\n \"\"\"\n\n if not filename.lower().endswith('.json'):\n raise ValueError('Unexpected file type. 
Please provide a JSON file as input.')\n\n data_filtered = []\n\n with io.open(os.path.join(config.DATA_DIR, dataset, filename), encoding='utf8') as f_dataset:\n # Skip and store the comment at the beginning of the file\n f_dataset, comment_block = skip_comment_block(f_dataset, '#')\n\n # Read the dataset from file\n data = json.load(f_dataset, encoding='utf8')\n\n data_cont = init_test_data(os.path.join(config.DATA_DIR, dataset, filename))\n _, _, slot_sep, val_sep, val_sep_end = data_cont['separators']\n\n # Filter MRs with a number of slots in the desired range only\n for sample in data:\n mr = sample[0]\n\n mr_dict = OrderedDict()\n cur_min_count = min_count or 0\n cur_max_count = max_count or 20\n\n # Extract the slot-value pairs into a dictionary\n for slot_value in mr.split(slot_sep):\n _, _, slot_orig, value_orig = parse_slot_and_value(slot_value, val_sep, val_sep_end)\n mr_dict[slot_orig] = value_orig\n\n if 'da' in mr_dict:\n cur_min_count += 1\n cur_max_count += 1\n if 'position' in mr_dict:\n if eliminate_position_slot:\n if mr_dict['position'] == 'inner':\n continue\n elif mr_dict['position'] == 'outer':\n mr = mr.replace(';position=outer', '')\n cur_min_count += 1\n cur_max_count += 1\n\n if min_count is not None and len(mr_dict) < cur_min_count or \\\n max_count is not None and len(mr_dict) > cur_max_count:\n continue\n\n data_filtered.append([mr, sample[1], sample[2]])\n\n # Save the filtered dataset to a new file\n filename_out = ''.join(filename.split('.')[:-1])\n if min_count is not None:\n filename_out += '_min{}'.format(min_count)\n if max_count is not None:\n filename_out += '_max{}'.format(max_count)\n filename_out += '_slots.json'\n\n with io.open(os.path.join(config.DATA_DIR, dataset, filename_out), 'w', encoding='utf8') as f_dataset_filtered:\n f_dataset_filtered.write(comment_block)\n json.dump(data_filtered, f_dataset_filtered, indent=4, ensure_ascii=False)\n\n\ndef counterfeit_dataset_from_e2e(filename, target_dataset, out_type='csv', slot_value_dict_path=None):\n \"\"\"Creates a counterfeit target dataset from the E2E restaurant dataset by mapping the E2E slots onto similar\n slots in the target domain. 
Boolean slots are handled by heuristically replacing the corresponding mention\n in the reference utterance to reflect the slot from the target domain that replaced the original E2E one.\n The counterfeit dataset is stored in a JSON format.\n \"\"\"\n\n source_slots = ['name', 'eattype', 'food', 'pricerange', 'customerrating', 'area', 'familyfriendly', 'near']\n\n data_counterfeit = []\n data_out = []\n\n # Read in the data\n data_cont = init_test_data(os.path.join(config.E2E_DATA_DIR, filename))\n mrs, utterances = data_cont['data']\n _, _, slot_sep, val_sep, val_sep_end = data_cont['separators']\n\n # Preprocess the utterances\n utterances = [preprocess_utterance(utt) for utt in utterances]\n\n if slot_value_dict_path is not None:\n with open(slot_value_dict_path, 'r', encoding='utf8') as f_slot_values:\n slot_value_dict = json.load(f_slot_values)\n else:\n slot_value_dict = None\n\n for mr, utt in zip(mrs, utterances):\n mr_dict = OrderedDict()\n\n # Extract the slot-value pairs into a dictionary\n for slot_value in mr.split(slot_sep):\n slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)\n mr_dict[slot] = value\n\n # Delexicalize the MR and the utterance\n data_counterfeit.append(counterfeit_sample(mr_dict, utt,\n target_dataset=target_dataset,\n slots_to_replace=source_slots,\n slot_value_dict=slot_value_dict))\n\n if target_dataset in ['video_game']:\n for mr, utt in data_counterfeit:\n mr_str = mr_to_string(mr, da='inform')\n data_out.append([mr_str, utt])\n elif target_dataset in ['laptop', 'tv', 'hotel']:\n for mr, utt in data_counterfeit:\n mr_str = 'inform('\n for slot, val in mr.items():\n mr_str += slot + '=\\'' + val + '\\';'\n mr_str = mr_str[:-1] + ')'\n\n data_out.append([mr_str, utt, utt])\n\n # Save the counterfeit dataset to a new file\n if out_type == 'csv':\n filename_out = os.path.splitext(filename)[0] + ' [counterfeit {}].csv'.format(target_dataset)\n df_out = pd.DataFrame(data_out, columns=['mr', 'ref'])\n df_out.to_csv(os.path.join(config.E2E_DATA_DIR, filename_out), index=False, encoding='utf8')\n elif out_type == 'json':\n filename_out = os.path.splitext(filename)[0] + ' [counterfeit {}].json'.format(target_dataset)\n with io.open(os.path.join(config.E2E_DATA_DIR, filename_out), 'w', encoding='utf8') as f_dataset_counterfeit:\n json.dump(data_out, f_dataset_counterfeit, indent=4, ensure_ascii=False)\n\n\ndef get_vocab_overlap(dataset1, filename_train1, filename_dev1, dataset2, filename_train2, filename_dev2):\n \"\"\"Calculates the word overlap between the vocabularies of two datasets.\n \"\"\"\n\n data_trainset1 = os.path.join(config.DATA_DIR, dataset1, filename_train1)\n data_devset1 = os.path.join(config.DATA_DIR, dataset1, filename_dev1)\n data_trainset2 = os.path.join(config.DATA_DIR, dataset2, filename_train2)\n data_devset2 = os.path.join(config.DATA_DIR, dataset2, filename_dev2)\n\n dataset1 = load_training_data(data_trainset1, data_devset1)\n dataset2 = load_training_data(data_trainset2, data_devset2)\n\n vocab1 = get_vocabulary(dataset1)\n vocab2 = get_vocabulary(dataset2)\n\n common_vocab = vocab1.intersection(vocab2)\n\n print('Size of vocab 1:', len(vocab1))\n print('Size of vocab 2:', len(vocab2))\n print('Number of common words:', len(common_vocab))\n\n print('Common words:')\n print(common_vocab)\n\n\ndef pool_slot_values(dataset, filenames):\n \"\"\"Gathers all possible values for each slot type in the dataset.\n \"\"\"\n\n # slots_to_pool = ['eattype', 'pricerange', 'customerrating', 'familyfriendly']\n slots_to_pool = 
None\n slot_poss_values = {}\n\n # Read in the data\n if len(filenames) == 1:\n data_cont = init_test_data(os.path.join(config.DATA_DIR, dataset, filenames[0]))\n mrs, utterances = data_cont['data']\n else:\n data_cont = init_training_data(os.path.join(config.DATA_DIR, dataset, filenames[0]),\n os.path.join(config.DATA_DIR, dataset, filenames[1]))\n x_train, y_train, x_dev, y_dev = data_cont['data']\n mrs, utterances = (x_train + x_dev), (y_train + y_dev)\n\n _, _, slot_sep, val_sep, val_sep_end = data_cont['separators']\n\n # Preprocess the MRs\n mrs = [preprocess_mr(mr, data_cont['separators']) for mr in mrs]\n\n for i, mr in enumerate(mrs):\n mr_dict = OrderedDict()\n\n # Extract the slot-value pairs into a dictionary\n for slot_value in mr.split(slot_sep):\n slot, _, _, value_orig = parse_slot_and_value(slot_value, val_sep, val_sep_end)\n mr_dict[slot] = value_orig\n\n # For each slot gather all possible values\n for slot, value in mr_dict.items():\n slot = slot.rstrip(string.digits)\n if slots_to_pool is None or slot in slots_to_pool:\n if slot not in slot_poss_values:\n slot_poss_values[slot] = set()\n if len(value) > 0:\n slot_poss_values[slot].add(value)\n\n # Convert the value sets to lists (and make thus the dictionary serializable into JSON)\n for slot in slot_poss_values.keys():\n slot_poss_values[slot] = sorted(list(slot_poss_values[slot]))\n\n # Store the dictionary to a file\n with open(os.path.join(config.DATA_DIR, dataset, 'slot_values.json'), 'w', encoding='utf8') as f_slot_values:\n json.dump(slot_poss_values, f_slot_values, indent=4, sort_keys=True, ensure_ascii=False)\n\n\ndef generate_joint_vocab():\n \"\"\"Generates a joint vocabulary for multiple datasets.\n \"\"\"\n\n data_trainset = os.path.join(config.VIDEO_GAME_DATA_DIR, 'train.csv')\n data_devset = os.path.join(config.VIDEO_GAME_DATA_DIR, 'valid.csv')\n data_video_game = load_training_data(data_trainset, data_devset, skip_if_exist=False)\n\n # data_trainset = os.path.join(config.HOTEL_DATA_DIR, 'train.json')\n # data_devset = os.path.join(config.HOTEL_DATA_DIR, 'valid.json')\n # data_hotel = load_training_data(data_trainset, data_devset, skip_if_exist=False)\n #\n # data_trainset = os.path.join(config.LAPTOP_DATA_DIR, 'train.json')\n # data_devset = os.path.join(config.LAPTOP_DATA_DIR, 'valid.json')\n # data_laptop = load_training_data(data_trainset, data_devset, skip_if_exist=False)\n #\n # data_trainset = os.path.join(config.TV_DATA_DIR, 'train.json')\n # data_devset = os.path.join(config.TV_DATA_DIR, 'valid.json')\n # data_tv = load_training_data(data_trainset, data_devset, skip_if_exist=False)\n\n data_trainset = os.path.join(config.E2E_DATA_DIR, 'trainset_e2e [denoised] [counterfeit video_game].csv')\n data_devset = os.path.join(config.E2E_DATA_DIR, 'devset_e2e [denoised] [counterfeit video_game].csv')\n data_rest = load_training_data(data_trainset, data_devset, skip_if_exist=False)\n\n # generate_vocab_file(np.concatenate((data_rest, data_tv, data_laptop, data_hotel, data_video_game)),\n generate_vocab_file(np.concatenate((data_rest, data_video_game)),\n vocab_filename='vocab.lang_gen.tokens')\n\n\ndef augment_mrs_with_da_type(dataset, filename, da_type):\n # Read in the data\n df = pd.read_csv(os.path.join(config.DATA_DIR, dataset, filename), header=0, encoding='utf8')\n mrs = df.mr.tolist()\n\n df['mr'] = [add_da_info_to_mr(mr, da_type) for mr in mrs]\n\n filename_out = os.path.splitext(filename)[0] + ' [with DA].csv'\n df.to_csv(os.path.join(config.DATA_DIR, dataset, filename_out), index=False, 
encoding='utf8')\n\n\ndef add_da_info_to_mr(mr, da_type):\n return da_type + '(' + mr + ')'\n\n\ndef delex_dataset(dataset, files, slots_to_delex=None, mr_only=False):\n\n if not isinstance(files, list):\n files = [str(files)]\n\n for filename in files:\n # Read in the data\n data_cont = init_test_data(os.path.join(config.DATA_DIR, dataset, filename))\n dataset_name = data_cont['dataset_name']\n mrs_orig, utterances_orig = data_cont['data']\n _, _, slot_sep, val_sep, val_sep_end = data_cont['separators']\n\n # Preprocess the MRs and utterances\n mrs = [preprocess_mr(mr, data_cont['separators']) for mr in mrs_orig]\n utterances = [preprocess_utterance(utt) for utt in utterances_orig]\n\n # Produce sequences of extracted words from the meaning representations (MRs) in the testset\n mrs_delex = []\n utterances_delex = []\n for i, mr in enumerate(mrs):\n mr_dict = OrderedDict()\n\n # Extract the slot-value pairs into a dictionary\n for slot_value in mr.split(slot_sep):\n slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)\n\n mr_dict[slot] = value\n\n # Delexicalize the MR\n\n if mr_only:\n delex_sample(mr_dict, utterance=utterances[i], dataset=dataset_name, mr_only=True,\n slots_to_delex=slots_to_delex)\n else:\n utterances_delex.append(delex_sample(mr_dict, utterance=utterances[i], dataset=dataset_name,\n slots_to_delex=slots_to_delex))\n\n mrs_delex.append(mr_to_string(mr_dict))\n\n new_df = pd.DataFrame(columns=['mr', 'ref'])\n new_df['mr'] = mrs_delex\n if mr_only:\n new_df['ref'] = utterances_orig\n else:\n new_df['ref'] = utterances_delex\n\n suffix = ' [delex' + (', MR only' if mr_only else '') + ']'\n filename_out = os.path.splitext(filename)[0] + suffix + os.path.splitext(filename)[1]\n file_out = os.path.join(config.DATA_DIR, dataset, filename_out)\n\n new_df.to_csv(file_out, index=False, encoding='utf8')\n\n\ndef mr_to_string(mr_dict, da=None):\n \"\"\"Convert an MR represented by a dictionary to a flat textual form. 
The input MR is expected to be an OrderedDict\n of slots and values.\n \"\"\"\n\n slot_value_pairs = []\n\n # If there is a \"da\" slot in the MR dictionary, pop it and use its value to indicate the DA type of the MR\n if 'da' in mr_dict:\n if da is None:\n da = mr_dict.pop('da', None)\n else:\n assert mr_dict['da'] == da\n mr_dict.pop('da', None)\n\n # Format the slot-value pairs\n for slot, val in mr_dict.items():\n slot_value_pairs.append(slot + '[{0}]'.format(str(val.strip()) if val is not None else ''))\n\n # Concatenate the formatted slot-value pairs to form a textual MR\n mr = ', '.join(slot_value_pairs)\n\n if da is not None:\n # Prepend the DA, and enclose the list of the MR's slot-value pairs in parentheses\n mr = da + '(' + mr + ')'\n\n return mr\n\n\n# ---- MAIN ----\n\ndef main():\n # count_unique_mrs('rest_e2e', 'trainset_e2e [delex, MR only].csv')\n # count_unique_mrs('rest_e2e', 'devset_e2e [delex, MR only].csv')\n # count_unique_mrs('rest_e2e', 'testset_e2e [delex, MR only].csv')\n\n # count_unique_mrs('video_game', 'train [delex, MR only].csv')\n # count_unique_mrs('video_game', 'valid [delex, MR only].csv')\n # count_unique_mrs('video_game', 'test [delex, MR only].csv')\n\n # ----------\n\n # count_mr_overlap('rest_e2e', 'trainset_e2e.csv', 'devset_e2e.csv')\n # count_mr_overlap('rest_e2e', 'trainset_e2e.csv', 'testset_e2e.csv')\n # count_mr_overlap('rest_e2e', 'devset_e2e.csv', 'testset_e2e.csv')\n\n # count_mr_overlap('video_game', 'train [delex, MR only].csv', 'valid [delex, MR only].csv')\n # count_mr_overlap('video_game', 'train [delex, MR only].csv', 'test [delex, MR only].csv')\n # count_mr_overlap('video_game', 'valid [delex, MR only].csv', 'test [delex, MR only].csv')\n\n # ----------\n\n # verify_slot_order('rest_e2e', 'trainset_e2e_utt_split.csv')\n\n # ----------\n\n das_to_keep = ['inform']\n\n filter_samples_by_da_type_csv('video_game', 'valid.csv', das_to_keep)\n # filter_samples_by_da_type_json('tv', 'train.json', das_to_keep)\n\n # ----------\n\n # filter_samples_by_slot_count_csv('rest_e2e', 'testset_e2e.csv', min_count=3, max_count=4)\n # filter_samples_by_slot_count_json('hotel', 'test_filtered.json', min_count=3, max_count=4)\n\n # ----------\n\n # slot_value_dict_path = os.path.join(config.VIDEO_GAME_DATA_DIR, 'slot_values_train.json')\n\n # counterfeit_dataset_from_e2e('testset_e2e_min3_max4_slots.csv', 'hotel', format='json')\n # counterfeit_dataset_from_e2e('trainset_e2e [denoised].csv', 'video_game', out_type='csv',\n # slot_value_dict_path=slot_value_dict_path)\n\n # ----------\n\n # get_vocab_overlap('rest_e2e', 'trainset_e2e.csv', 'devset_e2e.csv',\n # 'hotel', 'train.json', 'valid.json')\n # get_vocab_overlap('laptop', 'train.json', 'valid.json',\n # 'tv', 'train.json', 'valid.json')\n\n # ----------\n\n # pool_slot_values('rest_e2e', ['trainset_e2e.csv', 'devset_e2e.csv'])\n # pool_slot_values('laptop', ['train.json', 'valid.json'])\n # pool_slot_values('video_game', ['train.csv', 'valid.csv'])\n\n # ----------\n\n # generate_joint_vocab()\n\n # ----------\n\n # augment_mrs_with_da_type('rest_e2e', 'trainset_e2e [denoised].csv', 'inform')\n # augment_mrs_with_da_type('video_game', 'dataset.csv', 'inform')\n\n # ----------\n\n # delex_dataset('rest_e2e', ['devset_e2e.csv'], slots_to_delex=['name', 'near'], mr_only=True)\n # delex_dataset('video_game', ['valid.csv'], slots_to_delex=['name', 'developer'], mr_only=True)\n\n # ----------\n\n # x_test, y_test = read_laptop_dataset_test('data/tv/test.json')\n # print(x_test)\n # print()\n # 
print(y_test)\n # print()\n # print(len(x_test), len(y_test))\n\n # ----------\n\n # if len(y_test) > 0:\n # with io.open('data/predictions_baseline.txt', 'w', encoding='utf8') as f_y_test:\n # for line in y_test:\n # f_y_test.write(line + '\\n')\n\n # Produce a file from the predictions in the TV/Laptop dataset format by replacing the baseline utterances (in the 3rd column)\n # with io.open('eval/predictions-tv/predictions_ensemble_2way_2.txt', 'r', encoding='utf8') as f_predictions:\n # with io.open('data/tv/test.json', encoding='utf8') as f_testset:\n # # Skip the comment block at the beginning of the file\n # f_testset, _ = skip_comment_block(f_testset, '#')\n #\n # # read the test data from file\n # df = pd.read_json(f_testset, encoding='utf8')\n #\n # df.iloc[:, 2] = f_predictions.readlines()\n # df.to_json('data/tv/test_pred.json', orient='values')\n\n # Produce a file from the predictions in the TV/Laptop dataset format by replacing the baseline utterances (in the 3rd column)\n # with io.open('eval/predictions-laptop/predictions_ensemble_2way_1.txt', 'r', encoding='utf8') as f_predictions:\n # with io.open('data/laptop/test.json', encoding='utf8') as f_testset:\n # # Skip the comment block at the beginning of the file\n # f_testset, _ = skip_comment_block(f_testset, '#')\n #\n # # read the test data from file\n # df = pd.read_json(f_testset, encoding='utf8')\n #\n # df.iloc[:, 2] = f_predictions.readlines()\n # df.to_json('data/laptop/test_pred.json', orient='values')\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"pandas.DataFrame",
"pandas.read_json",
"pandas.read_csv"
]
] |
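The row above ends with a `pool_slot_values` routine that walks every MR, splits it into slot-value pairs, and accumulates the set of observed values per slot before dumping the result to `slot_values.json`. Below is a minimal, self-contained sketch of that pooling step, assuming simple `', '`-separated MRs with bracketed values; the sample MRs, separators, and the simplified function signature are illustrative stand-ins, not the repo's `init_*`/`parse_slot_and_value` helpers.

```python
import json

def pool_slot_values(mrs, slot_sep=', ', val_sep='[', val_sep_end=']'):
    """Collect every value observed for each slot across a list of MRs."""
    slot_poss_values = {}
    for mr in mrs:
        for slot_value in mr.split(slot_sep):
            slot, _, rest = slot_value.partition(val_sep)
            value = rest.rstrip(val_sep_end)
            slot_poss_values.setdefault(slot.strip(), set())
            if value:
                slot_poss_values[slot.strip()].add(value)
    # Convert the value sets to sorted lists so the dict is JSON-serializable
    return {slot: sorted(values) for slot, values in slot_poss_values.items()}

mrs = ['name[Blue Spice], area[riverside]', 'name[Aromi], area[city centre]']
print(json.dumps(pool_slot_values(mrs), indent=4, sort_keys=True, ensure_ascii=False))
```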
MasazI/gan_basic | [
"37e23e1799616bafa18527aeffc1d3c8e7c5f2ef"
] | [
"wface/sampling_reverse.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nimport os\nimport numpy as np\nfrom sklearn.neighbors import NearestNeighbors\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport model\nfrom dataset import load_csv\nimport sampling_from_vec\nimport tensorflow as tf\nfrom tensorflow.python.platform import gfile\nimport datetime\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer(\"z_dim\", 100, \"dimension of dim for Z for sampling\")\nflags.DEFINE_integer(\"gc_dim\", 64, \"dimension of generative filters in conv layer\")\nflags.DEFINE_integer(\"dc_dim\", 64, \"dimension of discriminative filters in conv layer\")\n\nflags.DEFINE_integer(\"sample_num\", 1, \"The size of sample images [1]\")\nflags.DEFINE_integer(\"image_height\", 64, \"The size of image to use (will be center cropped) [64]\")\nflags.DEFINE_integer(\"image_width\", 64, \"The size of image to use (will be center cropped) [64]\")\nflags.DEFINE_integer(\"image_height_org\", 108, \"original image height\")\nflags.DEFINE_integer(\"image_width_org\", 108, \"original image width\")\nflags.DEFINE_integer(\"c_dim\", 3, \"The size of input image channel to use (will be center cropped) [3]\")\n\nflags.DEFINE_string(\"model_name\", \"/media/newton/data/models/gan/rwface_h_fm_gp\", \"model_name\")\nflags.DEFINE_string(\"data_dir\", \"/home/newton/source/gan_basic/face/data/face\", \"data dir path\")\n#flags.DEFINE_string(\"reverser_model_name\", \"rface_h_fm_again\", \"model_name\")\n\n# flags.DEFINE_string(\"model_name\", \"rface\", \"model_name\")\nflags.DEFINE_string(\"g_model_name\", \"/media/newton/data/models/gan/wface_h_fm_gp\", \"model_name\")\nflags.DEFINE_string(\"sample_dir\", \"samples\", \"sample_name\")\nflags.DEFINE_string(\"checkpoint_dir\", \"checkpoint\", \"Directory name to save the checkpoints [checkpoint]\")\n\nflags.DEFINE_float('gpu_memory_fraction', 0.3, 'gpu memory fraction.')\nflags.DEFINE_string('image_path', '', 'path to image.')\n\nflags.DEFINE_string('mode', 'distribution', 'running mode. 
<sampling, visualize>')\n\nflags.DEFINE_integer(\"db_size\", 50000, \"original image width\")\n\nflags.DEFINE_integer(\"batch_size\", 1, \"The size of batch images [64]\")\n\nclass DCGAN_SR():\n def __init__(self, model_name, checkpoint_dir):\n self.model_name = model_name\n self.checkpoint_dir = checkpoint_dir\n\n def step(self, samples):\n # reverser\n self.reverser = model.EncoderNoBN(FLAGS.sample_num, FLAGS.dc_dim, FLAGS.z_dim)\n self.R1, R1_logits, R1_inter = self.reverser.inference(samples)\n\n return R1_logits\n\n\ndef reverse(image_path, verbose=False):\n # input noize to generator\n z = tf.placeholder(tf.float32, [None, FLAGS.z_dim], name='z')\n\n # input image to reverser\n samples = tf.placeholder(\n tf.float32,\n [FLAGS.sample_num, FLAGS.image_height, FLAGS.image_width, FLAGS.c_dim],\n name='sample_inputs')\n\n # base model class\n dcgan = DCGAN_SR(FLAGS.model_name, FLAGS.checkpoint_dir)\n\n # generate vector\n vectors = dcgan.step(samples)\n\n # saver\n saver = tf.train.Saver()\n\n # create session\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)\n sess = tf.Session(config=tf.ConfigProto(\n allow_soft_placement=True,\n gpu_options=gpu_options))\n sess.run(tf.global_variables_initializer())\n\n # load parameters\n model_dir = os.path.join(FLAGS.model_name, FLAGS.checkpoint_dir)\n ckpt = tf.train.get_checkpoint_state(model_dir)\n if ckpt and ckpt.model_checkpoint_path:\n print(\"Model: %s\" % (ckpt.model_checkpoint_path))\n saver.restore(sess, ckpt.model_checkpoint_path)\n else:\n print(\"No checkpoint file found\")\n exit()\n print(\"Model restored.\")\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n path, ext = os.path.splitext(os.path.basename(image_path))\n if ext == '.csv':\n images = load_csv(image_path)\n vectors_evals = []\n for i, image in enumerate(images):\n # temporaly\n if i == FLAGS.db_size:\n break\n print(\"No.%d %s\" % (i, image[0]))\n pil_img = Image.open(image[0], mode=\"r\")\n pil_img = pil_img.resize((FLAGS.image_height_org, FLAGS.image_width_org))\n img_array = np.asarray(pil_img)\n if img_array.size != FLAGS.image_height_org * FLAGS.image_width_org * FLAGS.c_dim:\n continue\n # img_array = np.reshape(img_array, (FLAGS.image_height_org, FLAGS.image_width_org))\n height_diff = FLAGS.image_height_org - FLAGS.image_height\n width_diff = FLAGS.image_width_org - FLAGS.image_width\n # crop\n img_array = img_array[int(height_diff/2):int(height_diff/2)+FLAGS.image_height, int(width_diff/2):int(width_diff/2)+FLAGS.image_width, :]\n # input for reverser image = tf.subtract(tf.div(image, 127.5), 1.0)\n img_array = img_array / 127.5 - 1.0\n img_array = img_array[None, ...]\n vectors_eval = sess.run(vectors, {samples: img_array})\n if verbose:\n print(vectors_eval)\n print(\"vector:\")\n print(vectors_eval[0])\n vectors_evals.append(vectors_eval[0])\n\n if FLAGS.mode == 'sampling':\n #features_obj = features.Features(images, vectors_evals)\n pass\n # TODO save features object\n elif FLAGS.mode == 'distribution':\n pass\n\n\n elif FLAGS.mode == 'visualize':\n # visualization\n print(\"Calculate NearestNeighbors:\")\n X = np.array(vectors_evals)\n print(X.shape)\n nbrs = NearestNeighbors(n_neighbors=2, algorithm='auto').fit(X)\n distances, indices = nbrs.kneighbors(X)\n print(\"10 ramdom samples\")\n sample_index= np.random.randint(FLAGS.db_size, size=10000)\n for i, index in enumerate(sample_index):\n nbrs_sample = indices[index]\n nbrs_distance = distances[index]\n 
sample_relate_image = images[nbrs_sample[0]][0]\n top_1_index = nbrs_sample[1]\n top_1_nbrs_distance = nbrs_distance[1]\n if top_1_nbrs_distance >= 3.5:\n continue\n\n nn_image = images[top_1_index][0]\n print(\"No.%d sample similarity.\" % i)\n print(sample_relate_image)\n print(nn_image)\n sample_relate_image_mat = mpimg.imread(sample_relate_image)\n\n nn_image_mat = mpimg.imread(nn_image)\n\n fig = plt.figure()\n a = fig.add_subplot(1, 2, 1)\n lum_img = sample_relate_image_mat\n imgplot = plt.imshow(lum_img)\n a.set_title('Sample')\n\n a = fig.add_subplot(1, 2, 2)\n lum2_img = nn_image_mat\n imgplot = plt.imshow(lum2_img)\n a.set_title('NearestNeighbors Top-1')\n a.set_xlabel(\"distance: %f\" % top_1_nbrs_distance)\n now = datetime.datetime.now()\n utime = now.strftime(\"%s\")\n out_dir = os.path.join(FLAGS.model_name, FLAGS.sample_dir)\n if not gfile.Exists(out_dir):\n gfile.MakeDirs(out_dir)\n out_path = os.path.join(out_dir, \"%d_%s.png\" % (i, utime))\n plt.savefig(out_path)\n\n else:\n pil_img = Image.open(image_path)\n pil_img = pil_img.resize((FLAGS.image_height_org, FLAGS.image_width_org))\n img_array = np.asarray(pil_img)\n #input for reverser image = tf.subtract(tf.div(image, 127.5), 1.0)\n # img_array = np.reshape(img_array, (FLAGS.image_height_org, FLAGS.image_width_org))\n height_diff = FLAGS.image_height_org - FLAGS.image_height\n width_diff = FLAGS.image_width_org - FLAGS.image_width\n # crop\n img_array = img_array[int(height_diff / 2):int(height_diff / 2) + FLAGS.image_height,\n int(width_diff / 2):int(width_diff / 2) + FLAGS.image_width, :]\n org_array = img_array\n\n img_array = img_array/127.5 - 1.0\n img_array = img_array[None, ...]\n vectors_eval = sess.run(vectors, {samples: img_array})\n\n input_vector = vectors_eval[0][None, ...]\n print(input_vector)\n\n sampling_from_vec.sampling(input_vector, org_image=org_array)\n\n # regenerate_sample = sess.run(regenerate, {z: input_vector})\n # out_dir = os.path.join(FLAGS.model_name, FLAGS.sample_dir)\n # now = datetime.datetime.now()\n # utime = now.strftime(\"%s\")\n # if not gfile.Exists(out_dir):\n # gfile.MakeDirs(out_dir)\n # filename = os.path.join(out_dir, \"%s.png\" % (utime))\n # with open(filename, 'wb') as f:\n # f.write(regenerate_sample)\n\n # fig = plt.figure()\n # a = fig.add_subplot(1, 2, 1)\n # lum_img = mpimg.imread(image_path)\n # imgplot = plt.imshow(lum_img)\n # a.set_title('Original')\n #\n # a = fig.add_subplot(1, 2, 2)\n # lum2_img = regenerate_sample\n # imgplot = plt.imshow(lum2_img)\n # a.set_title('Re Sampling')\n #\n # out_dir = os.path.join(FLAGS.model_name, FLAGS.sample_dir)\n # if not gfile.Exists(out_dir):\n # gfile.MakeDirs(out_dir)\n # now = datetime.datetime.now()\n # utime = now.strftime(\"%s\")\n # out_path = os.path.join(out_dir, \"%s.png\" % (utime))\n # plt.savefig(out_path)\n\n\n print(\"finish to predict.\")\n coord.request_stop()\n coord.join(threads)\n sess.close()\n\n\ndef main(_):\n print(\"face DCGANs Reverse.\")\n if FLAGS.image_path == \"\" or FLAGS.image_path is None:\n print(\"Please set specific image_path. --image_path <path to image or csv file witch include path>\")\n return\n image_path = FLAGS.image_path\n reverse(image_path)\n\n\nif __name__ == '__main__':\n tf.app.run()"
] | [
[
"tensorflow.train.start_queue_runners",
"tensorflow.train.get_checkpoint_state",
"tensorflow.python.platform.gfile.Exists",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.savefig",
"tensorflow.train.Saver",
"tensorflow.ConfigProto",
"numpy.random.randint",
"tensorflow.app.run",
"numpy.array",
"tensorflow.train.Coordinator",
"matplotlib.image.imread",
"matplotlib.pyplot.figure",
"tensorflow.placeholder",
"numpy.asarray",
"tensorflow.python.platform.gfile.MakeDirs",
"sklearn.neighbors.NearestNeighbors",
"tensorflow.GPUOptions",
"matplotlib.pyplot.imshow"
]
] |
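The `visualize` branch of `sampling_reverse.py` above fits `sklearn.neighbors.NearestNeighbors` on the reverser's latent vectors and then reads the top-1 neighbour for randomly sampled rows. A minimal sketch of that lookup, with random vectors standing in for the encoder outputs (`z_dim=100` is taken from the row; the dataset size, seed, and printout are illustrative):

```python
import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.default_rng(12345)
vectors = rng.normal(size=(500, 100))  # stand-in for the reverser's z_dim=100 outputs

# n_neighbors=2 because index 0 of each result row is the query vector itself
nbrs = NearestNeighbors(n_neighbors=2, algorithm='auto').fit(vectors)
distances, indices = nbrs.kneighbors(vectors)

query = int(rng.integers(len(vectors)))
top1_index, top1_distance = indices[query][1], distances[query][1]
print(f"sample {query}: nearest neighbour {top1_index} at distance {top1_distance:.3f}")
```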
Retrocamara42/reinforcement_learning_intro_exercices | [
"da3357801ecff91012185b105e8963704ac1316c"
] | [
"exercise_2_3.py"
] | [
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nnumAcciones = 10\nepsilon = 0.1\nnum_pasos = 10000\nalfa = 0.1\nmu = [0,0,0,0,0,0,0,0,0,0]\nnum_exper = 100\n\ndef banditArm10(action):\n assert(len(mu)==numAcciones)\n sigma = 1\n s=np.random.normal(mu[action],sigma,1)\n return s\n\ndef actualizarMus():\n for i in range(len(mu)):\n randNum = (np.random.random() - 0.5)\n mu[i] = mu[i] + randNum\n\ndef resetearMus():\n for i in range(len(mu)):\n mu[i] = 0\n\ndef resetarQ():\n Q1 = []\n Q2 = []\n N = []\n\n for i in range(numAcciones):\n Q1.append(0.0)\n Q2.append(0.0)\n N.append(0.0)\n Q1 = np.array(Q1)\n Q2 = np.array(Q2)\n\n return Q1,Q2,N\n\nAverageRewards1=[]\nAverageRewards2=[]\n\nfor i in range(num_pasos+1):\n AverageRewards1.append(0.0)\n\nfor i in range(num_pasos+1):\n AverageRewards2.append(0.0)\n\n\nfor j in range(num_exper):\n Q1,Q2,N = resetarQ()\n for i in range(num_pasos):\n randNum = np.random.random()\n # Acción A - valor del 0 al 9\n Abest1 = np.random.choice(np.flatnonzero(Q1==Q1.max()))\n if(randNum > epsilon):\n A1 = np.random.choice(np.flatnonzero(Q1==Q1.max()))\n else:\n A1 = int(np.random.random()*10)\n if(A1==10):\n A1=9\n\n randNum = np.random.random()\n # Acción A - valor del 0 al 9\n Abest2 = np.random.choice(np.flatnonzero(Q2==Q2.max()))\n if(randNum > epsilon):\n A2 = np.random.choice(np.flatnonzero(Q2==Q2.max()))\n else:\n A2 = int(np.random.random()*10)\n if(A2==10):\n A2=9\n\n # Recompensa R\n R1 = banditArm10(A1)\n R2 = banditArm10(A2)\n\n actualizarMus()\n\n # Num. Recompensas N\n N[A1] = N[A1] + 1\n\n # Valor acción Q1\n Q1[A1] = Q1[A1] + (R1-Q1[A1])/float(N[A1])\n\n # Valor acción Q2\n Q2[A2] = Q2[A2] + (R2-Q2[A2])*alfa\n #AverageRewards.append(banditArm10(Abest)[0])\n AverageRewards1[i+1]=AverageRewards1[i+1]+banditArm10(A1)[0]\n AverageRewards2[i+1]=AverageRewards2[i+1]+banditArm10(A2)[0]\n #AverageRewards1.append(R1)\n #AverageRewards2.append(R2)\n\n print(mu)\n resetearMus()\n\n\n\n\nfor i in range(num_pasos+1):\n AverageRewards1[i] = AverageRewards1[i]/num_exper\n\nfor i in range(num_pasos+1):\n AverageRewards2[i] = AverageRewards2[i]/num_exper\n\ndf = pd.DataFrame(AverageRewards1,index=range(len(AverageRewards1)))\nlines=df.plot.line() #(ylim=[-1,2])\nplt.show()\n\ndf = pd.DataFrame(AverageRewards2,index=range(len(AverageRewards2)))\nlines=df.plot.line() #(ylim=[-1,2])\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"numpy.array",
"numpy.random.normal",
"numpy.random.random"
]
] |
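The bandit exercise above maintains two estimates for the same drifting 10-armed problem: `Q1` uses the sample-average rule `Q += (R - Q)/N`, while `Q2` uses a constant step size `Q += (R - Q)*alfa`, which is better suited to the nonstationary means produced by `actualizarMus`. A minimal sketch of the two updates on a single action, with a stationary Gaussian reward standing in for `banditArm10`:

```python
import numpy as np

rng = np.random.default_rng(0)
alpha = 0.1          # constant step size (Q2-style update)
n = 0
q_avg = 0.0          # sample-average estimate (Q1-style update)
q_const = 0.0        # constant-step-size estimate

for _ in range(1000):
    reward = rng.normal(0.5, 1.0)          # stand-in for banditArm10(action)
    n += 1
    q_avg += (reward - q_avg) / n          # weight on each reward shrinks as 1/N
    q_const += (reward - q_const) * alpha  # recent rewards keep a fixed weight

print(f"sample average: {q_avg:.3f}, constant alpha: {q_const:.3f}")
```

The design difference is the weighting: the 1/N rule treats all past rewards equally, while a constant alpha discounts older rewards exponentially, so it keeps adapting when the true action values move.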
ledbagholberton/Neural-Transfer | [
"b2996dd2e970ce498e3743b55f3add8786f49b22"
] | [
"8-main.py"
] | [
"#!/usr/bin/env python3\n\nimport matplotlib.image as mpimg\nimport numpy as np\nimport tensorflow as tf\n\nNST = __import__('8-neural_style').NST\n\n\nif __name__ == '__main__':\n style_image = mpimg.imread(\"starry_night.jpg\")\n content_image = mpimg.imread(\"golden_gate.jpg\")\n\n np.random.seed(0)\n nst = NST(style_image, content_image)\n generated_image = tf.contrib.eager.Variable(nst.content_image)\n grads, J_total, J_content, J_style = nst.compute_grads(generated_image)\n print(J_total)\n print(J_content)\n print(J_style)\n print(grads)\n"
] | [
[
"numpy.random.seed",
"matplotlib.image.imread",
"tensorflow.contrib.eager.Variable"
]
] |
wangzheallen/3DMPPE_POSENET_RELEASE | [
"9fad1f6a95041cc75e70664821a4851e79348745"
] | [
"common/utils/vis.py"
] | [
"import os\nimport cv2\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom config import cfg\n\ndef vis_keypoints(img, kps, kps_lines, kp_thresh=0.4, alpha=1):\n\n # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.\n cmap = plt.get_cmap('rainbow')\n colors = [cmap(i) for i in np.linspace(0, 1, len(kps_lines) + 2)]\n colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]\n\n # Perform the drawing on a copy of the image, to allow for blending.\n kp_mask = np.copy(img)\n\n # Draw the keypoints.\n for l in range(len(kps_lines)):\n i1 = kps_lines[l][0]\n i2 = kps_lines[l][1]\n p1 = kps[0, i1].astype(np.int32), kps[1, i1].astype(np.int32)\n p2 = kps[0, i2].astype(np.int32), kps[1, i2].astype(np.int32)\n if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:\n cv2.line(\n kp_mask, p1, p2,\n color=colors[l], thickness=2, lineType=cv2.LINE_AA)\n if kps[2, i1] > kp_thresh:\n cv2.circle(\n kp_mask, p1,\n radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n if kps[2, i2] > kp_thresh:\n cv2.circle(\n kp_mask, p2,\n radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n\n # Blend the keypoints.\n return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)\n\ndef vis_3d_skeleton(kpt_3d, kpt_3d_vis, kps_lines, filename=None):\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.\n cmap = plt.get_cmap('rainbow')\n colors = [cmap(i) for i in np.linspace(0, 1, len(kps_lines) + 2)]\n colors = [np.array((c[2], c[1], c[0])) for c in colors]\n\n for l in range(len(kps_lines)):\n i1 = kps_lines[l][0]\n i2 = kps_lines[l][1]\n x = np.array([kpt_3d[i1,0], kpt_3d[i2,0]])\n y = np.array([kpt_3d[i1,1], kpt_3d[i2,1]])\n z = np.array([kpt_3d[i1,2], kpt_3d[i2,2]])\n\n if kpt_3d_vis[i1,0] > 0 and kpt_3d_vis[i2,0] > 0:\n ax.plot(x, z, -y, c=colors[l], linewidth=2)\n if kpt_3d_vis[i1,0] > 0:\n ax.scatter(kpt_3d[i1,0], kpt_3d[i1,2], -kpt_3d[i1,1], c=colors[l], marker='o')\n if kpt_3d_vis[i2,0] > 0:\n ax.scatter(kpt_3d[i2,0], kpt_3d[i2,2], -kpt_3d[i2,1], c=colors[l], marker='o')\n\n x_r = np.array([0, cfg.input_shape[1]], dtype=np.float32)\n y_r = np.array([0, cfg.input_shape[0]], dtype=np.float32)\n z_r = np.array([0, 1], dtype=np.float32)\n \n if filename is None:\n ax.set_title('3D vis')\n else:\n ax.set_title(filename)\n\n ax.set_xlabel('X Label')\n ax.set_ylabel('Z Label')\n ax.set_zlabel('Y Label')\n #ax.set_xlim([0,cfg.input_shape[1]])\n #ax.set_ylim([0,1])\n #ax.set_zlim([-cfg.input_shape[0],0])\n ax.legend()\n\n plt.show()\n cv2.waitKey(0)\n\n"
] | [
[
"numpy.array",
"matplotlib.pyplot.get_cmap",
"numpy.copy",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
]
] |
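Both functions in `vis.py` above sample a matplotlib colormap in 0-1 RGBA space and then reorder and scale the channels into the 0-255 BGR tuples that OpenCV's drawing calls expect. A minimal sketch of just that conversion (the number of skeleton lines is an illustrative stand-in for `len(kps_lines)`):

```python
import numpy as np
import matplotlib.pyplot as plt

n_lines = 5  # stand-in for len(kps_lines)
cmap = plt.get_cmap('rainbow')
rgba_colors = [cmap(i) for i in np.linspace(0, 1, n_lines + 2)]

# cmap() returns (r, g, b, a) in [0, 1]; OpenCV wants (b, g, r) in [0, 255]
bgr_colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in rgba_colors]
print(bgr_colors[0])
```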
magnetron/pyleecan | [
"2a3338f4ab080ad6488b5ab8746c3fea1f36f177"
] | [
"Tests/Methods/Slot/test_SlotW29_meth.py"
] | [
"# -*- coding: utf-8 -*-\nfrom unittest import TestCase\n\nfrom ....Classes.Segment import Segment\n\nfrom ....Classes.SlotW29 import SlotW29\nfrom numpy import ndarray, arcsin, exp\nfrom ....Classes.LamSlot import LamSlot\nfrom ddt import ddt, data\nfrom ....Methods.Slot.Slot.comp_height import comp_height\nfrom ....Methods.Slot.Slot.comp_surface import comp_surface\nfrom ....Methods.Slot.Slot.comp_angle_opening import comp_angle_opening\nfrom ....Methods.Slot.SlotWind.comp_surface_wind import comp_surface_wind\n\n# For AlmostEqual\nDELTA = 1e-4\n\nslotW29_test = list()\n\n# Internal Slot\nlam = LamSlot(is_internal=True, Rext=0.1325)\nlam.slot = SlotW29(H0=1e-3, H1=1.5e-3, H2=30e-3, W0=12e-3, W1=14e-3, W2=20e-3)\nslotW29_test.append(\n {\n \"test_obj\": lam,\n \"S_exp\": 6.340874e-4,\n \"Ao\": 0.10004,\n \"Aw\": 0.174118,\n \"SW_exp\": 6e-4,\n \"H_exp\": 3.26359e-2,\n }\n)\n\n# External Slot\nlam = LamSlot(is_internal=False, Rint=0.1325)\nlam.slot = SlotW29(H0=1e-3, H1=1.5e-3, H2=30e-3, W0=12e-3, W1=14e-3, W2=20e-3)\nslotW29_test.append(\n {\n \"test_obj\": lam,\n \"S_exp\": 6.31912e-4,\n \"Ao\": 0.10004,\n \"Aw\": 0.133185,\n \"SW_exp\": 6e-4,\n \"H_exp\": 3.2667e-2,\n }\n)\n\n\n@ddt\nclass test_SlotW29_meth(TestCase):\n \"\"\"unittest for SlotW29 methods\"\"\"\n\n @data(*slotW29_test)\n def test_comp_surface(self, test_dict):\n \"\"\"Check that the computation of the surface is correct\n \"\"\"\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_surface()\n\n a = result\n b = test_dict[\"S_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)\n\n # Check that the analytical method returns the same result as the numerical one\n b = comp_surface(test_obj.slot)\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)\n\n @data(*slotW29_test)\n def test_comp_surface_wind(self, test_dict):\n \"\"\"Check that the computation of the winding surface is correct\n \"\"\"\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_surface_wind()\n\n a = result\n b = test_dict[\"SW_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)\n\n # Check that the analytical method returns the same result as the numerical one\n b = comp_surface_wind(test_obj.slot)\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)\n\n @data(*slotW29_test)\n def test_comp_height(self, test_dict):\n \"\"\"Check that the computation of the height is correct\n \"\"\"\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_height()\n\n a = result\n b = test_dict[\"H_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)\n\n # Check that the analytical method returns the same result as the numerical one\n b = comp_height(test_obj.slot)\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)\n\n @data(*slotW29_test)\n def test_comp_angle_opening(self, test_dict):\n \"\"\"Check that the computation of the average opening angle iscorrect\n \"\"\"\n test_obj = test_dict[\"test_obj\"]\n a = test_obj.slot.comp_angle_opening()\n self.assertEqual(a, 2 * arcsin(test_obj.slot.W0 / (2 * 0.1325)))\n # Check that the analytical method returns the same result as the numerical one\n b = 
comp_angle_opening(test_obj.slot)\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)\n\n @data(*slotW29_test)\n def test_comp_angle_wind_eq(self, test_dict):\n \"\"\"Check that the computation of the average angle is correct\n \"\"\"\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_angle_wind_eq()\n\n a = result\n b = test_dict[\"Aw\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)\n\n def test_build_geometry(self):\n \"\"\"check that curve_list is correct\"\"\"\n test_obj = LamSlot(is_internal=False, Rint=1)\n test_obj.slot = SlotW29(W0=0.2, H0=0.1, W1=0.4, H1=0.1, H2=0.6, W2=0.6)\n\n # Rbo=1\n Z1 = exp(1j * float(arcsin(0.1)))\n\n Z2 = Z1 + 0.1\n Z3 = Z1 + 0.1 + 0.1j\n Z4 = Z1 + 0.2 + 0.1j\n Z5 = Z1 + 0.2 + 0.2j\n Z6 = Z1 + 0.8 + 0.2j\n Z7 = Z1 + 0.8 - 0.4j\n Z8 = Z1 + 0.2 - 0.4j\n Z9 = Z1 + 0.2 - 0.3j\n Z10 = Z1 + 0.1 - 0.3j\n Z11 = Z1 + 0.1 - 0.2j\n Z12 = Z1 - 0.2j\n\n [Z1, Z2, Z3, Z4, Z5, Z6, Z7, Z8, Z9, Z10, Z11, Z12] = [\n Z12,\n Z11,\n Z10,\n Z9,\n Z8,\n Z7,\n Z6,\n Z5,\n Z4,\n Z3,\n Z2,\n Z1,\n ]\n # Creation of curve\n curve_list = list()\n curve_list.append(Segment(Z1, Z2))\n curve_list.append(Segment(Z2, Z3))\n curve_list.append(Segment(Z3, Z4))\n curve_list.append(Segment(Z4, Z5))\n curve_list.append(Segment(Z5, Z6))\n curve_list.append(Segment(Z6, Z7))\n curve_list.append(Segment(Z7, Z8))\n curve_list.append(Segment(Z8, Z9))\n curve_list.append(Segment(Z9, Z10))\n curve_list.append(Segment(Z10, Z11))\n curve_list.append(Segment(Z11, Z12))\n\n result = test_obj.slot.build_geometry()\n self.assertEqual(len(result), len(curve_list))\n for i in range(0, len(result)):\n a = result[i].begin\n b = curve_list[i].begin\n self.assertAlmostEqual(\n (a - b) / a,\n 0,\n delta=DELTA,\n msg=\"Wrong build_geo (for begin point \"\n + str(i)\n + \" returned \"\n + str(a)\n + \", expected \"\n + str(b)\n + \")\",\n )\n\n a = result[i].end\n b = curve_list[i].end\n self.assertAlmostEqual(\n (a - b) / a,\n 0,\n delta=DELTA,\n msg=\"Wrong build_geo (for end point \"\n + str(i)\n + \" returned \"\n + str(a)\n + \", expected \"\n + str(b)\n + \")\",\n )\n"
] | [
[
"numpy.arcsin"
]
] |
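Each test in the row above checks a relative rather than absolute error: `assertAlmostEqual((a - b) / a, 0, delta=DELTA)` passes when `a` and `b` agree to within `DELTA` as a fraction of `a`. A minimal sketch of that assertion pattern outside pyleecan (the numeric values here are illustrative, not slot dimensions):

```python
import unittest

DELTA = 1e-4  # maximum allowed relative error


class CheckRelativeError(unittest.TestCase):
    def test_relative_error(self):
        a = 6.340874e-4   # e.g. value returned by an analytical method
        b = 6.3409e-4     # e.g. the expected / numerical reference value
        msg = "Return " + str(a) + " expected " + str(b)
        self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)


if __name__ == '__main__':
    unittest.main()
```

Dividing by `a` makes the tolerance scale-independent, which is why the same `DELTA` works for surfaces around 1e-4 m² and angles around 1e-1 rad.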
dalakada/TwiCSv2 | [
"40672a99a201f6e2aab9dd085e1f4a29e8253f3b",
"40672a99a201f6e2aab9dd085e1f4a29e8253f3b"
] | [
"stats_eddie/SVM.py",
"production_code/phase2_Trie_baseline_reintroduction_effectiveness.py"
] | [
"\n# coding: utf-8\nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import svm\nfrom scipy import stats\n\nclass SVM1():\n def __init__(self,train):\n\n #train the algorithm once\n self.train = pd.read_csv(train,delimiter=\",\",sep='\\s*,\\s*')\n\n self.train['normalized_cap']=self.train['cap']/self.train['cumulative']\n self.train['normalized_capnormalized_substring-cap']=self.train['substring-cap']/self.train['cumulative']\n self.train['normalized_s-o-sCap']=self.train['s-o-sCap']/self.train['cumulative']\n self.train['normalized_all-cap']=self.train['all-cap']/self.train['cumulative']\n self.train['normalized_non-cap']=self.train['non-cap']/self.train['cumulative']\n self.train['normalized_non-discriminative']=self.train['non-discriminative']/self.train['cumulative']\n\n\n '''self.cols = ['length','cap','substring-cap','s-o-sCap','all-cap','non-cap','non-discriminative','cumulative',\n 'normalized_cap',\n 'normalized_capnormalized_substring-cap',\n 'normalized_s-o-sCap',\n 'normalized_all-cap',\n 'normalized_non-cap',\n 'normalized_non-discriminative'\n ]'''\n self.cols = ['length','normalized_cap',\n 'normalized_capnormalized_substring-cap',\n 'normalized_s-o-sCap',\n 'normalized_all-cap',\n 'normalized_non-cap',\n 'normalized_non-discriminative'\n ]\n self.colsRes = ['class']\n\n # self.trainArr = self.train.as_matrix(self.cols) #training array\n # #print(self.trainArr)\n # self.trainRes = self.train.as_matrix(self.colsRes) # training results\n\n self.trainArr = self.train[self.cols]\n self.trainRes = self.train[self.colsRes].values\n \n self.clf = svm.SVC(probability=True)\n self.clf.fit(self.trainArr, self.trainRes) # fit the data to the algorithm\n\n \n\n\n\n def run(self,x_test,z_score_threshold):\n x_test['normalized_cap']=x_test['cap']/x_test['cumulative']\n x_test['normalized_capnormalized_substring-cap']=x_test['substring-cap']/x_test['cumulative']\n x_test['normalized_s-o-sCap']=x_test['s-o-sCap']/x_test['cumulative']\n x_test['normalized_all-cap']=x_test['all-cap']/x_test['cumulative']\n x_test['normalized_non-cap']=x_test['non-cap']/x_test['cumulative']\n x_test['normalized_non-discriminative']=x_test['non-discriminative']/x_test['cumulative']\n\n\n\n\n #setting features\n # testArr= x_test.as_matrix(self.cols)\n # #print(testArr)\n # testRes = x_test.as_matrix(self.colsRes)\n\n\n # In[ ]:\n\n testArr = x_test[self.cols]\n\n\n # In[65]:\n\n #clf = svm.SVC(probability=True)\n #clf.fit(trainArr, trainRes) # fit the data to the algorithm\n\n\n # In[66]:\n\n pred_prob=self.clf.predict_proba(testArr)\n\n\n # In[67]:\n\n prob_first_column=[]\n for i in pred_prob:\n prob_first_column.append(i[1])\n \n\n\n # In[68]:\n\n #print(x_test_filtered.index.size,len(prob_first_column))\n\n\n # In[69]:\n #print(pred_prob) \n x_test['probability']=prob_first_column\n\n\n # In[70]:\n\n #type(x_test)\n\n\n # In[46]:\n\n #type(x_test)\n\n\n # In[71]:\n\n #x_test_filtered.to_csv(\"results3.csv\", sep=',', encoding='utf-8')\n\n\n # In[48]:\n\n return x_test\n\n\n'''\n\n# In[109]:\n\nali.to_csv(\"Classifier_Results.csv\", sep=',', encoding='utf-8')\n\n\n# In[68]:\n\npred_class=clf.predict(testArr)\nprint(pred_class)\n\n\n# In[69]:\n\ntestRes\n\n\n# In[10]:\n\ncount=0\n\n\n# In[11]:\n\nfor i in range(len(pred_class)):\n if pred_class[i]==testRes[i]:\n count+=1\n\n\n# In[12]:\n\ncount\n\n\n# In[13]:\n\nlen(pred_class)\n\n\n# In[14]:\n\nfloat(count)/len(pred_class)\n\n\n# In[22]:\n\nprob_holder=[]\nfor idx, cl in enumerate(pred_prob):\n 
prob_holder.append(pred_prob[idx][1])\n#x_test.insert(len(x_test.columns),'pred_prob',pred_prob[1])\n#print (pred_prob[,1])\n#x_test.insert(1, 'bar', df['one'])\n\n\n# In[23]:\n\nx_test.to_csv(\"svm_prob.csv\", sep=';', encoding='utf-8')\n\n\n\n# In[24]:\n\nrandom_forest_logistic=pd.read_csv(\"random_forest_logistic.csv\",delimiter=\";\")\n\n\n# In[25]:\n\nrandom_forest_logistic\n\n\n# In[26]:\n\nprob_holder=[]\nfor idx, cl in enumerate(pred_prob):\n prob_holder.append(pred_prob[idx][1])\n#x_test.insert(len(x_test.columns),'pred_prob',pred_prob[1])\n#print (pred_prob[,1])\n#x_test.insert(1, 'bar', df['one'])\n\n\n# In[27]:\n\nrandom_forest_logistic.insert(len(random_forest.columns),'svm_with_prob',prob_holder)\nprint random_forest_logistic\n\n\n# In[29]:\n\nrandom_forest_logistic.to_csv(\"random_forest_logistic_svm_FINAL.csv\", sep=';', encoding='utf-8')\n\n\n# In[34]:\n\nclass_x=0\nTP=0\nTN=0\nFP=0\nFN=0\n\nfor idx, cl in enumerate(pred_prob):\n #print pred_prob[idx][1]\n #if pred_prob[idx][1]>0.6:\n # class_x=1\n #elif pred_prob[idx][1]<=0.6:\n # class_x=0\n class_x = pred_class[idx] \n\n if (class_x ==testRes[idx]) and class_x==1 :\n TP+=1\n elif (class_x ==testRes[idx]) and class_x==0 :\n TN+=1\n if class_x == 1 and testRes[idx]==0:\n FP+=1\n if class_x == 0 and testRes[idx]==1:\n FN+=1\n\n\n# In[35]:\n\nprint TP,TN,FP,FN\n\n\n# In[ ]:\n\n\n\n'''",
"# coding: utf-8\nfrom nltk.corpus import stopwords\nimport pandas as pd\nimport NE_candidate_module as ne\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nimport string\nimport copy\nimport numpy\nimport math\nfrom itertools import groupby\nfrom operator import itemgetter\nfrom collections import Iterable, OrderedDict\nfrom scipy import stats\nimport emoji\nimport SVM as svm\nimport statistics\nimport pandas as pd\nimport time\nimport datetime\nimport trie as trie\nimport re\nimport pickle\nimport itertools\nfrom scipy import spatial\n\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn import linear_model\nfrom sklearn.cluster import KMeans, MeanShift\nfrom sklearn.metrics import silhouette_samples, silhouette_score\n\ncachedStopWords = stopwords.words(\"english\")\ntempList=[\"i\",\"and\",\"or\",\"other\",\"another\",\"across\",\"unlike\",\"anytime\",\"were\",\"you\",\"then\",\"still\",\"till\",\"nor\",\"perhaps\",\"otherwise\",\"until\",\"sometimes\",\"sometime\",\"seem\",\"cannot\",\"seems\",\"because\",\"can\",\"like\",\"into\",\"able\",\"unable\",\"either\",\"neither\",\"if\",\"we\",\"it\",\"else\",\"elsewhere\",\"how\",\"not\",\"what\",\"who\",\"when\",\"where\",\"who's\",\"who’s\",\"let\",\"today\",\"tomorrow\",\"tonight\",\"let's\",\"let’s\",\"lets\",\"know\",\"make\",\"oh\",\"via\",\"i\",\"yet\",\"must\",\"mustnt\",\"mustn't\",\"mustn’t\",\"i'll\",\"i’ll\",\"you'll\",\"you’ll\",\"we'll\",\"we’ll\",\"done\",\"doesnt\",\"doesn't\",\"doesn’t\",\"dont\",\"don't\",\"don’t\",\"did\",\"didnt\",\"didn't\",\"didn’t\",\"much\",\"without\",\"could\",\"couldn't\",\"couldn’t\",\"would\",\"wouldn't\",\"wouldn’t\",\"should\",\"shouldn't\",\"souldn’t\",\"shall\",\"isn't\",\"isn’t\",\"hasn't\",\"hasn’t\",\"wasn't\",\"wasn’t\",\"also\",\"let's\",\"let’s\",\"let\",\"well\",\"just\",\"everyone\",\"anyone\",\"noone\",\"none\",\"someone\",\"theres\",\"there's\",\"there’s\",\"everybody\",\"nobody\",\"somebody\",\"anything\",\"else\",\"elsewhere\",\"something\",\"nothing\",\"everything\",\"i'd\",\"i’d\",\"i’m\",\"won't\",\"won’t\",\"i’ve\",\"i've\",\"they're\",\"they’re\",\"we’re\",\"we're\",\"we'll\",\"we’ll\",\"we’ve\",\"we've\",\"they’ve\",\"they've\",\"they’d\",\"they'd\",\"they’ll\",\"they'll\",\"again\",\"you're\",\"you’re\",\"you've\",\"you’ve\",\"thats\",\"that's\",'that’s','here’s',\"here's\",\"what's\",\"what’s\",\"i’m\",\"i'm\",\"a\",\"so\",\"except\",\"arn't\",\"aren't\",\"arent\",\"this\",\"when\",\"it\",\"it’s\",\"it's\",\"he's\",\"she's\",\"she'd\",\"he'd\",\"he'll\",\"she'll\",\"she’ll\",\"many\",\"can't\",\"cant\",\"can’t\",\"even\",\"yes\",\"no\",\"these\",\"here\",\"there\",\"to\",\"maybe\",\"<hashtag>\",\"<hashtag>.\",\"ever\",\"every\",\"never\",\"there's\",\"there’s\",\"whenever\",\"wherever\",\"however\",\"whatever\",\"always\",\"although\"]\nfor item in tempList:\n if item not in cachedStopWords:\n cachedStopWords.append(item)\ncachedStopWords.remove(\"don\")\ncachedStopWords.remove(\"your\")\ncachedStopWords.remove(\"up\")\ncachedTitles = [\"mr.\",\"mr\",\"mrs.\",\"mrs\",\"miss\",\"ms\",\"sen.\",\"dr\",\"dr.\",\"prof.\",\"president\",\"congressman\"]\nprep_list=[\"in\",\"at\",\"of\",\"on\",\"&;\",\"v.\"] #includes common conjunction as 
well\narticle_list=[\"a\",\"an\",\"the\"]\nconjoiner=[\"de\"]\nday_list=[\"sunday\",\"monday\",\"tuesday\",\"wednesday\",\"thursday\",\"friday\",\"saturday\",\"mon\",\"tues\",\"wed\",\"thurs\",\"fri\",\"sat\",\"sun\"]\nmonth_list=[\"january\",\"february\",\"march\",\"april\",\"may\",\"june\",\"july\",\"august\",\"september\",\"october\",\"november\",\"december\",\"jan\",\"feb\",\"mar\",\"apr\",\"may\",\"jun\",\"jul\",\"aug\",\"sep\",\"oct\",\"nov\",\"dec\"]\nchat_word_list=[\"nope\",\"gee\",\"hmm\",\"bye\",\"please\",\"4get\",\"ooh\",\"reppin\",\"idk\",\"oops\",\"yup\",\"stfu\",\"uhh\",\"2b\",\"dear\",\"yay\",\"btw\",\"ahhh\",\"b4\",\"ugh\",\"ty\",\"cuz\",\"coz\",\"sorry\",\"yea\",\"asap\",\"ur\",\"bs\",\"rt\",\"lmfao\",\"lfmao\",\"slfmao\",\"u\",\"r\",\"nah\",\"umm\",\"ummm\",\"thank\",\"thanks\",\"congrats\",\"whoa\",\"rofl\",\"ha\",\"ok\",\"okay\",\"hey\",\"hi\",\"huh\",\"ya\",\"yep\",\"yeah\",\"fyi\",\"duh\",\"damn\",\"lol\",\"omg\",\"congratulations\",\"fucking\",\"fuck\",\"f*ck\",\"wtf\",\"wth\",\"aka\",\"wtaf\",\"xoxo\",\"rofl\",\"imo\",\"wow\",\"fck\",\"haha\",\"hehe\",\"hoho\"]\nstring.punctuation=string.punctuation+'…‘’'\n\n\n\nclass EntityResolver ():\n\n\n def executor(self,max_batch_value,TweetBase,CTrie,phase2stopwordList,z_score_threshold,reintroduction_threshold,raw_tweets_for_others):\n # def executor(self,TweetBase,CTrie,phase2stopwordList,z_score_threshold,reintroduction_threshold,raw_tweets_for_others)\n\n\n # SET CB\n # print(phase2stopwordList)\n candidate_featureBase_DF,data_frame_holder,phase2_candidates_holder,phase2_unnormalized_candidates_holder,correction_flag,candidates_to_annotate,converted_candidates=self.set_cb(TweetBase,CTrie,phase2stopwordList,z_score_threshold,reintroduction_threshold)\n \n candidate_featureBase_DF.to_csv(\"candidate_base_new.csv\", sep=',', encoding='utf-8')\n print(candidate_featureBase_DF[candidate_featureBase_DF.candidate==\"c.j. 
mccollum\"])\n # print(candidate_featureBase_DF[candidate_featureBase_DF.candidate=='knows'])\n # print(candidate_featureBase_DF[candidate_featureBase_DF.candidate=='democrat'])\n\n # SET TF \n untrashed_tweets=self.set_tf(data_frame_holder,\n candidate_featureBase_DF,\n phase2_candidates_holder,phase2_unnormalized_candidates_holder,correction_flag)\n\n # print('untrashed_tweets: ', len(untrashed_tweets))\n # untrashed_tweets.to_csv(\"phase2output.csv\", sep=',', encoding='utf-8')\n\n\n\n ######## EXPERIMENT FUNCTION STARTS #################################\n ########\n ########\n #input: tf, candidate_featureBase_DF \n #output: incomplete_tweets[candidates_with_label], [good_candidates], [bad_candidates]\n self.set_column_for_candidates_in_incomplete_tweets(candidate_featureBase_DF,untrashed_tweets)\n ########\n ########\n # tp,fp,f1,accuracy calculations.\n # input: tf .[good_candidates],[annotation]\n # output : incomplete tweets.['tp'],['fp'],[f1]\n # self.calculate_tp_fp_f1(z_score_threshold,untrashed_tweets)\n ########\n ########\n #SAVE INCOMING TWEETS FOR ANNOTATION FOR OTHERS\n # self.raw_tweets_for_others=pd.concat([self.raw_tweets_for_others,raw_tweets_for_others ])\n ########\n ########\n ########\n # tp,fp,f1,accuracy calculations.\n # input: tf .[good_candidates],[annotation]\n # output : incomplete tweets.['tp'],['fp'],[f1]\n # self.calculate_tp_fp_f1_for_others(self.raw_tweets_for_others)\n ########\n ########\n ######## EXPERIMENT FUNCTION ENDS ###################################\n\n\n\n\n # DROP TF\n just_converted_tweets=self.get_complete_tf(untrashed_tweets)\n #incomplete tweets at the end of current batch\n incomplete_tweets=self.get_incomplete_tf(untrashed_tweets)\n\n #all incomplete_tweets---> incomplete_tweets at the end of current batch + incomplete_tweets not reintroduced\n # self.incomplete_tweets=incomplete_tweets #without reintroduction--- when everything is reintroduced, just incomplete_tweets\n self.incomplete_tweets=pd.concat([incomplete_tweets,self.not_reintroduced],ignore_index=True)\n\n\n\n #recording tp, fp , f1\n #self.accuracy_tuples_prev_batch.append((just_converted_tweets.tp.sum(), just_converted_tweets.total_mention.sum(),just_converted_tweets.fp.sum(),just_converted_tweets.fn.sum()))\n\n\n #operations for getting ready for next batch.\n # self.incomplete_tweets.drop('2nd Iteration Candidates', axis=1, inplace=True)\n self.incomplete_tweets.drop(['2nd Iteration Candidates','2nd Iteration Candidates Unnormalized'], axis=1, inplace=True)\n self.counter=self.counter+1\n\n self.aggregator_incomplete_tweets= self.aggregator_incomplete_tweets.append(self.incomplete_tweets)\n self.just_converted_tweets=self.just_converted_tweets.append(just_converted_tweets)\n\n if(self.counter==(max_batch_value+1)):\n # self.just_converted_tweets.drop('2nd Iteration Candidates', axis=1, inplace=True)\n self.just_converted_tweets.drop(['2nd Iteration Candidates','2nd Iteration Candidates Unnormalized'], axis=1, inplace=True)\n\n print('completed tweets: ', len(self.just_converted_tweets),'incomplete tweets: ', len(self.incomplete_tweets))\n \n print(len(list(self.just_converted_tweets.columns.values)))\n print(len(list(self.incomplete_tweets.columns.values)))\n\n combined_frame_list=[self.just_converted_tweets, self.incomplete_tweets]\n complete_tweet_dataframe = pd.concat(combined_frame_list)\n\n print('final tally: ', (len(self.just_converted_tweets)+len(self.incomplete_tweets)), len(complete_tweet_dataframe))\n\n # 
print(sorted(complete_tweet_dataframe['tweetID'].astype(int).unique()))\n # lst=list(range(38911))\n # for elem in lst:\n # if(elem not in complete_tweet_dataframe['tweetID'].astype(int).unique().tolist()):\n # print(elem)\n # print(list(filter(lambda elem: elem not in complete_tweet_dataframe['tweetID'].unique(), lst)))\n\n #to groupby tweetID and get one tuple per tweetID\n complete_tweet_dataframe_grouped_df= (complete_tweet_dataframe.groupby('tweetID', as_index=False).aggregate(lambda x: x.tolist()))\n complete_tweet_dataframe_grouped_df['tweetID']=complete_tweet_dataframe_grouped_df['tweetID'].astype(int)\n self.complete_tweet_dataframe_grouped_df_sorted=(complete_tweet_dataframe_grouped_df.sort_values(by='tweetID', ascending=True)).reset_index(drop=True)\n\n print('524: ',self.complete_tweet_dataframe_grouped_df_sorted[(self.complete_tweet_dataframe_grouped_df_sorted.tweetID==524)]['output_mentions'])\n\n print(list(self.complete_tweet_dataframe_grouped_df_sorted.columns.values))\n # print(self.complete_tweet_dataframe_grouped_df_sorted.head(5))\n # print(len(self.complete_tweet_dataframe_grouped_df_sorted))\n\n\n #self.aggregator_incomplete_tweets.to_csv(\"all_incompletes.csv\", sep=',', encoding='utf-8')\n\n\n #self.just_converted_tweets.to_csv(\"all_converteds.csv\", sep=',', encoding='utf-8')\n #self.incomplete_tweets.to_csv(\"incomplete_for_last_batch.csv\", sep=',', encoding='utf-8')\n return candidate_featureBase_DF, converted_candidates, self.complete_tweet_dataframe_grouped_df_sorted\n\n\n\n def __init__(self):\n self.counter=0\n self.decay_factor=2**(-1/2)\n self.decay_base_staggering=2\n\n self.my_classifier= svm.SVM1('training.csv')\n self.complete_tweet_dataframe_grouped_df_sorted=pd.DataFrame([], columns=['tweetID', 'TweetSentence', 'ambiguous_candidates', 'annotation', 'candidates_with_label', 'completeness', 'current_minus_entry', 'entry_batch', 'hashtags', 'index', 'only_good_candidates', 'output_mentions', 'phase1Candidates', 'sentID', 'stanford_candidates', 'user'])\n\n\n def calculate_tp_fp_f1_generic(self,raw_tweets_for_others,state_of_art):\n\n\n unique_tweetIDs=raw_tweets_for_others['tweetID'].unique().tolist()\n\n column_annot_holder=[]\n column_candidates_holder=[]\n column_tweet_text_holder=[]\n\n for unique_tweetID in unique_tweetIDs:\n #TO DO : cast to int tweetID column of dataframe.\n group_by_tweet_id_df=raw_tweets_for_others[raw_tweets_for_others.tweetID==unique_tweetID]\n\n tweet_level_annot=[]\n tweet_level_candidates=[]\n tweet_level_tweets=\"\"\n for index, row in group_by_tweet_id_df.iterrows():\n \n #annotation\n annot=list(row['annotation'])\n tweet_level_annot=tweet_level_annot+annot;\n\n #candidates from other systems\n tweet_level_candidates=list(row[state_of_art])\n\n #merging sentences into one tweet.\n sentence_tweet=str(row[\"TweetSentence\"])\n tweet_level_tweets=sentence_tweet+\" \"+tweet_level_tweets\n\n #getting unique candidates.\n tweet_level_candidates_set = set(tweet_level_candidates)\n tweet_level_candidates = list(tweet_level_candidates_set)\n\n #getting unique annotations.\n tweet_level_annot_set = set(tweet_level_annot)\n tweet_level_annot = list(tweet_level_annot_set)\n\n\n column_annot_holder.append(tweet_level_annot)\n column_candidates_holder.append(tweet_level_candidates)\n column_tweet_text_holder.append(tweet_level_tweets)\n\n ## for annotation.\n cum_holder_annot=[]\n for rows_annot in column_annot_holder:\n cum_holder_annot.extend(rows_annot)\n\n\n cum_holder_annot_set = set(cum_holder_annot)\n cum_holder_annot = 
list(cum_holder_annot_set)\n\n\n ## for candidates.\n cum_holder_candidates=[]\n for rows_candidates in column_candidates_holder:\n cum_holder_candidates.extend(rows_candidates)\n\n\n cum_holder_candidates_set = set(cum_holder_candidates)\n cum_holder_candidates = list(cum_holder_candidates_set)\n\n # tweet_ids_df=pd.DataFrame(unique_tweetIDs,column_annot_holder,column_candidates_holder, columns=['tweetID','column_annot_holder','column_candidates_holder'])\n tweet_ids_df = pd.DataFrame({'tweetid': unique_tweetIDs,'tweet_text':column_tweet_text_holder, 'annotations': column_annot_holder,\"candidates_holder\": column_candidates_holder})\n \n # tweet_ids_df=pd.DataFrame(unique_tweetIDs,column_annot_holder,column_candidates_holder, columns=['tweetID','column_annot_holder','column_candidates_holder'])\n\n good_candidates = cum_holder_candidates\n\n annotations= cum_holder_annot\n\n true_positive_count=0\n false_positive_count=0\n false_negative_count=0\n ambigious_not_in_annotation=0\n\n true_positive_holder = []\n false_negative_holder=[]\n false_positive_holder=[]\n total_mention_holder=[]\n ambigious_not_in_annotation_holder=[]\n f_measure_holder=[]\n\n\n\n total_mentions=0\n total_mentions+=len(annotations)\n #print(idx,val,true_positives_candidates[idx])\n false_negative_line= [val2 for val2 in annotations if val2 not in good_candidates]\n #print(idx,false_negative_line)\n true_positive_line=[val2 for val2 in annotations if val2 in good_candidates]\n\n # ambigious_not_in_annotation_line= [val2 for val2 in ambiguous_candidates[idx] if val2 not in val]\n\n false_positive_line=[val2 for val2 in good_candidates if val2 not in annotations]\n #print(idx,false_positive_line)\n\n \n # print(idx,true_positive_line,'ground truth: ',annotations[idx],'our system: ',good_candidates[idx])\n \n #print(idx+1,'True positive:',true_positive_line)\n true_positive_count+=len(true_positive_line)\n #print(idx+1,'False positive:',false_positive_line)\n false_positive_count+=len(false_positive_line)\n #print(idx+1,'False negative:',false_negative_line)\n false_negative_count+=len(false_negative_line)\n #print(' ')\n\n true_positive_holder.append(len(true_positive_line))\n false_negative_holder.append(len(false_negative_line))\n false_positive_holder.append(len(false_positive_line))\n # ambigious_not_in_annotation_holder.append(len(ambigious_not_in_annotation_line))\n total_mention_holder.append(len(annotations))\n\n\n\n #print(total_mentions, true_positive_count,false_positive_count,false_negative_count)\n # print(false_positive_count)\n # print(false_negative_count)\n precision=(true_positive_count)/(true_positive_count+false_positive_count)\n recall=(true_positive_count)/(true_positive_count+false_negative_count)\n f_measure=2*(precision*recall)/(precision+recall)\n\n if(state_of_art==\"ritter_candidates\"):\n self.accuracy_vals_ritter.append((f_measure,precision,recall)) \n if(state_of_art==\"stanford_candidates\"):\n self.accuracy_vals_stanford.append((f_measure,precision,recall))\n if(state_of_art==\"calai_candidates\"):\n self.accuracy_vals_opencalai.append((f_measure,precision,recall)) \n # print('z_score:', z_score_threshold , 'precision: ',precision,'recall: ',recall,'f measure: ',f_measure)\n # print('trupe positive: ',tp_count, 'false positive: ',fp_count,'false negative: ', fn_count,'total mentions: ', tm_count)\n\n # tweet_ids_df[\"tp\"+state_of_art]=true_positive_holder\n # tweet_ids_df[\"fn\"+state_of_art]=false_negative_holder\n # tweet_ids_df['fp'+state_of_art]= false_positive_holder\n \n # 
if(state_of_art==\"ritter_candidates\"):\n # tweet_ids_df.to_csv(\"ritter_results.csv\", sep=',', encoding='utf-8')\n\n # if(state_of_art==\"stanford_candidates\"):\n # tweet_ids_df.to_csv(\"stanford_results.csv\", sep=',', encoding='utf-8')\n\n\n\n def calculate_tp_fp_f1_for_others(self,raw_tweets_for_others):\n\n opencalai=\"calai_candidates\"\n stanford=\"stanford_candidates\"\n ritter=\"ritter_candidates\"\n\n self.calculate_tp_fp_f1_generic(raw_tweets_for_others,opencalai)\n self.calculate_tp_fp_f1_generic(raw_tweets_for_others,stanford)\n self.calculate_tp_fp_f1_generic(raw_tweets_for_others,ritter)\n\n #################################\n #input candidate_feature_Base\n #output candidate_feature_Base with [\"Z_score\"], [\"probability\"],[\"class\"]\n # no side effect\n #################################\n def classify_candidate_base(self,z_score_threshold,candidate_featureBase_DF):\n\n # #filtering test set based on z_score\n mert1=candidate_featureBase_DF['cumulative'].as_matrix()\n #frequency_array = np.array(list(map(lambda val: val[0], sortedCandidateDB.values())))\n zscore_array1=stats.zscore(mert1)\n\n candidate_featureBase_DF['Z_ScoreUnweighted']=zscore_array1\n z_score_threshold=candidate_featureBase_DF[candidate_featureBase_DF['cumulative']==2].Z_ScoreUnweighted.tolist()[0]\n print(z_score_threshold)\n #candidate_featureBase_DF.to_csv(\"cf_new_with_z_score.csv\", sep=',', encoding='utf-8')\n\n #multi-word infrequent candidates ---> to be used for recall correction\n infrequent_candidates=candidate_featureBase_DF[(candidate_featureBase_DF['Z_ScoreUnweighted'] < z_score_threshold) & (candidate_featureBase_DF.length>1)].candidate.tolist()\n candidate_featureBase_DF = candidate_featureBase_DF[candidate_featureBase_DF['Z_ScoreUnweighted'] >= z_score_threshold]\n\n\n\n #returns updated candidate_featureBase_DF with [\"Z_score\"], [\"probability\"],[\"class\"] attributes.\n return (self.my_classifier.run(candidate_featureBase_DF,z_score_threshold),infrequent_candidates)\n\n\n # recall_correction\n def set_partition_dict(self,candidate_featureBase_DF,infrequent_candidates):\n\n #print(list(self.partition_dict.keys()))\n ambiguous_bad_candidates=candidate_featureBase_DF[(((candidate_featureBase_DF.status==\"a\")|(candidate_featureBase_DF.status==\"b\"))&(candidate_featureBase_DF.length.astype(int)>1))]\n good_candidates=candidate_featureBase_DF[(candidate_featureBase_DF.status==\"g\")].candidate.tolist()\n flag1=False\n flag2=False\n if(len(ambiguous_bad_candidates)>0):\n ambiguous_bad_candidates['max_column'] =ambiguous_bad_candidates[['cap','substring-cap','s-o-sCap','all-cap','non-cap','non-discriminative']].idxmax(axis=1) \n ambiguous_bad_candidates_wFilter= ambiguous_bad_candidates[ambiguous_bad_candidates.max_column=='substring-cap']\n\n #good_candidates=candidate_featureBase_DF[(candidate_featureBase_DF.status==\"g\")].candidate.tolist()\n #print(ambiguous_bad_candidates_wFilter.candidate.tolist())\n\n for candidate in ambiguous_bad_candidates_wFilter.candidate.tolist():\n #print(candidate)\n if candidate not in self.partition_dict.keys():\n\n substring_candidates=self.get_substring_candidates(candidate.split(),good_candidates)\n if(len(substring_candidates)>0):\n if(candidate==\"science guy on the john oliver\"):\n print(candidate,substring_candidates)\n self.partition_dict[candidate]=substring_candidates\n\n flag1= True\n if(len(infrequent_candidates)>0):\n #print(len(ambiguous_bad_candidates_wFilter.candidate.tolist()))\n\n for candidate in infrequent_candidates:\n 
#print(candidate)\n if candidate not in self.partition_dict.keys():\n substring_candidates=self.get_substring_candidates(candidate.split(),good_candidates)\n if(len(substring_candidates)>0):\n # if(candidate==\"bill de blasio's 2020\"):\n # print(candidate,substring_candidates)\n self.partition_dict[candidate]=substring_candidates\n flag2= True\n return (flag1|flag2)\n\n\n def get_aggregate_sketch(self,candidate_featureBase):\n candidate_count=0\n sketch_vector=[0.0,0.0,0.0,0.0,0.0,0.0]\n for index, row in candidate_featureBase.iterrows():\n normalized_cap=row['cap']/row['cumulative']\n sketch_vector[0]+=normalized_cap\n normalized_capnormalized_substring_cap=row['substring-cap']/row['cumulative']\n sketch_vector[1]+=normalized_capnormalized_substring_cap\n normalized_sosCap=row['s-o-sCap']/row['cumulative']\n sketch_vector[2]+=normalized_sosCap\n normalized_allCap=row['all-cap']/row['cumulative']\n sketch_vector[3]+=normalized_allCap\n normalized_non_cap=row['non-cap']/row['cumulative']\n sketch_vector[4]+=normalized_non_cap\n normalized_non_discriminative=row['non-discriminative']/row['cumulative']\n sketch_vector[5]+=normalized_non_discriminative\n candidate_count+=1\n sketch_vector=list(map(lambda elem: elem/candidate_count, sketch_vector))\n # print(\"aggregated sketch:\", sketch_vector)\n return sketch_vector\n\n #MULTIPLE SKETCHES CLUSTERING\n def get_multiple_aggregate_sketches(self, function_call_label, metric, candidate_featureBase):\n sketch_vectors=[]\n candidate_count_arr=[]\n x=candidate_featureBase[['normalized_cap','normalized_capnormalized_substring-cap','normalized_s-o-sCap','normalized_all-cap','normalized_non-cap','normalized_non-discriminative']]\n print(function_call_label,x.shape)\n\n #insert code for silhouette plot here\n\n #considering 2 sub clusters for now, can change this into dynamic selection\n if(function_call_label=='For non-entities: '):\n n_clusters=2\n else:\n n_clusters=2\n if(function_call_label=='For ambiguous: '):\n if(len(x)<3):\n return x\n \n\n clusterer = KMeans(n_clusters, random_state=10)\n cluster_labels = clusterer.fit_predict(x)\n silhouette_avg = silhouette_score(x, cluster_labels, metric=metric) #with metric= euclidean\n # silhouette_avg = silhouette_score(x, cluster_labels, metric='cosine') #with metric= cosine\n sketch_vectors = clusterer.cluster_centers_\n\n # print(\"For n_clusters =\", n_clusters, \"The average silhouette_score is :\", silhouette_avg)\n\n # for i in range(n_clusters):\n # sketch_vectors.append([0.0,0.0,0.0,0.0,0.0,0.0])\n # candidate_count_arr.append(0)\n\n # index=0\n # for row_index, row in candidate_featureBase.iterrows():\n # # print(index,cluster_labels[index])\n # sketch_vectors[cluster_labels[index]][0]+= row['normalized_cap']\n # sketch_vectors[cluster_labels[index]][1]+= row['normalized_capnormalized_substring-cap']\n # sketch_vectors[cluster_labels[index]][2]+= row['normalized_s-o-sCap']\n # sketch_vectors[cluster_labels[index]][3]+= row['normalized_all-cap']\n # sketch_vectors[cluster_labels[index]][4]+= row['normalized_non-cap']\n # sketch_vectors[cluster_labels[index]][5]+= row['normalized_non-discriminative']\n # candidate_count_arr[cluster_labels[index]]+=1\n # index+=1\n\n\n # for i in range(n_clusters):\n # sketch_vectors[i]=list(map(lambda elem: elem/candidate_count_arr[i], sketch_vectors[i]))\n # print(sketch_vectors[i])\n\n\n # #trying alternate clustering options\n # # print(function_call_label, metric)\n # 
x=candidate_featureBase[['normalized_cap','normalized_capnormalized_substring-cap','normalized_s-o-sCap','normalized_all-cap','normalized_non-cap','normalized_non-discriminative']]\n # clusterer = MeanShift()\n # cluster_labels = clusterer.fit_predict(x)\n # sketch_vectors = clusterer.cluster_centers_\n\n # print(sketch_vectors)\n\n return sketch_vectors\n\n\n #SINGLE SKETCH CLUSTERING--- COSINE \n\n #single entity/non-entity sketch; minimal cosine distance\n def get_cosine_distance(self, ambiguous_candidate_records,entity_sketch,non_entity_sketch,reintroduction_threshold):\n cosine_distance_dict={}\n cosine_similarity_dict={}\n for index, row in ambiguous_candidate_records.iterrows():\n candidate_synvec=[(row['cap']/row['cumulative']),\n (row['substring-cap']/row['cumulative']),\n (row['s-o-sCap']/row['cumulative']),\n (row['all-cap']/row['cumulative']),\n (row['non-cap']/row['cumulative']),\n (row['non-discriminative']/row['cumulative'])]\n cosine_distance_ent=spatial.distance.cosine(candidate_synvec, entity_sketch)\n cosine_distance_non_ent=spatial.distance.cosine(candidate_synvec, non_entity_sketch)\n candidate_distance_array=[cosine_distance_ent,cosine_distance_non_ent]\n #cosine_distance_array.append(candidate_distance_array)\n cosine_distance_dict[row['candidate']]=min(candidate_distance_array)\n cosine_similarity_dict[row['candidate']]=1-min(candidate_distance_array)\n\n cosine_distance_dict_sorted= OrderedDict(sorted(cosine_distance_dict.items(), key=lambda x: x[1]))\n cosine_similarity_dict_sorted= OrderedDict(sorted(cosine_similarity_dict.items(), key=lambda x: x[1], reverse=True))\n # cosine_distance_dict_sorted_final= { key:value for key, value in cosine_distance_dict_sorted.items() if value < reintroduction_threshold }\n return cosine_similarity_dict_sorted\n\n #single ambiguous sketch; maximal cosine distance\n def get_cosine_distance_1(self, ambiguous_candidate_records,ambiguous_entity_sketch,reintroduction_threshold):\n cosine_distance_dict={}\n cosine_similarity_dict={}\n for index, row in ambiguous_candidate_records.iterrows():\n candidate_synvec=[(row['cap']/row['cumulative']),\n (row['substring-cap']/row['cumulative']),\n (row['s-o-sCap']/row['cumulative']),\n (row['all-cap']/row['cumulative']),\n (row['non-cap']/row['cumulative']),\n (row['non-discriminative']/row['cumulative'])]\n cosine_distance_amb=spatial.distance.cosine(candidate_synvec, ambiguous_entity_sketch)\n candidate_distance_array=cosine_distance_amb #not an array; just single value\n cosine_distance_dict[row['candidate']]=candidate_distance_array\n cosine_similarity_dict[row['candidate']]=1-candidate_distance_array\n\n cosine_distance_dict_sorted= OrderedDict(sorted(cosine_distance_dict.items(), key=lambda x: x[1], reverse=True))\n cosine_similarity_dict_sorted= OrderedDict(sorted(cosine_similarity_dict.items(), key=lambda x: x[1]))\n # cosine_distance_dict_sorted_final= { key:value for key, value in cosine_distance_dict_sorted.items() if value > reintroduction_threshold }\n return cosine_distance_dict_sorted\n\n def get_combined_score(self, ambiguous_candidate_records,entity_sketch,non_entity_sketch,ambiguous_entity_sketch,reintroduction_threshold):\n combined_score_dict={}\n for index, row in ambiguous_candidate_records.iterrows():\n candidate_synvec=[(row['cap']/row['cumulative']),\n (row['substring-cap']/row['cumulative']),\n (row['s-o-sCap']/row['cumulative']),\n (row['all-cap']/row['cumulative']),\n (row['non-cap']/row['cumulative']),\n (row['non-discriminative']/row['cumulative'])]\n 
cosine_distance_ent=spatial.distance.cosine(candidate_synvec, entity_sketch)\n cosine_distance_non_ent=spatial.distance.cosine(candidate_synvec, non_entity_sketch)\n candidate_distance_array=[cosine_distance_ent,cosine_distance_non_ent]\n cosine_distance_amb=spatial.distance.cosine(candidate_synvec, ambiguous_entity_sketch)\n #cosine_distance_array.append(candidate_distance_array)\n combined_score_dict[row['candidate']]=min(candidate_distance_array)/cosine_distance_amb\n\n combined_score_dict_sorted= OrderedDict(sorted(combined_score_dict.items(), key=lambda x: x[1]))\n combined_score_sorted_final= { key:value for key, value in combined_score_dict_sorted.items() if value < reintroduction_threshold }\n return combined_score_sorted_final\n\n\n #MULTIPLE SKETCH CLUSTERING--- COSINE\n\n #multiple entity/non-entity sketches; minimal cosine distance, maximal similarity\n def get_cosine_distance_multi_sketch(self, ambiguous_candidate_records,entity_sketches,non_entity_sketches,reintroduction_threshold):\n cosine_distance_dict={}\n cosine_similarity_dict={}\n for index, row in ambiguous_candidate_records.iterrows():\n candidate_synvec=[(row['cap']/row['cumulative']),\n (row['substring-cap']/row['cumulative']),\n (row['s-o-sCap']/row['cumulative']),\n (row['all-cap']/row['cumulative']),\n (row['non-cap']/row['cumulative']),\n (row['non-discriminative']/row['cumulative'])]\n\n cosine_distance_ent= min(list(map(lambda elem: spatial.distance.cosine(candidate_synvec, elem), entity_sketches)))\n cosine_distance_non_ent= min(list(map(lambda elem: spatial.distance.cosine(candidate_synvec, elem), non_entity_sketches)))\n candidate_distance_array=[cosine_distance_ent,cosine_distance_non_ent]\n #cosine_distance_array.append(candidate_distance_array)\n cosine_distance_dict[row['candidate']]=min(candidate_distance_array)\n cosine_similarity_dict[row['candidate']]=1-min(candidate_distance_array)\n\n cosine_distance_dict_sorted= OrderedDict(sorted(cosine_distance_dict.items(), key=lambda x: x[1]))\n cosine_similarity_dict_sorted= OrderedDict(sorted(cosine_similarity_dict.items(), key=lambda x: x[1], reverse=True))\n # cosine_distance_dict_sorted_final= { key:value for key, value in cosine_distance_dict_sorted.items() if value < reintroduction_threshold }\n return cosine_similarity_dict_sorted\n\n #multiple ambiguous sketches; maximal cosine distance, minimal similarity\n def get_cosine_distance_multi_sketch_wAmb(self, ambiguous_candidate_records,ambiguous_entity_sketches,reintroduction_threshold):\n cosine_distance_dict={}\n cosine_similarity_dict={}\n for index, row in ambiguous_candidate_records.iterrows():\n candidate_synvec=[(row['cap']/row['cumulative']),\n (row['substring-cap']/row['cumulative']),\n (row['s-o-sCap']/row['cumulative']),\n (row['all-cap']/row['cumulative']),\n (row['non-cap']/row['cumulative']),\n (row['non-discriminative']/row['cumulative'])]\n\n cosine_distance_amb= max(list(map(lambda elem: spatial.distance.cosine(candidate_synvec, elem), ambiguous_entity_sketches)))\n \n #cosine_distance_array.append(candidate_distance_array)\n cosine_distance_dict[row['candidate']]=cosine_distance_amb\n cosine_similarity_dict[row['candidate']]=1-cosine_distance_amb\n\n cosine_distance_dict_sorted= OrderedDict(sorted(cosine_distance_dict.items(), key=lambda x: x[1], reverse=True))\n cosine_similarity_dict_sorted= OrderedDict(sorted(cosine_similarity_dict.items(), key=lambda x: x[1]))\n # cosine_distance_dict_sorted_final= { key:value for key, value in cosine_distance_dict_sorted.items() if value < 
reintroduction_threshold }\n return cosine_similarity_dict_sorted\n\n\n\n #MULTIPLE SKETCH CLUSTERING--- EUCLIDEAN\n\n #multiple entity/non-entity sketches; minimal euclidean distance\n def get_euclidean_distance_multi_sketch(self, ambiguous_candidate_records,entity_sketches,non_entity_sketches,reintroduction_threshold):\n euclidean_distance_dict={}\n euclidean_similarity_dict={}\n for index, row in ambiguous_candidate_records.iterrows():\n candidate_synvec=[(row['cap']/row['cumulative']),\n (row['substring-cap']/row['cumulative']),\n (row['s-o-sCap']/row['cumulative']),\n (row['all-cap']/row['cumulative']),\n (row['non-cap']/row['cumulative']),\n (row['non-discriminative']/row['cumulative'])]\n\n euclidean_distance_ent= min(list(map(lambda elem: spatial.distance.euclidean(candidate_synvec, elem), entity_sketches)))\n euclidean_distance_non_ent= min(list(map(lambda elem: spatial.distance.euclidean(candidate_synvec, elem), non_entity_sketches)))\n candidate_distance_array=[euclidean_distance_ent,euclidean_distance_non_ent]\n #euclidean_distance_array.append(candidate_distance_array)\n euclidean_distance_dict[row['candidate']]=min(candidate_distance_array)\n euclidean_similarity_dict[row['candidate']]=1-min(candidate_distance_array)\n\n euclidean_distance_dict_sorted= OrderedDict(sorted(euclidean_distance_dict.items(), key=lambda x: x[1]))\n euclidean_similarity_dict_sorted= OrderedDict(sorted(euclidean_similarity_dict.items(), key=lambda x: x[1], reverse=True))\n # euclidean_distance_dict_sorted_final= { key:value for key, value in euclidean_distance_dict_sorted.items() if value < reintroduction_threshold }\n return euclidean_similarity_dict_sorted\n\n\n #multiple ambiguous sketches; maximal euclidean distance, minimal similarity\n def get_euclidean_distance_multi_sketch_wAmb(self, ambiguous_candidate_records,ambiguous_entity_sketches,reintroduction_threshold):\n euclidean_distance_dict={}\n euclidean_similarity_dict={}\n for index, row in ambiguous_candidate_records.iterrows():\n candidate_synvec=[(row['cap']/row['cumulative']),\n (row['substring-cap']/row['cumulative']),\n (row['s-o-sCap']/row['cumulative']),\n (row['all-cap']/row['cumulative']),\n (row['non-cap']/row['cumulative']),\n (row['non-discriminative']/row['cumulative'])]\n\n euclidean_distance_amb= max(list(map(lambda elem: spatial.distance.euclidean(candidate_synvec, elem), ambiguous_entity_sketches)))\n \n #euclidean_distance_array.append(candidate_distance_array)\n euclidean_distance_dict[row['candidate']]=euclidean_distance_amb\n euclidean_similarity_dict[row['candidate']]=1-euclidean_distance_amb\n\n euclidean_distance_dict_sorted= OrderedDict(sorted(euclidean_distance_dict.items(), key=lambda x: x[1], reverse=True))\n euclidean_similarity_dict_sorted= OrderedDict(sorted(euclidean_similarity_dict.items(), key=lambda x: x[1]))\n # euclidean_distance_dict_sorted_final= { key:value for key, value in euclidean_distance_dict_sorted.items() if value < reintroduction_threshold }\n return euclidean_similarity_dict_sorted\n\n\n\n #SINGLE SKETCH CLUSTERING--- EUCLIDEAN \n #single entity/non-entity sketch; maximal euclidean distance\n def get_euclidean_distance(self, ambiguous_candidate_records,entity_sketch,non_entity_sketch,reintroduction_threshold):\n euclidean_distance_dict={}\n for index, row in ambiguous_candidate_records.iterrows():\n candidate_synvec=[(row['cap']/row['cumulative']),\n (row['substring-cap']/row['cumulative']),\n (row['s-o-sCap']/row['cumulative']),\n (row['all-cap']/row['cumulative']),\n 
(row['non-cap']/row['cumulative']),\n (row['non-discriminative']/row['cumulative'])]\n euclidean_distance_ent=spatial.distance.euclidean(candidate_synvec, entity_sketch)\n euclidean_distance_non_ent=spatial.distance.euclidean(candidate_synvec, non_entity_sketch)\n candidate_distance_array=[euclidean_distance_ent,euclidean_distance_non_ent]\n #cosine_distance_array.append(candidate_distance_array)\n euclidean_distance_dict[row['candidate']]=candidate_distance_array\n #euclidean_distance_array_sorted= OrderedDict(sorted(cosine_distance_dict.items(), key=lambda x: x[1]))\n return euclidean_distance_dict\n\n #single ambiguous sketch; maximal euclidean distance\n def get_euclidean_distance_1(self, ambiguous_candidate_records,ambiguous_entity_sketch,reintroduction_threshold):\n euclidean_distance_dict={}\n for index, row in ambiguous_candidate_records.iterrows():\n candidate_synvec=[(row['cap']/row['cumulative']),\n (row['substring-cap']/row['cumulative']),\n (row['s-o-sCap']/row['cumulative']),\n (row['all-cap']/row['cumulative']),\n (row['non-cap']/row['cumulative']),\n (row['non-discriminative']/row['cumulative'])]\n euclidean_distance_amb=spatial.distance.euclidean(candidate_synvec, ambiguous_entity_sketch)\n candidate_distance_array=euclidean_distance_amb\n euclidean_distance_dict[row['candidate']]=candidate_distance_array\n euclidean_distance_dict_sorted= OrderedDict(sorted(euclidean_distance_dict.items(), key=lambda x: x[1], reverse=True))\n return euclidean_distance_dict_sorted\n\n def get_reintroduced_tweets(self,candidates_to_reintroduce,reintroduction_threshold):\n #no reintroduction\n #no preferential selection\n print(\"incomplete tweets in batch: \",len(self.incomplete_tweets))\n # print(list(self.incomplete_tweets.columns.values))\n\n reintroduced_tweets=self.incomplete_tweets[(self.counter-self.incomplete_tweets['entry_batch'])<=reintroduction_threshold]\n self.not_reintroduced=self.incomplete_tweets[~self.incomplete_tweets.index.isin(reintroduced_tweets.index)]\n\n print(\"reintroduced tweets: \",len(reintroduced_tweets))\n # for i in range(self.counter):\n # print('i:',len(self.incomplete_tweets[self.incomplete_tweets['entry_batch']==i]))\n return reintroduced_tweets\n\n # #no preferential selection\n \n # # for i in range(self.counter):\n # # print('i:',len(self.incomplete_tweets[self.incomplete_tweets['entry_batch']==i]))\n # # return self.incomplete_tweets\n \n # # get union of tweet-set of selected candidates \n # #print(self.incomplete_tweets[any(x in list(cosine_distance_dict.keys()) for x in self.incomplete_tweets['ambiguous_candidates'])])\n # reintroduced_tweets=self.incomplete_tweets[self.incomplete_tweets.apply(lambda row:any(x in candidates_to_reintroduce for x in row['ambiguous_candidates']) ,axis=1)]\n\n # # reintroduced_tweets_reintroduction_eviction=self.incomplete_tweets[self.incomplete_tweets.apply(lambda row:any(x in candidates_to_reintroduce1 for x in row['ambiguous_candidates']) ,axis=1)]\n # #not_reintroduced=self.incomplete_tweets[self.incomplete_tweets.apply(lambda row:all(x not in list(cosine_distance_dict.keys()) for x in row['ambiguous_candidates']) ,axis=1)]\n # self.not_reintroduced=self.incomplete_tweets[self.incomplete_tweets.apply(lambda row:all(x not in candidates_to_reintroduce1 for x in row['ambiguous_candidates']) ,axis=1)]\n # # # print(len(self.incomplete_tweets))\n # reintroduced_length=len(reintroduced_tweets)\n # not_reintroduced_length=len(self.not_reintroduced)\n\n # print(\"=> reintroduced tweets: \",reintroduced_length ,\" 
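    # Editor's note on frequencies_w_decay (defined just below): once a candidate
    # has been seen, its score is updated as
    #     new = old + decay_factor ** (current_batch - first_seen_batch) * batch_count
    # so the longer ago a candidate first appeared, the less its new mentions add.
    # A quick worked example with a hypothetical decay_factor of 0.5 (not a value
    # taken from the original): old = 10, first seen in batch 3, current batch 5,
    # 4 mentions in this batch  ->  new = 10 + 0.5**2 * 4 = 11.0. On first sight the
    # score is instead seeded with the candidate's cumulative count from the
    # feature base.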
not-reintroduced tweets: \", not_reintroduced_length, \"total: \", (reintroduced_length+not_reintroduced_length))\n # #print((len(not_reintroduced)==len(self.not_reintroduced)),(len(reintroduced_tweets)+len(self.not_reintroduced)==len(self.incomplete_tweets)))\n # return reintroduced_tweets\n\n #NOTE: with simple eviction\n def frequencies_w_decay(self,ambiguous_candidates_in_batch_w_Count,candidate_featureBase_DF):\n dict_to_return={}\n for candidate in ambiguous_candidates_in_batch_w_Count.keys():\n frequency_w_decay=-99\n old_value=0\n if(candidate in self.ambiguous_candidates_reintroduction_dict):\n old_value=self.ambiguous_candidates_reintroduction_dict[candidate][1]\n first_reported_reintroduction= self.ambiguous_candidates_reintroduction_dict[candidate][0]\n frequency_w_decay= self.ambiguous_candidates_reintroduction_dict[candidate][1]+ (self.decay_factor**(self.counter-first_reported_reintroduction))*(ambiguous_candidates_in_batch_w_Count[candidate])\n # frequency_w_decay= (self.decay_factor**(self.counter-first_reported_reintroduction))*(ambiguous_candidates_in_batch_w_Count[candidate])\n else:\n frequency_w_decay=int(candidate_featureBase_DF[candidate_featureBase_DF['candidate']==candidate].cumulative)\n # frequency_w_decay=ambiguous_candidates_in_batch_w_Count[candidate]\n first_reported_reintroduction=self.counter\n # print(candidate,first_reported_reintroduction,ambiguous_candidates_in_batch_w_Count[candidate],old_value,frequency_w_decay)\n self.ambiguous_candidates_reintroduction_dict[candidate]=(first_reported_reintroduction, frequency_w_decay)\n dict_to_return[candidate]=frequency_w_decay\n return dict_to_return\n\n\n #NOTE: distances mean similarities here!!\n def get_ranking_score(self,ambiguous_candidates_in_batch_freq_w_decay,cosine_distance_dict,cosine_distance_dict_multi_sketch,euclidean_distance_dict_multi_sketch):\n \n \n combined_sketching_similarity_dict={}\n combined_sketching_w_decay={}\n\n # print(\"checking for same lengths: \",len(ambiguous_candidates_in_batch_freq_w_decay),len(list(cosine_distance_dict.keys())),len(list(cosine_distance_dict_multi_sketch.keys())),len(list(euclidean_distance_dict_multi_sketch.keys())))\n for candidate in ambiguous_candidates_in_batch_freq_w_decay.keys():\n relative_rank_1= (list(cosine_distance_dict.keys())).index(candidate)\n relative_rank_2= (list(cosine_distance_dict_multi_sketch.keys())).index(candidate)\n relative_rank_3= (list(euclidean_distance_dict_multi_sketch.keys())).index(candidate)\n\n #just based on sketching, combining ranks not similarities:\n combined_sketching_similarity_dict[candidate]=min(relative_rank_1,relative_rank_2,relative_rank_3)\n\n # #combining sketching based rank induced similarity with freq_w_decay:\n # rank_induced_similarity=1-(min(relative_rank_1,relative_rank_2,relative_rank_3)/len(ambiguous_candidates_in_batch_freq_w_decay))\n # combined_sketching_w_decay[candidate]= ambiguous_candidates_in_batch_freq_w_decay[candidate]*rank_induced_similarity\n\n # combined_sketching_w_decay_sorted= OrderedDict(sorted(combined_sketching_w_decay.items(), key=lambda x: x[1], reverse=True))\n\n return combined_sketching_similarity_dict #returning the combined sketching variant ranks now\n\n def set_cb(self,TweetBase,CTrie,phase2stopwordList,z_score_threshold,reintroduction_threshold):\n\n #input new_tweets, z_score, Updated candidatebase of phase1\n #output candidate_featureBase_DF, Incomplete_tweets\n data_frame_holder=pd.DataFrame([], columns=['index','entry_batch','tweetID', 'sentID', 'hashtags', 'user', 
'TweetSentence','phase1Candidates', '2nd Iteration Candidates', '2nd Iteration Candidates Unnormalized'])\n phase2_candidates_holder=[]\n phase2_unnormalized_candidates_holder=[]\n df_holder=[]\n\n candidate_featureBase_DF,df_holder_extracted,phase2_candidates_holder_extracted,phase2_unnormalized_candidates_holder_extracted= self.extract(TweetBase,CTrie,phase2stopwordList,0)\n phase2_candidates_holder.extend(phase2_candidates_holder_extracted)\n phase2_unnormalized_candidates_holder.extend(phase2_unnormalized_candidates_holder_extracted)\n df_holder.extend(df_holder_extracted)\n\n\n ambiguous_candidates_in_batch_w_Count=dict((x,self.ambiguous_candidates_in_batch.count(x)) for x in set(self.ambiguous_candidates_in_batch))\n\n self.ambiguous_candidates_in_batch=list(set(self.ambiguous_candidates_in_batch))\n # print(len(self.ambiguous_candidates_in_batch))\n cosine_distance_dict_wAmb={}\n if((self.counter>0)&(len(self.incomplete_tweets)>0)):\n \n ambiguous_candidate_inBatch_records=candidate_featureBase_DF[candidate_featureBase_DF['candidate'].isin(self.ambiguous_candidates_in_batch)]\n \n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!not used\n ambiguous_candidates_in_batch_freq_w_decay=self.frequencies_w_decay(ambiguous_candidates_in_batch_w_Count,candidate_featureBase_DF)\n \n\n #with single sketch for entity/non-entity class-- cosine\n cosine_distance_dict=self.get_cosine_distance(ambiguous_candidate_inBatch_records,self.entity_sketch,self.non_entity_sketch,reintroduction_threshold)\n candidates_to_reintroduce=list(cosine_distance_dict.keys())\n\n #with multiple sketches for entity/non-entity class-- cosine\n cosine_distance_dict_multi_sketch=self.get_cosine_distance_multi_sketch(ambiguous_candidate_inBatch_records,self.entity_sketches,self.non_entity_sketches,reintroduction_threshold)\n candidates_to_reintroduce_multi_sketch=list(cosine_distance_dict_multi_sketch.keys())\n\n #with multiple sketches for entity/non-entity class-- euclidean\n euclidean_distance_dict_multi_sketch=self.get_euclidean_distance_multi_sketch(ambiguous_candidate_inBatch_records,self.entity_sketches_euclidean,self.non_entity_sketches_euclidean,reintroduction_threshold)\n candidates_to_reintroduce_multi_sketch_euclidean=list(euclidean_distance_dict_multi_sketch.keys())\n\n #with alternative ranking\n ranking_score_dict= self.get_ranking_score(ambiguous_candidates_in_batch_freq_w_decay, cosine_distance_dict,cosine_distance_dict_multi_sketch,euclidean_distance_dict_multi_sketch)\n # ranking_score_dict_eviction= self.get_ranking_score_for_eviction(ambiguous_candidate_records_before_classification.candidate.tolist(),cosine_distance_dict_eviction,cosine_distance_dict_multi_sketch_eviction,euclidean_distance_dict_multi_sketch_eviction)\n ##----comment out next line and use the dict directly when combining just based on ranks!!!!----\n # candidates_to_reintroduce_w_ranking=list(ranking_score_dict.keys())\n\n #with multiple sketches for ambiguous class-- cosine\n cosine_distance_dict_wAmb=self.get_cosine_distance_1(ambiguous_candidate_inBatch_records,self.ambiguous_entity_sketch,reintroduction_threshold)\n candidates_to_reintroduce_wAmb=list(cosine_distance_dict_wAmb.keys())\n\n #with multiple sketches for ambiguous class-- euclidean\n cosine_distance_dict_multi_sketch_wAmb=self.get_cosine_distance_multi_sketch_wAmb(ambiguous_candidate_inBatch_records,self.ambiguous_entity_sketches,reintroduction_threshold)\n candidates_to_reintroduce_multi_sketch_wAmb=list(cosine_distance_dict_multi_sketch_wAmb.keys())\n\n\n #with multiple 
sketches for ambiguous class-- euclidean\n euclidean_distance_dict_multi_sketch_wAmb=self.get_euclidean_distance_multi_sketch_wAmb(ambiguous_candidate_inBatch_records,self.ambiguous_entity_sketches_euclidean,reintroduction_threshold)\n candidates_to_reintroduce_multi_sketch_euclidean_wAmb=list(euclidean_distance_dict_multi_sketch_wAmb.keys())\n\n #with alternative ranking\n ranking_score_dict_wAmb=self.get_ranking_score(ambiguous_candidates_in_batch_freq_w_decay, cosine_distance_dict_wAmb,cosine_distance_dict_multi_sketch_wAmb,euclidean_distance_dict_multi_sketch_wAmb)\n\n\n rank_dict_reintroduction_candidates={candidate: min(ranking_score_dict[candidate],ranking_score_dict_wAmb[candidate]) for candidate in self.ambiguous_candidates_in_batch}\n rank_dict_ordered_reintroduction_candidates=OrderedDict(sorted(rank_dict_reintroduction_candidates.items(), key=lambda x: x[1]))\n rank_dict_ordered_list_reintroduction_candidates=list(rank_dict_ordered_reintroduction_candidates.keys())\n real_cutoff= int(60/100*(len(self.ambiguous_candidates_in_batch)))\n rank_dict_ordered_list_reintroduction_candidates_cutoff=rank_dict_ordered_list_reintroduction_candidates[0:real_cutoff]\n\n\n #tweet candidates for Reintroduction\n reintroduced_tweets=self.get_reintroduced_tweets(rank_dict_ordered_list_reintroduction_candidates_cutoff,reintroduction_threshold)\n candidate_featureBase_DF,df_holder_extracted,phase2_candidates_holder_extracted,phase2_unnormalized_candidates_holder_extracted = self.extract(reintroduced_tweets,CTrie,phase2stopwordList,1)\n phase2_candidates_holder.extend(phase2_candidates_holder_extracted)\n phase2_unnormalized_candidates_holder.extend(phase2_unnormalized_candidates_holder_extracted)\n df_holder.extend(df_holder_extracted)\n\n #print(len(df_holder))\n data_frame_holder = pd.DataFrame(df_holder)\n #print(len(self.incomplete_tweets),len(data_frame_holder),len(candidate_featureBase_DF))\n \n print(\"ambiguous_candidates_in_batch: \",len(self.ambiguous_candidates_in_batch))\n\n #set ['probabilities'] for candidate_featureBase_DF\n candidate_featureBase_DF,self.infrequent_candidates= self.classify_candidate_base(z_score_threshold,candidate_featureBase_DF)\n # set readable labels (a,g,b) for candidate_featureBase_DF based on ['probabilities.']\n candidate_featureBase_DF=self.set_readable_labels(candidate_featureBase_DF)\n\n good_to_amb_df=candidate_featureBase_DF[(candidate_featureBase_DF['candidate'].isin(self.good_candidates)&(candidate_featureBase_DF[\"status\"]==\"a\"))]\n good_to_amb=good_to_amb_df.candidate.tolist()\n # print('good to ambiguous: ',good_to_amb)\n # print(good_to_amb_df[['candidate','cap','substring-cap','s-o-sCap','all-cap','non-cap','non-discriminative','cumulative','status']])\n\n self.good_candidates=candidate_featureBase_DF[candidate_featureBase_DF.status==\"g\"].candidate.tolist()\n self.ambiguous_candidates=candidate_featureBase_DF[candidate_featureBase_DF.status==\"a\"].candidate.tolist()\n self.bad_candidates=candidate_featureBase_DF[candidate_featureBase_DF.status==\"b\"].candidate.tolist()\n\n\n entity_candidate_records=candidate_featureBase_DF[candidate_featureBase_DF['candidate'].isin(self.good_candidates)]\n non_entity_candidate_records=candidate_featureBase_DF[candidate_featureBase_DF['candidate'].isin(self.bad_candidates)]\n ambiguous_candidate_records=candidate_featureBase_DF[candidate_featureBase_DF['candidate'].isin(self.ambiguous_candidates)]\n\n #comment it out\n if(self.counter==12):\n 
ambiguous_candidate_records.to_csv(\"ambiguous_records.csv\",columns=['candidate','cap','substring-cap','s-o-sCap','all-cap','non-cap','non-discriminative','cumulative','status'], sep=',', mode='a', index=False)\n # print(self.ambiguous_candidates[['candidate','cap','substring-cap','s-o-sCap','all-cap','non-cap','non-discriminative','cumulative','status']])\n \n\n self.entity_sketch= self.get_aggregate_sketch(entity_candidate_records)\n self.non_entity_sketch=self.get_aggregate_sketch(non_entity_candidate_records)\n self.ambiguous_entity_sketch=self.get_aggregate_sketch(ambiguous_candidate_records)\n\n #multiple sketches per category--cosine\n self.entity_sketches= self.get_multiple_aggregate_sketches(\"For entities: \",\"cosine\",entity_candidate_records)\n self.non_entity_sketches= self.get_multiple_aggregate_sketches(\"For non-entities: \",\"cosine\",non_entity_candidate_records)\n self.ambiguous_entity_sketches=self.get_multiple_aggregate_sketches(\"For ambiguous: \",\"cosine\",ambiguous_candidate_records)\n \n #multiple sketches per category--euclidean\n self.entity_sketches_euclidean= self.get_multiple_aggregate_sketches(\"For entities: \",\"euclidean\",entity_candidate_records)\n self.non_entity_sketches_euclidean= self.get_multiple_aggregate_sketches(\"For non-entities: \",\"euclidean\",non_entity_candidate_records)\n self.ambiguous_entity_sketches_euclidean=self.get_multiple_aggregate_sketches(\"For ambiguous: \",\"euclidean\",ambiguous_candidate_records)\n\n # self.ambiguous_candidate_distanceDict_prev=self.get_cosine_distance(ambiguous_candidate_records,self.entity_sketch,self.non_entity_sketch)\n #candidate_featureBase_DF.to_csv(\"cb_with_prob_label.csv\", sep=',', encoding='utf-8')\n correction_flag=self.set_partition_dict(candidate_featureBase_DF,self.infrequent_candidates)\n # candidate_featureBase_DF.to_csv(\"cf_new.csv\", sep=',', encoding='utf-8')\n if(self.counter>0):\n ambiguous_turned_good=list(filter(lambda element: element in self.good_candidates, self.ambiguous_candidates_in_batch))\n ambiguous_turned_bad=list(filter(lambda element: element in self.bad_candidates, self.ambiguous_candidates_in_batch))\n ambiguous_remaining_ambiguous=list(filter(lambda element: element in self.ambiguous_candidates, self.ambiguous_candidates_in_batch))\n print('ambiguous to good: ',len(ambiguous_turned_good))\n # print(ambiguous_turned_bad)\n # print(ambiguous_remaining_ambiguous)\n\n converted_candidates= ambiguous_turned_good + ambiguous_turned_bad\n else:\n ambiguous_turned_good=[]\n ambiguous_turned_bad=[]\n ambiguous_remaining_ambiguous=[]\n converted_candidates=[]\n\n # for cand in (ambiguous_turned_good):\n # row=candidate_featureBase_DF[candidate_featureBase_DF.candidate==cand]\n # candidate_synvec=[(row['normalized_cap'].values.tolist()),(row['normalized_capnormalized_substring-cap'].values.tolist()),(row['normalized_s-o-sCap'].values.tolist()),(row['normalized_all-cap'].values.tolist()),(row['normalized_non-cap'].values.tolist()),(row['normalized_non-discriminative'].values.tolist())]\n # print('=>',cand,cosine_distance_dict_wAmb[cand],cosine_distance_dict[cand],euclidean_distance_dict_wAmb[cand],euclidean_distance_dict[cand])\n # print(candidate_synvec)\n # print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n # print(\"-----------------------------------------------------------------------------------\")\n # for cand in (ambiguous_turned_bad):\n # row=candidate_featureBase_DF[candidate_featureBase_DF.candidate==cand]\n # 
candidate_synvec=[(row['normalized_cap'].values.tolist()),(row['normalized_capnormalized_substring-cap'].values.tolist()),(row['normalized_s-o-sCap'].values.tolist()),(row['normalized_all-cap'].values.tolist()),(row['normalized_non-cap'].values.tolist()),(row['normalized_non-discriminative'].values.tolist())]\n # print('=>',cand,cosine_distance_dict_wAmb[cand],cosine_distance_dict[cand],euclidean_distance_dict_wAmb[cand],euclidean_distance_dict[cand])\n # print(candidate_synvec)\n # print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n # print(\"=========================================================================================\")\n # for cand in ambiguous_remaining_ambiguous:\n # row=candidate_featureBase_DF[candidate_featureBase_DF.candidate==cand]\n # candidate_synvec=[(row['normalized_cap'].values.tolist()),(row['normalized_capnormalized_substring-cap'].values.tolist()),(row['normalized_s-o-sCap'].values.tolist()),(row['normalized_all-cap'].values.tolist()),(row['normalized_non-cap'].values.tolist()),(row['normalized_non-discriminative'].values.tolist())]\n # print('=>',cand,cosine_distance_dict_wAmb[cand],cosine_distance_dict[cand],euclidean_distance_dict_wAmb[cand],euclidean_distance_dict[cand])\n # print(candidate_synvec)\n # print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n # #print(self.good_candidates, self.ambiguous_candidates_in_batch)\n\n\n #['probability'],['a,g,b']\n return candidate_featureBase_DF,data_frame_holder,phase2_candidates_holder,phase2_unnormalized_candidates_holder,correction_flag,(ambiguous_turned_good+ambiguous_turned_bad+self.ambiguous_candidates), converted_candidates\n\n\n #flush out completed tweets\n # input candidate base, looped over tweets (incomplete tweets+ new tweets)\n # output: incomplete tweets (a tags in it.), incomplete_tweets[\"Complete\"]\n def set_tf(self,data_frame_holder,\n candidate_featureBase_DF,\n phase2_candidates_holder,phase2_unnormalized_candidates_holder,correction_flag):\n return self.set_completeness_in_tweet_frame(data_frame_holder,\n candidate_featureBase_DF,\n phase2_candidates_holder,phase2_unnormalized_candidates_holder,correction_flag)\n\n def get_incomplete_tf(self,untrashed_tweets):\n return untrashed_tweets[untrashed_tweets.completeness==False]\n\n def get_complete_tf(self,untrashed_tweets):\n return untrashed_tweets[untrashed_tweets.completeness==True]\n\n def compute_seen_tweets_so_far(self,start_batch,end_batch):\n if(start_batch==end_batch):\n sliced_seen_tweets=self.number_of_seen_tweets_per_batch[start_batch]\n\n\n sliced_seen_tweets=self.number_of_seen_tweets_per_batch[start_batch:]\n\n\n counter=0\n for elem in sliced_seen_tweets:\n counter=counter+elem\n\n return counter\n\n #@profile\n def rreplace(self,s, old, new, occurrence):\n if s.endswith(old):\n li = s.rsplit(old, occurrence)\n return new.join(li)\n else:\n return s\n #ME_EXTR=Mention.Mention_Extraction()\n\n\n # experiment function\n def set_x_axis(self,just_converted_tweets_for_current_batch):\n\n self.incomplete_tweets.to_csv(\"set_x_axis_debug.csv\", sep=',', encoding='utf-8')\n\n self.incomplete_tweets['number_of_seen_tweets'] = self.incomplete_tweets['entry_batch'].apply(lambda x: self.compute_seen_tweets_so_far(x,self.counter))\n\n\n self.incomplete_tweets[\"entry_vs_tweet_seen_ratio\"]=self.incomplete_tweets['entry_batch']/self.incomplete_tweets['number_of_seen_tweets']\n\n\n #counter_list= \n 
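        # Editor's note: the experiment columns built in this method track how long
        # a tweet has been pending: number_of_seen_tweets counts tweets seen since
        # the tweet's entry batch, entry_vs_tweet_seen_ratio and
        # ratio_entry_vs_current normalize the entry batch by that count and by the
        # current batch index, and current_minus_entry is the tweet's age in
        # batches.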
self.incomplete_tweets[\"ratio_entry_vs_current\"]=self.incomplete_tweets['entry_batch']/self.counter\n\n\n self.incomplete_tweets[\"current_minus_entry\"]=self.counter-self.incomplete_tweets['entry_batch']\n\n just_converted_tweets_for_current_batch[\"current_minus_entry\"]=self.counter-just_converted_tweets_for_current_batch['entry_batch']\n\n return just_converted_tweets_for_current_batch\n\n\n\n def set_column_for_candidates_in_incomplete_tweets(self,candidate_featureBase_DF,input_to_eval):\n\n incomplete_candidates= input_to_eval['2nd Iteration Candidates'].tolist()\n\n # incomplete_candidates_unnormalized= input_to_eval['2nd Iteration Candidates Unnormalized'].tolist()\n\n\n\n candidate_featureBase_DF= candidate_featureBase_DF.set_index('candidate')\n\n candidate_with_label_holder=[]\n one_level=[]\n \n\n for sentence_level_candidates in incomplete_candidates:\n\n one_level.clear()\n\n for candidate in sentence_level_candidates:\n if candidate.lower() in candidate_featureBase_DF.index:\n label=candidate_featureBase_DF.get_value(candidate.lower(),'status')\n one_level.append((candidate,label))\n else:\n one_level.append((candidate,\"na\"))\n\n candidate_with_label_holder.append(copy.deepcopy(one_level))\n\n\n input_to_eval[\"candidates_with_label\"]=candidate_with_label_holder\n debug_candidates_label_list= input_to_eval['candidates_with_label'].tolist()\n candidates_filtered_g_labeled=[]\n row_level_candidates=[]\n index_outer=0\n\n candidates_filtered_a_labeled=[]\n row_level_a_candidates=[]\n\n for sentence_level in debug_candidates_label_list:\n\n # sentence_level_candidates_unnormalized= incomplete_candidates_unnormalized[index_outer]\n row_level_candidates.clear()\n row_level_a_candidates.clear()\n for candidate in sentence_level:\n if(candidate[1]==\"g\"):\n row_level_candidates.append(candidate[0])\n if(((candidate[1]==\"b\")|(candidate[1]==\"a\"))&(candidate[0]==\"US\")):\n # print('here')\n row_level_candidates.append(candidate[0])\n if(candidate[1]==\"a\"):\n row_level_a_candidates.append(candidate[0])\n\n candidates_filtered_g_labeled.append(copy.deepcopy(row_level_candidates))\n candidates_filtered_a_labeled.append(copy.deepcopy(row_level_a_candidates))\n index_outer+=1\n\n\n input_to_eval[\"only_good_candidates\"]=candidates_filtered_g_labeled\n input_to_eval[\"ambiguous_candidates\"]=candidates_filtered_a_labeled\n\n\n\n def calculate_tp_fp_f1(self,z_score_threshold,input_to_eval):\n\n column_candidates_holder = input_to_eval['only_good_candidates'].tolist()\n\n column_annot_holder= input_to_eval['annotation'].tolist()\n\n\n ## for annotation.\n cum_holder_annot=[]\n for rows_annot in column_annot_holder:\n cum_holder_annot.extend(rows_annot)\n\n\n cum_holder_annot_set = set(cum_holder_annot)\n cum_holder_annot = list(cum_holder_annot_set)\n\n\n ## for candidates.\n cum_holder_candidates=[]\n for rows_candidates in column_candidates_holder:\n cum_holder_candidates.extend(rows_candidates)\n\n\n cum_holder_candidates_set = set(cum_holder_candidates)\n cum_holder_candidates = list(cum_holder_candidates_set)\n\n\n\n good_candidates = cum_holder_candidates\n\n annotations= cum_holder_annot\n\n\n true_positive_count=0\n false_positive_count=0\n false_negative_count=0\n ambigious_not_in_annotation=0\n\n true_positive_holder = []\n false_negative_holder=[]\n false_positive_holder=[]\n total_mention_holder=[]\n ambigious_not_in_annotation_holder=[]\n f_measure_holder=[]\n\n\n total_mentions=0\n\n total_mentions+=len(annotations)\n #print(idx,val,true_positives_candidates[idx])\n 
false_negative_line= [val2 for val2 in annotations if val2 not in good_candidates]\n #print(idx,false_negative_line)\n true_positive_line=[val2 for val2 in annotations if val2 in good_candidates]\n\n false_positive_line=[val2 for val2 in good_candidates if val2 not in annotations]\n #print(idx,false_positive_line)\n\n \n # print(idx,true_positive_line,'ground truth: ',annotations[idx],'our system: ',good_candidates[idx])\n \n #print(idx+1,'True positive:',true_positive_line)\n true_positive_count+=len(true_positive_line)\n #print(idx+1,'False positive:',false_positive_line)\n false_positive_count+=len(false_positive_line)\n #print(idx+1,'False negative:',false_negative_line)\n false_negative_count+=len(false_negative_line)\n #print(' ')\n\n true_positive_holder=[ true_positive_count for i in range(len(input_to_eval['only_good_candidates'].tolist()))]\n\n false_negative_holder=[ false_negative_count for i in range(len(input_to_eval['only_good_candidates'].tolist()))]\n false_positive_holder=[ false_positive_count for i in range(len(input_to_eval['only_good_candidates'].tolist()))]\n # ambigious_not_in_annotation_holder.append(len(ambigious_not_in_annotation_line))\n total_mention_holder=[ total_mentions for i in range(len(input_to_eval['only_good_candidates'].tolist()))]\n\n\n\n\n true_positive_count_IPQ=true_positive_count\n false_positive_count_IPQ = false_positive_count\n false_negative_count_IPQ= false_negative_count\n total_mention_count_IPQ=total_mentions\n\n\n tp_count=0\n tm_count=0\n fp_count=0\n fn_count=0\n\n for idx,tup in enumerate(self.accuracy_tuples_prev_batch):\n # print(idx,tup)\n tp_count+=tup[0]\n tm_count+=tup[1]\n fp_count+=tup[2]\n fn_count+=tup[3]\n\n\n\n tp_count+=true_positive_count_IPQ\n tm_count+=total_mention_count_IPQ\n fp_count+=false_positive_count_IPQ\n fn_count+=false_negative_count_IPQ\n\n precision=(tp_count)/(tp_count+fp_count)\n recall=(tp_count)/(tp_count+fn_count)\n f_measure=2*(precision*recall)/(precision+recall)\n\n\n\n self.accuracy_vals=(f_measure,z_score_threshold,precision,recall)\n\n # print('z_score:', z_score_threshold , 'precision: ',precision,'recall: ',recall,'f measure: ',f_measure)\n # print('trupe positive: ',tp_count, 'false positive: ',fp_count,'false negative: ', fn_count,'total mentions: ', tm_count)\n\n\n input_to_eval[\"tp\"]=true_positive_holder\n input_to_eval[\"fn\"]=false_negative_holder\n input_to_eval['fp']= false_positive_holder\n input_to_eval[\"total_mention\"]=total_mention_holder\n\n # input_to_eval[\"ambigious_not_in_annot\"]=ambigious_not_in_annotation_holder\n # input_to_eval[\"inverted_loss\"]=input_to_eval[\"tp\"]/( input_to_eval[\"fn\"]+input_to_eval[\"ambigious_not_in_annot\"])\n\n return input_to_eval\n\n\n def recall_correction(self,candidate_featureBase_DF,phase2_candidates_holder,phase2_unnormalized_candidates_holder,data_frame_holder):\n\n corrected_phase2_candidates_holder=[]\n index_outer=0\n for candidates in phase2_candidates_holder:\n unnormalized_candidates=phase2_unnormalized_candidates_holder[index_outer]\n corrected_phase2_candidates=[]\n for idx, candidate in enumerate(candidates):\n unnormalized_candidate=unnormalized_candidates[idx]\n # if((candidate in self.partition_dict.keys())&((candidate in self.infrequent_candidates)|(candidate in self.bad_candidates))):\n if((candidate in self.partition_dict.keys())&((candidate in self.infrequent_candidates)|(candidate in self.bad_candidates)|(candidate in self.ambiguous_candidates))): #do this only for 3K tweets\n #print(candidate, 
self.partition_dict[candidate])\n corrected_phase2_candidates.extend(self.partition_dict[candidate])\n else:\n if(((candidate in self.bad_candidates)|(candidate in self.ambiguous_candidates))&(candidate=='us')&(unnormalized_candidate=='US')):\n # print(index_outer)\n candidate=unnormalized_candidate\n corrected_phase2_candidates.append(candidate)\n corrected_phase2_candidates_holder.append(copy.deepcopy(corrected_phase2_candidates))\n index_outer+=1\n\n \n #print(corrected_phase2_candidates_holder)\n data_frame_holder['2nd Iteration Candidates']=corrected_phase2_candidates_holder\n\n return corrected_phase2_candidates_holder,data_frame_holder \n\n\n\n #@profile\n def set_completeness_in_tweet_frame(self,data_frame_holder,candidate_featureBase_DF,phase2_candidates_holder,phase2_unnormalized_candidates_holder,correction_flag):\n #print(candidate_featureBase_DF.head())\n\n # print(\"trump:\", candidate_featureBase_DF[candidate_featureBase_DF.candidate==\"trump\"][['status','cumulative']])\n # print(\"fbi:\", candidate_featureBase_DF[candidate_featureBase_DF.candidate==\"fbi\"][['status','cumulative']])\n # print(\"fbi obtained fisa warrant:\", candidate_featureBase_DF[candidate_featureBase_DF.candidate==\"fbi obtained fisa warrant\"][['status','cumulative']])\n # print(\"trump aide:\", candidate_featureBase_DF[candidate_featureBase_DF.candidate==\"trump aide\"][['status','cumulative']])\n # print(\"agent of the russian government:\", candidate_featureBase_DF[candidate_featureBase_DF.candidate==\"agent of the russian government\"][['status','cumulative']])\n\n good_candidates=candidate_featureBase_DF[candidate_featureBase_DF.status==\"g\"].candidate.tolist()\n bad_candidates=candidate_featureBase_DF[candidate_featureBase_DF.status==\"b\"].candidate.tolist()\n\n merged_g_b= bad_candidates+good_candidates\n\n #candidate_featureBase_DF.to_csv(\"cf_before_labeling_comp.csv\", sep=',', encoding='utf-8')\n ambiguous_candidates=candidate_featureBase_DF[candidate_featureBase_DF.status==\"a\"].candidate.tolist()\n\n if(correction_flag):\n phase2_candidates_holder,data_frame_holder=self.recall_correction(candidate_featureBase_DF,phase2_candidates_holder,phase2_unnormalized_candidates_holder,data_frame_holder)\n\n \n\n \n truth_vals=[False if any(x not in merged_g_b for x in list1) else True for list1 in phase2_candidates_holder]\n\n output_mentions=[list(filter(lambda candidate: ((candidate in good_candidates))|(candidate=='US'), list1)) for list1 in phase2_candidates_holder]\n\n # truth_vals=[False if any(x in ambiguous_candidates for x in list1) else True for list1 in phase2_candidates_holder]\n\n # for list1 in phase2_candidates_holder:\n # if any(x in ambiguous_candidates for x in list1):\n # truth_vals.append(False)\n # else:\n # truth_vals.append(True)\n \n\n\n #print(truth_vals)\n completeness_series = pd.Series( (v for v in truth_vals) )\n output_mentions_series = pd.Series( (v for v in output_mentions) )\n\n\n data_frame_holder['output_mentions']=output_mentions_series\n data_frame_holder['completeness']=completeness_series\n data_frame_holder[\"current_minus_entry\"]=self.counter-data_frame_holder['entry_batch']\n\n # print('0: ',data_frame_holder[(data_frame_holder.tweetID=='0')]['output_mentions'])\n # print('1006: ',data_frame_holder[(data_frame_holder.tweetID=='1006')]['output_mentions'])\n # print('14154: ',data_frame_holder[(data_frame_holder.tweetID=='14154')]['output_mentions'])\n # print('31877: ',data_frame_holder[(data_frame_holder.tweetID=='31877')]['output_mentions'])\n # 
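# --- Editor's sketch (not part of the original): g/a/b status thresholds -------
# set_readable_labels, defined a little further below, assigns 'g' when
# probability >= 0.8, 'a' when it is strictly between 0.4 and 0.8, and 'b' when
# it is <= 0.4, using chained indexing. A hedged, equivalent alternative that
# avoids pandas SettingWithCopy warnings could use numpy.select; the thresholds
# come from the original, while the helper name label_status is hypothetical and
# numpy is assumed to be available.
import numpy as np

def label_status(probability_series):
    """Map a probability Series to the g/a/b/ne status labels used here."""
    conditions = [
        probability_series >= 0.8,
        (probability_series > 0.4) & (probability_series < 0.8),
        probability_series <= 0.4,
    ]
    return np.select(conditions, ['g', 'a', 'b'], default='ne')
# Usage would be roughly:
# candidate_featureBase_DF['status'] = label_status(candidate_featureBase_DF['probability'])
# --------------------------------------------------------------------------------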
print('35028: ',data_frame_holder[(data_frame_holder.tweetID=='35028')]['output_mentions'])\n # print('38894: ',data_frame_holder[(data_frame_holder.tweetID=='38894')]['output_mentions'])\n\n \n # data_frame_holder.to_csv(\"phase2output_with_completeness.csv\", sep=',', encoding='utf-8')\n\n return data_frame_holder\n\n\n\n #@profile\n def set_readable_labels(self,candidate_featureBase_DF):\n\n #candidate_featureBase_DF['status'] = candidate_featureBase_DF['probability'].apply(lambda x: set(x).issubset(good_candidates))\n candidate_featureBase_DF['status']='ne'\n candidate_featureBase_DF['status'][candidate_featureBase_DF['probability']>=0.8]='g'\n candidate_featureBase_DF['status'][(candidate_featureBase_DF['probability'] > 0.4) & (candidate_featureBase_DF['probability'] < 0.8)] = 'a'\n candidate_featureBase_DF['status'][candidate_featureBase_DF['probability']<=0.4]='b'\n\n return candidate_featureBase_DF\n\n\n #@profile\n def normalize(self,word):\n strip_op=word\n strip_op=(((strip_op.lstrip(string.punctuation)).rstrip(string.punctuation)).strip()).lower()\n strip_op=(strip_op.lstrip('“‘’”')).rstrip('“‘’”')\n strip_op=(((strip_op.lstrip(string.punctuation)).rstrip(string.punctuation)).strip()).lower()\n #strip_op= self.rreplace(self.rreplace(self.rreplace(strip_op,\"'s\",\"\",1),\"’s\",\"\",1),\"’s\",\"\",1)\n if strip_op.endswith(\"'s\"):\n li = strip_op.rsplit(\"'s\", 1)\n return ''.join(li)\n elif strip_op.endswith(\"’s\"):\n li = strip_op.rsplit(\"’s\", 1)\n return ''.join(li)\n else:\n return strip_op\n #return strip_op\n\n #@profile \n def isSubstring(self,to_increase_element,id_to_incr,comparison_holder,phase1_holder_holder_copy):\n combined_list=comparison_holder[id_to_incr]+phase1_holder_holder_copy[id_to_incr]\n\n for idx,val in enumerate(comparison_holder[id_to_incr]):\n if((to_increase_element[0] in val[0]) and to_increase_element[0] != val[0]):\n if((to_increase_element[5] in val[5]) and to_increase_element[5] != val[5]):\n return True\n for idx,val in enumerate(phase1_holder_holder_copy[id_to_incr]):\n if((to_increase_element[0] in val[0]) and to_increase_element[0] != val[0]):\n if((to_increase_element[5] in val[2]) and to_increase_element[5] != val[2]):\n return True \n \n return False\n\n #@profile\n def calculate_pmi(self,big,x1,x2,total):\n big__= float(big/total)\n x1__=float(x1/total)\n x2__=float(x2/total)\n pmi= math.log(big__/(x1__*x2__),2.71828182845)\n pklv=big__*pmi\n #return (1/(1+math.exp(-1*pmi)))\n npmi= pmi/(-1.0*(math.log(big__,2.71828182845)))\n return npmi,pklv\n #return pklv\n\n def multiSlice(self,s,cutpoints,good_candidates):\n k = len(cutpoints)\n multislices=[]\n if k == 0:\n curr_candidate=self.normalize(' '.join(s))\n\n if(curr_candidate in good_candidates):\n multislices = [curr_candidate] \n else:\n \n curr_candidate=self.normalize(' '.join(s[:cutpoints[0]]))\n alt_list=[curr_candidate]\n \n if(curr_candidate in good_candidates):\n multislices = [curr_candidate]\n\n alt_list.extend(self.normalize(' '.join(s[cutpoints[i]:cutpoints[i+1]])) for i in range(k-1))\n multislices.extend(self.normalize(' '.join(s[cutpoints[i]:cutpoints[i+1]])) for i in range(k-1) if self.normalize(' '.join(s[cutpoints[i]:cutpoints[i+1]])) in good_candidates)\n\n curr_candidate=self.normalize(' '.join(s[cutpoints[k-1]:]))\n alt_list.append(curr_candidate)\n \n if(curr_candidate in good_candidates):\n multislices.append(curr_candidate)\n # print('::',alt_list)\n return multislices\n\n\n\n def get_substring_candidates(self,candidate_words,good_candidates):\n n = 
len(candidate_words)\n all_partitions=[]\n all_partitions_length=[]\n cuts = list(range(1,n))\n for k in range(n):\n # all_partitions_inner=[]\n partition_list=[]\n partition_length_list=[]\n for cutpoints in itertools.combinations(cuts,k):\n ret_list=self.multiSlice(candidate_words,cutpoints,good_candidates)\n if(ret_list):\n partition_length=sum([len(elem.split()) for elem in ret_list])\n # print('==',ret_list,partition_length)\n if(partition_length==len(candidate_words)):\n return ret_list\n partition_list.append(ret_list)\n partition_length_list.append(partition_length)\n # yield ret_list\n # print('------')\n if(partition_length_list):\n max_index=partition_length_list.index(max(partition_length_list))\n all_partitions.append(partition_list[max_index])\n all_partitions_length.append(partition_length_list[max_index])\n # print(all_partitions)\n if(all_partitions_length):\n max_index=all_partitions_length.index(max(all_partitions_length))\n # print(all_partitions[max_index])\n return all_partitions[max_index]\n else:\n return []\n \n #@profile\n def verify(self, subsequence, CTrie):\n return CTrie.__contains__(subsequence)\n\n\n #@profile\n def check_sequence(self, sequence, l, CTrie):\n result=[]\n subsequence_length=l\n while(subsequence_length>0):\n shift=len(sequence)-subsequence_length\n verified_subsequence=[]\n verified=False\n for i in range(0,shift+1):\n list1=sequence[i:(i+subsequence_length)]\n text=' '.join(str(e[0]) for e in list1)\n subsequence=(self.normalize(text)).split()\n #print(\"search for\", subsequence)\n if self.verify(subsequence, CTrie):\n verified_subsequence.append(i)\n verified_subsequence.append(i+subsequence_length)\n #print(subsequence)\n #print(subsequence,[(verified_subsequence[0]-0),(int(sequence[-1][1])-verified_subsequence[1])])\n verified=True\n break\n if(verified):\n result.append(sequence[verified_subsequence[0]:verified_subsequence[1]])\n if(verified_subsequence[0]-0)>0:\n subequence_to_check=sequence[0:verified_subsequence[0]]\n #since tokens before the starting position of the verified subsequence have already been checked for subsequences of this length\n partition_length=min(len(subequence_to_check),(subsequence_length-1))\n #print(subequence_to_check)\n lst=self.check_sequence(subequence_to_check,partition_length, CTrie)\n if(lst):\n result.extend(lst)\n if(int(sequence[-1][1])-verified_subsequence[1])>0:\n subequence_to_check=sequence[(verified_subsequence[1]):]\n #since tokens following the end position of the verified subsequence have not been checked for subsequences of this length\n partition_length=min(len(subequence_to_check),(subsequence_length))\n #print(subequence_to_check)\n lst=self.check_sequence(subequence_to_check,partition_length, CTrie)\n if(lst):\n result.extend(lst)\n return result\n else:\n subsequence_length-=1\n return result\n\n def flatten(self,mylist, outlist,ignore_types=(str, bytes, int, ne.NE_candidate)):\n \n if mylist !=[]:\n for item in mylist:\n #print not isinstance(item, ne.NE_candidate)\n if isinstance(item, list) and not isinstance(item, ignore_types):\n self.flatten(item, outlist)\n else:\n if isinstance(item,ne.NE_candidate):\n item.phraseText=item.phraseText.strip(' \\t\\n\\r')\n item.reset_length()\n else:\n if type(item)!= int:\n item=item.strip(' \\t\\n\\r')\n outlist.append(item)\n return outlist\n\n\n def getWords(self, sentence):\n tempList=[]\n tempWordList=sentence.split()\n p_dots= re.compile(r'[.]{2,}')\n #print(tempWordList)\n for word in tempWordList:\n temp=[]\n \n if \"(\" in word:\n 
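                # Editor's note: this branch chain re-tokenizes words that carry
                # embedded punctuation: parentheses, '-', '?', ':', ',', '/' and the
                # ellipsis character are split off but kept attached to an adjoining
                # piece, while runs of two or more dots act as plain separators and
                # are dropped. Plain words fall through unchanged, and the nested
                # lists are flattened into tweetWordList at the end of getWords.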
temp=list(filter(lambda elem: elem!='',word.split(\"(\")))\n if(temp):\n temp=list(map(lambda elem: '('+elem, temp))\n elif \")\" in word:\n temp=list(filter(lambda elem: elem!='',word.split(\")\")))\n if(temp):\n temp=list(map(lambda elem: elem+')', temp))\n # temp.append(temp1[-1])\n elif ((\"-\" in word)&(not word.endswith(\"-\"))):\n temp1=list(filter(lambda elem: elem!='',word.split(\"-\")))\n if(temp1):\n temp=list(map(lambda elem: elem+'-', temp1[:-1]))\n temp.append(temp1[-1])\n elif ((\"?\" in word)&(not word.endswith(\"?\"))):\n temp1=list(filter(lambda elem: elem!='',word.split(\"?\")))\n if(temp1):\n temp=list(map(lambda elem: elem+'?', temp1[:-1]))\n temp.append(temp1[-1])\n elif ((\":\" in word)&(not word.endswith(\":\"))):\n temp1=list(filter(lambda elem: elem!='',word.split(\":\")))\n if(temp1):\n temp=list(map(lambda elem: elem+':', temp1[:-1]))\n temp.append(temp1[-1])\n elif ((\",\" in word)&(not word.endswith(\",\"))):\n #temp=list(filter(lambda elem: elem!='',word.split(\",\")))\n temp1=list(filter(lambda elem: elem!='',word.split(\",\")))\n if(temp1):\n temp=list(map(lambda elem: elem+',', temp1[:-1]))\n temp.append(temp1[-1])\n elif ((\"/\" in word)&(not word.endswith(\"/\"))):\n temp1=list(filter(lambda elem: elem!='',word.split(\"/\")))\n if(temp1):\n temp=list(map(lambda elem: elem+'/', temp1[:-1]))\n temp.append(temp1[-1])\n #print(index, temp)\n # elif \"...\" in word:\n # #print(\"here\")\n # temp=list(filter(lambda elem: elem!='',word.split(\"...\")))\n # if(temp):\n # if(word.endswith(\"...\")):\n # temp=list(map(lambda elem: elem+'...', temp))\n # else:\n # temp=list(map(lambda elem: elem+'...', temp[:-1]))+[temp[-1]]\n # # temp.append(temp1[-1])\n # elif \"..\" in word:\n # temp=list(filter(lambda elem: elem!='',word.split(\"..\")))\n # if(temp):\n # if(word.endswith(\"..\")):\n # temp=list(map(lambda elem: elem+'..', temp))\n # else:\n # temp=list(map(lambda elem: elem+'..', temp[:-1]))+[temp[-1]]\n # #temp.append(temp1[-1])\n elif (list(p_dots.finditer(word))):\n matched_spans= list(p_dots.finditer(word)) \n temp=[]\n next_string_start=0\n for matched_span in matched_spans:\n matched_start=matched_span.span()[0]\n this_excerpt=word[next_string_start:matched_start]\n if(this_excerpt):\n temp.append(this_excerpt)\n next_string_start=matched_span.span()[1]\n if(next_string_start<len(word)):\n last_excerpt=word[next_string_start:]\n if(last_excerpt):\n temp.append(last_excerpt)\n elif \"…\" in word:\n temp=list(filter(lambda elem: elem!='',word.split(\"…\")))\n if(temp):\n if(word.endswith(\"…\")):\n temp=list(map(lambda elem: elem+'…', temp))\n else:\n temp=list(map(lambda elem: elem+'…', temp[:-1]))+[temp[-1]]\n else:\n #if word not in string.punctuation:\n temp=[word]\n if(temp):\n tempList.append(temp)\n tweetWordList=self.flatten(tempList,[])\n return tweetWordList\n\n\n #@profile\n # def get_Candidates(self, sequence, CTrie,flag):\n # #print(sequence)\n # #print(sequence)\n # candidateList=[]\n # left=0\n # start_node=CTrie\n # last_cand=\"NAN\"\n # last_cand_substr=\"\"\n # reset=False\n # for right in range(len(sequence)):\n # if(reset):\n # start_node=CTrie\n # last_cand_substr=\"\"\n # left=right\n # curr_text=sequence[right][0]\n # curr_pos=[sequence[right][1]]\n # curr=self.normalize(sequence[right][0])\n # cand_str=self.normalize(last_cand_substr+\" \"+curr)\n # last_cand_sequence=sequence[left:(right+1)]\n # last_cand_text=' '.join(str(e[0]) for e in last_cand_sequence)\n # last_cand_text_norm=self.normalize(' '.join(str(e[0]) for e in 
last_cand_sequence))\n # #print(\"==>\",cand_str,last_cand_text)\n # if ((curr in start_node.path.keys())&(cand_str==last_cand_text_norm)):\n # #if flag:\n # #print(\"=>\",cand_str,last_cand_text)\n # reset=False\n # if (start_node.path[curr].value_valid):\n # #print(last_cand_text)\n # # if flag:\n # # print(last_cand_text)\n # last_cand_pos=[e[1] for e in last_cand_sequence]\n # last_cand=last_cand_text\n # last_cand_batch=start_node.path[curr].feature_list[-1]\n # start_node=start_node.path[curr]\n # last_cand_substr=cand_str\n # else:\n # #print(\"=>\",cand_str,last_cand_text)\n # if(last_cand!=\"NAN\"):\n # candidateList.append((last_cand,last_cand_pos,last_cand_batch))\n # last_cand=\"NAN\"\n # if(start_node!=CTrie):\n # start_node=CTrie\n # last_cand_substr=\"\"\n # if curr in start_node.path.keys():\n # #print(\"here\",curr)\n # reset=False\n # if start_node.path[curr].value_valid:\n # last_cand_text=curr_text\n # last_cand_pos=curr_pos\n # last_cand=last_cand_text\n # last_cand_batch=start_node.path[curr].feature_list[-1]\n # left=right\n # start_node=start_node.path[curr]\n # last_cand_substr=curr\n # else:\n # reset=True\n # else:\n # reset=True\n # else:\n # candidateList.extend(self.get_Candidates(sequence[(left+1):(right+1)], CTrie, flag))\n # reset=True\n # #print(last_cand)\n # if(last_cand!=\"NAN\"):\n # candidateList.append((last_cand,last_cand_pos,last_cand_batch))\n # return candidateList\n\n def get_Candidates(self, sequence, CTrie,flag):\n #flag: debug_flag\n candidateList=[]\n left=0\n start_node=CTrie\n last_cand=\"NAN\"\n last_cand_substr=\"\"\n reset=False\n right=0\n while (right < len(sequence)):\n # if(flag):\n # print(right)\n if(reset):\n start_node=CTrie\n last_cand_substr=\"\"\n left=right\n curr_text=sequence[right][0]\n curr_pos=[sequence[right][1]]\n #normalized curr_text\n curr=self.normalize(sequence[right][0])\n cand_str=self.normalize(last_cand_substr+\" \"+curr)\n cand_str_wPunct=(last_cand_substr+\" \"+curr_text).lower()\n last_cand_sequence=sequence[left:(right+1)]\n last_cand_text=' '.join(str(e[0]) for e in last_cand_sequence)\n last_cand_text_norm=self.normalize(' '.join(str(e[0]) for e in last_cand_sequence))\n if(flag):\n print(\"==>\",cand_str,last_cand_text_norm)\n if((cand_str==last_cand_text_norm)&((curr in start_node.path.keys())|(curr_text.lower() in start_node.path.keys()))):\n #if (((curr in start_node.path.keys())&(cand_str==last_cand_text_norm))|(curr_text.lower() in start_node.path.keys())):\n if flag:\n print(\"=>\",cand_str,last_cand_text)\n reset=False\n if (curr_text.lower() in start_node.path.keys()):\n if (start_node.path[curr_text.lower()].value_valid):\n last_cand_pos=[e[1] for e in last_cand_sequence]\n last_cand_batch=start_node.path[curr_text.lower()].feature_list[-1]\n last_cand=last_cand_text\n elif(curr in start_node.path.keys()):\n if ((start_node.path[curr].value_valid)):\n last_cand_pos=[e[1] for e in last_cand_sequence]\n last_cand=last_cand_text\n last_cand_batch=start_node.path[curr].feature_list[-1]\n else:\n if((right==(len(sequence)-1))&(last_cand==\"NAN\")&(left<right)):\n #print(\"hehe\",cand_str)\n right=left\n reset=True\n else:\n if((right==(len(sequence)-1))&(last_cand==\"NAN\")&(left<right)):\n #print(\"hehe\",cand_str)\n right=left\n reset=True\n elif ((start_node.path[curr].value_valid)&(cand_str==last_cand_text_norm)):\n # if flag:\n # print(\"==\",last_cand_text)\n last_cand_pos=[e[1] for e in last_cand_sequence]\n last_cand=last_cand_text\n last_cand_batch=start_node.path[curr].feature_list[-1]\n 
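                        # Editor's note: this while-loop version of get_Candidates
                        # walks the candidate trie token by token, remembering the
                        # longest prefix whose trie node is value_valid as last_cand.
                        # When a walk dead-ends before yielding a verified candidate,
                        # right is reset to left so the scan resumes from the token
                        # after the start of the failed walk, replacing the recursive
                        # call used in the commented-out earlier version above.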
else:\n if((right==(len(sequence)-1))&(last_cand==\"NAN\")&(left<right)):\n #print(\"hehe\",cand_str)\n right=left\n reset=True\n if((curr_text.lower() in start_node.path.keys())&(cand_str==last_cand_text_norm)):\n start_node=start_node.path[curr_text.lower()]\n last_cand_substr=cand_str_wPunct\n else:\n start_node=start_node.path[curr]\n last_cand_substr=cand_str\n else:\n #print(\"=>\",cand_str,last_cand_text)\n if(last_cand!=\"NAN\"):\n candidateList.append((last_cand,last_cand_pos,last_cand_batch))\n last_cand=\"NAN\"\n if(start_node!=CTrie):\n start_node=CTrie\n last_cand_substr=\"\"\n if curr in start_node.path.keys():\n # if(flag):\n # print(\"here\",curr)\n reset=False\n if start_node.path[curr].value_valid:\n last_cand_text=curr_text\n last_cand_pos=curr_pos\n last_cand=last_cand_text\n last_cand_batch=start_node.path[curr].feature_list[-1]\n left=right\n start_node=start_node.path[curr]\n last_cand_substr=curr\n else:\n reset=True\n else:\n reset=True\n else:\n if(left<right):\n # if(flag):\n # print(sequence[(left+1):(right+1)])\n #candidateList.extend(self.get_Candidates(sequence[(left+1):(right+1)], CTrie, flag))\n right=left\n # if(flag):\n # print(\"++\",right)\n reset=True\n right+=1\n # if(flag):\n # print(last_cand)\n if(last_cand!=\"NAN\"):\n candidateList.append((last_cand,last_cand_pos,last_cand_batch))\n return candidateList\n\n\n # candidateList=[]\n # left=0\n # start_node=CTrie\n # last_cand=\"NAN\"\n # last_cand_substr=\"\"\n # reset=False\n # for right in range(len(sequence)):\n # if(reset):\n # left=right\n # curr_text=sequence[right][0]\n # curr_pos=[sequence[right][1]]\n # curr=self.normalize(sequence[right][0])\n # cand_str=self.normalize(last_cand_substr+\" \"+curr)\n # last_cand_sequence=sequence[left:(right+1)]\n # last_cand_text=' '.join(str(e[0]) for e in last_cand_sequence)\n # last_cand_text_norm=self.normalize(' '.join(str(e[0]) for e in last_cand_sequence))\n # #print(\"==>\",cand_str,last_cand_text)\n # if ((curr in start_node.path.keys())&(cand_str==last_cand_text_norm)):\n # #if flag:\n # #print(\"=>\",cand_str,last_cand_text)\n # reset=False\n # if (start_node.path[curr].value_valid):\n # #print(last_cand_text)\n # # if flag:\n # # print(last_cand_text)\n # last_cand_pos=[e[1] for e in last_cand_sequence]\n # last_cand=last_cand_text\n # start_node=start_node.path[curr]\n # last_cand_substr=cand_str\n # else:\n # #print(\"=>\",cand_str,last_cand_text)\n # if(last_cand!=\"NAN\"):\n # candidateList.append((last_cand,last_cand_pos))\n # last_cand=\"NAN\"\n # if(start_node!=CTrie):\n # start_node=CTrie\n # last_cand_substr=\"\"\n # if curr in start_node.path.keys():\n # #print(\"here\",curr)\n # reset=False\n # if start_node.path[curr].value_valid:\n # last_cand_text=curr_text\n # last_cand_pos=curr_pos\n # last_cand=curr\n # left=right\n # start_node=start_node.path[curr]\n # last_cand_substr=curr\n # else:\n # reset=True\n # else:\n # reset=True\n # else:\n # candidateList.extend(self.get_Candidates(sequence[(left+1):(right+1)], CTrie, flag))\n # reset=True\n # #print(last_cand)\n # if(last_cand!=\"NAN\"):\n # candidateList.append((last_cand,last_cand_pos))\n # return candidateList\n\n #@profile\n def append_rows(self,df_holder):\n \n df = pd.DataFrame(df_holder)\n #self.data_frame_holder=self.data_frame_holder.append(df,ignore_index=True)\n #self.data_frame_holder=self.data_frame_holder.reset_index(drop=True)\n return df\n\n\n\n #@profile\n def join_token_tuples(self,list_of_tuples):\n #print(string.punctuation)\n combined_str=(' '.join(tuple[0] 
for tuple in list_of_tuples)).lstrip(string.punctuation).rstrip(string.punctuation).strip()\n combined_pos='*'.join(str(tuple[1]) for tuple in list_of_tuples)\n combined_tuple=(combined_str,combined_pos,list_of_tuples[0][2],list_of_tuples[0][3],list_of_tuples[0][4],list_of_tuples[0][5],list_of_tuples[0][6])\n return combined_tuple\n\n\n\n #@profile\n def all_capitalized(self,candidate):\n strip_op=candidate\n strip_op=(((strip_op.lstrip(string.punctuation)).rstrip(string.punctuation)).strip())\n strip_op=(strip_op.lstrip('“‘’”')).rstrip('“‘’”')\n strip_op= self.rreplace(self.rreplace(self.rreplace(strip_op,\"'s\",\"\",1),\"’s\",\"\",1),\"’s\",\"\",1)\n prep_article_list=prep_list+article_list+self.phase2stopwordList+conjoiner\n word_list=strip_op.split()\n for i in range(len(word_list)):\n word=word_list[i]\n if((word[0].isupper())|(word[0].isdigit())):\n continue\n else:\n if(word in prep_article_list):\n if (i!=0):\n continue\n else:\n return False\n else:\n return False\n return True\n\n\n\n #@profile\n def check_feature_update(self, candidate_tuple,non_discriminative_flag):\n #print(candidate_tuple)\n if(non_discriminative_flag):\n return 7\n candidateText=candidate_tuple[0]\n position=candidate_tuple[1]\n word_list=candidateText.split()\n if candidateText.islower():\n return 6\n elif candidateText.isupper():\n return 5\n elif (len(word_list)==1):\n #start-of-sentence-check\n if self.all_capitalized(candidateText):\n if(int(position[0])==0):\n return 4\n else:\n return 2\n else:\n return 3\n else:\n if(self.all_capitalized(candidateText)):\n return 2\n else:\n return 3\n\n #@profile\n def update_Candidatedict(self,candidate_tuple,non_discriminative_flag):\n candidateText=candidate_tuple[0]\n\n #print(candidate_tuple)\n normalized_candidate=self.normalize(candidateText)\n feature_list=[]\n if(normalized_candidate in self.CandidateBase_dict.keys()):\n feature_list=self.CandidateBase_dict[normalized_candidate]\n else:\n feature_list=[0]*9\n feature_list[0]=self.counter\n feature_list[1]=len(normalized_candidate.split())\n feature_to_update=self.check_feature_update(candidate_tuple,non_discriminative_flag)\n # if(normalized_candidate==\"mayor of new york\"):\n # print(candidateText,feature_to_update)\n feature_list[feature_to_update]+=1\n feature_list[8]+=1\n self.CandidateBase_dict[normalized_candidate]=feature_list\n\n\n\n\n #@profile\n def extract(self,tweetBaseInput,CTrie,phase2stopwordList,new_or_old):\n\n\n if(self.counter==0):\n #output_queue\n self.data_frame_holder_OQ=pd.DataFrame([], columns=['index', 'entry_batch', 'tweetID', 'sentID', 'hashtags', 'user', 'TweetSentence','phase1Candidates', '2nd Iteration Candidates', '2nd Iteration Candidates Unnormalized','annotation','stanford_candidates'])\n self.incomplete_tweets=pd.DataFrame([], columns=['index','entry_batch', 'tweetID', 'sentID', 'hashtags', 'user', 'TweetSentence','phase1Candidates', '2nd Iteration Candidates', '2nd Iteration Candidates Unnormalized','annotation','stanford_candidates'])\n self.not_reintroduced=pd.DataFrame([], columns=['index','entry_batch', 'tweetID', 'sentID', 'hashtags', 'user', 'TweetSentence','phase1Candidates', '2nd Iteration Candidates', '2nd Iteration Candidates Unnormalized','annotation','stanford_candidates'])\n self.CandidateBase_dict= {}\n self.ambiguous_candidate_distanceDict_prev={}\n self.partition_dict={}\n self.good_candidates=[]\n self.bad_candidates=[]\n self.ambiguous_candidates=[]\n self.infrequent_candidates=[]\n self.entity_sketch=[0.0,0.0,0.0,0.0,0.0,0.0]\n 
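            # Editor's note: this counter == 0 branch is the one-time setup of the
            # streaming state: the output / incomplete-tweet frames, the candidate
            # feature dictionary, the partition dictionary used for recall
            # correction, the good / bad / ambiguous candidate lists, and the
            # six-dimensional capitalization sketches (initialized to zeros here)
            # that later batches refine.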
self.non_entity_sketch=[0.0,0.0,0.0,0.0,0.0,0.0]\n self.ambiguous_entity_sketch=[0.0,0.0,0.0,0.0,0.0,0.0]\n self.aggregator_incomplete_tweets=pd.DataFrame([], columns=['index', 'entry_batch', 'tweetID', 'sentID', 'hashtags', 'user', 'TweetSentence','phase1Candidates', '2nd Iteration Candidates', '2nd Iteration Candidates Unnormalized','annotation','stanford_candidates'])\n self.just_converted_tweets=pd.DataFrame([], columns=['index', 'entry_batch', 'tweetID', 'sentID', 'hashtags', 'user', 'TweetSentence','phase1Candidates', '2nd Iteration Candidates', '2nd Iteration Candidates Unnormalized','annotation','stanford_candidates'])\n #self.data_frame_holder=pd.DataFrame([], columns=['index','entry_batch','tweetID', 'sentID', 'hashtags', 'user', 'TweetSentence','phase1Candidates', '2nd Iteration Candidates'])\n self.raw_tweets_for_others=pd.DataFrame([], columns=['index','entry_batch','tweetID', 'sentID', 'hashtags', 'user', 'TweetSentence','phase1Candidates', '2nd Iteration Candidates', '2nd Iteration Candidates Unnormalized'])\n\n self.accuracy_tuples_prev_batch=[]\n self.accuracy_vals=[]\n \n #frequency_w_decay related information\n self.ambiguous_candidates_reintroduction_dict={}\n\n #### other systems\n self.accuracy_vals_stanford=[]\n self.accuracy_vals_opencalai=[]\n self.accuracy_vals_ritter=[]\n\n self.number_of_seen_tweets_per_batch=[]\n self.phase2stopwordList=phase2stopwordList\n self.number_of_seen_tweets_per_batch.append(len(tweetBaseInput))\n\n\n #data_frame_holder=pd.DataFrame([], columns=['index','entry_batch','tweetID', 'sentID', 'hashtags', 'user', 'TweetSentence','phase1Candidates', '2nd Iteration Candidates', '2nd Iteration Candidates Unnormalized'])\n phase1_holder_holder=[]\n phase2_candidates_holder=[]\n phase2_unnormalized_candidates_holder=[]\n df_holder=[]\n if(new_or_old==0):\n self.ambiguous_candidates_in_batch=[]\n \n #candidateBase_holder=[]\n\n #this has to be changed to an append function since IPQ already has incomplete tweets from prev batch \n #print(len(tweetBaseInput))\n #immediate_processingQueue = pd.concat([self.incomplete_tweets,TweetBase ])\n #immediate_processingQueue.to_csv(\"impq.csv\", sep=',', encoding='utf-8')\n \n\n\n #print('In Phase 2',len(immediate_processingQueue))\n #immediate_processingQueue=immediate_processingQueue.reset_index(drop=True)\n combined_list_here=([]+list(cachedStopWords)+chat_word_list+day_list+month_list+article_list+prep_list)\n combined_list_filtered=list(filter(lambda word: word not in (prep_list+article_list+month_list+phase2stopwordList), combined_list_here))\n #--------------------------------------PHASE II---------------------------------------------------\n for index, row in tweetBaseInput.iterrows():\n\n #phase 1 candidates for one sentence\n phase1_holder=[]\n\n tweetText=str(row['TweetSentence'])\n #print(tweetText)\n sentID=str(row['sentID'])\n tweetID=str(row['tweetID'])\n phase1Candidates=str(row['phase1Candidates'])\n hashtags=str(row['hashtags'])\n user=str(row['user'])\n batch=int(row['entry_batch'])\n #time=str(row['start_time'])\n\n\n\n annotation=list(row['annotation'])\n\n stanford=list(row['stanford_candidates'])\n non_discriminative_flag=False\n\n # if((tweetID==\"524\")):\n # print(tweetID,phase1Candidates)\n\n\n if (phase1Candidates !='nan'):\n phase1Raw=phase1Candidates.split(\"||\")\n phase1Raw = list(filter(None, phase1Raw))\n\n\n for entities_with_loc in phase1Raw:\n entity_to_store=entities_with_loc.split(\"::\")[0]\n #print(entity_to_store)\n position=entities_with_loc.split(\"::\")[1]\n 
#print(position)\n phase1_holder.append((entity_to_store,position))\n\n phase1_holder_holder.append(copy.deepcopy(phase1_holder))\n phase1_holder.clear()\n\n else:\n non_discriminative_flag=True\n phase1_holder_holder.append([])\n\n\n #print(sen_index1)[ ()/,;:!?…-]\n #splitList=tweetText.split()\n '''splitList=re.split('[ ()/,;:!?…-]',tweetText)\n #print(tweetText,splitList)\n #wordlstU=list(filter(lambda word: ((word!=\"\")&(word.strip(string.punctuation).strip().lower() not in cachedStopWords)), splitList))\n splitList=list(map(lambda word: word.strip(), splitList))\n tweetWordList=list(filter(lambda word: word!=\"\", splitList))'''\n #print(tweetWordList)\n tweetWordList=self.getWords(tweetText)\n tweetWordList= [(token,idx) for idx,token in enumerate(tweetWordList)]\n #print(tweetWordList)\n\n\n #combined_list_here=([]+list(cachedStopWords)+prep_list+chat_word_list+article_list+day_list+month_list)\n \n tweetWordList_stopWords=list(filter (lambda word: ((((word[0].strip()).strip(string.punctuation)).lower() in combined_list_filtered)|(word[0].strip() in string.punctuation)|(word[0].startswith('@'))|(word[0].startswith('#'))), tweetWordList))\n\n\n # phase 2 candidate tuples without stopwords for a sentence\n c=[(y[0],str(y[1]),tweetID,sentID,'ne',batch,time) for y in tweetWordList if y not in tweetWordList_stopWords ]\n #c=[(y[0],str(y[1])) for y in tweetWordList if y not in tweetWordList_stopWords ]\n\n \n sequences=[]\n for k, g in groupby(enumerate(c), lambda element: element[0]-int(element[1][1])):\n sequences.append(list(map(itemgetter(1), g)))\n\n ne_candidate_list=[]\n for sequence in sequences:\n # if((tweetID==\"436\")|(tweetID==\"938\")):\n # print(sequence)\n # seq_candidate_list=self.get_Candidates(sequence, CTrie,True)\n # else:\n seq_candidate_list=self.get_Candidates(sequence, CTrie,False)\n if(seq_candidate_list):\n '''seq_candidate_list= list(map(lambda e: self.join_token_tuples(e) ,seq_candidates))'''\n # if((tweetID==\"14\")|(tweetID==\"938\")):\n # print(\"====\",seq_candidate_list)\n\n \n for candidate_tuple in seq_candidate_list:\n #inserts into CandidateBase and updates the correct frequency feature based on Capitalization pattern\n if not ((float(batch)<self.counter)&(candidate_tuple[-1]<self.counter)):\n #print(candidate_tuple[0])\n self.update_Candidatedict(candidate_tuple,non_discriminative_flag)\n\n ne_candidate_list.extend(seq_candidate_list)\n \n \n #phase2_candidates='||'.join(e[0] for e in ne_candidate_list)\n\n phase2_candidates=[self.normalize(e[0]) for e in ne_candidate_list]\n phase2_candidates_unnormalized=[e[0] for e in ne_candidate_list]\n\n # if(tweetID==\"14\"):\n # print((phase2_candidates),(phase2_candidates_unnormalized))\n #print(len(self.ambiguous_candidates))\n if(new_or_old==0):\n #self.ambiguous_candidates_in_batch=[]\n self.ambiguous_candidates_in_batch.extend(list(filter(lambda candidate: candidate in self.ambiguous_candidates, phase2_candidates)))\n #print(len(self.ambiguous_candidates_in_batch))\n # for candidate in phase2_candidates:\n # if candidate in self.ambiguous_candidates:\n # print(candidate)\n phase2_candidates_holder.append(phase2_candidates)\n phase2_unnormalized_candidates_holder.append(phase2_candidates_unnormalized)\n\n #print(phase1Candidates,\"====\",phase2_candidates)\n # if((tweetID==\"63\")|(tweetID==\"130\")|(tweetID==\"277\")|(tweetID==\"335\")|(tweetID==\"13\")):\n\n if((tweetID==\"371\")):\n print(tweetID,phase1Candidates,\"====\",phase2_candidates,non_discriminative_flag)\n dict1 = {'entry_batch':batch, 
'tweetID':tweetID, 'sentID':sentID, 'hashtags':hashtags, 'user':user, 'TweetSentence':tweetText, 'phase1Candidates':phase1Candidates,'2nd Iteration Candidates':phase2_candidates,'2nd Iteration Candidates Unnormalized':phase2_candidates_unnormalized, 'annotation':annotation,'stanford_candidates':stanford}\n\n df_holder.append(dict1)\n #-------------------------------------------------------------------END of 1st iteration: RESCAN+CANDIDATE_UPDATION-----------------------------------------------------------\n\n #df_holder is the immediate processing queue of the current batch converted into a dataframe---> data_frame_holder\n #self.append_rows(df_holder)\n #data_frame_holder = pd.DataFrame(df_holder)\n # print(data_frame_holder.head(5))\n\n\n #convert the CandidateFeatureBase from a dictionary to dataframe---> CandidateFeatureBaseDF\n candidateBaseHeaders=['candidate', 'batch', 'length','cap','substring-cap','s-o-sCap','all-cap','non-cap','non-discriminative','cumulative']\n candidate_featureBase_DF=pd.DataFrame.from_dict(self.CandidateBase_dict, orient='index')\n candidate_featureBase_DF.columns=candidateBaseHeaders[1:]\n candidate_featureBase_DF.index.name=candidateBaseHeaders[0]\n candidate_featureBase_DF = candidate_featureBase_DF.reset_index(drop=False)\n\n\n #data_frame_holder.to_csv(\"phase2output.csv\", sep=',', encoding='utf-8')\n return candidate_featureBase_DF,df_holder,phase2_candidates_holder,phase2_unnormalized_candidates_holder\n\n\n # self.aggregator_incomplete_tweets= self.aggregator_incomplete_tweets.append(self.incomplete_tweets)\n # self.just_converted_tweets=self.just_converted_tweets.append(just_converted_tweets_for_current_batch)\n\n\n\n\n\n\n def finish(self):\n return self.accuracy_vals\n\n def finish_other_systems(self):\n print(\"*****************************************STANFORD RESULSTSSS***********************\")\n for i in self.accuracy_vals_stanford:\n print(i)\n\n print(\"*****************************************STANFORD ENDSSSSSSSS***********************\")\n\n return (self.accuracy_vals_stanford,self.accuracy_vals_opencalai,self.accuracy_vals_ritter)\n\n\n##################UNCOMMENT THIS WHEN YOU'RE DONE // STARTS\n\n # candidate_featureBase_DF = candidate_featureBase_DF.set_index('candidate')\n\n # candidate_with_label_holder=[]\n # one_level=[]\n # for sentence_level_candidates in phase2_candidates_holder:\n # one_level.clear()\n # for candidate in sentence_level_candidates:\n # if candidate in candidate_featureBase_DF.index:\n # label=candidate_featureBase_DF.get_value(candidate,'status')\n # one_level.append((candidate,label))\n # else:\n # one_level.append((candidate,\"na\"))\n\n\n # candidate_with_label_holder.append(copy.deepcopy(one_level))\n\n # print(len(data_frame_holder),len(candidate_with_label_holder))\n\n # data_frame_holder[\"candidates_with_label\"]=candidate_with_label_holder\n\n\n############################ UNCOMMENT THIS WHEN YOU DONE FINISH\n\n\n\n # print(data_frame_holder[\"2nd Iteration Candidates\"][data_frame_holder.tweetID=='2'].tolist())\n # list1=data_frame_holder[\"candidates_with_label\"][data_frame_holder.tweetID=='2'].tolist()\n \n # list3=[]\n # #print(list1)\n # for i in list1:\n # for a in i:\n # # print(a)\n # list3.append(a)\n\n\n\n\n\n\n\n\n # tweetID_holder=data_frame_holder.tweetID.astype(int) \n\n # # row_level_candidates=[]\n # # tweet_level_candidates=[]\n # # for i in range(int(tweetID_holder.max())+1):\n # # list1=data_frame_holder[\"candidates_with_label\"][data_frame_holder.tweetID==str(i)].tolist()\n\n # 
row_level_candidates.clear()\n # for j in list1:\n # for a in j:\n # # print(a)\n # if(a[1]==\"g\"):\n # row_level_candidates.append(a[0])\n\n # tweet_level_candidates.append(copy.deepcopy(row_level_candidates))\n\n\n\n # for i in tweet_level_candidates:\n# print(i)"
] | [
[
"pandas.read_csv",
"sklearn.svm.SVC"
],
[
"scipy.stats.zscore",
"scipy.spatial.distance.euclidean",
"pandas.DataFrame.from_dict",
"pandas.DataFrame",
"sklearn.cluster.KMeans",
"sklearn.metrics.silhouette_score",
"scipy.spatial.distance.cosine",
"pandas.concat",
"pandas.Series"
]
] |
pizza654321/pandas | [
"abf1af545ef8feac46d8927f1fe10dc21312b840"
] | [
"pandas/tests/io/xml/test_xml.py"
] | [
"from __future__ import annotations\n\nfrom io import (\n BytesIO,\n StringIO,\n)\nfrom lzma import LZMAError\nimport os\nfrom urllib.error import HTTPError\nfrom zipfile import BadZipFile\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat._optional import import_optional_dependency\nimport pandas.util._test_decorators as td\n\nfrom pandas import DataFrame\nimport pandas._testing as tm\n\nfrom pandas.io.xml import read_xml\n\n\"\"\"\nCHECK LIST\n\n[x] - ValueError: \"Values for parser can only be lxml or etree.\"\n\netree\n[X] - ImportError: \"lxml not found, please install or use the etree parser.\"\n[X] - TypeError: \"expected str, bytes or os.PathLike object, not NoneType\"\n[X] - ValueError: \"Either element or attributes can be parsed not both.\"\n[X] - ValueError: \"xpath does not return any nodes...\"\n[X] - SyntaxError: \"You have used an incorrect or unsupported XPath\"\n[X] - ValueError: \"names does not match length of child elements in xpath.\"\n[X] - TypeError: \"...is not a valid type for names\"\n[X] - ValueError: \"To use stylesheet, you need lxml installed...\"\n[] - URLError: (GENERAL ERROR WITH HTTPError AS SUBCLASS)\n[X] - HTTPError: \"HTTP Error 404: Not Found\"\n[] - OSError: (GENERAL ERROR WITH FileNotFoundError AS SUBCLASS)\n[X] - FileNotFoundError: \"No such file or directory\"\n[] - ParseError (FAILSAFE CATCH ALL FOR VERY COMPLEX XML)\n[X] - UnicodeDecodeError: \"'utf-8' codec can't decode byte 0xe9...\"\n[X] - UnicodeError: \"UTF-16 stream does not start with BOM\"\n[X] - BadZipFile: \"File is not a zip file\"\n[X] - OSError: \"Invalid data stream\"\n[X] - LZMAError: \"Input format not supported by decoder\"\n[X] - ValueError: \"Unrecognized compression type\"\n[X] - PermissionError: \"Forbidden\"\n\nlxml\n[X] - ValueError: \"Either element or attributes can be parsed not both.\"\n[X] - AttributeError: \"__enter__\"\n[X] - XSLTApplyError: \"Cannot resolve URI\"\n[X] - XSLTParseError: \"document is not a stylesheet\"\n[X] - ValueError: \"xpath does not return any nodes.\"\n[X] - XPathEvalError: \"Invalid expression\"\n[] - XPathSyntaxError: (OLD VERSION IN lxml FOR XPATH ERRORS)\n[X] - TypeError: \"empty namespace prefix is not supported in XPath\"\n[X] - ValueError: \"names does not match length of child elements in xpath.\"\n[X] - TypeError: \"...is not a valid type for names\"\n[X] - LookupError: \"unknown encoding\"\n[] - URLError: (USUALLY DUE TO NETWORKING)\n[X - HTTPError: \"HTTP Error 404: Not Found\"\n[X] - OSError: \"failed to load external entity\"\n[X] - XMLSyntaxError: \"Start tag expected, '<' not found\"\n[] - ParserError: (FAILSAFE CATCH ALL FOR VERY COMPLEX XML\n[X] - ValueError: \"Values for parser can only be lxml or etree.\"\n[X] - UnicodeDecodeError: \"'utf-8' codec can't decode byte 0xe9...\"\n[X] - UnicodeError: \"UTF-16 stream does not start with BOM\"\n[X] - BadZipFile: \"File is not a zip file\"\n[X] - OSError: \"Invalid data stream\"\n[X] - LZMAError: \"Input format not supported by decoder\"\n[X] - ValueError: \"Unrecognized compression type\"\n[X] - PermissionError: \"Forbidden\"\n\"\"\"\n\ngeom_df = DataFrame(\n {\n \"shape\": [\"square\", \"circle\", \"triangle\"],\n \"degrees\": [360, 360, 180],\n \"sides\": [4, np.nan, 3],\n }\n)\n\nxml_default_nmsp = \"\"\"\\\n<?xml version='1.0' encoding='utf-8'?>\n<data xmlns=\"http://example.com\">\n <row>\n <shape>square</shape>\n <degrees>360</degrees>\n <sides>4</sides>\n </row>\n <row>\n <shape>circle</shape>\n <degrees>360</degrees>\n <sides/>\n </row>\n <row>\n 
<shape>triangle</shape>\n <degrees>180</degrees>\n <sides>3</sides>\n </row>\n</data>\"\"\"\n\nxml_prefix_nmsp = \"\"\"\\\n<?xml version='1.0' encoding='utf-8'?>\n<doc:data xmlns:doc=\"http://example.com\">\n <doc:row>\n <doc:shape>square</doc:shape>\n <doc:degrees>360</doc:degrees>\n <doc:sides>4.0</doc:sides>\n </doc:row>\n <doc:row>\n <doc:shape>circle</doc:shape>\n <doc:degrees>360</doc:degrees>\n <doc:sides/>\n </doc:row>\n <doc:row>\n <doc:shape>triangle</doc:shape>\n <doc:degrees>180</doc:degrees>\n <doc:sides>3.0</doc:sides>\n </doc:row>\n</doc:data>\"\"\"\n\n\ndf_kml = DataFrame(\n {\n \"id\": {\n 0: \"ID_00001\",\n 1: \"ID_00002\",\n 2: \"ID_00003\",\n 3: \"ID_00004\",\n 4: \"ID_00005\",\n },\n \"name\": {\n 0: \"Blue Line (Forest Park)\",\n 1: \"Red, Purple Line\",\n 2: \"Red, Purple Line\",\n 3: \"Red, Purple Line\",\n 4: \"Red, Purple Line\",\n },\n \"styleUrl\": {\n 0: \"#LineStyle01\",\n 1: \"#LineStyle01\",\n 2: \"#LineStyle01\",\n 3: \"#LineStyle01\",\n 4: \"#LineStyle01\",\n },\n \"extrude\": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0},\n \"altitudeMode\": {\n 0: \"clampedToGround\",\n 1: \"clampedToGround\",\n 2: \"clampedToGround\",\n 3: \"clampedToGround\",\n 4: \"clampedToGround\",\n },\n \"coordinates\": {\n 0: (\n \"-87.77678526964958,41.8708863930319,0 \"\n \"-87.77826234150609,41.87097820122218,0 \"\n \"-87.78251583439344,41.87130129991005,0 \"\n \"-87.78418294588424,41.87145055520308,0 \"\n \"-87.7872369165933,41.8717239119163,0 \"\n \"-87.79160214925886,41.87210797280065,0\"\n ),\n 1: (\n \"-87.65758750947528,41.96427269188822,0 \"\n \"-87.65802133507393,41.96581929055245,0 \"\n \"-87.65819033925305,41.96621846093642,0 \"\n \"-87.6583189819129,41.96650362897086,0 \"\n \"-87.65835858701473,41.96669002089185,0 \"\n \"-87.65838428411853,41.96688150295095,0 \"\n \"-87.65842208882658,41.96745896091846,0 \"\n \"-87.65846556843937,41.9683761425439,0 \"\n \"-87.65849296214573,41.96913893870342,0\"\n ),\n 2: (\n \"-87.65492939166126,41.95377494531437,0 \"\n \"-87.65557043199591,41.95376544118533,0 \"\n \"-87.65606302030132,41.95376391658746,0 \"\n \"-87.65623502146268,41.95377379126367,0 \"\n \"-87.65634748981634,41.95380103566435,0 \"\n \"-87.65646537904269,41.95387703994676,0 \"\n \"-87.65656532461145,41.95396622645799,0 \"\n \"-87.65664760856414,41.95404201996044,0 \"\n \"-87.65671750555913,41.95416647054043,0 \"\n \"-87.65673983607117,41.95429949810849,0 \"\n \"-87.65673866475777,41.95441024240925,0 \"\n \"-87.6567690255541,41.95490657227902,0 \"\n \"-87.65683672482363,41.95692259283837,0 \"\n \"-87.6568900886376,41.95861070983142,0 \"\n \"-87.65699865558875,41.96181418669004,0 \"\n \"-87.65756347177603,41.96397045777844,0 \"\n \"-87.65758750947528,41.96427269188822,0\"\n ),\n 3: (\n \"-87.65362593118043,41.94742799535678,0 \"\n \"-87.65363554415794,41.94819886386848,0 \"\n \"-87.6536456393239,41.95059994675451,0 \"\n \"-87.65365831235026,41.95108288489359,0 \"\n \"-87.6536604873874,41.9519954657554,0 \"\n \"-87.65362592053201,41.95245597302328,0 \"\n \"-87.65367158496069,41.95311153649393,0 \"\n \"-87.65368468595476,41.9533202828916,0 \"\n \"-87.65369271253692,41.95343095587119,0 \"\n \"-87.65373335834569,41.95351536301472,0 \"\n \"-87.65378605844126,41.95358212680591,0 \"\n \"-87.65385067928185,41.95364452823767,0 \"\n \"-87.6539390793817,41.95370263886964,0 \"\n \"-87.6540786298351,41.95373403675265,0 \"\n \"-87.65430648647626,41.9537535411832,0 \"\n \"-87.65492939166126,41.95377494531437,0\"\n ),\n 4: (\n \"-87.65345391792157,41.94217681262115,0 \"\n 
\"-87.65342448305786,41.94237224420864,0 \"\n \"-87.65339745703922,41.94268217746244,0 \"\n \"-87.65337753982941,41.94288140770284,0 \"\n \"-87.65336256753105,41.94317369618263,0 \"\n \"-87.65338799707138,41.94357253961736,0 \"\n \"-87.65340240886648,41.94389158188269,0 \"\n \"-87.65341837392448,41.94406444407721,0 \"\n \"-87.65342275247338,41.94421065714904,0 \"\n \"-87.65347469646018,41.94434829382345,0 \"\n \"-87.65351486483024,41.94447699917548,0 \"\n \"-87.65353483605053,41.9453896864472,0 \"\n \"-87.65361975532807,41.94689193720703,0 \"\n \"-87.65362593118043,41.94742799535678,0\"\n ),\n },\n }\n)\n\n\[email protected](params=[\"rb\", \"r\"])\ndef mode(request):\n return request.param\n\n\[email protected](params=[pytest.param(\"lxml\", marks=td.skip_if_no(\"lxml\")), \"etree\"])\ndef parser(request):\n return request.param\n\n\n# FILE / URL\n\n\[email protected]_if_no(\"lxml\")\ndef test_parser_consistency_file(datapath):\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n df_file_lxml = read_xml(filename, parser=\"lxml\")\n df_file_etree = read_xml(filename, parser=\"etree\")\n\n tm.assert_frame_equal(df_file_lxml, df_file_etree)\n\n\[email protected]\[email protected]\[email protected]_if_no(\"lxml\")\ndef test_parser_consistency_url():\n url = (\n \"https://data.cityofchicago.org/api/views/\"\n \"8pix-ypme/rows.xml?accessType=DOWNLOAD\"\n )\n df_url_lxml = read_xml(url, xpath=\".//row/row\", parser=\"lxml\")\n df_url_etree = read_xml(url, xpath=\".//row/row\", parser=\"etree\")\n\n tm.assert_frame_equal(df_url_lxml, df_url_etree)\n\n\ndef test_file_like(datapath, parser, mode):\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n with open(filename, mode) as f:\n df_file = read_xml(f, parser=parser)\n\n df_expected = DataFrame(\n {\n \"category\": [\"cooking\", \"children\", \"web\"],\n \"title\": [\"Everyday Italian\", \"Harry Potter\", \"Learning XML\"],\n \"author\": [\"Giada De Laurentiis\", \"J K. Rowling\", \"Erik T. Ray\"],\n \"year\": [2005, 2005, 2003],\n \"price\": [30.00, 29.99, 39.95],\n }\n )\n\n tm.assert_frame_equal(df_file, df_expected)\n\n\ndef test_file_io(datapath, parser, mode):\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n with open(filename, mode) as f:\n xml_obj = f.read()\n\n df_io = read_xml(\n (BytesIO(xml_obj) if isinstance(xml_obj, bytes) else StringIO(xml_obj)),\n parser=parser,\n )\n\n df_expected = DataFrame(\n {\n \"category\": [\"cooking\", \"children\", \"web\"],\n \"title\": [\"Everyday Italian\", \"Harry Potter\", \"Learning XML\"],\n \"author\": [\"Giada De Laurentiis\", \"J K. Rowling\", \"Erik T. Ray\"],\n \"year\": [2005, 2005, 2003],\n \"price\": [30.00, 29.99, 39.95],\n }\n )\n\n tm.assert_frame_equal(df_io, df_expected)\n\n\ndef test_file_buffered_reader_string(datapath, parser, mode):\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n with open(filename, mode) as f:\n xml_obj = f.read()\n\n df_str = read_xml(xml_obj, parser=parser)\n\n df_expected = DataFrame(\n {\n \"category\": [\"cooking\", \"children\", \"web\"],\n \"title\": [\"Everyday Italian\", \"Harry Potter\", \"Learning XML\"],\n \"author\": [\"Giada De Laurentiis\", \"J K. Rowling\", \"Erik T. 
Ray\"],\n \"year\": [2005, 2005, 2003],\n \"price\": [30.00, 29.99, 39.95],\n }\n )\n\n tm.assert_frame_equal(df_str, df_expected)\n\n\ndef test_file_buffered_reader_no_xml_declaration(datapath, parser, mode):\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n with open(filename, mode) as f:\n next(f)\n xml_obj = f.read()\n\n df_str = read_xml(xml_obj, parser=parser)\n\n df_expected = DataFrame(\n {\n \"category\": [\"cooking\", \"children\", \"web\"],\n \"title\": [\"Everyday Italian\", \"Harry Potter\", \"Learning XML\"],\n \"author\": [\"Giada De Laurentiis\", \"J K. Rowling\", \"Erik T. Ray\"],\n \"year\": [2005, 2005, 2003],\n \"price\": [30.00, 29.99, 39.95],\n }\n )\n\n tm.assert_frame_equal(df_str, df_expected)\n\n\ndef test_file_handle_close(datapath, parser):\n xml_file = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n\n with open(xml_file, \"rb\") as f:\n read_xml(BytesIO(f.read()), parser=parser)\n\n assert not f.closed\n\n\[email protected]_if_no(\"lxml\")\[email protected](\"val\", [\"\", b\"\"])\ndef test_empty_string_lxml(val):\n from lxml.etree import XMLSyntaxError\n\n with pytest.raises(XMLSyntaxError, match=\"Document is empty\"):\n read_xml(val, parser=\"lxml\")\n\n\[email protected](\"val\", [\"\", b\"\"])\ndef test_empty_string_etree(val):\n from xml.etree.ElementTree import ParseError\n\n with pytest.raises(ParseError, match=\"no element found\"):\n read_xml(val, parser=\"etree\")\n\n\[email protected]_if_no(\"lxml\")\ndef test_wrong_file_path_lxml():\n from lxml.etree import XMLSyntaxError\n\n filename = os.path.join(\"data\", \"html\", \"books.xml\")\n\n with pytest.raises(\n XMLSyntaxError,\n match=(\"Start tag expected, '<' not found\"),\n ):\n read_xml(filename, parser=\"lxml\")\n\n\ndef test_wrong_file_path_etree():\n from xml.etree.ElementTree import ParseError\n\n filename = os.path.join(\"data\", \"html\", \"books.xml\")\n\n with pytest.raises(\n ParseError,\n match=(\"not well-formed\"),\n ):\n read_xml(filename, parser=\"etree\")\n\n\[email protected]\[email protected]_if_no(\"lxml\")\ndef test_url():\n url = \"https://www.w3schools.com/xml/books.xml\"\n df_url = read_xml(url, xpath=\".//book[count(*)=4]\")\n\n df_expected = DataFrame(\n {\n \"category\": [\"cooking\", \"children\", \"web\"],\n \"title\": [\"Everyday Italian\", \"Harry Potter\", \"Learning XML\"],\n \"author\": [\"Giada De Laurentiis\", \"J K. Rowling\", \"Erik T. 
Ray\"],\n \"year\": [2005, 2005, 2003],\n \"price\": [30.00, 29.99, 39.95],\n \"cover\": [None, None, \"paperback\"],\n }\n )\n\n tm.assert_frame_equal(df_url, df_expected)\n\n\[email protected]\ndef test_wrong_url(parser):\n with pytest.raises(HTTPError, match=(\"HTTP Error 404: Not Found\")):\n url = \"https://www.w3schools.com/xml/python.xml\"\n read_xml(url, xpath=\".//book[count(*)=4]\", parser=parser)\n\n\n# XPATH\n\n\[email protected]_if_no(\"lxml\")\ndef test_empty_xpath_lxml(datapath):\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n with pytest.raises(ValueError, match=(\"xpath does not return any nodes\")):\n read_xml(filename, xpath=\".//python\", parser=\"lxml\")\n\n\ndef test_bad_xpath_etree(datapath):\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n with pytest.raises(\n SyntaxError, match=(\"You have used an incorrect or unsupported XPath\")\n ):\n read_xml(filename, xpath=\".//[book]\", parser=\"etree\")\n\n\[email protected]_if_no(\"lxml\")\ndef test_bad_xpath_lxml(datapath):\n from lxml.etree import XPathEvalError\n\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n with pytest.raises(XPathEvalError, match=(\"Invalid expression\")):\n read_xml(filename, xpath=\".//[book]\", parser=\"lxml\")\n\n\n# NAMESPACE\n\n\ndef test_default_namespace(parser):\n df_nmsp = read_xml(\n xml_default_nmsp,\n xpath=\".//ns:row\",\n namespaces={\"ns\": \"http://example.com\"},\n parser=parser,\n )\n\n df_expected = DataFrame(\n {\n \"shape\": [\"square\", \"circle\", \"triangle\"],\n \"degrees\": [360, 360, 180],\n \"sides\": [4.0, float(\"nan\"), 3.0],\n }\n )\n\n tm.assert_frame_equal(df_nmsp, df_expected)\n\n\ndef test_prefix_namespace(parser):\n df_nmsp = read_xml(\n xml_prefix_nmsp,\n xpath=\".//doc:row\",\n namespaces={\"doc\": \"http://example.com\"},\n parser=parser,\n )\n\n df_expected = DataFrame(\n {\n \"shape\": [\"square\", \"circle\", \"triangle\"],\n \"degrees\": [360, 360, 180],\n \"sides\": [4.0, float(\"nan\"), 3.0],\n }\n )\n\n tm.assert_frame_equal(df_nmsp, df_expected)\n\n\[email protected]_if_no(\"lxml\")\ndef test_consistency_default_namespace():\n df_lxml = read_xml(\n xml_default_nmsp,\n xpath=\".//ns:row\",\n namespaces={\"ns\": \"http://example.com\"},\n parser=\"lxml\",\n )\n\n df_etree = read_xml(\n xml_default_nmsp,\n xpath=\".//doc:row\",\n namespaces={\"doc\": \"http://example.com\"},\n parser=\"etree\",\n )\n\n tm.assert_frame_equal(df_lxml, df_etree)\n\n\[email protected]_if_no(\"lxml\")\ndef test_consistency_prefix_namespace():\n df_lxml = read_xml(\n xml_prefix_nmsp,\n xpath=\".//doc:row\",\n namespaces={\"doc\": \"http://example.com\"},\n parser=\"lxml\",\n )\n\n df_etree = read_xml(\n xml_prefix_nmsp,\n xpath=\".//doc:row\",\n namespaces={\"doc\": \"http://example.com\"},\n parser=\"etree\",\n )\n\n tm.assert_frame_equal(df_lxml, df_etree)\n\n\n# PREFIX\n\n\ndef test_missing_prefix_with_default_namespace(datapath, parser):\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n with pytest.raises(ValueError, match=(\"xpath does not return any nodes\")):\n read_xml(filename, xpath=\".//Placemark\", parser=parser)\n\n\ndef test_missing_prefix_definition_etree(datapath):\n filename = datapath(\"io\", \"data\", \"xml\", \"cta_rail_lines.kml\")\n with pytest.raises(SyntaxError, match=(\"you used an undeclared namespace prefix\")):\n read_xml(filename, xpath=\".//kml:Placemark\", parser=\"etree\")\n\n\[email protected]_if_no(\"lxml\")\ndef test_missing_prefix_definition_lxml(datapath):\n from 
lxml.etree import XPathEvalError\n\n filename = datapath(\"io\", \"data\", \"xml\", \"cta_rail_lines.kml\")\n with pytest.raises(XPathEvalError, match=(\"Undefined namespace prefix\")):\n read_xml(filename, xpath=\".//kml:Placemark\", parser=\"lxml\")\n\n\[email protected]_if_no(\"lxml\")\[email protected](\"key\", [\"\", None])\ndef test_none_namespace_prefix(key):\n with pytest.raises(\n TypeError, match=(\"empty namespace prefix is not supported in XPath\")\n ):\n read_xml(\n xml_default_nmsp,\n xpath=\".//kml:Placemark\",\n namespaces={key: \"http://www.opengis.net/kml/2.2\"},\n parser=\"lxml\",\n )\n\n\n# ELEMS AND ATTRS\n\n\ndef test_file_elems_and_attrs(datapath, parser):\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n df_file = read_xml(filename, parser=parser)\n df_expected = DataFrame(\n {\n \"category\": [\"cooking\", \"children\", \"web\"],\n \"title\": [\"Everyday Italian\", \"Harry Potter\", \"Learning XML\"],\n \"author\": [\"Giada De Laurentiis\", \"J K. Rowling\", \"Erik T. Ray\"],\n \"year\": [2005, 2005, 2003],\n \"price\": [30.00, 29.99, 39.95],\n }\n )\n\n tm.assert_frame_equal(df_file, df_expected)\n\n\ndef test_file_only_attrs(datapath, parser):\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n df_file = read_xml(filename, attrs_only=True, parser=parser)\n df_expected = DataFrame({\"category\": [\"cooking\", \"children\", \"web\"]})\n\n tm.assert_frame_equal(df_file, df_expected)\n\n\ndef test_file_only_elems(datapath, parser):\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n df_file = read_xml(filename, elems_only=True, parser=parser)\n df_expected = DataFrame(\n {\n \"title\": [\"Everyday Italian\", \"Harry Potter\", \"Learning XML\"],\n \"author\": [\"Giada De Laurentiis\", \"J K. Rowling\", \"Erik T. 
Ray\"],\n \"year\": [2005, 2005, 2003],\n \"price\": [30.00, 29.99, 39.95],\n }\n )\n\n tm.assert_frame_equal(df_file, df_expected)\n\n\ndef test_elem_and_attrs_only(datapath, parser):\n filename = datapath(\"io\", \"data\", \"xml\", \"cta_rail_lines.kml\")\n with pytest.raises(\n ValueError,\n match=(\"Either element or attributes can be parsed not both\"),\n ):\n read_xml(filename, elems_only=True, attrs_only=True, parser=parser)\n\n\[email protected]_if_no(\"lxml\")\ndef test_attribute_centric_xml():\n xml = \"\"\"\\\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<TrainSchedule>\n <Stations>\n <station Name=\"Manhattan\" coords=\"31,460,195,498\"/>\n <station Name=\"Laraway Road\" coords=\"63,409,194,455\"/>\n <station Name=\"179th St (Orland Park)\" coords=\"0,364,110,395\"/>\n <station Name=\"153rd St (Orland Park)\" coords=\"7,333,113,362\"/>\n <station Name=\"143rd St (Orland Park)\" coords=\"17,297,115,330\"/>\n <station Name=\"Palos Park\" coords=\"128,281,239,303\"/>\n <station Name=\"Palos Heights\" coords=\"148,257,283,279\"/>\n <station Name=\"Worth\" coords=\"170,230,248,255\"/>\n <station Name=\"Chicago Ridge\" coords=\"70,187,208,214\"/>\n <station Name=\"Oak Lawn\" coords=\"166,159,266,185\"/>\n <station Name=\"Ashburn\" coords=\"197,133,336,157\"/>\n <station Name=\"Wrightwood\" coords=\"219,106,340,133\"/>\n <station Name=\"Chicago Union Sta\" coords=\"220,0,360,43\"/>\n </Stations>\n</TrainSchedule>\"\"\"\n\n df_lxml = read_xml(xml, xpath=\".//station\")\n df_etree = read_xml(xml, xpath=\".//station\", parser=\"etree\")\n\n tm.assert_frame_equal(df_lxml, df_etree)\n\n\n# NAMES\n\n\ndef test_names_option_output(datapath, parser):\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n df_file = read_xml(\n filename, names=[\"Col1\", \"Col2\", \"Col3\", \"Col4\", \"Col5\"], parser=parser\n )\n\n df_expected = DataFrame(\n {\n \"Col1\": [\"cooking\", \"children\", \"web\"],\n \"Col2\": [\"Everyday Italian\", \"Harry Potter\", \"Learning XML\"],\n \"Col3\": [\"Giada De Laurentiis\", \"J K. Rowling\", \"Erik T. 
Ray\"],\n \"Col4\": [2005, 2005, 2003],\n \"Col5\": [30.00, 29.99, 39.95],\n }\n )\n\n tm.assert_frame_equal(df_file, df_expected)\n\n\ndef test_names_option_wrong_length(datapath, parser):\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n\n with pytest.raises(ValueError, match=(\"names does not match length\")):\n read_xml(filename, names=[\"Col1\", \"Col2\", \"Col3\"], parser=parser)\n\n\ndef test_names_option_wrong_type(datapath, parser):\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n\n with pytest.raises(TypeError, match=(\"is not a valid type for names\")):\n read_xml(filename, names=\"Col1, Col2, Col3\", parser=parser)\n\n\n# ENCODING\n\n\ndef test_wrong_encoding(datapath, parser):\n filename = datapath(\"io\", \"data\", \"xml\", \"baby_names.xml\")\n with pytest.raises(UnicodeDecodeError, match=(\"'utf-8' codec can't decode\")):\n read_xml(filename, parser=parser)\n\n\ndef test_utf16_encoding(datapath, parser):\n filename = datapath(\"io\", \"data\", \"xml\", \"baby_names.xml\")\n with pytest.raises(\n UnicodeError,\n match=(\n \"UTF-16 stream does not start with BOM|\"\n \"'utf-16-le' codec can't decode byte\"\n ),\n ):\n read_xml(filename, encoding=\"UTF-16\", parser=parser)\n\n\ndef test_unknown_encoding(datapath, parser):\n filename = datapath(\"io\", \"data\", \"xml\", \"baby_names.xml\")\n with pytest.raises(LookupError, match=(\"unknown encoding: UFT-8\")):\n read_xml(filename, encoding=\"UFT-8\", parser=parser)\n\n\ndef test_ascii_encoding(datapath, parser):\n filename = datapath(\"io\", \"data\", \"xml\", \"baby_names.xml\")\n with pytest.raises(UnicodeDecodeError, match=(\"'ascii' codec can't decode byte\")):\n read_xml(filename, encoding=\"ascii\", parser=parser)\n\n\[email protected]_if_no(\"lxml\")\ndef test_parser_consistency_with_encoding(datapath):\n filename = datapath(\"io\", \"data\", \"xml\", \"baby_names.xml\")\n df_lxml = read_xml(filename, parser=\"lxml\", encoding=\"ISO-8859-1\")\n df_etree = read_xml(filename, parser=\"etree\", encoding=\"iso-8859-1\")\n\n tm.assert_frame_equal(df_lxml, df_etree)\n\n\[email protected]_if_no(\"lxml\")\ndef test_wrong_encoding_for_lxml():\n # GH#45133\n data = \"\"\"<data>\n <row>\n <a>c</a>\n </row>\n</data>\n\"\"\"\n with pytest.raises(TypeError, match=\"encoding None\"):\n read_xml(StringIO(data), parser=\"lxml\", encoding=None)\n\n\ndef test_none_encoding_etree():\n # GH#45133\n data = \"\"\"<data>\n <row>\n <a>c</a>\n </row>\n</data>\n\"\"\"\n result = read_xml(StringIO(data), parser=\"etree\", encoding=None)\n expected = DataFrame({\"a\": [\"c\"]})\n tm.assert_frame_equal(result, expected)\n\n\n# PARSER\n\n\[email protected]_if_installed(\"lxml\")\ndef test_default_parser_no_lxml(datapath):\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n\n with pytest.raises(\n ImportError, match=(\"lxml not found, please install or use the etree parser.\")\n ):\n read_xml(filename)\n\n\ndef test_wrong_parser(datapath):\n filename = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n\n with pytest.raises(\n ValueError, match=(\"Values for parser can only be lxml or etree.\")\n ):\n read_xml(filename, parser=\"bs4\")\n\n\n# STYLESHEET\n\n\[email protected]_if_no(\"lxml\")\ndef test_stylesheet_file(datapath):\n kml = datapath(\"io\", \"data\", \"xml\", \"cta_rail_lines.kml\")\n xsl = datapath(\"io\", \"data\", \"xml\", \"flatten_doc.xsl\")\n\n df_style = read_xml(\n kml,\n xpath=\".//k:Placemark\",\n namespaces={\"k\": \"http://www.opengis.net/kml/2.2\"},\n stylesheet=xsl,\n )\n\n 
tm.assert_frame_equal(df_kml, df_style)\n\n\ndef test_read_xml_passing_as_positional_deprecated(datapath, parser):\n # GH#45133\n kml = datapath(\"io\", \"data\", \"xml\", \"cta_rail_lines.kml\")\n\n with tm.assert_produces_warning(FutureWarning, match=\"keyword-only\"):\n read_xml(\n kml,\n \".//k:Placemark\",\n namespaces={\"k\": \"http://www.opengis.net/kml/2.2\"},\n parser=parser,\n )\n\n\[email protected]_if_no(\"lxml\")\ndef test_stylesheet_file_like(datapath, mode):\n kml = datapath(\"io\", \"data\", \"xml\", \"cta_rail_lines.kml\")\n xsl = datapath(\"io\", \"data\", \"xml\", \"flatten_doc.xsl\")\n\n with open(xsl, mode) as f:\n df_style = read_xml(\n kml,\n xpath=\".//k:Placemark\",\n namespaces={\"k\": \"http://www.opengis.net/kml/2.2\"},\n stylesheet=f,\n )\n\n tm.assert_frame_equal(df_kml, df_style)\n\n\[email protected]_if_no(\"lxml\")\ndef test_stylesheet_io(datapath, mode):\n kml = datapath(\"io\", \"data\", \"xml\", \"cta_rail_lines.kml\")\n xsl = datapath(\"io\", \"data\", \"xml\", \"flatten_doc.xsl\")\n\n xsl_obj: BytesIO | StringIO\n\n with open(xsl, mode) as f:\n if mode == \"rb\":\n xsl_obj = BytesIO(f.read())\n else:\n xsl_obj = StringIO(f.read())\n\n df_style = read_xml(\n kml,\n xpath=\".//k:Placemark\",\n namespaces={\"k\": \"http://www.opengis.net/kml/2.2\"},\n stylesheet=xsl_obj,\n )\n\n tm.assert_frame_equal(df_kml, df_style)\n\n\[email protected]_if_no(\"lxml\")\ndef test_stylesheet_buffered_reader(datapath, mode):\n kml = datapath(\"io\", \"data\", \"xml\", \"cta_rail_lines.kml\")\n xsl = datapath(\"io\", \"data\", \"xml\", \"flatten_doc.xsl\")\n\n with open(xsl, mode) as f:\n xsl_obj = f.read()\n\n df_style = read_xml(\n kml,\n xpath=\".//k:Placemark\",\n namespaces={\"k\": \"http://www.opengis.net/kml/2.2\"},\n stylesheet=xsl_obj,\n )\n\n tm.assert_frame_equal(df_kml, df_style)\n\n\[email protected]_if_no(\"lxml\")\ndef test_not_stylesheet(datapath):\n from lxml.etree import XSLTParseError\n\n kml = datapath(\"io\", \"data\", \"xml\", \"cta_rail_lines.kml\")\n xsl = datapath(\"io\", \"data\", \"xml\", \"books.xml\")\n\n with pytest.raises(XSLTParseError, match=(\"document is not a stylesheet\")):\n read_xml(kml, stylesheet=xsl)\n\n\[email protected]_if_no(\"lxml\")\ndef test_incorrect_xsl_syntax(datapath):\n from lxml.etree import XMLSyntaxError\n\n xsl = \"\"\"\\\n<xsl:stylesheet version=\"1.0\" xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\"\n xmlns:k=\"http://www.opengis.net/kml/2.2\"/>\n <xsl:output method=\"xml\" omit-xml-declaration=\"yes\"\n cdata-section-elements=\"k:description\" indent=\"yes\"/>\n <xsl:strip-space elements=\"*\"/>\n\n <xsl:template match=\"node()|@*\">\n <xsl:copy>\n <xsl:apply-templates select=\"node()|@*\"/>\n </xsl:copy>\n </xsl:template>\n\n <xsl:template match=\"k:MultiGeometry|k:LineString\">\n <xsl:apply-templates select='*'/>\n </xsl:template>\n\n <xsl:template match=\"k:description|k:Snippet|k:Style\"/>\n</xsl:stylesheet>\"\"\"\n\n kml = datapath(\"io\", \"data\", \"xml\", \"cta_rail_lines.kml\")\n\n with pytest.raises(\n XMLSyntaxError, match=(\"Extra content at the end of the document\")\n ):\n read_xml(kml, stylesheet=xsl)\n\n\[email protected]_if_no(\"lxml\")\ndef test_incorrect_xsl_eval(datapath):\n from lxml.etree import XSLTParseError\n\n xsl = \"\"\"\\\n<xsl:stylesheet version=\"1.0\" xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\"\n xmlns:k=\"http://www.opengis.net/kml/2.2\">\n <xsl:output method=\"xml\" omit-xml-declaration=\"yes\"\n cdata-section-elements=\"k:description\" indent=\"yes\"/>\n 
<xsl:strip-space elements=\"*\"/>\n\n <xsl:template match=\"node(*)|@*\">\n <xsl:copy>\n <xsl:apply-templates select=\"node()|@*\"/>\n </xsl:copy>\n </xsl:template>\n\n <xsl:template match=\"k:MultiGeometry|k:LineString\">\n <xsl:apply-templates select='*'/>\n </xsl:template>\n\n <xsl:template match=\"k:description|k:Snippet|k:Style\"/>\n</xsl:stylesheet>\"\"\"\n\n kml = datapath(\"io\", \"data\", \"xml\", \"cta_rail_lines.kml\")\n\n with pytest.raises(XSLTParseError, match=(\"failed to compile\")):\n read_xml(kml, stylesheet=xsl)\n\n\[email protected]_if_no(\"lxml\")\ndef test_incorrect_xsl_apply(datapath):\n from lxml.etree import XSLTApplyError\n\n xsl = \"\"\"\\\n<xsl:stylesheet version=\"1.0\" xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\">\n <xsl:output method=\"xml\" encoding=\"utf-8\" indent=\"yes\" />\n <xsl:strip-space elements=\"*\"/>\n\n <xsl:template match=\"@*|node()\">\n <xsl:copy>\n <xsl:copy-of select=\"document('non_existent.xml')/*\"/>\n </xsl:copy>\n </xsl:template>\n</xsl:stylesheet>\"\"\"\n\n kml = datapath(\"io\", \"data\", \"xml\", \"cta_rail_lines.kml\")\n\n with pytest.raises(XSLTApplyError, match=(\"Cannot resolve URI\")):\n read_xml(kml, stylesheet=xsl)\n\n\[email protected]_if_no(\"lxml\")\ndef test_wrong_stylesheet():\n from lxml.etree import XMLSyntaxError\n\n kml = os.path.join(\"data\", \"xml\", \"cta_rail_lines.kml\")\n xsl = os.path.join(\"data\", \"xml\", \"flatten.xsl\")\n\n with pytest.raises(\n XMLSyntaxError,\n match=(\"Start tag expected, '<' not found\"),\n ):\n read_xml(kml, stylesheet=xsl)\n\n\[email protected]_if_no(\"lxml\")\ndef test_stylesheet_file_close(datapath, mode):\n kml = datapath(\"io\", \"data\", \"xml\", \"cta_rail_lines.kml\")\n xsl = datapath(\"io\", \"data\", \"xml\", \"flatten_doc.xsl\")\n\n xsl_obj: BytesIO | StringIO\n\n with open(xsl, mode) as f:\n if mode == \"rb\":\n xsl_obj = BytesIO(f.read())\n else:\n xsl_obj = StringIO(f.read())\n\n read_xml(kml, stylesheet=xsl_obj)\n\n assert not f.closed\n\n\[email protected]_if_no(\"lxml\")\ndef test_stylesheet_with_etree():\n kml = os.path.join(\"data\", \"xml\", \"cta_rail_lines.kml\")\n xsl = os.path.join(\"data\", \"xml\", \"flatten_doc.xsl\")\n\n with pytest.raises(\n ValueError, match=(\"To use stylesheet, you need lxml installed\")\n ):\n read_xml(kml, parser=\"etree\", stylesheet=xsl)\n\n\[email protected]_if_no(\"lxml\")\[email protected](\"val\", [\"\", b\"\"])\ndef test_empty_stylesheet(val):\n from lxml.etree import XMLSyntaxError\n\n kml = os.path.join(\"data\", \"xml\", \"cta_rail_lines.kml\")\n\n with pytest.raises(\n XMLSyntaxError, match=(\"Document is empty|Start tag expected, '<' not found\")\n ):\n read_xml(kml, stylesheet=val)\n\n\[email protected]\[email protected]_if_no(\"lxml\")\ndef test_online_stylesheet():\n xml = \"https://www.w3schools.com/xml/cdcatalog_with_xsl.xml\"\n xsl = \"https://www.w3schools.com/xml/cdcatalog.xsl\"\n\n df_xsl = read_xml(\n xml,\n xpath=\".//tr[td and position() <= 6]\",\n names=[\"title\", \"artist\"],\n stylesheet=xsl,\n )\n\n df_expected = DataFrame(\n {\n \"title\": {\n 0: \"Empire Burlesque\",\n 1: \"Hide your heart\",\n 2: \"Greatest Hits\",\n 3: \"Still got the blues\",\n 4: \"Eros\",\n },\n \"artist\": {\n 0: \"Bob Dylan\",\n 1: \"Bonnie Tyler\",\n 2: \"Dolly Parton\",\n 3: \"Gary Moore\",\n 4: \"Eros Ramazzotti\",\n },\n }\n )\n\n tm.assert_frame_equal(df_expected, df_xsl)\n\n\n# COMPRESSION\n\n\ndef test_compression_read(parser, compression_only):\n with tm.ensure_clean() as path:\n geom_df.to_xml(path, index=False, 
parser=parser, compression=compression_only)\n\n xml_df = read_xml(path, parser=parser, compression=compression_only)\n\n tm.assert_frame_equal(xml_df, geom_df)\n\n\ndef test_wrong_compression(parser, compression, compression_only):\n actual_compression = compression\n attempted_compression = compression_only\n\n if actual_compression == attempted_compression:\n return\n\n errors = {\n \"bz2\": (OSError, \"Invalid data stream\"),\n \"gzip\": (OSError, \"Not a gzipped file\"),\n \"zip\": (BadZipFile, \"File is not a zip file\"),\n }\n zstd = import_optional_dependency(\"zstandard\", errors=\"ignore\")\n if zstd is not None:\n errors[\"zstd\"] = (zstd.ZstdError, \"Unknown frame descriptor\")\n lzma = import_optional_dependency(\"lzma\", errors=\"ignore\")\n if lzma is not None:\n errors[\"xz\"] = (LZMAError, \"Input format not supported by decoder\")\n error_cls, error_str = errors[attempted_compression]\n\n with tm.ensure_clean() as path:\n geom_df.to_xml(path, parser=parser, compression=actual_compression)\n\n with pytest.raises(error_cls, match=error_str):\n read_xml(path, parser=parser, compression=attempted_compression)\n\n\ndef test_unsuported_compression(parser):\n with pytest.raises(ValueError, match=\"Unrecognized compression type\"):\n with tm.ensure_clean() as path:\n read_xml(path, parser=parser, compression=\"7z\")\n\n\n# STORAGE OPTIONS\n\n\[email protected]\[email protected]_if_no(\"s3fs\")\[email protected]_if_no(\"lxml\")\[email protected](\n os.environ.get(\"PANDAS_CI\", \"0\") == \"1\",\n reason=\"2022.1.17: Hanging on the CI min versions build.\",\n)\ndef test_s3_parser_consistency():\n # Python Software Foundation (2019 IRS-990 RETURN)\n s3 = \"s3://irs-form-990/201923199349319487_public.xml\"\n\n df_lxml = read_xml(\n s3,\n xpath=\".//irs:Form990PartVIISectionAGrp\",\n namespaces={\"irs\": \"http://www.irs.gov/efile\"},\n parser=\"lxml\",\n storage_options={\"anon\": True},\n )\n\n df_etree = read_xml(\n s3,\n xpath=\".//irs:Form990PartVIISectionAGrp\",\n namespaces={\"irs\": \"http://www.irs.gov/efile\"},\n parser=\"etree\",\n storage_options={\"anon\": True},\n )\n\n tm.assert_frame_equal(df_lxml, df_etree)\n"
] | [
[
"pandas._testing.assert_produces_warning",
"pandas.DataFrame",
"pandas.io.xml.read_xml",
"pandas.compat._optional.import_optional_dependency",
"pandas._testing.assert_frame_equal",
"pandas._testing.ensure_clean",
"pandas.util._test_decorators.skip_if_installed",
"pandas.util._test_decorators.skip_if_no"
]
] |
davestanley/animated-succotash | [
"174f08063c222ead153bf9db67c75e2843301912"
] | [
"app/utils_EDAplots.py"
] | [
"\n\n\ndef plotbar_train_dev(myvar,Ntrain,Ndev,varname,xlabel='Article #'):\n \"\"\"Old version with limited axis labels\"\"\"\n # Import fig stuff\n import matplotlib.pyplot as plt\n from matplotlib.pyplot import figure\n\n figure(num=None, figsize=(15, 4),facecolor='w', edgecolor='k')\n barlist = plt.bar(range(len(myvar)), myvar, align='center', alpha=0.5)\n plt.xlabel(xlabel)\n plt.ylabel('{} per article'.format(varname))\n for i in range(Ntrain,Ntrain+Ndev):\n barlist[i].set_color('r')\n plt.show()\n\ndef plotbar_train_dev2(myvar,Ntrain,Ndev,ylabel='value',xlabel='Article #'):\n # Import fig stuff\n import matplotlib.pyplot as plt\n from matplotlib.pyplot import figure\n\n figure(num=None, figsize=(15, 4),facecolor='w', edgecolor='k')\n barlist = plt.bar(range(len(myvar)), myvar, align='center', alpha=0.5)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n for i in range(Ntrain,Ntrain+Ndev):\n barlist[i].set_color('r')\n plt.show()\n\ndef plot_train_dev(myvar,Ntrain,Ndev,varname,xlabel='Article #'):\n # Import fig stuff\n import matplotlib.pyplot as plt\n from matplotlib.pyplot import figure\n\n figure(num=None, figsize=(15, 4),facecolor='w', edgecolor='k')\n barlist = plt.plot(range(len(myvar)), myvar,)\n plt.xlabel(xlabel)\n plt.ylabel('{} per article'.format(varname))\n for i in range(Ntrain,Ntrain+Ndev):\n barlist[i].set_color('r')\n plt.show()\n\ndef plothist_train_dev(myvar,Ntrain,Ndev,varname,ylabel='N Articles',devbins=30):\n \"\"\"Old version with limited axis labels\"\"\"\n # Import fig stuff\n import matplotlib.pyplot as plt\n from matplotlib.pyplot import figure\n import statistics\n\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=False,figsize=(15, 4));\n ax1.hist(myvar[0:Ntrain-1], bins=30); # arguments are passed to np.histogram\n ax1.set_title(\"Narticles={}, median={}, mean={}\".format(str(Ntrain),'{0:.2f}'.format(statistics.median(myvar[0:Ntrain-1])),'{0:.2f}'.format(statistics.mean(myvar[0:Ntrain-1]))));\n ax1.set_ylabel('N Articles');\n ax1.set_xlabel('{}'.format(varname));\n\n ax2.hist(myvar[Ntrain:], bins=devbins); # arguments are passed to np.histogram\n ax2.set_title(\"Narticles={}, median={}, mean={}\".format(str(Ndev),'{0:.2f}'.format(statistics.median(myvar[Ntrain:])),'{0:.2f}'.format(statistics.mean(myvar[Ntrain:]))));\n ax2.set_xlabel('{}'.format(varname));\n return {'ax1': ax1, 'ax2':ax2}\n\n\ndef plothist_train_dev2(myvar,Ntrain,Ndev,xlabel='value',ylabel='N Articles',devbins=30):\n # Import fig stuff\n import matplotlib.pyplot as plt\n from matplotlib.pyplot import figure\n import statistics\n\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=False,figsize=(15, 4));\n ax1.hist(myvar[0:Ntrain-1], bins=30); # arguments are passed to np.histogram\n ax1.set_title(\"Narticles={}, median={}, mean={}\".format(str(Ntrain),'{0:.2f}'.format(statistics.median(myvar[0:Ntrain-1])),'{0:.2f}'.format(statistics.mean(myvar[0:Ntrain-1]))));\n ax1.set_ylabel('N Articles');\n ax1.set_xlabel(xlabel);\n\n ax2.hist(myvar[Ntrain:], bins=devbins); # arguments are passed to np.histogram\n ax2.set_title(\"Narticles={}, median={}, mean={}\".format(str(Ndev),'{0:.2f}'.format(statistics.median(myvar[Ntrain:])),'{0:.2f}'.format(statistics.mean(myvar[Ntrain:]))));\n ax2.set_xlabel(xlabel);\n return {'ax1': ax1, 'ax2':ax2}\n"
] | [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show"
]
] |
Vivek2018/OSM_Building-Detection-Custom-Repo | [
"278b1f5a46e49cb547162d495979056c36945e43"
] | [
"Archive/floodFillPrototype.py"
] | [
"import cv2\nimport queue\nimport numpy as np\nimport math\n\nTHRESHOLD = 25\n\nFILENAME = 'diff_hue'\nimage = cv2.imread(FILENAME + '.png')\nheight = image.shape[0]\nwidth = image.shape[1]\n\n# used for smoothing out image.\nkernel = np.ones((5, 5), np.float32) / 25\n\ndef RGB_distance_threshold(first_rgb, second_rgb):\n return math.sqrt(np.sum((np.absolute(first_rgb - second_rgb))**2))\n\nx_max = 0\ny_max = 0\nx_min = width - 1\ny_min = height - 1\n\ndef flood_fill(image, x_loc, y_loc, target_color, replacement_color):\n image[y_loc, x_loc] = replacement_color\n pixel_queue = queue.Queue()\n pixel_queue.put((x_loc, y_loc))\n width = len(image[0])\n height = len(image)\n while not pixel_queue.empty():\n global x_max, y_max, x_min, y_min\n current_x, current_y = pixel_queue.get()\n\n if current_x > 0:\n left_rgb = image[current_y][current_x - 1]\n if RGB_distance_threshold(left_rgb, target_color) < THRESHOLD and not np.array_equal(image[current_y][current_x - 1], replacement_color):\n image[current_y][current_x - 1] = replacement_color\n pixel_queue.put((current_x - 1, current_y))\n if (x_min > current_x - 1):\n x_min = current_x - 1\n\n if current_x < width - 1:\n right_rgb = image[current_y][current_x + 1]\n if RGB_distance_threshold(right_rgb, target_color) < THRESHOLD and not np.array_equal(image[current_y][current_x + 1], replacement_color):\n image[current_y][current_x + 1] = replacement_color\n pixel_queue.put((current_x + 1, current_y))\n if (x_max < current_x + 1):\n x_max = current_x + 1\n\n if current_y < height - 1:\n up_rgb = image[current_y + 1][current_x]\n if RGB_distance_threshold(up_rgb, target_color) < THRESHOLD and not np.array_equal(image[current_y + 1][current_x], replacement_color):\n image[current_y + 1][current_x] = replacement_color\n pixel_queue.put((current_x, current_y + 1))\n if (y_max < current_y + 1):\n y_max = current_y + 1\n\n if current_y > 0:\n down_rgb = image[current_y - 1][current_x]\n if RGB_distance_threshold(down_rgb, target_color) < THRESHOLD and not np.array_equal(image[current_y - 1][current_x], replacement_color):\n image[current_y - 1][current_x] = replacement_color\n pixel_queue.put((current_x, current_y - 1))\n if (y_min > current_y - 1):\n y_min = current_y - 1\n return image\n\n\nx_global = 0\ny_global = 0\ndef register_click(event,x,y,flags,param):\n if event == cv2.EVENT_LBUTTONDOWN:\n global x_global, y_global\n x_global = int(x)\n y_global = int(y)\n print(x_global, y_global)\n\n # image indexing returns something weird so this fixed it\n target_color = np.array(image[y_global][x_global].tolist())\n # green color\n replace_color = np.array([0, 255, 0])\n\n image2 = flood_fill(image, x_global, y_global, target_color, replace_color)\n\n cv2.imshow('image2', image2)\n cv2.imwrite(FILENAME + 'detected.PNG', image2)\n\n\n\ncv2.namedWindow('image')\ncv2.setMouseCallback('image', register_click)\ncv2.imshow('image', image)\n\n#print(x_global, y_global)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\n"
] | [
[
"numpy.array",
"numpy.ones",
"numpy.array_equal",
"numpy.absolute"
]
] |
ComplexCity/policosm | [
"548d4d694df49603f91cd45af7fe50ced79aea68"
] | [
"examples/drawBuildingsMatplotlib.py"
] | [
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''\nCreated in February 2017 in ComplexCity Lab\n\n@author: github.com/fpfaende\n\nwhat it does\nclean roads graph created from osm file\n\nparameters\ngraph\n\nhow it works\n1 - remove nodes without geographic informations (latitude or longitude)\n2 - remove self referencing edges (loop on itself)\n3 - remove isolated nodes (degree is 0 )\n\nreturn\ngraph minus alone or non-conform nodes\n\n'''\nimport sys\nsys.path.insert(0, '/Users/fabien/workspace/github/policosm')\n\nimport matplotlib.pyplot as plt\nfrom descartes.patch import PolygonPatch\nimport colorlover as cl\nfrom shapely.geometry import Polygon, shape\nimport random\n\nfrom policosm.extractors.buildingsPolygons import *\nimport policosm.geoNetworks as pocogeo\nfrom policosm.functions import *\n\ndef drawBuildings(polygons, displayRatio=1.0):\n\tfig = plt.figure()\n\tax = fig.add_subplot(111)\t\n\tfor feature in polygons['features']:\n\t\tif displayRatio < 1.0:\n\t\t\tif random.random() >= displayRatio:\n\t\t\t\tcontinue\n\t\tpolygon = shape(feature['geometry'])\n\t\tpatch = PolygonPatch(polygon, facecolor='#FD7400', edgecolor='#FD7400', alpha=0.5, zorder=1)\n\t\tax.add_patch(patch)\n\t\n\tbounds = getBuildingsBoundaries(polygons)\n\tminx, miny, maxx, maxy = bounds\n\tax.set_xlim(minx,maxx)\n\tax.set_ylim(miny,maxy)\n\n\tplt.show()\n\nif __name__ == \"__main__\":\n\tfilename = '/Volumes/Fabien/Research/cities-pictures/data/France/1-pbf/74173.pbf'\n\tpolygons = buildingsPolygons(filename)\n\tdrawBuildings(polygons)"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
makinarocks/Hierarchical-Actor-Critic-HAC-PyTorch | [
"1533356e8ce243d9f589a80b18b6a5016ddd52eb"
] | [
"HAC.py"
] | [
"import torch\nimport numpy as np\nfrom DDPG import DDPG\nfrom utils import ReplayBuffer\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nclass HAC:\n def __init__(self, k_level, H, state_dim, action_dim, render, threshold, \n action_bounds, action_offset, state_bounds, state_offset, lr):\n \n # adding lowest level\n self.HAC = [DDPG(state_dim, action_dim, action_bounds, action_offset, lr, H)]\n self.replay_buffer = [ReplayBuffer()]\n \n # adding remaining levels\n for _ in range(k_level-1):\n self.HAC.append(DDPG(state_dim, state_dim, state_bounds, state_offset, lr, H))\n self.replay_buffer.append(ReplayBuffer())\n \n # set some parameters\n self.k_level = k_level\n self.H = H\n self.action_dim = action_dim\n self.state_dim = state_dim\n self.threshold = threshold\n self.render = render\n \n # logging parameters\n self.goals = [None]*self.k_level\n self.reward = 0\n self.timestep = 0\n \n def set_parameters(self, lamda, gamma, action_clip_low, action_clip_high, \n state_clip_low, state_clip_high, exploration_action_noise, exploration_state_noise):\n \n self.lamda = lamda\n self.gamma = gamma\n self.action_clip_low = action_clip_low\n self.action_clip_high = action_clip_high\n self.state_clip_low = state_clip_low\n self.state_clip_high = state_clip_high\n self.exploration_action_noise = exploration_action_noise\n self.exploration_state_noise = exploration_state_noise\n \n \n def check_goal(self, state, goal, threshold):\n for i in range(self.state_dim):\n if abs(state[i]-goal[i]) > threshold[i]:\n return False\n return True\n \n \n def run_HAC(self, env, i_level, state, goal, is_subgoal_test):\n next_state = None\n done = None\n goal_transitions = []\n \n # logging updates\n self.goals[i_level] = goal\n \n # H attempts\n for _ in range(self.H):\n # if this is a subgoal test, then next/lower level goal has to be a subgoal test\n is_next_subgoal_test = is_subgoal_test\n \n action = self.HAC[i_level].select_action(state, goal)\n \n # <================ high level policy ================>\n if i_level > 0:\n # add noise or take random action if not subgoal testing\n if not is_subgoal_test:\n if np.random.random_sample() > 0.2:\n action = action + np.random.normal(0, self.exploration_state_noise)\n action = action.clip(self.state_clip_low, self.state_clip_high)\n else:\n action = np.random.uniform(self.state_clip_low, self.state_clip_high)\n \n # Determine whether to test subgoal (action)\n if np.random.random_sample() < self.lamda:\n is_next_subgoal_test = True\n \n # Pass subgoal to lower level \n next_state, done = self.run_HAC(env, i_level-1, state, action, is_next_subgoal_test)\n \n # if subgoal was tested but not achieved, add subgoal testing transition\n if is_next_subgoal_test and not self.check_goal(action, next_state, self.threshold):\n self.replay_buffer[i_level].add((state, action, -self.H, next_state, goal, 0.0, float(done)))\n \n # for hindsight action transition\n action = next_state\n \n # <================ low level policy ================>\n else:\n # add noise or take random action if not subgoal testing\n if not is_subgoal_test:\n if np.random.random_sample() > 0.2:\n action = action + np.random.normal(0, self.exploration_action_noise)\n action = action.clip(self.action_clip_low, self.action_clip_high)\n else:\n action = np.random.uniform(self.action_clip_low, self.action_clip_high)\n \n # take primitive action\n next_state, rew, done, _ = env.step(action)\n \n if self.render:\n # env.render()\n \n if self.k_level == 2:\n 
env.unwrapped.render_goal(self.goals[0], self.goals[1])\n elif self.k_level == 3:\n env.unwrapped.render_goal_2(self.goals[0], self.goals[1], self.goals[2])\n \n for _ in range(1000000):\n continue\n \n # this is for logging\n self.reward += rew\n self.timestep +=1\n \n # <================ finish one step/transition ================>\n \n # check if goal is achieved\n goal_achieved = self.check_goal(next_state, goal, self.threshold)\n \n # hindsight action transition\n if goal_achieved:\n self.replay_buffer[i_level].add((state, action, 0.0, next_state, goal, 0.0, float(done)))\n else:\n self.replay_buffer[i_level].add((state, action, -1.0, next_state, goal, self.gamma, float(done)))\n \n # copy for goal transition\n goal_transitions.append([state, action, -1.0, next_state, None, self.gamma, float(done)])\n \n state = next_state\n \n if done or goal_achieved:\n break\n \n \n # <================ finish H attempts ================>\n \n # hindsight goal transition\n # last transition reward and discount is 0\n goal_transitions[-1][2] = 0.0\n goal_transitions[-1][5] = 0.0\n for transition in goal_transitions:\n # last state is goal for all transitions\n transition[4] = next_state\n self.replay_buffer[i_level].add(tuple(transition))\n \n return next_state, done\n \n \n def update(self, n_iter, batch_size):\n for i in range(self.k_level):\n self.HAC[i].update(self.replay_buffer[i], n_iter, batch_size)\n \n \n def save(self, directory, name):\n for i in range(self.k_level):\n self.HAC[i].save(directory, name+'_level_{}'.format(i))\n \n \n def load(self, directory, name):\n for i in range(self.k_level):\n self.HAC[i].load(directory, name+'_level_{}'.format(i))\n \n \n \n \n \n \n"
] | [
[
"numpy.random.uniform",
"numpy.random.normal",
"torch.cuda.is_available",
"numpy.random.random_sample"
]
] |
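The HAC entry above stores two kinds of hindsight data: hindsight action transitions during the H attempts, and a batch of hindsight goal transitions that is relabeled only after the attempts finish. Below is a minimal sketch of that relabeling step, assuming the same transition layout used in the entry, `[state, action, reward, next_state, goal, discount, done]`; the helper name is mine, not from the repository.

```python
# Minimal sketch of hindsight goal relabeling (assumed transition layout:
# [state, action, reward, next_state, goal, discount, done]).
import copy

def relabel_with_hindsight_goal(goal_transitions):
    """Return a copy of the collected transitions in which the finally
    reached state is substituted as the goal, and the last step is treated
    as a successful, terminal transition under that new goal."""
    relabeled = copy.deepcopy(goal_transitions)
    achieved_goal = relabeled[-1][3]   # next_state of the final attempt
    relabeled[-1][2] = 0.0             # reward 0: the relabeled goal was reached
    relabeled[-1][5] = 0.0             # discount 0: episode ends at this step
    for transition in relabeled:
        transition[4] = achieved_goal  # every stored step now targets that state
    return relabeled
```

The relabeled copies are then pushed into the same replay buffer as the original transitions, which is exactly what the `run_HAC` loop above does with its `goal_transitions` list.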
baicenxiao/Shaping-Advice | [
"a5de626792dc691f301ae6c5c4384931318c0aba"
] | [
"maddpg/trainer/maddpg_spread.py"
] | [
"import numpy as np\nfrom numpy import linalg as LA\nimport random\nimport tensorflow as tf\nimport maddpg.common.tf_util as U\n\nfrom maddpg.common.distributions import make_pdtype\nfrom maddpg import AgentTrainer\nfrom maddpg.trainer.replay_buffer import ReplayBuffer\n\n\ndef discount_with_dones(rewards, dones, gamma):\n discounted = []\n r = 0\n for reward, done in zip(rewards[::-1], dones[::-1]):\n r = reward + gamma*r\n r = r*(1.-done)\n discounted.append(r)\n return discounted[::-1]\n\ndef make_update_exp(vals, target_vals):\n polyak = 1.0 - 1e-2\n expression = []\n for var, var_target in zip(sorted(vals, key=lambda v: v.name), sorted(target_vals, key=lambda v: v.name)):\n expression.append(var_target.assign(polyak * var_target + (1.0-polyak) * var))\n expression = tf.group(*expression)\n return U.function([], [], updates=[expression])\n\ndef p_train(make_obs_ph_n, act_space_n, p_index, p_func, q_func, optimizer, grad_norm_clipping=None, local_q_func=False, num_units=64, scope=\"trainer\", reuse=None):\n with tf.variable_scope(scope, reuse=reuse):\n # create distribtuions\n act_pdtype_n = [make_pdtype(act_space) for act_space in act_space_n]\n\n # set up placeholders\n obs_ph_n = make_obs_ph_n\n act_ph_n = [act_pdtype_n[i].sample_placeholder([None], name=\"action\"+str(i)) for i in range(len(act_space_n))]\n p_input = obs_ph_n[p_index]\n\n print('-'*50, int(act_pdtype_n[p_index].param_shape()[0]))\n p = p_func(p_input, int(act_pdtype_n[p_index].param_shape()[0]), scope=\"p_func\", num_units=num_units)\n p_func_vars = U.scope_vars(U.absolute_scope_name(\"p_func\"))\n\n # wrap parameters in distribution\n act_pd = act_pdtype_n[p_index].pdfromflat(p)\n\n act_sample = act_pd.sample()\n p_reg = tf.reduce_mean(tf.square(act_pd.flatparam()))\n\n act_input_n = act_ph_n + []\n print(act_input_n)\n act_input_n[p_index] = act_pd.sample()\n print(act_input_n)\n q_input = tf.concat(obs_ph_n + act_input_n, 1)\n if local_q_func:\n q_input = tf.concat([obs_ph_n[p_index], act_input_n[p_index]], 1)\n q = q_func(q_input, 1, scope=\"q_func\", reuse=True, num_units=num_units)[:,0]\n pg_loss = -tf.reduce_mean(q)\n\n loss = pg_loss + p_reg * 1e-3\n\n optimize_expr = U.minimize_and_clip(optimizer, loss, p_func_vars, grad_norm_clipping)\n\n # Create callable functions\n train = U.function(inputs=obs_ph_n + act_ph_n, outputs=loss, updates=[optimize_expr])\n act = U.function(inputs=[obs_ph_n[p_index]], outputs=act_sample)\n p_values = U.function([obs_ph_n[p_index]], p)\n\n # target network\n target_p = p_func(p_input, int(act_pdtype_n[p_index].param_shape()[0]), scope=\"target_p_func\", num_units=num_units)\n target_p_func_vars = U.scope_vars(U.absolute_scope_name(\"target_p_func\"))\n update_target_p = make_update_exp(p_func_vars, target_p_func_vars)\n\n target_act_sample = act_pdtype_n[p_index].pdfromflat(target_p).sample()\n target_act = U.function(inputs=[obs_ph_n[p_index]], outputs=target_act_sample)\n\n return act, train, update_target_p, {'p_values': p_values, 'target_act': target_act}\n\ndef q_train(make_obs_ph_n, act_space_n, q_index, q_func, optimizer, grad_norm_clipping=None, local_q_func=False, scope=\"trainer\", reuse=None, num_units=64):\n with tf.variable_scope(scope, reuse=reuse):\n # create distribtuions\n act_pdtype_n = [make_pdtype(act_space) for act_space in act_space_n]\n\n # set up placeholders\n obs_ph_n = make_obs_ph_n\n act_ph_n = [act_pdtype_n[i].sample_placeholder([None], name=\"action\"+str(i)) for i in range(len(act_space_n))]\n target_ph = tf.placeholder(tf.float32, [None], 
name=\"target\")\n\n q_input = tf.concat(obs_ph_n + act_ph_n, 1)\n if local_q_func:\n q_input = tf.concat([obs_ph_n[q_index], act_ph_n[q_index]], 1)\n q = q_func(q_input, 1, scope=\"q_func\", num_units=num_units)[:,0]\n q_func_vars = U.scope_vars(U.absolute_scope_name(\"q_func\"))\n\n q_loss = tf.reduce_mean(tf.square(q - target_ph))\n\n # viscosity solution to Bellman differential equation in place of an initial condition\n q_reg = tf.reduce_mean(tf.square(q))\n loss = q_loss #+ 1e-3 * q_reg\n\n optimize_expr = U.minimize_and_clip(optimizer, loss, q_func_vars, grad_norm_clipping)\n\n # Create callable functions\n train = U.function(inputs=obs_ph_n + act_ph_n + [target_ph], outputs=loss, updates=[optimize_expr])\n q_values = U.function(obs_ph_n + act_ph_n, q)\n\n # target network\n target_q = q_func(q_input, 1, scope=\"target_q_func\", num_units=num_units)[:,0]\n target_q_func_vars = U.scope_vars(U.absolute_scope_name(\"target_q_func\"))\n update_target_q = make_update_exp(q_func_vars, target_q_func_vars)\n\n target_q_values = U.function(obs_ph_n + act_ph_n, target_q)\n\n return train, update_target_q, {'q_values': q_values, 'target_q_values': target_q_values}\n\n######################################################## Code for Computing SAM-NonUniform values\ndef a_2_pa(original_action):\n \"\"\"\n transform actions output by the policy, shape=(batch_size, 5), to \n actions in the 2d space, shape=(batch_size, 2)\n \"\"\"\n physical_action = np.zeros((original_action.shape[0],2))\n physical_action[:,0] = original_action[:,1] - original_action[:,2]\n physical_action[:,1] = original_action[:,3] - original_action[:,4]\n\n return physical_action\n\ndef In_P(v1, v2):\n \"\"\"\n Compute normalized inner product\n Input: v1, v2 shape=(batch_size,2)\n Output shape = (batch_size,)\n \"\"\"\n N_IP = np.sum(v1*v2, axis=1)\n normalize_factor = LA.norm(v1, axis=1) * LA.norm(v2, axis=1)\n return N_IP/(normalize_factor + 1e-6)\n\ndef Check_IN(actions, relative_LM_pos):\n \"\"\"\n Compute the normalized inner product between actions and relative postions to landmarks\n Input: one agent's actions (shape=batch_size,5) and its corresponding \n relative landmark positions, shape=(batch_size, n_anchor_landmarks * 2)\n Output: inner product values, shape=(batch_size, n_anchor_landmarks)\n \"\"\"\n n_LM = int(relative_LM_pos.shape[1]/2)\n check_values = np.zeros( (relative_LM_pos.shape[0], n_LM) )\n for ii in range(n_LM):\n check_values[:, ii] = In_P( a_2_pa(actions), relative_LM_pos[:, ii*2:(ii*2+2)] )\n \n return check_values\n\n\ndef SAM_NonUniform_potential(actions, relative_landmarks_pos, agent_index):\n \"\"\"\n Inputs:\n array actions, shape=(n_agents, batch_size, dimension_action) inputs all actions\n array relative_landmarks_pos, shape=(n_agents, batch_size, n_landmarks*2) inputs all relative_landmarks_pos\n agent_index for the current agent\n Return: SAM_NonUniform potentials, shape = (batch_size,)\n \"\"\"\n angle_value = 0\n ii = agent_index\n cos_ = Check_IN(actions[ii], relative_landmarks_pos[ii, :, ii*2:(ii*2+2)])\n angle_value += np.arccos(cos_)\n \n SAM_NonUniform_potentials = np.squeeze(-angle_value)\n \n return SAM_NonUniform_potentials \n\ndef SAM_NonUniform_b(actions_n, pre_actions_n, all_observations, all_pre_observations, pre_terminals, agent_index, gamma):\n \"\"\"\n We adopt look back method (equation 2 in the paper)\n Inputs:\n array actions_n, shape=(n_agents, batch_size, dimension_action), inputs all actions\n array pre_actions_n inputs all the previous actions\n array 
all_observations, shape=(n_agents, batch_size, dimension_observation), inputs all observations\n array all_pre_observations all the previous observations\n current agent index\n discount factor gamma\n Returns: \n SAM_NonUniform values\n \"\"\"\n \n if len(all_observations)==3:\n pre_values = SAM_NonUniform_potential(pre_actions_n, all_pre_observations[:,:,4:10], agent_index)\n pre_values[pre_terminals] = 0\n current_values = SAM_NonUniform_potential(actions_n, all_observations[:,:,4:10], agent_index)\n elif len(all_observations)==6:\n pre_values = SAM_NonUniform_potential(pre_actions_n, all_pre_observations[:,:,4:16], agent_index)\n pre_values[pre_terminals] = 0\n current_values = SAM_NonUniform_potential(actions_n, all_observations[:,:,4:16], agent_index)\n\n SAM_NonUniform_values = current_values - (1/gamma)*pre_values\n\n return SAM_NonUniform_values\n\n########################################################\n\nclass MADDPGAgentTrainer(AgentTrainer):\n def __init__(self, name, model, obs_shape_n, act_space_n, agent_index, args, local_q_func=False):\n self.name = name\n self.n = len(obs_shape_n)\n self.agent_index = agent_index\n self.args = args\n obs_ph_n = []\n for i in range(self.n):\n obs_ph_n.append(U.BatchInput(obs_shape_n[i], name=\"observation\"+str(i)).get())\n \n\n # Create all the functions necessary to train the model\n self.q_train, self.q_update, self.q_debug = q_train(\n scope=self.name,\n make_obs_ph_n=obs_ph_n,\n act_space_n=act_space_n,\n q_index=agent_index,\n q_func=model,\n optimizer=tf.train.AdamOptimizer(learning_rate=args.lr),\n grad_norm_clipping=0.5,\n local_q_func=local_q_func,\n num_units=args.num_units\n )\n self.act, self.p_train, self.p_update, self.p_debug = p_train(\n scope=self.name,\n make_obs_ph_n=obs_ph_n,\n act_space_n=act_space_n,\n p_index=agent_index,\n p_func=model,\n q_func=model,\n optimizer=tf.train.AdamOptimizer(learning_rate=args.lr),\n grad_norm_clipping=0.5,\n local_q_func=local_q_func,\n num_units=args.num_units\n )\n # Create experience buffer\n self.replay_buffer = ReplayBuffer(1e6)\n self.max_replay_buffer_len = args.batch_size * args.max_episode_len\n self.replay_sample_index = None\n\n def action(self, obs):\n return self.act(obs[None])[0]\n\n def experience(self, obs, act, rew, new_obs, done, terminal):\n # Store transition in the replay buffer.\n # self.replay_buffer.add(obs, act, rew, new_obs, float(done))\n self.replay_buffer.add(obs, act, rew, new_obs, float(done), terminal)\n\n def preupdate(self):\n self.replay_sample_index = None\n\n def update(self, agents, t):\n if len(self.replay_buffer) < self.max_replay_buffer_len: # replay buffer is not large enough\n return\n if not t % 100 == 0: # only update every 100 steps\n return\n\n self.replay_sample_index = self.replay_buffer.make_index(self.args.batch_size)\n # collect replay sample from all agents\n pre_obs_n = []\n pre_act_n = []\n pre_terminal_n = []\n\n obs_n = []\n obs_next_n = []\n act_n = []\n index = self.replay_sample_index\n\n look_back = True\n for i in range(self.n):\n obs, act, rew, obs_next, done = agents[i].replay_buffer.sample_index(index)\n obs_n.append(obs)\n obs_next_n.append(obs_next)\n act_n.append(act)\n if look_back:\n for i in range(self.n):\n pre_obs, pre_act, _, _, _, pre_terminal = agents[i].replay_buffer.sample_index_pre(index)\n pre_obs_n.append(pre_obs)\n pre_act_n.append(pre_act)\n pre_terminal_n.append(pre_terminal)\n obs, act, rew, obs_next, done = self.replay_buffer.sample_index(index)\n\n # train q network\n num_sample = 1\n target_q = 
0.0\n for i in range(num_sample):\n target_act_next_n = [agents[i].p_debug['target_act'](obs_next_n[i]) for i in range(self.n)]\n ############################################################# Apply SAM-NonUniform\n if look_back:\n SAM_NonUniform_rew = SAM_NonUniform_b(np.array(act_n), np.array(pre_act_n), \n np.array(obs_n), np.array(pre_obs_n), pre_terminal_n[0], \n self.agent_index, self.args.gamma)\n if len(target_act_next_n)==6:\n SAM_NonUniform_rew = 10*( SAM_NonUniform_rew)\n\n target_q_next = self.q_debug['target_q_values'](*(obs_next_n + target_act_next_n))\n if look_back:\n target_q += rew + SAM_NonUniform_rew + self.args.gamma * (1.0 - done) * target_q_next\n else:\n target_q += rew + self.args.gamma * (1.0 - done) * target_q_next\n #############################################################\n\n target_q /= num_sample\n q_loss = self.q_train(*(obs_n + act_n + [target_q]))\n\n # train p network\n p_loss = self.p_train(*(obs_n + act_n))\n\n self.p_update()\n self.q_update()\n\n return [q_loss, p_loss, np.mean(target_q), np.mean(rew), np.mean(target_q_next), np.std(target_q)]\n"
] | [
[
"tensorflow.square",
"numpy.array",
"numpy.arccos",
"numpy.linalg.norm",
"tensorflow.concat",
"numpy.zeros",
"tensorflow.train.AdamOptimizer",
"tensorflow.group",
"numpy.sum",
"numpy.mean",
"numpy.std",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"tensorflow.reduce_mean",
"numpy.squeeze"
]
] |
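The SAM-NonUniform code in the entry above adds a potential-based shaping term to the critic target in "look back" form: F_t = Φ_t − (1/γ)·Φ_{t−1}, with the previous potential zeroed for samples whose previous step ended an episode. A small numpy sketch of just that combination step, assuming the per-sample potentials have already been computed; the function name is mine.

```python
import numpy as np

def look_back_shaping(potential_now, potential_prev, prev_terminals, gamma):
    """Look-back shaping term F_t = phi_t - (1/gamma) * phi_{t-1}, with
    phi_{t-1} forced to zero on samples that follow an episode reset,
    mirroring `pre_values[pre_terminals] = 0` in the entry above."""
    potential_prev = np.where(np.asarray(prev_terminals, dtype=bool),
                              0.0, np.asarray(potential_prev, dtype=float))
    return np.asarray(potential_now, dtype=float) - (1.0 / gamma) * potential_prev

# Example: a batch of three samples, the second of which follows a reset.
f = look_back_shaping(np.array([-0.4, -1.2, -0.1]),
                      np.array([-0.5, -0.9, -0.2]),
                      [False, True, False],
                      gamma=0.95)
```

The resulting `f` is added to the environment reward when forming `target_q`, which is how `SAM_NonUniform_rew` is used in the `update` method above.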
will-yx/CellSeg-CRISP | [
"cf2270ae766fa378f2c83fe26f3c115e40670180"
] | [
"src/cvstitch_plane.py"
] | [
"# cvstitch.py\n# ---------------------------\n# Contains the logic for stitching masks. See class doc for details.\n\nimport numpy as np\nimport cv2\n\nimport itertools\nfrom collections import Counter\nfrom operator import itemgetter\n\nfrom scipy.ndimage.morphology import binary_fill_holes\n\nfrom ctypes import *\nfrom _ctypes import FreeLibrary\n\nimport matplotlib.pyplot as plt\n\ndef show(img):\n fig = plt.figure()\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n ax.imshow(img, aspect='equal')\n plt.show()\n\n\ndef showpair(a, b):\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 5), sharex=True, sharey=True)\n ax[0].imshow(a)\n ax[0].axis('off')\n \n ax[1].imshow(b)\n ax[1].axis('off')\n \n fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.95, bottom=0.05, left=0, right=1)\n plt.show()\n\ndef showfour(a, b, c, d):\n fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(8, 5), sharex=True, sharey=True)\n ax[0,0].imshow(a)\n ax[0,0].axis('off')\n \n ax[0,1].imshow(b)\n ax[0,1].axis('off')\n \n ax[1,0].imshow(c)\n ax[1,0].axis('off')\n \n ax[1,1].imshow(d)\n ax[1,1].axis('off')\n \n fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.95, bottom=0.05, left=0, right=1)\n plt.show()\n\n\nclass CVMaskStitcher():\n \"\"\"\n Implements basic stitching between mask subtiles of semi-uniform size (see constraints below). \n Initialized with the pixel overlap between masks and the threshold over which an overlap is considered \n to be one cell, and returns the full set of masks for the passed in rows and cols.\n \"\"\"\n def __init__(self, overlap=80, min_area=20):\n self.overlap = overlap\n self.min_area = min_area\n \n def deconflict(self, tiles, areas, scores, firstid, idx0, idx1, slice0, slice1, direction):\n edge = np.empty((2,) + tiles[idx0][slice0].shape, dtype=np.uint32)\n edge[0] = tiles[idx0][slice0]\n edge[1] = tiles[idx1][slice1]\n \n collisions = np.logical_and(edge[0], edge[1])\n collision_pixels = np.count_nonzero(collisions)\n if collision_pixels > 0: # deconflict where the masks overlap\n collisions0 = edge[0][collisions]\n collisions1 = edge[1][collisions]\n \n conflicts0 = Counter(collisions0) # key: cell id\n conflicts1 = Counter(collisions1) # value: conflicted pixel count\n conflicts2 = [conflicts0, conflicts1]\n conflict_pairs = Counter([(a,b) for a,b in zip(collisions0, collisions1)])\n \n edgedists0 = np.ones_like(areas[idx0], dtype=np.float32)\n edgedists1 = np.ones_like(areas[idx1], dtype=np.float32)\n edgedists2 = [edgedists0, edgedists1]\n \n if 'H' in direction:\n for s in [0,1]:\n weightsH = np.arange(1,1+edge[s].shape[1])[::1 if s else -1]\n if direction == 'VH': weightsH = weightsH[::-1]\n for id, count in conflicts2[s].items():\n # when s=0, we are on the left, and cells furthest right, with highest index need the lowest score\n # when s=1, we are on the right, and cells on the left with the lowest index need the lowest score\n x = np.count_nonzero(edge[s] == id, axis=0) * weightsH\n edgedists2[s][id-firstid[idx1 if s else idx0]] = (np.sum(x) / count)\n \n if 'V' in direction:\n for s in [0,1]:\n weightsV = np.arange(1,1+edge[s].shape[0])[::1 if s else -1]\n for id, count in conflicts2[s].items():\n # when s=0, we are on the top, and cells furthest down, with highest index need the lowest score\n # when s=1, we are on the bottom, and cells at the top with the lowest index need the lowest score\n x = np.count_nonzero(edge[s] == id, axis=1) * weightsV\n edgedists2[s][id-firstid[idx1 if s else idx0]] *= (np.sum(x) / count)\n \n 
bias0 = edgedists0 * areas[idx0] # bias = k*area, large == good\n bias1 = edgedists1 * areas[idx1] # small k == close to edge == bad\n bias2 = [bias0, bias1]\n \n conflicts = {(0, id): (c/bias0[id-firstid[idx0]], -scores[idx0][id-firstid[idx0]], c) for id, c in conflicts0.items()} \n conflicts.update({(1, id): (c/bias1[id-firstid[idx1]], -scores[idx1][id-firstid[idx1]], c) for id, c in conflicts1.items()})\n \n while len(conflicts) > 0:\n conflicts_sorted = sorted(conflicts.items(), key=itemgetter(1), reverse=True)\n \n (s, id), (_, _, n_conflicted) = conflicts_sorted[0]\n del_side, keep_side = (idx1, idx0) if s else (idx0, idx1)\n \n n_total = areas[del_side][id-firstid[del_side]]\n \n if n_total - n_conflicted > self.min_area and n_conflicted/n_total < 0.5 and False:\n # remove conflicted pixels for this cell, but keep non-overlapping pixels\n areas[del_side][id-firstid[del_side]] -= n_conflicted\n edge[s][(edge[s] == id) & (edge[1-s] > 0)] = 0\n else: # remove this cell entirely\n areas[del_side][id-firstid[del_side]] = 0\n edge[s][(edge[s] == id)] = 0\n tiles[del_side][tiles[del_side] == id] = 0\n \n # cell (s, id) has been handled, so we remove it from conflicts\n del conflicts[(s, id)]\n \n # removing (s, id) may resolve other conflicts\n resolved_conflicts = [k for k in conflict_pairs.keys() if k[s]==id]\n for k in resolved_conflicts:\n ks = 1-s # side index to keep [0 or 1]\n kid = k[ks] # id to keep\n keep_key = (ks, kid)\n v = conflicts[keep_key]\n c = v[2] - conflict_pairs[k]\n if c > 0:\n conflicts[keep_key] = (c / bias2[ks][kid-firstid[keep_side]], v[1], c)\n else:\n del conflicts[keep_key]\n del conflict_pairs[k]\n \n # copy overlapping area back to tiles\n tiles[idx0][slice0] = edge[0]\n tiles[idx1][slice1] = edge[1]\n \n \n def rois_to_plane_mask_nn(self, rois, masks, scores, id0, height, width):\n # This version of rois_to_plane_mask is slower and much worse. 
Perhaps it has a bug?\n # The score variant works so well there is little point in improving this version\n \n areas = np.array([np.count_nonzero(mask) for mask in masks], dtype=np.uint32)\n min_area = self.min_area\n \n id1 = id0+1 # first id in this tile\n plane_count = np.zeros([height, width], dtype=np.uint8) # if more than 255 cells overlap then god help us\n \n for idx in range(len(areas)):\n if areas[idx] < self.min_area: continue\n y1, x1, y2, x2 = rois[idx]\n plane_count[y1:y2,x1:x2] += (masks[idx] > 0)\n \n conflicted = [i for i,(m,r,a) in enumerate(zip(masks,rois,areas)) if a>=min_area and np.max(plane_count[r[0]:r[2],r[1]:r[3]] * (m>0))>1]\n \n nc = len(conflicted)\n \n A = np.zeros([nc,nc], dtype=np.bool)\n c_rows = np.zeros([height,nc], dtype=np.bool)\n c_cols = np.zeros([width ,nc], dtype=np.bool)\n \n for c, idx in enumerate(conflicted):\n y1, x1, y2, x2 = rois[idx]\n c_rows[y1:y2, c] = True\n c_cols[x1:x2, c] = True\n \n for c, idx in enumerate(conflicted):\n y1, x1, y2, x2 = rois[idx]\n #cr = np.logical_or.reduce(c_rows[y1:y2], axis=0)\n #cc = np.logical_or.reduce(c_cols[x1:x2], axis=0)\n cr = np.any(c_rows[y1:y2], axis=0)\n cc = np.any(c_cols[x1:x2], axis=0)\n A[c] = cr * cc\n \n '''\n We now have an adjacency matrix like this:\n \n * A B C D E |\n A # |\n B # # |\n C # |\n D # |\n E # |\n ------------+\n \n This represents AB, BC, and DE overlaps\n AB and BC are connected through B\n The final groupings should be ABC and DE \n '''\n \n groups = []\n free = np.ones(nc, dtype=np.bool)\n for j in range(1,nc):\n if not free[j]: continue\n group = set()\n nodes = set([j])\n while len(nodes) > 0:\n i = nodes.pop()\n group.add(i)\n free[i] = False\n nodes.update(np.flatnonzero(A[i] * free))\n groups.append(group)\n \n from sklearn.neighbors import NearestNeighbors\n for group in groups:\n plane_count = np.zeros([height, width], dtype=np.uint8)\n \n #Y1, X1 = height, width\n #Y2, X2 = 0, 0\n \n centroids = np.empty([len(group), 2], dtype=np.float32)\n \n for i, c in enumerate(group):\n idx = conflicted[c]\n y1, x1, y2, x2 = rois[idx]\n #Y1, X1 = min(Y1, y1), min(X1, x1)\n #Y2, X2 = max(Y2, y2), max(X2, x2)\n \n mask = masks[idx]\n masksum = np.sum(mask)\n \n centroids[i,0] = np.dot(np.add.reduce(mask, axis=1), np.arange(y1,y2)) / masksum\n centroids[i,1] = np.dot(np.add.reduce(mask, axis=0), np.arange(x1,x2)) / masksum\n \n plane_count[y1:y2,x1:x2] += (mask > 0)\n\n \n #conf_r,conf_c = np.where(plane_count[Y1:Y2,X1:X2]>1)\n conf_r,conf_c = np.where(plane_count>1)\n \n if len(conf_r) < 1: continue\n \n X = np.column_stack([conf_r, conf_c])\n \n nn = NearestNeighbors(n_neighbors=1).fit(centroids)\n pixel_assignments = nn.kneighbors(n_neighbors=1, X=X, return_distance=False)[:,0]\n \n #assigned = np.zeros([Y2-Y1,X2-X1], dtype=np.uint16)\n assigned = np.zeros([height, width], dtype=np.uint16)\n assigned[conf_r, conf_c] = (pixel_assignments+1)\n \n for c in group:\n idx = conflicted[c]\n y1, x1, y2, x2 = rois[idx]\n masks[idx][assigned[y1:y2,x1:x2] != (c+1)] = 0\n areas[idx] = np.count_nonzero(masks[idx])\n \n plane_mask = np.zeros([height, width], dtype=np.uint32)\n for idx in range(len(areas)):\n if areas[idx] < self.min_area:\n areas[idx] = 0\n continue\n \n y1, x1, y2, x2 = rois[idx]\n plane_mask[y1:y2, x1:x2][masks[idx] > 0] = id1+idx\n \n return plane_mask, areas, scores\n\n @staticmethod\n def largest_island(mask):\n h,w = mask.shape[0]+2, mask.shape[1]+2\n groups = []\n \n free = np.pad(binary_fill_holes(mask), 1, mode='constant').flat\n for j in range(w+1, h*w-w-1):\n if 
free[j]:\n group = set()\n nodes = set([j])\n while len(nodes):\n i = nodes.pop()\n group.add(i)\n free[i] = False\n for di in [-w-1,-w,-w+1,-1,1,w-1,w,w+1]:\n if free[i+di]: nodes.add(i+di)\n groups.append(group)\n \n best = np.zeros_like(mask)\n if len(groups) < 1: return best\n \n best_group = groups[np.argmax([len(group) for group in groups])]\n for idx in best_group:\n j = idx // w\n i = idx % w\n best[j-1,i-1] = True\n \n return best\n\n def rois_to_plane_mask(self, rois, masks, scores, id0, height, width):\n libSpaCE = CDLL('SpaCE.dll')\n \n c_binary_fill_holes = libSpaCE.binary_fill_holes\n c_binary_fill_holes.restype = c_uint\n c_binary_fill_holes.argtypes = [POINTER(c_bool), c_int, c_int]\n \n c_largest_island = libSpaCE.largest_island\n c_largest_island.restype = c_uint\n c_largest_island.argtypes = [POINTER(c_bool), c_int, c_int]\n \n areas = np.array([np.count_nonzero(mask) for mask in masks], dtype=np.uint32)\n original_areas = areas.copy()\n min_area = self.min_area\n \n sorted_cell_indices = np.argsort(scores)[::-1]\n \n id1 = id0+1 # first id in this tile\n for iterations in range(3):\n score_mask = np.zeros([height, width], dtype=np.float32)\n plane_mask = np.zeros([height, width], dtype=np.uint32)\n update_mask = np.zeros(len(areas), dtype=np.bool)\n \n for i in sorted_cell_indices:\n if areas[i] < min_area: continue\n \n y1, x1, y2, x2 = rois[i]\n \n cont = True\n while cont:\n cont = False\n \n better = np.ascontiguousarray(masks[i] > score_mask[y1:y2, x1:x2])\n \n area = np.count_nonzero(better)\n if area < min_area * 0.5: break\n\n h, w = better.shape\n \n filled_area = c_binary_fill_holes(better.ctypes.data_as(POINTER(c_bool)), h, w)\n areas[i] = c_largest_island(better.ctypes.data_as(POINTER(c_bool)), h, w)\n update_mask[i] = (filled_area > area) # we filled some holes, so the mask needs updating\n \n if areas[i] < min_area: break\n \n replaced = Counter(plane_mask[y1:y2, x1:x2][better])\n for id, count in replaced.items():\n if not id: continue\n j = id-id1\n areas[j] -= count\n if areas[j] < min_area:\n cont = True\n areas[j] = 0\n y1r, x1r, y2r, x2r = rois[j]\n score_mask[y1r:y2r, x1r:x2r][plane_mask[y1r:y2r, x1r:x2r] == id] = 0\n plane_mask[y1r:y2r, x1r:x2r][plane_mask[y1r:y2r, x1r:x2r] == id] = 0\n else:\n plane_mask[y1:y2, x1:x2][better & (plane_mask[y1:y2, x1:x2] == id)] = 0\n \n if areas[i] >= min_area:\n score_mask[y1:y2, x1:x2][better] = masks[i][better]\n plane_mask[y1:y2, x1:x2][better] = id1+i\n \n for i in range(len(areas)):\n if areas[i] < 1: continue \n \n y1, x1, y2, x2 = rois[i]\n loc = plane_mask[y1:y2, x1:x2] == (id1+i)\n areas[i] = np.count_nonzero(loc)\n \n if areas[i] < min_area:\n if areas[i] > 0: plane_mask[y1:y2, x1:x2][loc] = 0\n areas[i] = 0\n elif areas[i] != original_areas[i] or update_mask[i]:\n masks[i] = np.maximum(masks[i], 0.1) * loc\n \n FreeLibrary(libSpaCE._handle)\n del libSpaCE\n \n return plane_mask, areas, scores\n\n def stitch_masks_plane(self, roilist, masklist, scorelist, nrows, ncols, height, width):\n ntiles = nrows * ncols\n assert(len(roilist) == len(masklist) == len(scorelist) == ntiles)\n \n crop_height, crop_width = height//nrows, width//ncols\n tile_top = lambda j: j*crop_height - (j>0) * self.overlap//2\n tile_left = lambda i: i*crop_width - (i>0) * self.overlap//2\n \n '''\n count_mask = np.zeros([height, width], dtype=np.uint32)\n for ti in range(ntiles):\n j = ti // ncols\n i = ti % ncols\n \n rois = roilist[ti]\n masks = masklist[ti]\n \n top = tile_top(j)\n left = tile_left(i)\n \n for c in 
range(len(masks)):\n y1, x1, y2, x2 = rois[c]\n count_mask[top+y1:top+y2, left+x1:left+x2] += (masks[c] > 0)\n \n #show(count_mask)\n '''\n \n print('Removing cells with area less than {} px'.format(self.min_area))\n \n from timeit import default_timer as timer\n \n t0 = timer()\n \n tiles, areas, scores, counts = [], [], [], []\n cumulative = 0\n for ti in range(ntiles):\n j = ti // ncols\n i = ti % ncols\n \n th = (crop_height + (self.overlap//2 if j in [0,nrows-1] else self.overlap)) if nrows > 1 else height\n tw = (crop_width + (self.overlap//2 if i in [0,ncols-1] else self.overlap)) if ncols > 1 else width\n \n tile, t_areas, t_scores = self.rois_to_plane_mask(roilist[ti], masklist[ti], scorelist[ti], cumulative, th, tw)\n \n t_cells = len(t_areas)\n \n tiles.append(tile)\n areas.append(t_areas)\n scores.append(t_scores)\n counts.append(t_cells)\n cumulative += t_cells\n \n \n firstid = np.cumsum(np.concatenate([[1], counts]))\n \n print('Convert rois to plane masks: {:.1f}s'.format(timer()-t0)); t0=timer()\n \n for ti in range(ntiles):\n j = ti // ncols\n i = ti % ncols\n \n if i > 0:\n slice0 = (slice(0, None), slice(-self.overlap, None))\n slice1 = (slice(0, None), slice(0, self.overlap))\n self.deconflict(tiles, areas, scores, firstid, ti-1, ti, slice0, slice1, 'H')\n \n if j > 0:\n slice0 = (slice(-self.overlap, None), slice(0, None))\n slice1 = (slice(0, self.overlap), slice(0, None))\n self.deconflict(tiles, areas, scores, firstid, ti-ncols, ti, slice0, slice1, 'V')\n \n if j > 0 and i > 0:\n slice0 = (slice(-self.overlap, None), slice(-self.overlap, None))\n slice1 = (slice(0, self.overlap) , slice(0, self.overlap))\n self.deconflict(tiles, areas, scores, firstid, ti-ncols-1, ti, slice0, slice1, 'HV')\n \n slice0 = (slice(-self.overlap, None), slice(0, self.overlap))\n slice1 = (slice(0, self.overlap) , slice(-self.overlap, None))\n self.deconflict(tiles, areas, scores, firstid, ti-ncols, ti-1, slice0, slice1, 'VH')\n \n print('Merge tiles: {:.1f}s'.format(timer()-t0)); t0=timer()\n \n all_indices = [(ti, idx) for ti in range(ntiles) for idx in range(len(areas[ti])) if areas[ti][idx] >= self.min_area]\n all_scores = [s[idx] for s,a in zip(scores, areas) for idx in range(len(a)) if a[idx] >= self.min_area]\n \n score_order = np.argsort(np.argsort(all_scores))\n \n print('Found {} cell instances'.format(len(score_order)))\n \n counts = np.zeros(ntiles, dtype=np.uint32)\n for ti, area in enumerate(areas):\n for a in area:\n if a >= self.min_area: counts[ti] += 1\n \n firstidx = np.cumsum(np.concatenate([[0], counts]))\n \n sorted_mask = np.zeros([height, width], dtype=np.uint32)\n for ti, area in enumerate(areas):\n j = ti // ncols\n i = ti % ncols\n \n idx0 = firstidx[ti]\n rois = roilist[ti]\n masks = masklist[ti]\n \n top = tile_top(j)\n left = tile_left(i)\n \n c = 0\n for idx, a in enumerate(area):\n if a >= self.min_area:\n id = score_order[idx0+c]+1\n y1, x1, y2, x2 = rois[idx]\n \n sorted_mask[top+y1:top+y2, left+x1:left+x2][masks[idx] > 0] = id\n \n c += 1\n print('Insert sorted cells into full mask: {:.1f}s'.format(timer()-t0)); t0=timer()\n \n #show(sorted_mask)\n \n assert(np.allclose(np.unique(sorted_mask), np.arange(len(score_order)+1)))\n \n return sorted_mask\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] | [
[
"numpy.ones_like",
"numpy.where",
"numpy.concatenate",
"numpy.max",
"numpy.count_nonzero",
"numpy.zeros_like",
"numpy.empty",
"numpy.add.reduce",
"matplotlib.pyplot.subplots",
"numpy.logical_and",
"matplotlib.pyplot.Axes",
"numpy.arange",
"numpy.column_stack",
"numpy.flatnonzero",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.argsort",
"scipy.ndimage.morphology.binary_fill_holes",
"matplotlib.pyplot.show",
"numpy.ascontiguousarray",
"numpy.sum",
"numpy.ones",
"numpy.any",
"sklearn.neighbors.NearestNeighbors",
"numpy.unique",
"numpy.maximum"
]
] |
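The stitcher above keeps only the largest connected region of each candidate mask, either via the pure-Python flood fill in the static `largest_island` method or via the `SpaCE.dll` helpers. The same idea can be expressed compactly with scipy's labeling utilities; this is a hedged alternative sketch, not the project's implementation, and it matches the original's 8-connectivity by passing an all-ones structuring element.

```python
# Keep only the largest connected component of a boolean mask after filling
# holes -- the "largest island" idea from the entry above, using scipy.
import numpy as np
from scipy import ndimage

def largest_island(mask):
    filled = ndimage.binary_fill_holes(mask)
    structure = np.ones((3, 3), dtype=bool)       # 8-connectivity, as in the original
    labels, n = ndimage.label(filled, structure=structure)
    if n == 0:
        return np.zeros_like(mask, dtype=bool)
    sizes = ndimage.sum(filled, labels, index=range(1, n + 1))
    return labels == (np.argmax(sizes) + 1)
```

The ctypes path in the entry exists for speed on large tiles; functionally it plays the same role as this sketch inside `rois_to_plane_mask`.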
Gjjring/starr | [
"2f2116b56c5fb05c91e9fe0aff230553279c7f60"
] | [
"src/starr/physics_component.py"
] | [
"import numpy as np\nimport itertools\nclass PhysicsComponent():\n\n def __init__(self, mass, static=False):\n #self.parent = parent\n self.static = static\n self.mass = mass\n self.velocity = np.zeros((2))\n self.acceleration = np.zeros((2))\n self.previous_collisions = [None]\n self.memory = 1\n self.kinetic_energy = 0.0\n self.momentum = np.zeros(2)\n\n def update(self, simulation_object, world, time_step):\n self.update_kinematics(simulation_object, time_step)\n self.update_energy()\n\n def update_energy(self):\n v = np.linalg.norm(self.velocity)\n self.kinetic_energy = 0.5*self.mass*v**2\n\n def update_momentum(self):\n self.momentum = self.mass*self.velocity\n\n def apply_resistance(self):\n constant = 0.1\n v_norm = np.linalg.norm(self.velocity)\n if v_norm == 0.0:\n return\n v_dir = self.velocity/v_norm\n self.apply_force( -v_dir*constant*v_norm**2)\n\n def update_kinematics(self, simulation_object, time_step):\n #self.apply_resistance()\n self.velocity += self.acceleration\n simulation_object.translate(time_step * self.velocity)\n self.acceleration *= 0\n\n def apply_force(self, force):\n self.acceleration += force\n\n def collision(self, other, normal):\n if self.static:\n return\n m1 = self.mass\n m2 = other.mass\n\n v1_init = self.velocity\n v2_init = other.velocity\n\n v1c_init = v1_init\n v2c_init = v2_init\n\n v12_init = v1c_init - v2c_init\n j = self.get_impulse(0.90, v12_init, normal, m1, m2)\n self.apply_force((j*normal/m1))\n self.previous_collisions[0] = other\n\n\n def get_impulse(self, e, v12_init, normal, m1, m2):\n numerator = -(1+e)* v12_init.dot(normal)\n denom = 1/m1 + 1/m2\n return numerator/denom\n\n \"\"\"\n def update_physics(self, time_step):\n self.velocity += time_step * self.acceleration\n self.parent.position += time_step * self.velocity\n self.acceleration *= 0\n\n\n def collision_allowed(self, other):\n truth_val= not other.number in self.previous_collisions\n partners = [3, 4]\n if self.number in partners and other.number in partners and False:\n print(\"collison {}-{}: {}\".format(self.number,\n other.number,\n truth_val))\n\n return truth_val\n\n \"\"\"\n\"\"\"\nclass RigidBody(PhysicsComponent):\n\n def __init__(self, parent, parameters):\n super(RigidBody, self).__init__(parent, parameters)\n\"\"\"\n"
] | [
[
"numpy.linalg.norm",
"numpy.zeros"
]
] |
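The `collision` method in the entry above applies the standard impulse magnitude j = −(1 + e)·(v₁ − v₂)·n / (1/m₁ + 1/m₂) along the contact normal. A small self-contained sketch of that formula, assuming point masses and a unit-length normal; the function name and example values are illustrative only.

```python
import numpy as np

def collision_impulse(v1, v2, normal, m1, m2, restitution=0.9):
    """Impulse magnitude for two point masses colliding along a unit normal:
    j = -(1 + e) * (v1 - v2) . n / (1/m1 + 1/m2)."""
    v_rel = np.asarray(v1, dtype=float) - np.asarray(v2, dtype=float)
    return -(1.0 + restitution) * v_rel.dot(normal) / (1.0 / m1 + 1.0 / m2)

# Example: equal masses approaching head-on along the x axis.
n = np.array([1.0, 0.0])
j = collision_impulse([1.0, 0.0], [-1.0, 0.0], n, m1=1.0, m2=1.0)
# Each body then receives a velocity change of +/- j * n / mass, which is
# what `apply_force((j*normal/m1))` does for the calling body above.
```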
LBNL-ETA/fmi-for-power-system | [
"7f1818278226a4069a6b90a9b3c4045ebad5f5d5"
] | [
"tests/005_multiplier_with_cyme/simulation.py"
] | [
"# coding: utf-8\nfrom pyfmi import load_fmu\nfrom pyfmi.fmi_coupled import CoupledFMUModelME2\n\n# Load CSV reader FMU\nprint('Loading the csv reader (server ME FMU) ...')\ncyme = load_fmu('cyme/simulator.fmu', log_level=7)\ncyme.setup_experiment(start_time=0, stop_time=20)\n\nprint('Loading the multiplier (function ME FMU) ...')\nmultiplier = load_fmu('multiplier/multiplier.fmu', log_level=7)\nmultiplier.setup_experiment(start_time=0, stop_time=20)\nprint('Done loading FMUs')\n\n# Create the Master algorithm and launch the simulation\nprint('Create master')\nmodels = [(\"cyme\", cyme), (\"multiplier\", multiplier)]\nconnections = [(cyme, \"voltage_836_Vpu\", multiplier, \"x\")]\nmaster = CoupledFMUModelME2(models, connections)\noptions = master.simulate_options()\noptions['ncp'] = 23\nprint('Run simulation')\nresults = master.simulate(options=options, final_time=23.0)\n\n# Terminate FMUs\nprint('Done terminate FMUs')\ncyme.terminate()\nmultiplier.terminate()\n\n# Plot the results\nimport matplotlib.pyplot as plt\nplt.figure(figsize=(10, 3))\nplt.plot(results[\"time\"], results[\"cyme.voltage_836_Vpu\"],\n label='CYME result')\nplt.plot(results[\"time\"], results[\"multiplier.y\"],\n label='Multiply by 2')\nplt.ylabel(\"Results\")\nplt.xlabel(\"Time\")\nplt.legend(loc=0)\nplt.show()\n"
] | [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show"
]
] |
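The script in the entry above couples two model-exchange FMUs through pyfmi's `CoupledFMUModelME2` master: each FMU gets a name, and `connections` wires a named output of one model to a named input of another. The sketch below restates that wiring pattern with placeholder FMU files and variable names (`source_model.fmu`, `y_out`, `u_in` are assumptions, not part of the project), using only the pyfmi calls that appear in the script.

```python
from pyfmi import load_fmu
from pyfmi.fmi_coupled import CoupledFMUModelME2

# Placeholder FMUs and variable names -- substitute your own.
source = load_fmu('source_model.fmu')
sink = load_fmu('sink_model.fmu')
source.setup_experiment(start_time=0, stop_time=10)
sink.setup_experiment(start_time=0, stop_time=10)

models = [("source", source), ("sink", sink)]
# Each connection is (from_fmu, output_variable, to_fmu, input_variable).
connections = [(source, "y_out", sink, "u_in")]

master = CoupledFMUModelME2(models, connections)
opts = master.simulate_options()
opts['ncp'] = 100                      # number of result/communication points
results = master.simulate(options=opts, final_time=10.0)
# As in the script above, coupled results are addressed as
# "<model name>.<variable>" (e.g. results["source.y_out"]) plus results["time"].
```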
chrisxthe/financial-data-structures | [
"add660f968f2cd72a733ab597b16ecc2d9cdec0b"
] | [
"main.py"
] | [
"# Author: Jacques Joubert\n# Email: [email protected]\n\n\"\"\"\nAdvances in Financial Machine Learning\nMarcos Lopez De Prado\n\nChapter 2: Financial Data Structures\nIn order to build any of the projects mentioned in the book, we must first\ncreate the various types of structured data from the unstructured data provided.\n\nMany of the projects going forward will require Dollar and Volume bars.\n\nI implemented some speed improvements in the code but feel that there is still\nroom for improvement, pull requests are welcomed! :)\n\"\"\"\n\n\nimport numpy as np\nimport pandas as pd\nfrom math import ceil\nimport cython_loops\n\n\n# --------------------------\n# Functions\ndef __pre_process(data):\n # Create an date time\n data['Date_Time'] = data['Date'] + ' ' + data['Time']\n data = data.drop(['Date', 'Time'], axis=1)\n\n # Calculate the transaction value\n data['Transaction'] = data['Price'] * data['Volume']\n\n return data\n\n\ndef __extract_data(data):\n # Extract data\n date_time = data[['Date_Time', 'Group']].groupby('Group')['Date_Time'].last()\n ohlc = data[['Price', 'Group']].astype(float).groupby('Group')['Price'].ohlc()\n volume = data[['Volume', 'Group']].astype(float).groupby('Group').sum()\n vwap = pd.DataFrame(data[['Transaction', 'Group']].astype(float).groupby('Group').sum().values / volume.values)\n\n # Create DataFrame\n bars = pd.concat([date_time, ohlc, volume, vwap], axis=1)\n bars.columns = ['date', 'open', 'high', 'low', 'close', 'volume', 'vwap']\n\n return bars\n\n\ndef __time_bars(data, units):\n # Set the time frame\n duration = str(units) + 'T'\n\n # Extract data\n data.index = pd.to_datetime(data['Date_Time'])\n ohlc = data.resample(duration, label='right')['Price'].ohlc()\n date_time = pd.DataFrame(ohlc.index, index=ohlc.index)\n volume = data.resample(duration, label='right')['Volume'].sum()\n vwap = data.resample(duration, label='right')['Transaction'].sum().values / volume\n\n # Create DataFrame\n data = pd.concat([date_time, ohlc, volume, vwap], axis=1)\n data.columns = ['date', 'open', 'high', 'low', 'close', 'volume', 'vwap']\n\n return data\n\n\ndef __dollar_bars(data, units):\n # Dollar metric\n data['CumDollars'] = data['Transaction'].cumsum()\n col_names = data.columns\n\n # Set the relevant group for each row\n data = cython_loops.set_row_groups(units, np.array(data))\n data = pd.DataFrame(data, columns=col_names)\n data = __extract_data(data)\n\n return data\n\n\ndef __volume_bars(data, units):\n # Volume metric\n data['CumVol'] = data['Volume'].cumsum()\n col_names = data.columns\n\n # Set the relevant group for each row\n data = cython_loops.set_row_groups(units, np.array(data))\n data = pd.DataFrame(data, columns=col_names)\n data = __extract_data(data)\n\n # Todo: Add 1/50 of the daily traded volume\n return data\n\n\ndef __tick_bars(data, units):\n # Create groups based on number of tick bars\n group_index = data.index % units == 0\n group_size = ceil(data.shape[0] / float(units))\n groups = np.array(range(0, int(group_size)))\n\n # Fill in group values\n data.loc[group_index, 'Group'] = groups\n data['Group'] = data['Group'].ffill()\n data = __extract_data(data)\n\n return data\n\n\ndef create_bars(data, units=1000, type='tick'):\n \"\"\"\n Creates the desired bars. 4 different types:\n 1. Time Bars\n 2. Tick Bars\n 3. Volume Bars\n 4. 
Dollar Bars\n\n See book for more info:\n Marcos Prado (2018), Advances in Financial Machine Learning, pg 25\n\n :param data: Pandas DataFrame of Tick Data from TickData.com\n :param units: Number of units in a bar.\n Time Bars: Number of minutes per bar\n Tick Bars: Number of ticks per bar\n Volume Bars: Number of shares traded per bar\n Dollar Bars: Transaction size traded per bar\n\n :param type: String of the bar type, ('tick', 'volume', 'dollar', 'time')\n :return: Pandas DataFrame of relevant bar data\n \"\"\"\n data = __pre_process(data)\n\n # Create an empty column\n data['Group'] = np.nan\n\n print('Creating {type} bars'.format(type=type))\n if type == 'tick':\n bars = __tick_bars(data, units)\n elif type == 'volume':\n bars = __volume_bars(data, units)\n elif type == 'dollar':\n bars = __dollar_bars(data, units)\n elif type == 'time':\n bars = __time_bars(data, units)\n else:\n raise ValueError('Type must be: tick, volume, dollar, or time')\n\n return bars\n\n\n# ------------------------\n# Body\nif __name__ == '__main__':\n # Read in tick data:\n # https://s3-us-west-2.amazonaws.com/tick-data-s3/downloads/ES_Sample.zip\n data = pd.read_csv('raw_tick_data/ES_Trades.csv')\n\n # Create bars\n print('Uncomment time_bars in main.py if you want them to be created.')\n # time_bars = create_bars(data, units=600, type='time') # Time bars take long to run since I have not optimised them.\n tick_bars = create_bars(data, units=5000, type='tick')\n volume_bars = create_bars(data, units=21731, type='volume')\n dollar_bars = create_bars(data, units=35638840, type='dollar')\n\n # Write to csv\n #time_bars.to_csv('saved_data/time_bars.csv', index=False)\n tick_bars.to_csv('saved_data/tick_bars.csv', index=False)\n volume_bars.to_csv('saved_data/volume_bars.csv', index=False)\n dollar_bars.to_csv('saved_data/dollar_bars.csv', index=False)\n"
] | [
[
"pandas.to_datetime",
"numpy.array",
"pandas.DataFrame",
"pandas.concat",
"pandas.read_csv"
]
] |
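The dollar- and volume-bar builders in the entry above assign each tick to a group by walking a cumulative total in a Cython loop (`cython_loops.set_row_groups`). The grouping rule itself is simply "start a new bar each time the running total crosses another multiple of the threshold," which can be sketched directly in pandas; this is a hedged approximation of that loop (it may split the boundary tick slightly differently), and the column names `Price`/`Volume` are taken from the entry.

```python
import numpy as np
import pandas as pd

def dollar_bar_groups(df, threshold):
    """Assign a bar id to each tick so that every bar accumulates roughly
    `threshold` in traded dollar value (Price * Volume)."""
    cum_dollars = (df['Price'] * df['Volume']).cumsum()
    return (cum_dollars // threshold).astype(int)

# Usage sketch: group ticks and aggregate to OHLCV bars.
# df['Group'] = dollar_bar_groups(df, threshold=35_638_840)
# bars = df.groupby('Group').agg(open=('Price', 'first'), high=('Price', 'max'),
#                                low=('Price', 'min'),   close=('Price', 'last'),
#                                volume=('Volume', 'sum'))
```

Volume bars follow the same pattern with `df['Volume'].cumsum()` in place of the cumulative dollar value, matching `__volume_bars` above.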
mtezzele/BladeX | [
"94cb3145d9174cb711de90b80928cb5799fba039"
] | [
"bladex/blade.py"
] | [
"\"\"\"\nModule for the blade bottom-up parametrized construction.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nclass Blade(object):\n \"\"\"\n Bottom-up parametrized blade construction.\n \n Given the following parameters of a propeller blade:\n\n - :math:`(X, Y)` coordinates of the blade cylindrical sections after\n being expanded in 2D to create airfoils.\n\n - Radial distance :math:`(r_i)` from the propeller axis of rotation\n to each cylindrical section.\n\n - Pitch angle :math:`(\\\\varphi)`, for each cylindrical section.\n\n - Rake :math:`(k)`, in distance units, for each cylindrical section.\n\n - Skew angle :math:`(\\\\theta_s)`, for each cylindrical section.\n\n then, a bottom-up construction procedure is performed by applying series of\n transformation operations on the airfoils according to the provided\n parameters, to end up with a 3D CAD model of the blade, which can be\n exported into IGES format. Also surface or volume meshes can be obtained.\n\n Useful definitions on the propeller geometry:\n\n - Blade cylindrical section: the cross section of a blade cut by a\n cylinder whose centerline is the propeller axis of rotation.\n We may also refer as \"radial section\".\n\n - Pitch :math:`(P)`: the linear distance that a propeller would move in\n one revolution with no slippage. The geometric pitch angle\n :math:`(\\\\varphi)` is the angle between the pitch reference line\n and a line perpendicular to the propeller axis of rotation.\n\n .. math::\n tan (\\\\varphi) = \\\\frac{\\\\text{pitch}}\n {\\\\text{propeller circumference}} = \\\\frac{P}{2 \\\\pi r}\n\n - Rake: the fore or aft slant of the blade with respect to a line\n perpendicular to the propeller axis of rotation.\n\n - Skew: the transverse sweeping of a blade such that viewing the blades\n from fore or aft would show an asymmetrical shape.\n\n References:\n\n - Carlton, J. Marine propellers and propulsion. Butterworth-Heinemann, 2012.\n http://navalex.com/downloads/Michigan_Wheel_Propeller_Geometry.pdf\n\n - J. Babicz. Wartsila Encyclopedia of Ship Technology. 2nd ed. Wartsila\n Corporation. 2015.\n\n .. _transformation_operations:\n\n Transformation operations according to the provided parameters:\n\n .. figure:: ../../readme/transformations.png\n :scale: 75 %\n :alt: transformations\n\n Airfoil 2D transformations corresponding to the pitch, rake, and skew of\n the blade expanded cylindrical section.\n\n --------------------------\n\n :param array_like sections: 1D array, each element is an object of the\n BaseProfile class at specific radial section.\n :param array_like radii: 1D array, contains the radii values of the\n sectional profiles.\n :param array_like chord_lengths: 1D array, contains the value of the\n airfoil's chord length for each radial section of the blade.\n :param array_like pitch: 1D array, contains the local pitch values\n (in unit length) for each radial section of the blade.\n :param array_like rake: 1D array, contains the local rake values for each\n radial section of the blade.\n :param array_like skew_angles: 1D array, contains the skew angles\n (in degrees) for each radial section of the blade.\n\n Note that, each of the previous array_like parameters must be consistent\n with the other parameters in terms of the radial ordering of the blade\n sections. 
In particular, an array_like elements must follow the radial\n distribution of the blade sections starting from the blade root and ends up\n with the blade tip since the blade surface generator depends on that order.\n\n Finally, beware that the profiles class objects in the array 'sections'\n undergo several transformations that affect their coordinates. Therefore\n the array must be specific to each blade class instance. For example, if\n we generate 12 sectional profiles using NACA airfoils and we need to use\n them in two different blade classes, then we should instantiate two class\n objects for the profiles, as well as the blade. The following example\n explains the fault and the correct implementations (assuming we already\n have the arrays radii, chord, pitch, rake, skew):\n\n INCORRECT IMPLEMENTATION:\n\n >>> sections = [bladex.profiles.NacaProfile(digits='0012', n_points=240,\n cosine_spacing=True) for i in range(12)]\n >>> blade_1 = Blade(\n sections=sections,\n radii=radii,\n chord_lengths=chord,\n pitch=pitch,\n rake=rake,\n skew_angles=skew)\n >>> blade_1.apply_transformations()\n >>> blade_2 = Blade(\n sections=sections,\n radii=radii,\n chord_lengths=chord,\n pitch=pitch,\n rake=rake,\n skew_angles=skew)\n >>> blade_2.apply_transformations()\n\n The previous implementation would lead into erroneous blade coordinates due\n to the transformed data in the array sections\n\n CORRECT IMPLEMENTATION:\n\n >>> sections_1 = [bladex.profiles.NacaProfile(digits='0012', n_points=240,\n cosine_spacing=True) for i in range(12)]\n >>> sections_2 = [bladex.profiles.NacaProfile(digits='0012', n_points=240,\n cosine_spacing=True) for i in range(12)]\n >>> blade_1 = Blade(\n sections=sections_1,\n radii=radii,\n chord_lengths=chord,\n pitch=pitch,\n rake=rake,\n skew_angles=skew)\n >>> blade_1.apply_transformations()\n >>> blade_2 = Blade(\n sections=sections_2,\n radii=radii,\n chord_lengths=chord,\n pitch=pitch,\n rake=rake,\n skew_angles=skew)\n >>> blade_2.apply_transformations()\n \"\"\"\n\n def __init__(self, sections, radii, chord_lengths, pitch, rake,\n skew_angles):\n # Data are given in absolute values\n self.sections = sections\n self.n_sections = len(sections)\n self.radii = radii\n self.chord_lengths = chord_lengths\n self.pitch = pitch\n self.rake = rake\n self.skew_angles = skew_angles\n self._check_params()\n\n self.pitch_angles = self._compute_pitch_angle()\n self.induced_rake = self._induced_rake_from_skew()\n\n self.blade_coordinates_up = []\n self.blade_coordinates_down = []\n\n self.generated_upper_face = None\n self.generated_lower_face = None\n self.generated_tip = None\n self.generated_root = None\n\n def _check_params(self):\n \"\"\"\n Private method to check if all the blade arguments are numpy.ndarrays\n with the same shape.\n \"\"\"\n if not isinstance(self.sections, np.ndarray):\n self.sections = np.asarray(self.sections)\n if not isinstance(self.radii, np.ndarray):\n self.radii = np.asarray(self.radii)\n if not isinstance(self.chord_lengths, np.ndarray):\n self.chord_lengths = np.asarray(self.chord_lengths)\n if not isinstance(self.pitch, np.ndarray):\n self.pitch = np.asarray(self.pitch)\n if not isinstance(self.rake, np.ndarray):\n self.rake = np.asarray(self.rake)\n if not isinstance(self.skew_angles, np.ndarray):\n self.skew_angles = np.asarray(self.skew_angles)\n\n if not (self.sections.shape == self.radii.shape ==\n self.chord_lengths.shape == self.pitch.shape == self.rake.shape\n == self.skew_angles.shape):\n raise ValueError('Arrays {sections, radii, 
chord_lengths, pitch, '\\\n 'rake, skew_angles} do not have the same shape.')\n\n def _compute_pitch_angle(self):\n \"\"\"\n Private method that computes the pitch angle from the linear pitch for\n all blade sections.\n\n :return: pitch angle in radians\n :rtype: numpy.ndarray\n \"\"\"\n return np.arctan(self.pitch / (2.0 * np.pi * self.radii))\n\n def _induced_rake_from_skew(self):\n \"\"\"\n Private method that computes the induced rake from skew for all the\n blade sections, according to :ref:`mytransformation_operations`.\n\n :return: induced rake from skew\n :rtype: numpy.ndarray\n \"\"\"\n return self.radii * np.radians(self.skew_angles) * np.tan(\n self.pitch_angles)\n\n def _planar_to_cylindrical(self):\n \"\"\"\n Private method that transforms the 2D planar airfoils into 3D\n cylindrical sections.\n\n The cylindrical transformation is defined by the following formulas:\n\n - :math:`x = x_{i} \\\\qquad \\\\forall x_i \\\\in X`\n\n - :math:`y = r \\\\sin\\\\left( \\\\frac{y_i}{r} \\\\right) \\\\qquad\n \\\\forall y_i \\\\in Y`\n\n - :math:`z = -r \\\\cos\\\\left( \\\\frac{y_i}{r} \\\\right) \\\\qquad\n \\\\forall y_i \\\\in Y`\n\n After transformation, the method also fills the numpy.ndarray\n \"blade_coordinates_up\" and \"blade_coordinates_down\" with the new\n :math:`(X, Y, Z)` coordinates.\n \"\"\"\n for section, radius in zip(self.sections, self.radii):\n theta_up = section.yup_coordinates / radius\n theta_down = section.ydown_coordinates / radius\n\n y_section_up = radius * np.sin(theta_up)\n y_section_down = radius * np.sin(theta_down)\n\n z_section_up = -radius * np.cos(theta_up)\n z_section_down = -radius * np.cos(theta_down)\n\n self.blade_coordinates_up.append(\n np.array([section.xup_coordinates, y_section_up, z_section_up]))\n self.blade_coordinates_down.append(\n np.array(\n [section.xdown_coordinates, y_section_down,\n z_section_down]))\n\n def apply_transformations(self, reflect=True):\n \"\"\"\n Generate a bottom-up constructed propeller blade based on the airfoil\n transformations, see :ref:`mytransformation_operations`.\n\n The order of the transformation operations is as follows:\n\n 1. Translate airfoils by reference points into origin.\n\n 2. Scale X, Y coordinates by a factor of the chord length. Also\n reflect the airfoils if necessary.\n\n 3. Rotate the airfoils counter-clockwise according to the local\n pitch angles. Beware of the orientation system.\n\n 4. Translate airfoils along X-axis by a magnitude of the local\n rake. Perform another translation for the skew-induced rake.\n\n 5. Translate airfoils along Y-axis by a magnitude of the skewness.\n\n 6. Transform the 2D airfoils into cylindrical sections, by laying\n each foil on a cylinder of radius equals to the section radius,\n and the cylinder axis is the propeller axis of rotation.\n\n :param bool reflect: if true, then reflect the coordinates of all the\n airfoils about both X-axis and Y-axis. Default value is True.\n\n We note that the implemented transformation operations with the current\n Cartesian coordinate system shown in :ref:`mytransformation_operations`\n assumes a right-handed propeller. 
In case of a desired left-handed\n propeller the user can either change the code for the negative\n Z-coordinates in the cylindrical transformation (i.e.\n `_planar_to_cylindrical` private method), or manipulating the\n orientation of the generated CAD with respect to the hub.\n \"\"\"\n for i in range(self.n_sections):\n # Translate reference point into origin\n self.sections[i].translate(-self.sections[i].reference_point)\n\n if reflect:\n self.sections[i].reflect()\n\n # Scale the unit chord to actual length.\n self.sections[i].scale(self.chord_lengths[i])\n\n # Rotate according to the pitch angle.\n # Since the current orientation system is not standard (It is\n # left-handed Cartesian orientation system, where Y-axis points\n # downwards and X-axis points to the right), the standard rotation\n # matrix yields clockwise rotation.\n self.sections[i].rotate(\n rad_angle=np.pi / 2.0 - self.pitch_angles[i])\n\n # Translation due to skew.\n self.sections[i].translate(\n [0, -self.radii[i] * np.radians(self.skew_angles[i])])\n\n # Translate due to total rake.\n self.sections[i].translate(\n [-(self.rake[i] + self.induced_rake[i]), 0])\n\n self._planar_to_cylindrical()\n\n def rotate(self, deg_angle=None, rad_angle=None):\n \"\"\"\n 3D counter clockwise rotation about the X-axis of the Cartesian\n coordinate system, which is the axis of rotation of the propeller hub.\n\n The rotation matrix, :math:`R(\\\\theta)`, is used to perform rotation\n in the 3D Euclidean space about the X-axis, which is -- by default --\n the propeller axis of rotation.\n\n :math:`R(\\\\theta)` is defined by:\n\n .. math::\n \\\\left(\\\\begin{matrix} 1 & 0 & 0 \\\\\\\\\n 0 & cos (\\\\theta) & - sin (\\\\theta) \\\\\\\\\n 0 & sin (\\\\theta) & cos (\\\\theta) \\\\end{matrix}\\\\right)\n\n Given the coordinates of point :math:`P` such that\n\n .. math::\n P = \\\\left(\\\\begin{matrix} x \\\\\\\\\n y \\\\\\\\ z \\\\end{matrix}\\\\right),\n\n Then, the rotated coordinates will be:\n\n .. math::\n P^{'} = \\\\left(\\\\begin{matrix} x^{'} \\\\\\\\\n y^{'} \\\\\\\\ z^{'} \\\\end{matrix}\\\\right)\n = R (\\\\theta) \\\\cdot P\n\n :param float deg_angle: angle in degrees. Default value is None\n :param float rad_angle: angle in radians. 
Default value is None\n :raises ValueError: if both rad_angle and deg_angle are inserted,\n or if neither is inserted\n\n \"\"\"\n if not self.blade_coordinates_up:\n raise ValueError('You must apply transformations before rotation.')\n\n # Check rotation angle\n if deg_angle is not None and rad_angle is not None:\n raise ValueError(\n 'You have to pass either the angle in radians or in degrees,' \\\n ' not both.')\n if rad_angle is not None:\n cosine = np.cos(rad_angle)\n sine = np.sin(rad_angle)\n elif deg_angle is not None:\n cosine = np.cos(np.radians(deg_angle))\n sine = np.sin(np.radians(deg_angle))\n else:\n raise ValueError(\n 'You have to pass either the angle in radians or in degrees.')\n\n # Rotation is always about the X-axis, which is the center if the hub\n # according to the implemented transformation procedure\n rot_matrix = np.array([1, 0, 0, 0, cosine, -sine, 0, sine,\n cosine]).reshape((3, 3))\n\n for i in range(self.n_sections):\n coord_matrix_up = np.vstack((self.blade_coordinates_up[i][0],\n self.blade_coordinates_up[i][1],\n self.blade_coordinates_up[i][2]))\n coord_matrix_down = np.vstack((self.blade_coordinates_down[i][0],\n self.blade_coordinates_down[i][1],\n self.blade_coordinates_down[i][2]))\n\n new_coord_matrix_up = np.dot(rot_matrix, coord_matrix_up)\n new_coord_matrix_down = np.dot(rot_matrix, coord_matrix_down)\n\n self.blade_coordinates_up[i][0] = new_coord_matrix_up[0]\n self.blade_coordinates_up[i][1] = new_coord_matrix_up[1]\n self.blade_coordinates_up[i][2] = new_coord_matrix_up[2]\n\n self.blade_coordinates_down[i][0] = new_coord_matrix_down[0]\n self.blade_coordinates_down[i][1] = new_coord_matrix_down[1]\n self.blade_coordinates_down[i][2] = new_coord_matrix_down[2]\n\n def plot(self, elev=None, azim=None, ax=None, outfile=None):\n \"\"\"\n Plot the generated blade sections.\n\n :param int elev: set the view elevation of the axes. This can be used\n to rotate the axes programatically. 'elev' stores the elevation\n angle in the z plane. If elev is None, then the initial value is\n used which was specified in the mplot3d.Axes3D constructor. Default\n value is None\n :param int azim: set the view azimuth angle of the axes. This can be\n used to rotate the axes programatically. 'azim' stores the azimuth\n angle in the x,y plane. If azim is None, then the initial value is\n used which was specified in the mplot3d.Axes3D constructor. Default\n value is None\n :param matplotlib.axes ax: allows to pass the instance of figure axes\n to the current plot. This is useful when the user needs to plot the\n coordinates of several blade objects on the same figure (see the\n example below). If nothing is passed then the method plots on a new\n figure axes. 
Default value is None\n :param string outfile: save the plot if a filename string is provided.\n Default value is None\n\n EXAMPLE:\n Assume we already have the arrays radii, chord, pitch, rake, skew for\n 10 blade sections.\n\n >>> sections_1 = np.asarray([blade.NacaProfile(digits='0012')\n for i in range(10)])\n >>> blade_1 = blade.Blade(sections=sections,\n radii=radii,\n chord_lengths=chord,\n pitch=pitch,\n rake=rake,\n skew_angles=skew)\n >>> blade_1.apply_transformations()\n\n >>> sections_2 = np.asarray([blade.NacaProfile(digits='0012')\n for i in range(10)])\n >>> blade_2 = blade.Blade(sections=sections,\n radii=radii,\n chord_lengths=chord,\n pitch=pitch,\n rake=rake,\n skew_angles=skew)\n >>> blade_2.apply_transformations()\n >>> blade_2.rotate(rot_angle_deg=72)\n\n >>> fig = plt.figure()\n >>> ax = fig.gca(projection=Axes3D.name)\n >>> blade_1.plot(ax=ax)\n >>> blade_2.plot(ax=ax)\n\n On the other hand, if we need to plot for a single blade object,\n we can just ignore such parameter, and the method will internally\n create a new instance for the figure axes, i.e.\n\n >>> sections = np.asarray([blade.NacaProfile(digits='0012')\n for i in range(10)])\n >>> blade = blade.Blade(sections=sections,\n radii=radii,\n chord_lengths=chord,\n pitch=pitch,\n rake=rake,\n skew_angles=skew)\n >>> blade.apply_transformations()\n >>> blade.plot()\n \"\"\"\n if not self.blade_coordinates_up:\n raise ValueError('You must apply transformations before plotting.')\n if ax:\n ax = ax\n else:\n fig = plt.figure()\n ax = fig.gca(projection=Axes3D.name)\n ax.set_aspect('equal')\n\n for i in range(self.n_sections):\n ax.plot(self.blade_coordinates_up[i][0],\n self.blade_coordinates_up[i][1],\n self.blade_coordinates_up[i][2])\n ax.plot(self.blade_coordinates_down[i][0],\n self.blade_coordinates_down[i][1],\n self.blade_coordinates_down[i][2])\n\n plt.axis('equal')\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('radii axis')\n ax.xaxis.label.set_color('red')\n ax.yaxis.label.set_color('red')\n ax.zaxis.label.set_color('red')\n ax.view_init(elev=elev, azim=azim)\n\n if outfile:\n plt.savefig(outfile)\n\n @staticmethod\n def _import_occ_libs():\n \"\"\"\n Private static method to import specific modules from the OCC package.\n \"\"\"\n from OCC.Core.BRepOffsetAPI import BRepOffsetAPI_ThruSections\n from OCC.Core.gp import gp_Pnt\n from OCC.Core.TColgp import TColgp_HArray1OfPnt\n from OCC.Core.GeomAPI import GeomAPI_Interpolate\n from OCC.Core.BRepBuilderAPI import BRepBuilderAPI_MakeVertex,\\\n BRepBuilderAPI_MakeEdge, BRepBuilderAPI_MakeWire,\\\n BRepBuilderAPI_Sewing, BRepBuilderAPI_MakeSolid\n\n # Set the imported modules as global variables to be used out of scope\n global BRepOffsetAPI_ThruSections, gp_Pnt, TColgp_HArray1OfPnt,\\\n GeomAPI_Interpolate, BRepBuilderAPI_MakeVertex,\\\n BRepBuilderAPI_MakeEdge, BRepBuilderAPI_MakeWire,\\\n BRepBuilderAPI_MakeSolid, BRepBuilderAPI_Sewing\n\n def _generate_upper_face(self, max_deg):\n \"\"\"\n Private method to generate the blade upper face.\n\n :param int max_deg: Define the maximal U degree of generated surface\n \"\"\"\n self._import_occ_libs()\n # Initializes ThruSections algorithm for building a shell passing\n # through a set of sections (wires). 
The generated faces between\n # the edges of every two consecutive wires are smoothed out with\n # a precision criterion = 1e-10\n generator = BRepOffsetAPI_ThruSections(False, False, 1e-10)\n generator.SetMaxDegree(max_deg)\n # Define upper edges (wires) for the face generation\n for i in range(self.n_sections):\n npoints = len(self.blade_coordinates_up[i][0])\n vertices = TColgp_HArray1OfPnt(1, npoints)\n for j in range(npoints):\n vertices.SetValue(\n j + 1,\n gp_Pnt(1000 * self.blade_coordinates_up[i][0][j],\n 1000 * self.blade_coordinates_up[i][1][j],\n 1000 * self.blade_coordinates_up[i][2][j]))\n # Initializes an algorithm for constructing a constrained\n # BSpline curve passing through the points of the blade i-th\n # section, with tolerance = 1e-9\n bspline = GeomAPI_Interpolate(vertices, False, 1e-9)\n bspline.Perform()\n edge = BRepBuilderAPI_MakeEdge(bspline.Curve()).Edge()\n if i == 0:\n bound_root_edge = edge\n # Add BSpline wire to the generator constructor\n generator.AddWire(BRepBuilderAPI_MakeWire(edge).Wire())\n # Returns the shape built by the shape construction algorithm\n generator.Build()\n # Returns the Face generated by each edge of the first section\n self.generated_upper_face = generator.GeneratedFace(bound_root_edge)\n\n def _generate_lower_face(self, max_deg):\n \"\"\"\n Private method to generate the blade lower face.\n\n :param int max_deg: Define the maximal U degree of generated surface\n \"\"\"\n self._import_occ_libs()\n # Initializes ThruSections algorithm for building a shell passing\n # through a set of sections (wires). The generated faces between\n # the edges of every two consecutive wires are smoothed out with\n # a precision criterion = 1e-10\n generator = BRepOffsetAPI_ThruSections(False, False, 1e-10)\n generator.SetMaxDegree(max_deg)\n # Define upper edges (wires) for the face generation\n for i in range(self.n_sections):\n npoints = len(self.blade_coordinates_down[i][0])\n vertices = TColgp_HArray1OfPnt(1, npoints)\n for j in range(npoints):\n vertices.SetValue(\n j + 1,\n gp_Pnt(1000 * self.blade_coordinates_down[i][0][j],\n 1000 * self.blade_coordinates_down[i][1][j],\n 1000 * self.blade_coordinates_down[i][2][j]))\n # Initializes an algorithm for constructing a constrained\n # BSpline curve passing through the points of the blade i-th\n # section, with tolerance = 1e-9\n bspline = GeomAPI_Interpolate(vertices, False, 1e-9)\n bspline.Perform()\n edge = BRepBuilderAPI_MakeEdge(bspline.Curve()).Edge()\n if i == 0:\n bound_root_edge = edge\n # Add BSpline wire to the generator constructor\n generator.AddWire(BRepBuilderAPI_MakeWire(edge).Wire())\n # Returns the shape built by the shape construction algorithm\n generator.Build()\n # Returns the Face generated by each edge of the first section\n self.generated_lower_face = generator.GeneratedFace(bound_root_edge)\n\n def _generate_tip(self, max_deg):\n \"\"\"\n Private method to generate the surface that closing the blade tip.\n\n :param int max_deg: Define the maximal U degree of generated surface\n \"\"\"\n self._import_occ_libs()\n\n generator = BRepOffsetAPI_ThruSections(False, False, 1e-10)\n generator.SetMaxDegree(max_deg)\n # npoints_up == npoints_down\n npoints = len(self.blade_coordinates_down[-1][0])\n vertices_1 = TColgp_HArray1OfPnt(1, npoints)\n vertices_2 = TColgp_HArray1OfPnt(1, npoints)\n for j in range(npoints):\n vertices_1.SetValue(\n j + 1,\n gp_Pnt(1000 * self.blade_coordinates_down[-1][0][j],\n 1000 * self.blade_coordinates_down[-1][1][j],\n 1000 * 
self.blade_coordinates_down[-1][2][j]))\n\n vertices_2.SetValue(\n j + 1,\n gp_Pnt(1000 * self.blade_coordinates_up[-1][0][j],\n 1000 * self.blade_coordinates_up[-1][1][j],\n 1000 * self.blade_coordinates_up[-1][2][j]))\n\n # Initializes an algorithm for constructing a constrained\n # BSpline curve passing through the points of the blade last\n # section, with tolerance = 1e-9\n bspline_1 = GeomAPI_Interpolate(vertices_1, False, 1e-9)\n bspline_1.Perform()\n\n bspline_2 = GeomAPI_Interpolate(vertices_2, False, 1e-9)\n bspline_2.Perform()\n\n edge_1 = BRepBuilderAPI_MakeEdge(bspline_1.Curve()).Edge()\n edge_2 = BRepBuilderAPI_MakeEdge(bspline_2.Curve()).Edge()\n\n # Add BSpline wire to the generator constructor\n generator.AddWire(BRepBuilderAPI_MakeWire(edge_1).Wire())\n generator.AddWire(BRepBuilderAPI_MakeWire(edge_2).Wire())\n # Returns the shape built by the shape construction algorithm\n generator.Build()\n # Returns the Face generated by each edge of the first section\n self.generated_tip = generator.GeneratedFace(edge_1)\n\n def _generate_root(self, max_deg):\n \"\"\"\n Private method to generate the surface that closing the blade at the root.\n\n :param int max_deg: Define the maximal U degree of generated surface\n \"\"\"\n self._import_occ_libs()\n\n generator = BRepOffsetAPI_ThruSections(False, False, 1e-10)\n generator.SetMaxDegree(max_deg)\n # npoints_up == npoints_down\n npoints = len(self.blade_coordinates_down[0][0])\n vertices_1 = TColgp_HArray1OfPnt(1, npoints)\n vertices_2 = TColgp_HArray1OfPnt(1, npoints)\n for j in range(npoints):\n vertices_1.SetValue(\n j + 1,\n gp_Pnt(1000 * self.blade_coordinates_down[0][0][j],\n 1000 * self.blade_coordinates_down[0][1][j],\n 1000 * self.blade_coordinates_down[0][2][j]))\n\n vertices_2.SetValue(\n j + 1,\n gp_Pnt(1000 * self.blade_coordinates_up[0][0][j],\n 1000 * self.blade_coordinates_up[0][1][j],\n 1000 * self.blade_coordinates_up[0][2][j]))\n\n # Initializes an algorithm for constructing a constrained\n # BSpline curve passing through the points of the blade last\n # section, with tolerance = 1e-9\n bspline_1 = GeomAPI_Interpolate(vertices_1, False, 1e-9)\n bspline_1.Perform()\n\n bspline_2 = GeomAPI_Interpolate(vertices_2, False, 1e-9)\n bspline_2.Perform()\n\n edge_1 = BRepBuilderAPI_MakeEdge(bspline_1.Curve()).Edge()\n edge_2 = BRepBuilderAPI_MakeEdge(bspline_2.Curve()).Edge()\n\n # Add BSpline wire to the generator constructor\n generator.AddWire(BRepBuilderAPI_MakeWire(edge_1).Wire())\n generator.AddWire(BRepBuilderAPI_MakeWire(edge_2).Wire())\n # Returns the shape built by the shape construction algorithm\n generator.Build()\n # Returns the Face generated by each edge of the first section\n self.generated_root = generator.GeneratedFace(edge_1)\n\n def _write_blade_errors(self, upper_face, lower_face, errors):\n \"\"\"\n Private method to write the errors between the generated foil points in\n 3D space from the parametric transformations, and their projections on\n the generated blade faces from the OCC algorithm.\n\n :param string upper_face: if string is passed then the method generates\n the blade upper surface using the BRepOffsetAPI_ThruSections\n algorithm, then exports the generated CAD into .iges file holding\n the name <upper_face_string>.iges\n :param string lower_face: if string is passed then the method generates\n the blade lower surface using the BRepOffsetAPI_ThruSections\n algorithm, then exports the generated CAD into .iges file holding\n the name <lower_face_string>.iges\n :param string errors: if string 
is passed then the method writes out\n the distances between each discrete point used to construct the\n blade and the nearest point on the CAD that is perpendicular to\n that point\n \"\"\"\n from OCC.Core.gp import gp_Pnt\n from OCC.Core.BRepBuilderAPI import BRepBuilderAPI_MakeVertex\n from OCC.Core.BRepExtrema import BRepExtrema_DistShapeShape\n\n output_string = '\\n'\n with open(errors + '.txt', 'w') as f:\n if upper_face:\n output_string += '########## UPPER FACE ##########\\n\\n'\n output_string += 'N_section\\t\\tN_point\\t\\t\\tX_crds\\t\\t\\t\\t'\n output_string += 'Y_crds\\t\\t\\t\\t\\tZ_crds\\t\\t\\t\\t\\tDISTANCE'\n output_string += '\\n\\n'\n for i in range(self.n_sections):\n alength = len(self.blade_coordinates_up[i][0])\n for j in range(alength):\n vertex = BRepBuilderAPI_MakeVertex(\n gp_Pnt(\n 1000 * self.blade_coordinates_up[i][0][j],\n 1000 * self.blade_coordinates_up[i][1][j], 1000\n * self.blade_coordinates_up[i][2][j])).Vertex()\n projection = BRepExtrema_DistShapeShape(\n self.generated_upper_face, vertex)\n projection.Perform()\n output_string += str(\n i) + '\\t\\t\\t' + str(j) + '\\t\\t\\t' + str(\n 1000 *\n self.blade_coordinates_up[i][0][j]) + '\\t\\t\\t'\n output_string += str(\n 1000 * self.blade_coordinates_up[i][1]\n [j]) + '\\t\\t\\t' + str(\n 1000 * self.blade_coordinates_up[i][2]\n [j]) + '\\t\\t\\t' + str(projection.Value())\n output_string += '\\n'\n\n if lower_face:\n output_string += '########## LOWER FACE ##########\\n\\n'\n output_string += 'N_section\\t\\tN_point\\t\\t\\tX_crds\\t\\t\\t\\t'\n output_string += 'Y_crds\\t\\t\\t\\t\\tZ_crds\\t\\t\\t\\t\\tDISTANCE'\n output_string += '\\n\\n'\n for i in range(self.n_sections):\n alength = len(self.blade_coordinates_down[i][0])\n for j in range(alength):\n vertex = BRepBuilderAPI_MakeVertex(\n gp_Pnt(\n 1000 * self.blade_coordinates_down[i][0][j],\n 1000 * self.blade_coordinates_down[i][1][j],\n 1000 *\n self.blade_coordinates_down[i][2][j])).Vertex()\n projection = BRepExtrema_DistShapeShape(\n self.generated_lower_face, vertex)\n projection.Perform()\n output_string += str(\n i) + '\\t\\t\\t' + str(j) + '\\t\\t\\t' + str(\n 1000 *\n self.blade_coordinates_down[i][0][j]) + '\\t\\t\\t'\n output_string += str(\n 1000 * self.blade_coordinates_down[i][1]\n [j]) + '\\t\\t\\t' + str(\n 1000 * self.blade_coordinates_down[i][2]\n [j]) + '\\t\\t\\t' + str(projection.Value())\n output_string += '\\n'\n f.write(output_string)\n\n def generate_iges(self,\n upper_face=None,\n lower_face=None,\n tip=None,\n root=None,\n max_deg=1,\n display=False,\n errors=None):\n \"\"\"\n Generate and export the .iges CAD for the blade upper face, lower face,\n tip and root. This method requires PythonOCC (7.4.0) to be installed.\n\n :param string upper_face: if string is passed then the method generates\n the blade upper surface using the BRepOffsetAPI_ThruSections\n algorithm, then exports the generated CAD into .iges file holding\n the name <upper_face_string>.iges. Default value is None\n :param string lower_face: if string is passed then the method generates\n the blade lower surface using the BRepOffsetAPI_ThruSections\n algorithm, then exports the generated CAD into .iges file holding\n the name <lower_face_string>.iges. Default value is None\n :param string tip: if string is passed then the method generates\n the blade tip using the BRepOffsetAPI_ThruSections algorithm\n in order to close the blade at the tip, then exports the generated \n CAD into .iges file holding the name <tip_string>.iges. 
\n Default value is None\n :param string root: if string is passed then the method generates\n the blade root using the BRepOffsetAPI_ThruSections algorithm\n in order to close the blade at the root, then exports the generated \n CAD into .iges file holding the name <tip_string>.iges. \n Default value is None\n :param int max_deg: Define the maximal U degree of generated surface.\n Default value is 1\n :param bool display: if True, then display the generated CAD. Default\n value is False\n :param string errors: if string is passed then the method writes out\n the distances between each discrete point used to construct the\n blade and the nearest point on the CAD that is perpendicular to\n that point. Default value is None\n\n We note that the blade object must have its radial sections be arranged\n in order from the blade root to the blade tip, so that generate_iges\n method can build the CAD surface that passes through the corresponding\n airfoils. Also to be able to identify and close the blade tip and root.\n \"\"\"\n\n from OCC.Core.IGESControl import IGESControl_Writer\n from OCC.Display.SimpleGui import init_display\n\n if max_deg <= 0:\n raise ValueError('max_deg argument must be a positive integer.')\n\n if upper_face:\n self._check_string(filename=upper_face)\n self._generate_upper_face(max_deg=max_deg)\n # Write IGES\n iges_writer = IGESControl_Writer()\n iges_writer.AddShape(self.generated_upper_face)\n iges_writer.Write(upper_face + '.iges')\n\n if lower_face:\n self._check_string(filename=lower_face)\n self._generate_lower_face(max_deg=max_deg)\n # Write IGES\n iges_writer = IGESControl_Writer()\n iges_writer.AddShape(self.generated_lower_face)\n iges_writer.Write(lower_face + '.iges')\n\n if tip:\n self._check_string(filename=tip)\n self._generate_tip(max_deg=max_deg)\n # Write IGES\n iges_writer = IGESControl_Writer()\n iges_writer.AddShape(self.generated_tip)\n iges_writer.Write(tip + '.iges')\n\n if root:\n self._check_string(filename=root)\n self._generate_root(max_deg=max_deg)\n # Write IGES\n iges_writer = IGESControl_Writer()\n iges_writer.AddShape(self.generated_root)\n iges_writer.Write(root + '.iges')\n\n if errors:\n # Write out errors between discrete points and constructed faces\n self._check_string(filename=errors)\n self._check_errors(upper_face=upper_face, lower_face=lower_face)\n\n self._write_blade_errors(\n upper_face=upper_face, lower_face=lower_face, errors=errors)\n\n if display:\n display, start_display, add_menu, add_function_to_menu = init_display(\n )\n\n ## DISPLAY FACES\n if upper_face:\n display.DisplayShape(self.generated_upper_face, update=True)\n if lower_face:\n display.DisplayShape(self.generated_lower_face, update=True)\n if tip:\n display.DisplayShape(self.generated_tip, update=True)\n if root:\n display.DisplayShape(self.generated_root, update=True)\n start_display()\n\n def generate_blade_solid(self,\n max_deg=1,\n display=False,\n errors=None):\n \"\"\"\n Generate a solid blade assembling the upper face, lower face, tip and\n root using the BRepBuilderAPI_MakeSolid algorithm. \n This method requires PythonOCC (7.4.0) to be installed.\n\n :param int max_deg: Define the maximal U degree of generated surface.\n Default value is 1\n :param bool display: if True, then display the generated CAD. Default\n value is False\n :param string errors: if string is passed then the method writes out\n the distances between each discrete point used to construct the\n blade and the nearest point on the CAD that is perpendicular to\n that point. 
Default value is None\n :raises RuntimeError: if the assembling of the solid blade is not \n completed successfully\n \"\"\"\n from OCC.Core.IGESControl import IGESControl_Writer\n from OCC.Display.SimpleGui import init_display\n from OCC.Core.TopoDS import TopoDS_Shape, TopoDS_Shell\n import OCC.Core.TopoDS\n\n if max_deg <= 0:\n raise ValueError('max_deg argument must be a positive integer.')\n\n self._generate_upper_face(max_deg=max_deg)\n self._generate_lower_face(max_deg=max_deg)\n self._generate_tip(max_deg=max_deg)\n self._generate_root(max_deg=max_deg)\n\n if errors:\n # Write out errors between discrete points and constructed faces\n self._check_string(filename=errors)\n self._check_errors(upper_face=upper_face, lower_face=lower_face)\n\n self._write_blade_errors(\n upper_face=upper_face, lower_face=lower_face, errors=errors)\n\n if display:\n display, start_display, add_menu, add_function_to_menu = init_display(\n )\n\n ## DISPLAY FACES\n display.DisplayShape(self.generated_upper_face, update=True)\n display.DisplayShape(self.generated_lower_face, update=True)\n display.DisplayShape(self.generated_tip, update=True)\n display.DisplayShape(self.generated_root, update=True)\n start_display()\n\n sewer = BRepBuilderAPI_Sewing(1e-2)\n sewer.Add(self.generated_upper_face)\n sewer.Add(self.generated_lower_face)\n sewer.Add(self.generated_tip)\n sewer.Add(self.generated_root)\n sewer.Perform()\n result_shell = sewer.SewedShape()\n solid_maker = BRepBuilderAPI_MakeSolid()\n solid_maker.Add(OCC.Core.TopoDS.topods_Shell(result_shell))\n if not solid_maker.IsDone():\n raise RuntimeError('Unsuccessful assembling of solid blade')\n result_solid = solid_maker.Solid()\n \treturn result_solid \n\n def generate_stl_smesh(self, min_length=None, max_length=None, outfile_stl=None):\n \"\"\"\n Generate and export the .STL surface mesh for the blade as a whole,\n including the upper face, lower face and tip. The method utilizes\n modules from OCC SMESH which is standalone mesh framework based on\n SALOME mesher project. Please refer to https://github.com/tpaviot\n and http://docs.salome-platform.org/7/gui/SMESH/index.html for\n further details.\n\n This method requires PythonOCC and SMESH to be installed.\n\n :param double min_length: smallest distance between two nodes. Default\n value is None\n :param double max_length: largest distance between two nodes. Default\n value is None\n :param string outfile_stl: if string is passed then the method exports\n the generated 2D surface mesh into .stl file holding the name\n <outfile_stl>.stl. Default value is None\n\n We note that since the current implementation performs triangulation\n based on a topological compound that combines the blade 3 generated\n shapes without \"fusion\", it may happen that the generated triangulation\n of the upper and lower blade faces do not share the same exact nodes\n on the joint edge/wire resulting from the faces intersection. The\n current implementation can be enough for visualization purpose. However\n if the generated mesh is intended for computational analysis then a\n manual mesh healing is recommended by the user (e.g. 
see\n \"Repair > Sewing\" in SALOME GUI) for a proper mesh closure.\n \"\"\"\n from OCC.SMESH import SMESH_Gen\n from OCC.StdMeshers import (\n StdMeshers_Arithmetic1D, StdMeshers_TrianglePreference,\n StdMeshers_Regular_1D, StdMeshers_MEFISTO_2D)\n from OCC.Core.BRep import BRep_Builder\n from OCC.Core.TopoDS import TopoDS_Shape, TopoDS_Compound\n\n if min_length <= 0 or max_length <= 0:\n raise ValueError('min_length and max_length must be positive.')\n if min_length >= max_length:\n raise ValueError('min_length can not be greater than max_length')\n\n # First we check that blade shapes are generated, otherwise we generate\n # them. After that we combine the generated_upper_face,\n # generated_lower_face, and generated_tip into a topological compound\n # that we use to compute the surface mesh\n if (self.generated_upper_face is None) or not isinstance(\n self.generated_upper_face, TopoDS_Shape):\n # Upper face is generated with a maximal U degree = 1\n self._generate_upper_face(max_deg=1)\n if (self.generated_lower_face is None) or not isinstance(\n self.generated_lower_face, TopoDS_Shape):\n # Upper face is generated with a maximal U degree = 1\n self._generate_lower_face(max_deg=1)\n if (self.generated_tip is None) or not isinstance(\n self.generated_tip, TopoDS_Shape):\n # Upper face is generated with a maximal U degree = 1\n self._generate_tip(max_deg=1)\n\n # Now we regroup all the shapes into a TopoDS_Compound\n aCompound = TopoDS_Compound()\n aBuilder = BRep_Builder()\n aBuilder.MakeCompound(aCompound)\n # Add shapes\n aBuilder.Add(aCompound, self.generated_upper_face)\n aBuilder.Add(aCompound, self.generated_lower_face)\n aBuilder.Add(aCompound, self.generated_tip)\n\n # In the following we build the surface mesh according to the given\n # hypotheses\n aMeshGen = SMESH_Gen()\n aMesh = aMeshGen.CreateMesh(0, True)\n # Adding 1D hypothesis and algorithms\n # Wire discretization. Nodes are distributed based on Arithmetic1D\n # hypothesis which allows to split edges into segments with a length\n # that changes in arithmetic progression (Lk = Lk-1 + d) beginning\n # from a given min length and up to a given max length. More about\n # 1D hypotheses can be viewed through:\n # http://docs.salome-platform.org/7/gui/SMESH/a1d_meshing_hypo_page.html\n an1DHypothesis = StdMeshers_Arithmetic1D(0, 0, aMeshGen)\n # Smallest distance between 2 points\n an1DHypothesis.SetLength(min_length, False)\n # Longest distance between 2 points\n an1DHypothesis.SetLength(max_length, True)\n # Regular Interpolation\n an1DAlgo = StdMeshers_Regular_1D(1, 0, aMeshGen)\n # Adding 2D hypothesis and algorithms\n # 2D surface mesh -- Triangulations\n a2dHypothseis = StdMeshers_TrianglePreference(2, 0, aMeshGen)\n a2dAlgo = StdMeshers_MEFISTO_2D(3, 0, aMeshGen)\n\n #Calculate mesh for the topological compound containing the 3 shapes\n aMesh.ShapeToMesh(aCompound)\n\n #Assign hyptothesis to mesh\n aMesh.AddHypothesis(aCompound, 0)\n aMesh.AddHypothesis(aCompound, 1)\n aMesh.AddHypothesis(aCompound, 2)\n aMesh.AddHypothesis(aCompound, 3)\n\n if outfile_stl is not None:\n if not isinstance(outfile_stl, str):\n raise ValueError('outfile_stl must be a valid string.')\n\n #Compute the data\n aMeshGen.Compute(aMesh, aMesh.GetShapeToMesh())\n # Export STL\n aMesh.ExportSTL(outfile_stl + '.stl', False)\n\n def generate_stl(self, upper_face=None,\n lower_face=None,\n tip=None,\n root=None,\n max_deg=1, \n display=False, \n errors=None):\n \"\"\"\n Generate and export the .STL files for upper face, lower face, tip\n and root. 
This method requires PythonOCC (7.4.0) to be installed.\n\n :param string upper_face: if string is passed then the method generates\n the blade upper surface using the BRepOffsetAPI_ThruSections\n algorithm, then exports the generated CAD into .stl file holding\n the name <upper_face_string>.stl. Default value is None\n :param string lower_face: if string is passed then the method generates\n the blade lower surface using the BRepOffsetAPI_ThruSections\n algorithm, then exports the generated CAD into .stl file holding\n the name <lower_face_string>.stl. Default value is None\n :param string tip: if string is passed then the method generates\n the blade tip using the BRepOffsetAPI_ThruSections algorithm\n in order to close the blade at the tip, then exports the generated \n CAD into .stl file holding the name <tip_string>.stl. \n Default value is None\n :param string root: if string is passed then the method generates\n the blade root using the BRepOffsetAPI_ThruSections algorithm\n in order to close the blade at the root, then exports the generated \n CAD into .stl file holding the name <tip_string>.stl. \n Default value is None\n :param int max_deg: Define the maximal U degree of generated surface.\n Default value is 1\n :param bool display: if True, then display the generated CAD. Default\n value is False\n :param string errors: if string is passed then the method writes out\n the distances between each discrete point used to construct the\n blade and the nearest point on the CAD that is perpendicular to\n that point. Default value is None\n\n We note that the blade object must have its radial sections be arranged\n in order from the blade root to the blade tip, so that generate_stl\n method can build the CAD surface that passes through the corresponding\n airfoils. 
Also to be able to identify and close the blade tip and root.\n \"\"\"\n\n import os\n from OCC.Extend.DataExchange import write_stl_file\n from OCC.Display.SimpleGui import init_display\n\n if upper_face:\n self._check_string(filename=upper_face)\n self._generate_upper_face(max_deg=max_deg)\n # Write STL\n write_stl_file(self.generated_upper_face, upper_face + '.stl')\n\n if lower_face:\n self._check_string(filename=lower_face)\n self._generate_lower_face(max_deg=max_deg)\n # Write STL\n write_stl_file(self.generated_lower_face, lower_face + '.stl')\n\n if tip:\n self._check_string(filename=tip)\n self._generate_tip(max_deg=max_deg)\n # Write STL\n write_stl_file(self.generated_tip, tip + '.stl')\n\n if root:\n self._check_string(filename=root)\n self._generate_root(max_deg=max_deg)\n # Write STL\n write_stl_file(self.generated_root, root + '.stl')\n\n if errors:\n # Write out errors between discrete points and constructed faces\n self._check_string(filename=errors)\n self._check_errors(upper_face=upper_face, lower_face=lower_face)\n\n self._write_blade_errors(\n upper_face=upper_face, lower_face=lower_face, errors=errors)\n\n if display:\n display, start_display, add_menu, add_function_to_menu = init_display(\n )\n\n ## DISPLAY FACES\n if upper_face:\n display.DisplayShape(self.generated_upper_face, update=True)\n if lower_face:\n display.DisplayShape(self.generated_lower_face, update=True)\n if tip:\n display.DisplayShape(self.generated_tip, update=True)\n if root:\n display.DisplayShape(self.generated_root, update=True)\n start_display()\n\n\n\n @staticmethod\n def _check_string(filename):\n \"\"\"\n Private method to check if the parameter type is string\n\n :param string filename: filename of the generated .iges surface\n \"\"\"\n if not isinstance(filename, str):\n raise TypeError('IGES filename must be a valid string.')\n\n @staticmethod\n def _check_errors(upper_face, lower_face):\n \"\"\"\n Private method to check if either the blade upper face or lower face\n is passed in the generate_iges method. Otherwise it raises an exception\n\n :param string upper_face: blade upper face.\n :param string lower_face: blade lower face.\n \"\"\"\n if not (upper_face or lower_face):\n raise ValueError(\n 'Either upper_face or lower_face must not be None.')\n\n def _abs_to_norm(self, D_prop):\n \"\"\"\n Private method to normalize the blade parameters.\n\n :param float D_prop: propeller diameter\n \"\"\"\n self.radii = self.radii * 2. / D_prop\n self.chord_lengths = self.chord_lengths / D_prop\n self.pitch = self.pitch / D_prop\n self.rake = self.rake / D_prop\n\n def _norm_to_abs(self, D_prop):\n \"\"\"\n Private method that converts the normalized blade parameters into the\n actual values.\n\n :param float D_prop: propeller diameter\n \"\"\"\n self.radii = self.radii * D_prop / 2.\n self.chord_lengths = self.chord_lengths * D_prop\n self.pitch = self.pitch * D_prop\n self.rake = self.rake * D_prop\n\n def export_ppg(self,\n filename='data_out.ppg',\n D_prop=0.25,\n D_hub=0.075,\n n_blades=5,\n params_normalized=False):\n \"\"\"\n Export the generated blade parameters and sectional profiles into\n .ppg format.\n\n :param string filename: name of the exported file. 
Default is\n 'data/data_out.ppg'\n :param float D_prop: propeller diameter\n :param float D_hub: hub diameter\n :param float n_blades: number of blades\n :param bool params_normalized: since the standard .ppg format contains\n the blade parameters in the normalized form, therefore the user\n needs to inform whether the provided parameters (from the class\n Blade) are normalized or not. By default the argument is set to\n False, which assumes the user provides the blade parameters in\n their actual values, i.e. not normalized, hence a normalization\n operation needs to be applied so as to follow the .ppg standard\n format.\n \"\"\"\n thickness = np.zeros(self.n_sections)\n camber = np.zeros(self.n_sections)\n for i, section in enumerate(self.sections):\n # Evaluate maximum profile thickness and camber for each section.\n # We assume at the current step, that sectional profiles already\n # have the coordinates (x_up,x_down) normalized by chord length (C)\n # and subsequently (y_up,y_down) are also scaled. This implies that\n # the computed thickness and camber are given in their normalized\n # form, i.e. thickness=t/C and camber=f/C.\n thickness[i] = section.max_thickness()\n camber[i] = section.max_camber()\n\n if params_normalized is False:\n # Put the parameters (radii, chord, pitch, rake) in the normalized\n # form.\n self._abs_to_norm(D_prop=D_prop)\n\n output_string = \"\"\n output_string += 'propeller id = SVA\\n'\n output_string += 'propeller diameter = ' + str(D_prop) + '\\n'\n output_string += 'hub diameter = ' + str(D_hub) + '\\n'\n output_string += 'number of blades = ' + str(n_blades) + '\\n'\n output_string += \"'Elica PPTC workshop'\\n\"\n output_string += 'number of radial sections = ' + str(\n self.n_sections) + '\\n'\n output_string += 'number of radial sections = ' + str(\n self.n_sections) + '\\n'\n output_string += 'number of sectional profiles = ' + str(\n self.n_sections) + '\\n'\n output_string += 'description of sectional profiles = BNF\\n'\n output_string += ' r/R c/D skew[deg]'\\\n ' rake/D P/D t/C'\\\n ' f/C\\n'\n for i in range(self.n_sections):\n output_string += ' ' + str(\"%.8e\" % self.radii[i]) + ' ' + str(\n \"%.8e\" % self.chord_lengths[i]) + ' ' + str(\n \"%.8e\" % self.skew_angles[i]) + ' ' + str(\n \"%.8e\" % self.rake[i])\n output_string += ' ' + str(\"%.8e\" % self.pitch[i]) + ' ' + str(\n \"%.8e\" % thickness[i]) + ' ' + str(\"%.8e\" % camber[i]) + '\\n'\n\n for i in range(self.n_sections):\n output_string += str(\"%.8e\" % self.radii[i]) + ' ' + str(\n len(self.sections[i].xup_coordinates)) + '\\n'\n\n for value in self.sections[i].xup_coordinates:\n output_string += ' ' + str(\"%.8e\" % value)\n output_string += ' \\n'\n for value in self.sections[i].yup_coordinates:\n output_string += ' ' + str(\"%.8e\" % value)\n output_string += ' \\n'\n for value in self.sections[i].ydown_coordinates:\n output_string += ' ' + str(\"%.8e\" % value)\n output_string += ' \\n'\n\n hub_offsets = np.asarray(\n [[-3.0, 0.305], [-0.57, 0.305], [-0.49, 0.305], [-0.41, 0.305],\n [-0.33, 0.305], [-0.25, 0.305], [-0.17, 0.305], [0.23, 0.305],\n [0.31, 0.285], [0.39, 0.2656], [0.47, 0.2432], [0.55, 0.2124],\n [0.63, 0.1684], [0.71, 0.108], [0.79, 0.0]])\n\n output_string += 'number of Hub offsets = ' + str(\n len(hub_offsets)) + '\\n'\n\n for i, offset in enumerate(hub_offsets):\n if i == len(hub_offsets) - 1:\n output_string += str(\"%.8e\" % offset[0]) + ' ' + str(\n \"%.8e\" % hub_offsets[i][1])\n continue\n output_string += str(\"%.8e\" % offset[0]) + ' ' + str(\n 
\"%.8e\" % offset[1]) + '\\n'\n\n with open(filename, 'w') as f:\n f.write(output_string)\n\n if params_normalized is False:\n # Revert back normalized parameters into actual values.\n self._norm_to_abs(D_prop=D_prop)\n\n def __str__(self):\n \"\"\"\n This method prints all the parameters on the screen. Its purpose is\n for debugging.\n \"\"\"\n string = ''\n string += 'Blade number of sections = {}'.format(self.n_sections)\n string += '\\nBlade radii sections = {}'.format(self.radii)\n string += '\\nChord lengths of the sectional profiles'\\\n ' = {}'.format(self.chord_lengths)\n string += '\\nRadial distribution of the pitch (in unit lengths)'\\\n ' = {}'.format(self.pitch)\n string += '\\nRadial distribution of the rake (in unit length)'\\\n ' = {}'.format(self.rake)\n string += '\\nRadial distribution of the skew angles'\\\n ' (in degrees) = {}'.format(self.skew_angles)\n string += '\\nPitch angles (in radians) for the'\\\n ' sections = {}'.format(self.pitch_angles)\n string += '\\nInduced rake from skew (in unit length)'\\\n ' for the sections = {}'.format(self.induced_rake)\n return string"
] | [
[
"numpy.sin",
"numpy.dot",
"numpy.array",
"numpy.asarray",
"numpy.zeros",
"matplotlib.pyplot.savefig",
"numpy.tan",
"matplotlib.pyplot.figure",
"numpy.radians",
"numpy.arctan",
"numpy.vstack",
"numpy.cos",
"matplotlib.pyplot.axis"
]
] |
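The entry above ends with the blade CAD-export methods (`generate_iges`, `generate_stl`, `export_ppg`). As a rough usage sketch only, under the assumption that the module is importable as `blade` exactly as in its own docstring examples, and with placeholder array values that are not taken from the repository, the workflow could look like this:

```python
# Illustrative sketch; `blade` import name and all array values are assumptions.
import numpy as np
import blade  # assumed module name, matching the docstring examples above

radii = np.linspace(0.4, 2.0, 10)   # made-up radial stations, root to tip
chord = np.linspace(0.5, 0.2, 10)   # made-up chord lengths
pitch = np.full(10, 1.0)            # made-up pitch distribution
rake = np.zeros(10)
skew = np.zeros(10)

sections = np.asarray([blade.NacaProfile(digits='0012') for _ in range(10)])
prop_blade = blade.Blade(sections=sections, radii=radii, chord_lengths=chord,
                         pitch=pitch, rake=rake, skew_angles=skew)
prop_blade.apply_transformations()

# Export the four surfaces to IGES files (requires PythonOCC 7.4.0, as noted above).
prop_blade.generate_iges(upper_face='upper', lower_face='lower',
                         tip='tip', root='root', max_deg=1)
```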
Jiajie-Mei/tensorflow-generative-model-collections | [
"028d8e8aaf9d9954858cfbe33f57426a3e976468"
] | [
"snippets/scipy_and_iamgeio.py"
] | [
"from scipy.misc import imsave, imread\nimport imageio\nimport numpy as np\nimport pickle\nimport tensorflow as tf\n\n\n# array = imread('23.png')\n# array = array.astype(np.float64) / 255.0\narray = pickle.load(open('pixels.pickle', 'rb'))\nprint(array, array.dtype)\nprint(np.max(array), np.mean(array), np.min(array))\nimsave('scipy.png', array)\nimageio.imwrite('imageio.png', array)\n\narray2 = imageio.imread('imageio.png')\nprint(array2)\narray3 = array2 / 255.0\n\nprint(np.max(np.abs(array - array3)))\n\n\nimageio.imwrite('new_imageio.png', np.clip(2 * array3 - 1, 0., 1.))\n"
] | [
[
"numpy.max",
"numpy.min",
"numpy.mean",
"numpy.abs",
"numpy.clip",
"scipy.misc.imsave"
]
] |
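The snippet above bounds the round-trip error between a saved 8-bit image and its re-read float version. A minimal, self-contained variant of the same check using only imageio and numpy, with a random placeholder image instead of the pickled `pixels.pickle`, might look like this:

```python
# Illustrative round-trip check in the spirit of the snippet above; file name is a placeholder.
import imageio
import numpy as np

array = np.random.rand(64, 64, 3)                 # float image in [0, 1]
imageio.imwrite('roundtrip.png', (array * 255).astype(np.uint8))

array2 = imageio.imread('roundtrip.png') / 255.0  # back to floats in [0, 1]
print(np.max(np.abs(array - array2)))             # bounded by the 8-bit quantization step
```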
yashppawar/Fun-Effects | [
"7e1ad63b552c05455245b31f063f7c26d77d8255"
] | [
"helper/face_detector.py"
] | [
"\nimport cv2\nimport numpy as np\n\ndef get_face_detector():\n \n modelFile = \"models/res10_300x300_ssd_iter_140000.caffemodel\"\n configFile = \"models/deploy.prototxt\"\n model = cv2.dnn.readNetFromCaffe(configFile, modelFile)\n \n return model\n\ndef find_faces(img, model):\n \n h, w = img.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0,\n\t(300, 300), (104.0, 177.0, 123.0))\n model.setInput(blob)\n res = model.forward()\n faces = []\n for i in range(res.shape[2]):\n confidence = res[0, 0, i, 2]\n if confidence > 0.5:\n box = res[0, 0, i, 3:7] * np.array([w, h, w, h])\n (x, y, x1, y1) = box.astype(\"int\")\n faces.append([x, y, x1, y1])\n return faces\n\ndef draw_faces(img, faces):\n \n for x, y, x1, y1 in faces:\n img = cv2.rectangle(img, (x, y), (x1, y1), (255, 0, 25), 2)\n return img "
] | [
[
"numpy.array"
]
] |
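The face-detector helpers above expose three functions. A short usage sketch, assuming the module can be imported from its path `helper/face_detector.py`, that the Caffe model files exist under `models/`, and with a placeholder image path:

```python
# Illustrative usage of the helpers above; import path and file names are assumptions.
import cv2
from helper.face_detector import get_face_detector, find_faces, draw_faces

model = get_face_detector()        # loads the Caffe SSD from the models/ folder
img = cv2.imread('photo.jpg')      # placeholder image path (BGR image)
faces = find_faces(img, model)     # list of [x, y, x1, y1] boxes with confidence > 0.5
cv2.imwrite('photo_faces.jpg', draw_faces(img, faces))
```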
dewyeon/toy2d | [
"e84f1b8b951bb1e85cb38ce5c4aae8734d6ed7de"
] | [
"baselines/csflow/custom_datasets/loader.py"
] | [
"import os\nfrom PIL import Image\nimport numpy as np\nimport torch\nfrom torchvision.io import read_video, write_jpeg\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms as T\n\n\n__all__ = ('MVTecDataset', 'StcDataset')\n\n\n# URL = 'ftp://guest:[email protected]/mvtec_anomaly_detection/mvtec_anomaly_detection.tar.xz'\nMVTEC_CLASS_NAMES = ['bottle', 'cable', 'capsule', 'carpet', 'grid',\n 'hazelnut', 'leather', 'metal_nut', 'pill', 'screw',\n 'tile', 'toothbrush', 'transistor', 'wood', 'zipper']\n\nSTC_CLASS_NAMES = ['01', '02', '03', '04', '05', '06', \n '07', '08', '09', '10', '11', '12'] #, '13' - no ground-truth]\n\n\nclass StcDataset(Dataset):\n def __init__(self, c, is_train=True):\n assert c.class_name in STC_CLASS_NAMES, 'class_name: {}, should be in {}'.format(c.class_name, STC_CLASS_NAMES)\n self.class_name = c.class_name\n self.is_train = is_train\n self.cropsize = c.crp_size\n #\n if is_train:\n self.dataset_path = os.path.join(c.data_path, 'training')\n self.dataset_vid = os.path.join(self.dataset_path, 'videos')\n self.dataset_dir = os.path.join(self.dataset_path, 'frames')\n self.dataset_files = sorted([f for f in os.listdir(self.dataset_vid) if f.startswith(self.class_name)])\n if not os.path.isdir(self.dataset_dir):\n os.mkdir(self.dataset_dir)\n done_file = os.path.join(self.dataset_path, 'frames_{}.pt'.format(self.class_name))\n print(done_file)\n H, W = 480, 856\n if os.path.isfile(done_file):\n assert torch.load(done_file) == len(self.dataset_files), 'train frames are not processed!'\n else:\n count = 0\n for dataset_file in self.dataset_files:\n print(dataset_file)\n data = read_video(os.path.join(self.dataset_vid, dataset_file)) # read video file entirely -> mem issue!!!\n vid = data[0] # weird read_video that returns byte tensor in format [T,H,W,C]\n fps = data[2]['video_fps']\n print('video mu/std: {}/{} {}'.format(torch.mean(vid/255.0, (0,1,2)), torch.std(vid/255.0, (0,1,2)), vid.shape))\n assert [H, W] == [vid.size(1), vid.size(2)], 'same H/W'\n dataset_file_dir = os.path.join(self.dataset_dir, os.path.splitext(dataset_file)[0])\n os.mkdir(dataset_file_dir)\n count = count + 1\n for i, frame in enumerate(vid):\n filename = '{0:08d}.jpg'.format(i)\n write_jpeg(frame.permute((2, 0, 1)), os.path.join(dataset_file_dir, filename), 80)\n torch.save(torch.tensor(count), done_file)\n #\n self.x, self.y, self.mask = self.load_dataset_folder()\n else:\n self.dataset_path = os.path.join(c.data_path, 'testing')\n self.x, self.y, self.mask = self.load_dataset_folder()\n\n # set transforms\n if is_train:\n self.transform_x = T.Compose([\n T.Resize(c.img_size, Image.ANTIALIAS),\n T.RandomRotation(5),\n T.CenterCrop(c.crp_size),\n T.ToTensor()])\n # test:\n else:\n self.transform_x = T.Compose([\n T.Resize(c.img_size, Image.ANTIALIAS),\n T.CenterCrop(c.crp_size),\n T.ToTensor()])\n # mask\n self.transform_mask = T.Compose([\n T.ToPILImage(),\n T.Resize(c.img_size, Image.NEAREST),\n T.CenterCrop(c.crp_size),\n T.ToTensor()])\n\n self.normalize = T.Compose([T.Normalize(c.norm_mean, c.norm_std)])\n\n def __getitem__(self, idx):\n x, y, mask = self.x[idx], self.y[idx], self.mask[idx]\n x = Image.open(x).convert('RGB')\n x = self.normalize(self.transform_x(x))\n if y == 0: #self.is_train:\n mask = torch.zeros([1, self.cropsize[0], self.cropsize[1]])\n else:\n mask = self.transform_mask(mask)\n #\n return x, y, mask\n\n def __len__(self):\n return len(self.x)\n\n def load_dataset_folder(self):\n phase = 'train' if self.is_train else 'test'\n x, y, mask = 
list(), list(), list()\n img_dir = os.path.join(self.dataset_path, 'frames')\n img_types = sorted([f for f in os.listdir(img_dir) if f.startswith(self.class_name)])\n gt_frame_dir = os.path.join(self.dataset_path, 'test_frame_mask')\n gt_pixel_dir = os.path.join(self.dataset_path, 'test_pixel_mask')\n for i, img_type in enumerate(img_types):\n print('Folder:', img_type)\n # load images\n img_type_dir = os.path.join(img_dir, img_type)\n img_fpath_list = sorted([os.path.join(img_type_dir, f) for f in os.listdir(img_type_dir) if f.endswith('.jpg')])\n x.extend(img_fpath_list)\n # labels for every test image\n if phase == 'test':\n gt_pixel = np.load('{}.npy'.format(os.path.join(gt_pixel_dir, img_type)))\n gt_frame = np.load('{}.npy'.format(os.path.join(gt_frame_dir, img_type)))\n if i == 0:\n m = gt_pixel\n y = gt_frame\n else:\n m = np.concatenate((m, gt_pixel), axis=0)\n y = np.concatenate((y, gt_frame), axis=0)\n #\n mask = [e for e in m] # np.expand_dims(e, axis=0)\n assert len(x) == len(y), 'number of x and y should be same'\n assert len(x) == len(mask), 'number of x and mask should be same'\n else:\n mask.extend([None] * len(img_fpath_list))\n y.extend([0] * len(img_fpath_list))\n #\n return list(x), list(y), list(mask)\n\n\nclass MVTecDataset(Dataset):\n def __init__(self, c, is_train=True):\n assert c.class_name in MVTEC_CLASS_NAMES, 'class_name: {}, should be in {}'.format(c.class_name, MVTEC_CLASS_NAMES)\n self.dataset_path = c.data_path\n self.class_name = c.class_name\n self.is_train = is_train\n self.cropsize = c.crp_size\n # load dataset\n self.x, self.y, self.mask = self.load_dataset_folder()\n # set transforms\n if is_train:\n self.transform_x = T.Compose([\n T.Resize(c.img_size, Image.ANTIALIAS),\n T.RandomRotation(5),\n T.CenterCrop(c.crp_size),\n T.ToTensor()])\n # test:\n else:\n self.transform_x = T.Compose([\n T.Resize(c.img_size, Image.ANTIALIAS),\n T.CenterCrop(c.crp_size),\n T.ToTensor()])\n # mask\n self.transform_mask = T.Compose([\n T.Resize(c.img_size, Image.NEAREST),\n T.CenterCrop(c.crp_size),\n T.ToTensor()])\n\n self.normalize = T.Compose([T.Normalize(c.norm_mean, c.norm_std)])\n\n def __getitem__(self, idx):\n x, y, mask = self.x[idx], self.y[idx], self.mask[idx]\n #x = Image.open(x).convert('RGB')\n x = Image.open(x)\n if self.class_name in ['zipper', 'screw', 'grid']: # handle greyscale classes\n x = np.expand_dims(np.array(x), axis=2)\n x = np.concatenate([x, x, x], axis=2)\n \n x = Image.fromarray(x.astype('uint8')).convert('RGB')\n #\n x = self.normalize(self.transform_x(x))\n #\n if y == 0:\n mask = torch.zeros([1, self.cropsize[0], self.cropsize[1]])\n else:\n mask = Image.open(mask)\n mask = self.transform_mask(mask)\n\n return x, y, mask\n\n def __len__(self):\n return len(self.x)\n\n def load_dataset_folder(self):\n phase = 'train' if self.is_train else 'test'\n x, y, mask = [], [], []\n\n img_dir = os.path.join(self.dataset_path, self.class_name, phase)\n gt_dir = os.path.join(self.dataset_path, self.class_name, 'ground_truth')\n\n img_types = sorted(os.listdir(img_dir))\n for img_type in img_types:\n\n # load images\n img_type_dir = os.path.join(img_dir, img_type)\n if not os.path.isdir(img_type_dir):\n continue\n img_fpath_list = sorted([os.path.join(img_type_dir, f)\n for f in os.listdir(img_type_dir)\n if f.endswith('.png')])\n x.extend(img_fpath_list)\n\n # load gt labels\n if img_type == 'good':\n y.extend([0] * len(img_fpath_list))\n mask.extend([None] * len(img_fpath_list))\n else:\n y.extend([1] * len(img_fpath_list))\n gt_type_dir = 
os.path.join(gt_dir, img_type)\n img_fname_list = [os.path.splitext(os.path.basename(f))[0] for f in img_fpath_list]\n gt_fpath_list = [os.path.join(gt_type_dir, img_fname + '_mask.png')\n for img_fname in img_fname_list]\n mask.extend(gt_fpath_list)\n\n assert len(x) == len(y), 'number of x and y should be same'\n\n return list(x), list(y), list(mask)\n \nclass MultiModal_MVTecDataset(Dataset):\n\tdef __init__(self, c, is_train=True):\n\t\tindex_list = [int(i.strip()) for i in c.class_indexes.split(',')]\n\t\tfor idx in index_list:\n\t\t\tassert idx in range(15), 'class_index: {}, should be between 0 to 14'.format(idx)\n\t\t\n\t\tself.dataset_path = c.data_path\n\t\tself.class_indexes = index_list\n\t\tself.is_train = is_train\n\t\tself.cropsize = c.crp_size\n\t\t# load dataset\n\t\tself.x, self.y, self.mask = self.load_dataset_folder()\n\t\t# set transforms\n\t\tif is_train:\n\t\t\tself.transform_x = T.Compose([\n\t\t\t\tT.Resize(c.img_size, Image.ANTIALIAS),\n\t\t\t\tT.RandomRotation(5),\n\t\t\t\tT.CenterCrop(c.crp_size),\n\t\t\t\tT.ToTensor()])\n\t\t# test:\n\t\telse:\n\t\t\tself.transform_x = T.Compose([\n\t\t\t\tT.Resize(c.img_size, Image.ANTIALIAS),\n\t\t\t\tT.CenterCrop(c.crp_size),\n\t\t\t\tT.ToTensor()])\n\t\t# mask\n\t\tself.transform_mask = T.Compose([\n\t\t\tT.Resize(c.img_size, Image.NEAREST),\n\t\t\tT.CenterCrop(c.crp_size),\n\t\t\tT.ToTensor()])\n\n\t\tself.normalize = T.Compose([T.Normalize(c.norm_mean, c.norm_std)])\n\n\t\t\n\t\t\"\"\" transform setting at Padim Experiment \"\"\"\n\t\t# # set transforms \n\t\t# self.transform_x = T.Compose([T.Resize(resize, Image.ANTIALIAS),\n\t\t#\t\t\t\t\t\t\t T.CenterCrop(cropsize),\n\t\t#\t\t\t\t\t\t\t T.ToTensor(),\n\t\t#\t\t\t\t\t\t\t T.Normalize(mean=[0.485, 0.456, 0.406],\n\t\t#\t\t\t\t\t\t\t\t\t\t std=[0.229, 0.224, 0.225])])\n\t\t# self.transform_mask = T.Compose([T.Resize(resize, Image.NEAREST),\n\t\t#\t\t\t\t\t\t\t\t T.CenterCrop(cropsize),\n\t\t#\t\t\t\t\t\t\t\t T.ToTensor()])\n\t\t\n\tdef __getitem__(self, idx):\n\t\tx, y, mask = self.x[idx], self.y[idx], self.mask[idx]\n\t\tx = Image.open(x).convert('RGB')\n\t\t# x = Image.open(x)\n\t\t# \"\"\" ziper, screw, grid class -> gray to RGB trnasform \"\"\"\n\t\t# if len(np.array(x).shape) == 2: # gray-scale image (only 1 channel)\n\t\t#\t x = np.expand_dims(np.array(x), axis=2)\n\t\t#\t x = np.concatenate([x, x, x], axis=2)\n\t\t#\t x = Image.fromarray(x.astype('uint8')).convert('RGB')\n\t \n\t\tx = self.normalize(self.transform_x(x))\n\n\t\t# if y == 0:\n\t\t# \tmask = torch.zeros([1, self.cropsize[0], self.cropsize[1]])\n\t\t# else:\n\t\t# \tif mask is None:\n\t\t# \t\tmask = torch.zeros([1, self.cropsize[0], self.cropsize[1]]) + 1.\n\t\t# \telse:\n\t\t# \t\tmask = Image.open(mask)\n\t\t# \t\tmask = self.transform_mask(mask)\n\n\t\treturn x, y\n\n\tdef __len__(self):\n\t\treturn len(self.x)\n\n\tdef load_dataset_folder(self):\n\t\tphase = 'train' if self.is_train else 'test'\n\t\tx, y, mask = [], [], []\n\t\t\n\t\tcls_list = [MVTEC_CLASS_NAMES[cls_nm] for cls_nm in self.class_indexes if cls_nm is not None]\n\n\t\tfor cls_nm in cls_list:\n\t\t\timg_dir = os.path.join(self.dataset_path, cls_nm, phase)\n\t\t\tgt_dir = os.path.join(self.dataset_path, cls_nm, 'ground_truth')\n\t\t\t\n\t\t\timg_types = sorted(os.listdir(img_dir))\n\t\t\tfor img_type in img_types:\n\t\t\t\t# load images\n\t\t\t\timg_type_dir = os.path.join(img_dir, img_type)\n\t\t\t\tif not os.path.isdir(img_type_dir):\n\t\t\t\t\tcontinue\n\t\t\t\timg_fpath_list = sorted([os.path.join(img_type_dir, 
f)\n\t\t\t\t\t\t\t\t\t\t for f in os.listdir(img_type_dir)\n\t\t\t\t\t\t\t\t\t\t if f.endswith('.png')])\n\t\t\t\tx.extend(img_fpath_list)\n\n\t\t\t\t# load gt labels\n\t\t\t\tif img_type == 'good':\n\t\t\t\t\ty.extend([0] * len(img_fpath_list))\n\t\t\t\t\tmask.extend([None] * len(img_fpath_list))\n\t\t\t\telse:\n\t\t\t\t\ty.extend([1] * len(img_fpath_list))\n\t\t\t\t\tgt_type_dir = os.path.join(gt_dir, img_type)\n\t\t\t\t\timg_fname_list = [os.path.splitext(os.path.basename(f))[0] for f in img_fpath_list]\n\t\t\t\t\tgt_fpath_list = [os.path.join(gt_type_dir, img_fname + '_mask.png')\n\t\t\t\t\t\t\t\t\tfor img_fname in img_fname_list]\n\t\t\t\t\tmask.extend(gt_fpath_list)\n\t\t\t\t \n\t\tassert len(x) == len(y), 'number of x and y should be same'\n\t\n\t\treturn list(x), list(y), list(mask)\n"
] | [
[
"torch.zeros",
"numpy.concatenate",
"numpy.array",
"torch.std",
"torch.tensor",
"torch.load",
"torch.mean"
]
] |
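The dataset classes above read all of their settings from a config object `c`. The following sketch wires `MVTecDataset` into a PyTorch `DataLoader`; the import path is inferred from the file location and every config value is an illustrative assumption, not a repository default:

```python
# Illustrative sketch; import path and all config values are assumptions.
from custom_datasets.loader import MVTecDataset  # assumed, based on the file path above
from torch.utils.data import DataLoader
from types import SimpleNamespace

c = SimpleNamespace(
    data_path='/path/to/mvtec',          # root of the MVTec-AD download (placeholder)
    class_name='bottle',                 # one of MVTEC_CLASS_NAMES
    img_size=(256, 256),
    crp_size=(224, 224),
    norm_mean=[0.485, 0.456, 0.406],
    norm_std=[0.229, 0.224, 0.225],
)

train_set = MVTecDataset(c, is_train=True)
train_loader = DataLoader(train_set, batch_size=32, shuffle=True)
x, y, mask = next(iter(train_loader))    # images, labels, pixel masks
```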
simonsmh/yolact | [
"e1726ea18eb5b64d98ab91a72ec07b29c8c38650"
] | [
"layers/box_utils.py"
] | [
"# -*- coding: utf-8 -*-\nimport torch\nfrom ..utils import timer\n\nfrom ..data import cfg\n\[email protected]\ndef point_form(boxes):\n \"\"\" Convert prior_boxes to (xmin, ymin, xmax, ymax)\n representation for comparison to point form ground truth data.\n Args:\n boxes: (tensor) center-size default boxes from priorbox layers.\n Return:\n boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n \"\"\"\n return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin\n boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax\n\n\[email protected]\ndef center_size(boxes):\n \"\"\" Convert prior_boxes to (cx, cy, w, h)\n representation for comparison to center-size form ground truth data.\n Args:\n boxes: (tensor) point_form boxes\n Return:\n boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n \"\"\"\n return torch.cat(( (boxes[:, 2:] + boxes[:, :2])/2, # cx, cy\n boxes[:, 2:] - boxes[:, :2] ), 1) # w, h\n\[email protected]\ndef intersect(box_a, box_b):\n \"\"\" We resize both tensors to [A,B,2] without new malloc:\n [A,2] -> [A,1,2] -> [A,B,2]\n [B,2] -> [1,B,2] -> [A,B,2]\n Then we compute the area of intersect between box_a and box_b.\n Args:\n box_a: (tensor) bounding boxes, Shape: [n,A,4].\n box_b: (tensor) bounding boxes, Shape: [n,B,4].\n Return:\n (tensor) intersection area, Shape: [n,A,B].\n \"\"\"\n n = box_a.size(0)\n A = box_a.size(1)\n B = box_b.size(1)\n max_xy = torch.min(box_a[:, :, 2:].unsqueeze(2).expand(n, A, B, 2),\n box_b[:, :, 2:].unsqueeze(1).expand(n, A, B, 2))\n min_xy = torch.max(box_a[:, :, :2].unsqueeze(2).expand(n, A, B, 2),\n box_b[:, :, :2].unsqueeze(1).expand(n, A, B, 2))\n return torch.clamp(max_xy - min_xy, min=0).prod(3) # inter\n\n\ndef jaccard(box_a, box_b, iscrowd:bool=False):\n \"\"\"Compute the jaccard overlap of two sets of boxes. The jaccard overlap\n is simply the intersection over union of two boxes. Here we operate on\n ground truth boxes and default boxes. If iscrowd=True, put the crowd in box_b.\n E.g.:\n A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)\n Args:\n box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]\n box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]\n Return:\n jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]\n \"\"\"\n use_batch = True\n if box_a.dim() == 2:\n use_batch = False\n box_a = box_a[None, ...]\n box_b = box_b[None, ...]\n\n inter = intersect(box_a, box_b)\n area_a = ((box_a[:, :, 2]-box_a[:, :, 0]) *\n (box_a[:, :, 3]-box_a[:, :, 1])).unsqueeze(2).expand_as(inter) # [A,B]\n area_b = ((box_b[:, :, 2]-box_b[:, :, 0]) *\n (box_b[:, :, 3]-box_b[:, :, 1])).unsqueeze(1).expand_as(inter) # [A,B]\n union = area_a + area_b - inter\n\n out = inter / area_a if iscrowd else inter / union\n return out if use_batch else out.squeeze(0)\n\ndef elemwise_box_iou(box_a, box_b):\n \"\"\" Does the same as above but instead of pairwise, elementwise along the inner dimension. 
\"\"\"\n max_xy = torch.min(box_a[:, 2:], box_b[:, 2:])\n min_xy = torch.max(box_a[:, :2], box_b[:, :2])\n inter = torch.clamp((max_xy - min_xy), min=0)\n inter = inter[:, 0] * inter[:, 1]\n\n area_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])\n area_b = (box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])\n\n union = area_a + area_b - inter\n union = torch.clamp(union, min=0.1)\n\n # Return value is [n] for inputs [n, 4]\n return torch.clamp(inter / union, max=1)\n\ndef mask_iou(masks_a, masks_b, iscrowd=False):\n \"\"\"\n Computes the pariwise mask IoU between two sets of masks of size [a, h, w] and [b, h, w].\n The output is of size [a, b].\n\n Wait I thought this was \"box_utils\", why am I putting this in here?\n \"\"\"\n\n masks_a = masks_a.view(masks_a.size(0), -1)\n masks_b = masks_b.view(masks_b.size(0), -1)\n\n intersection = masks_a @ masks_b.t()\n area_a = masks_a.sum(dim=1).unsqueeze(1)\n area_b = masks_b.sum(dim=1).unsqueeze(0)\n\n return intersection / (area_a + area_b - intersection) if not iscrowd else intersection / area_a\n\ndef elemwise_mask_iou(masks_a, masks_b):\n \"\"\" Does the same as above but instead of pairwise, elementwise along the outer dimension. \"\"\"\n masks_a = masks_a.view(-1, masks_a.size(-1))\n masks_b = masks_b.view(-1, masks_b.size(-1))\n\n intersection = (masks_a * masks_b).sum(dim=0)\n area_a = masks_a.sum(dim=0)\n area_b = masks_b.sum(dim=0)\n\n # Return value is [n] for inputs [h, w, n]\n return torch.clamp(intersection / torch.clamp(area_a + area_b - intersection, min=0.1), max=1)\n\n\n\ndef change(gt, priors):\n \"\"\"\n Compute the d_change metric proposed in Box2Pix:\n https://lmb.informatik.uni-freiburg.de/Publications/2018/UB18/paper-box2pix.pdf\n \n Input should be in point form (xmin, ymin, xmax, ymax).\n\n Output is of shape [num_gt, num_priors]\n Note this returns -change so it can be a drop in replacement for \n \"\"\"\n num_priors = priors.size(0)\n num_gt = gt.size(0)\n\n gt_w = (gt[:, 2] - gt[:, 0])[:, None].expand(num_gt, num_priors)\n gt_h = (gt[:, 3] - gt[:, 1])[:, None].expand(num_gt, num_priors)\n\n gt_mat = gt[:, None, :].expand(num_gt, num_priors, 4)\n pr_mat = priors[None, :, :].expand(num_gt, num_priors, 4)\n\n diff = gt_mat - pr_mat\n diff[:, :, 0] /= gt_w\n diff[:, :, 2] /= gt_w\n diff[:, :, 1] /= gt_h\n diff[:, :, 3] /= gt_h\n\n return -torch.sqrt( (diff ** 2).sum(dim=2) )\n\n\n\n\ndef match(pos_thresh, neg_thresh, truths, priors, labels, crowd_boxes, loc_t, conf_t, idx_t, idx, loc_data):\n \"\"\"Match each prior box with the ground truth box of the highest jaccard\n overlap, encode the bounding boxes, then return the matched indices\n corresponding to both confidence and location preds.\n Args:\n pos_thresh: (float) IoU > pos_thresh ==> positive.\n neg_thresh: (float) IoU < neg_thresh ==> negative.\n truths: (tensor) Ground truth boxes, Shape: [num_obj, num_priors].\n priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].\n labels: (tensor) All the class labels for the image, Shape: [num_obj].\n crowd_boxes: (tensor) All the crowd box annotations or None if there are none.\n loc_t: (tensor) Tensor to be filled w/ endcoded location targets.\n conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds. 
Note: -1 means neutral.\n idx_t: (tensor) Tensor to be filled w/ the index of the matched gt box for each prior.\n idx: (int) current batch index.\n loc_data: (tensor) The predicted bbox regression coordinates for this batch.\n Return:\n The matched indices corresponding to 1)location and 2)confidence preds.\n \"\"\"\n decoded_priors = decode(loc_data, priors, cfg.use_yolo_regressors) if cfg.use_prediction_matching else point_form(priors)\n \n # Size [num_objects, num_priors]\n overlaps = jaccard(truths, decoded_priors) if not cfg.use_change_matching else change(truths, decoded_priors)\n\n # Size [num_priors] best ground truth for each prior\n best_truth_overlap, best_truth_idx = overlaps.max(0)\n\n # We want to ensure that each gt gets used at least once so that we don't\n # waste any training data. In order to do that, find the max overlap anchor\n # with each gt, and force that anchor to use that gt.\n for _ in range(overlaps.size(0)):\n # Find j, the gt with the highest overlap with a prior\n # In effect, this will loop through overlaps.size(0) in a \"smart\" order,\n # always choosing the highest overlap first.\n best_prior_overlap, best_prior_idx = overlaps.max(1)\n j = best_prior_overlap.max(0)[1]\n\n # Find i, the highest overlap anchor with this gt\n i = best_prior_idx[j]\n\n # Set all other overlaps with i to be -1 so that no other gt uses it\n overlaps[:, i] = -1\n # Set all other overlaps with j to be -1 so that this loop never uses j again\n overlaps[j, :] = -1\n\n # Overwrite i's score to be 2 so it doesn't get thresholded ever\n best_truth_overlap[i] = 2\n # Set the gt to be used for i to be j, overwriting whatever was there\n best_truth_idx[i] = j\n\n matches = truths[best_truth_idx] # Shape: [num_priors,4]\n conf = labels[best_truth_idx] + 1 # Shape: [num_priors]\n\n conf[best_truth_overlap < pos_thresh] = -1 # label as neutral\n conf[best_truth_overlap < neg_thresh] = 0 # label as background\n\n # Deal with crowd annotations for COCO\n if crowd_boxes is not None and cfg.crowd_iou_threshold < 1:\n # Size [num_priors, num_crowds]\n crowd_overlaps = jaccard(decoded_priors, crowd_boxes, iscrowd=True)\n # Size [num_priors]\n best_crowd_overlap, best_crowd_idx = crowd_overlaps.max(1)\n # Set non-positives with crowd iou of over the threshold to be neutral.\n conf[(conf <= 0) & (best_crowd_overlap > cfg.crowd_iou_threshold)] = -1\n\n loc = encode(matches, priors, cfg.use_yolo_regressors)\n loc_t[idx] = loc # [num_priors,4] encoded offsets to learn\n conf_t[idx] = conf # [num_priors] top class label for each prior\n idx_t[idx] = best_truth_idx # [num_priors] indices for lookup\n\[email protected]\ndef encode(matched, priors, use_yolo_regressors:bool=False):\n \"\"\"\n Encode bboxes matched with each prior into the format\n produced by the network. See decode for more details on\n this format. Note that encode(decode(x, p), p) = x.\n \n Args:\n - matched: A tensor of bboxes in point form with shape [num_priors, 4]\n - priors: The tensor of all priors with shape [num_priors, 4]\n Return: A tensor with encoded relative coordinates in the format\n outputted by the network (see decode). 
Size: [num_priors, 4]\n \"\"\"\n\n if use_yolo_regressors:\n # Exactly the reverse of what we did in decode\n # In fact encode(decode(x, p), p) should be x\n boxes = center_size(matched)\n\n loc = torch.cat((\n boxes[:, :2] - priors[:, :2],\n torch.log(boxes[:, 2:] / priors[:, 2:])\n ), 1)\n else:\n variances = [0.1, 0.2]\n\n # dist b/t match center and prior's center\n g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]\n # encode variance\n g_cxcy /= (variances[0] * priors[:, 2:])\n # match wh / prior wh\n g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]\n g_wh = torch.log(g_wh) / variances[1]\n # return target for smooth_l1_loss\n loc = torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]\n \n return loc\n\[email protected]\ndef decode(loc, priors, use_yolo_regressors:bool=False):\n \"\"\"\n Decode predicted bbox coordinates using the same scheme\n employed by Yolov2: https://arxiv.org/pdf/1612.08242.pdf\n\n b_x = (sigmoid(pred_x) - .5) / conv_w + prior_x\n b_y = (sigmoid(pred_y) - .5) / conv_h + prior_y\n b_w = prior_w * exp(loc_w)\n b_h = prior_h * exp(loc_h)\n \n Note that loc is inputed as [(s(x)-.5)/conv_w, (s(y)-.5)/conv_h, w, h]\n while priors are inputed as [x, y, w, h] where each coordinate\n is relative to size of the image (even sigmoid(x)). We do this\n in the network by dividing by the 'cell size', which is just\n the size of the convouts.\n \n Also note that prior_x and prior_y are center coordinates which\n is why we have to subtract .5 from sigmoid(pred_x and pred_y).\n \n Args:\n - loc: The predicted bounding boxes of size [num_priors, 4]\n - priors: The priorbox coords with size [num_priors, 4]\n \n Returns: A tensor of decoded relative coordinates in point form \n form with size [num_priors, 4]\n \"\"\"\n\n if use_yolo_regressors:\n # Decoded boxes in center-size notation\n boxes = torch.cat((\n loc[:, :2] + priors[:, :2],\n priors[:, 2:] * torch.exp(loc[:, 2:])\n ), 1)\n\n boxes = point_form(boxes)\n else:\n variances = [0.1, 0.2]\n \n boxes = torch.cat((\n priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],\n priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)\n boxes[:, :2] -= boxes[:, 2:] / 2\n boxes[:, 2:] += boxes[:, :2]\n \n return boxes\n\n\n\ndef log_sum_exp(x):\n \"\"\"Utility function for computing log_sum_exp while determining\n This will be used to determine unaveraged confidence loss across\n all examples in a batch.\n Args:\n x (Variable(tensor)): conf_preds from conf layers\n \"\"\"\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x-x_max), 1)) + x_max\n\n\[email protected]\ndef sanitize_coordinates(_x1, _x2, img_size:int, padding:int=0, cast:bool=True):\n \"\"\"\n Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0, and x2 <= image_size.\n Also converts from relative to absolute coordinates and casts the results to long tensors.\n\n If cast is false, the result won't be cast to longs.\n Warning: this does things in-place behind the scenes so copy if necessary.\n \"\"\"\n _x1 = _x1 * img_size\n _x2 = _x2 * img_size\n if cast:\n _x1 = _x1.long()\n _x2 = _x2.long()\n x1 = torch.min(_x1, _x2)\n x2 = torch.max(_x1, _x2)\n x1 = torch.clamp(x1-padding, min=0)\n x2 = torch.clamp(x2+padding, max=img_size)\n\n return x1, x2\n\n\[email protected]\ndef crop(masks, boxes, padding:int=1):\n \"\"\"\n \"Crop\" predicted masks by zeroing out everything not in the predicted bbox.\n Vectorized by Chong (thanks Chong).\n\n Args:\n - masks should be a size [h, w, n] tensor of masks\n - boxes should be a size [n, 4] tensor 
of bbox coords in relative point form\n \"\"\"\n h, w, n = masks.size()\n x1, x2 = sanitize_coordinates(boxes[:, 0], boxes[:, 2], w, padding, cast=False)\n y1, y2 = sanitize_coordinates(boxes[:, 1], boxes[:, 3], h, padding, cast=False)\n\n rows = torch.arange(w, device=masks.device, dtype=x1.dtype).view(1, -1, 1).expand(h, w, n)\n cols = torch.arange(h, device=masks.device, dtype=x1.dtype).view(-1, 1, 1).expand(h, w, n)\n \n masks_left = rows >= x1.view(1, 1, -1)\n masks_right = rows < x2.view(1, 1, -1)\n masks_up = cols >= y1.view(1, 1, -1)\n masks_down = cols < y2.view(1, 1, -1)\n \n crop_mask = masks_left * masks_right * masks_up * masks_down\n \n return masks * crop_mask.float()\n\n\ndef index2d(src, idx):\n \"\"\"\n Indexes a tensor by a 2d index.\n\n In effect, this does\n out[i, j] = src[i, idx[i, j]]\n \n Both src and idx should have the same size.\n \"\"\"\n\n offs = torch.arange(idx.size(0), device=idx.device)[:, None].expand_as(idx)\n idx = idx + offs * idx.size(1)\n\n return src.view(-1)[idx.view(-1)].view(idx.size())\n"
] | [
[
"torch.cat",
"torch.min",
"torch.arange",
"torch.max",
"torch.clamp",
"torch.log",
"torch.exp"
]
] |
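The `intersect`/`jaccard` pair above computes pairwise IoU on batched box tensors. The following self-contained sketch re-implements the same idea for plain `[N, 4]` point-form boxes, just to make the formula concrete; it is not the repository's batched, jit-scripted version:

```python
# Minimal pairwise IoU for point-form boxes (xmin, ymin, xmax, ymax), mirroring
# the intersect()/jaccard() logic above for the non-batched 2D case.
import torch

def pairwise_iou(box_a, box_b):
    A, B = box_a.size(0), box_b.size(0)
    max_xy = torch.min(box_a[:, None, 2:].expand(A, B, 2),
                       box_b[None, :, 2:].expand(A, B, 2))
    min_xy = torch.max(box_a[:, None, :2].expand(A, B, 2),
                       box_b[None, :, :2].expand(A, B, 2))
    inter = torch.clamp(max_xy - min_xy, min=0).prod(2)                            # [A, B]
    area_a = ((box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1]))[:, None]  # [A, 1]
    area_b = ((box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1]))[None, :]  # [1, B]
    return inter / (area_a + area_b - inter)

a = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
b = torch.tensor([[1.0, 1.0, 3.0, 3.0], [0.0, 0.0, 2.0, 2.0]])
print(pairwise_iou(a, b))   # tensor([[0.1429, 1.0000]])
```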
andreamunafo/automatic_control | [
"dd1d89f732bfd8d95b0ebef6fe99df29b18a1fc2"
] | [
"classical_control_theory/intro_to_control_theory.py"
] | [
"# AUTOGENERATED! DO NOT EDIT! File to edit: 02_Intro_to_control_theory.ipynb (unless otherwise specified).\n\n__all__ = ['Car', 'LinearCar', 'step', 'delta', 'ramp_as_impulses']\n\n# Cell\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Cell\nclass Car:\n _g = 9.8 # Gravity\n\n def __init__(self, x0, params):\n self._x_1 = x0[0] # position (along the road)\n self._x_2 = x0[1] # velocity (along the road)\n self._m, self._alpha, self._beta, self._gamma = params\n\n def step(self, dt, u, theta):\n self.theta = theta\n self._x_1 = self._x_1 + dt*self._x_2\n self._x_2 = self._x_2 + dt*(-self._alpha/self._m*abs(self._x_2)*self._x_2 - \\\n self._beta/self._m*self._x_2 + self._gamma/self._m*u - \\\n Car._g*np.sin(theta))\n\n def speedometer(self):\n v = self._x_2\n return (v,)\n\n # Utility function to simplify plotting\n def sensor_i(self):\n # Rotation matrix to get back to the main frame.\n R = np.array(((np.cos(theta), -np.sin(theta)), (np.sin(theta), np.cos(theta))))\n x_i, y_i = R.dot(np.array([[self._x_1],[0]]))\n v = self._x_2\n return (x_i, y_i, v)\n\n# Cell\nclass LinearCar:\n _g = 9.8\n\n def __init__(self, x0, params):\n self._x_1 = x0[0] # position (along the road)\n self._x_2 = x0[1] # velocity\n self._m, self._alpha, self._beta, self._gamma = params\n\n def step(self, dt, u, theta):\n self._theta = theta\n A = np.array([[0, 1], [0, -self._beta/self._m]])\n B = np.array([[0, 0], [self._gamma/self._m, -LinearCar._g]])\n\n x = np.array([[self._x_1],[self._x_2]])\n U = np.array([[u],[theta]])\n self._x_1 = (self._x_1 + dt*(A[0,np.newaxis,:].dot(x) + B[0,np.newaxis,:].dot(U))).item()\n self._x_2 = (self._x_2 + dt*(A[1,np.newaxis,:].dot(x) + B[1,np.newaxis,:].dot(U))).item()\n\n def speedometer(self):\n v = self._x_2\n return (v,)\n\n # Utility function to simplify plotting\n def sensor_i(self):\n # Rotation matrix to get back to the inertial frame..\n R = np.array(((np.cos(self._theta), -np.sin(self._theta)),\n (np.sin(self._theta), np.cos(self._theta))))\n x_i, y_i = R.dot(np.array([[self._x_1],[0]]))\n v = self._x_2\n return (x_i, y_i, v)\n\n# Cell\ndef step(t, step_time=0):\n \"\"\"Heaviside step function\"\"\"\n return 1 * (t >= step_time)\n\ndef delta(t, delta_t=0, eps=None): # Impulse\n if np.isscalar(t) and eps is None:\n raise Exception('eps must be defined for scalar values.')\n if eps is None and len(t) > 1:\n _eps=t[1]-t[0]\n else:\n _eps = eps\n return 1/_eps*(step(t, delta_t-_eps/2)-step(t, delta_t+_eps/2))\n\n# Cell\ndef ramp_as_impulses(t, time_vector):\n u = t*delta(time_vector, delta_t=t, eps=.01)\n return u"
] | [
[
"numpy.sin",
"numpy.array",
"numpy.isscalar",
"numpy.cos"
]
] |
dimahwang88/py-mcftracker | [
"b7e845efa3c0f560fe59f2d1c8765087774e78e5",
"b7e845efa3c0f560fe59f2d1c8765087774e78e5"
] | [
"bbox.py",
"pfe/player-feature-extractor/torchreid/models/inceptionresnetv2.py"
] | [
"import numpy as np\n\nclass Box(object):\n def __init__(self, tlbr, confidence, transform, imsize):\n self.tlbr = np.asarray(tlbr, dtype=np.float)\n self.confidence = float(confidence)\n self.transform = transform\n self.size = imsize\n\n def to_tlwh(self):\n tlbr = self.tlbr.copy()\n w = tlbr[2]-tlbr[0]\n h = tlbr[3]-tlbr[1]\n x, y = tlbr[0], tlbr[1]\n tlwh = [x,y,w,h]\n return tlwh\n\n def box2midpoint_normalised(self, box, iw, ih):\n w = box[2]-box[0]\n x, y = box[0] + w/2, box[3]\n return (x/iw, y/ih)\n\n def to_world(self):\n p = self.box2midpoint_normalised(self.tlbr, self.size[1], self.size[0])\n cx, cy = self.transform.video_to_ground(p[0], p[1])\n cx, cy = cx*self.transform.parameter.get(\"ground_width\"), cy*self.transform.parameter.get(\"ground_height\")\n return np.asarray([cx,cy], dtype=np.float)\n\n\n\n",
"\"\"\"\nCode imported from https://github.com/Cadene/pretrained-models.pytorch\n\"\"\"\nfrom __future__ import division, absolute_import\nimport torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\n\n__all__ = ['inceptionresnetv2']\n\npretrained_settings = {\n 'inceptionresnetv2': {\n 'imagenet': {\n 'url':\n 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 299, 299],\n 'input_range': [0, 1],\n 'mean': [0.5, 0.5, 0.5],\n 'std': [0.5, 0.5, 0.5],\n 'num_classes': 1000\n },\n 'imagenet+background': {\n 'url':\n 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 299, 299],\n 'input_range': [0, 1],\n 'mean': [0.5, 0.5, 0.5],\n 'std': [0.5, 0.5, 0.5],\n 'num_classes': 1001\n }\n }\n}\n\n\nclass BasicConv2d(nn.Module):\n\n def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n bias=False\n ) # verify bias false\n self.bn = nn.BatchNorm2d(\n out_planes,\n eps=0.001, # value found in tensorflow\n momentum=0.1, # default pytorch value\n affine=True\n )\n self.relu = nn.ReLU(inplace=False)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n return x\n\n\nclass Mixed_5b(nn.Module):\n\n def __init__(self):\n super(Mixed_5b, self).__init__()\n\n self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1)\n\n self.branch1 = nn.Sequential(\n BasicConv2d(192, 48, kernel_size=1, stride=1),\n BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2)\n )\n\n self.branch2 = nn.Sequential(\n BasicConv2d(192, 64, kernel_size=1, stride=1),\n BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1),\n BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1)\n )\n\n self.branch3 = nn.Sequential(\n nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),\n BasicConv2d(192, 64, kernel_size=1, stride=1)\n )\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n x2 = self.branch2(x)\n x3 = self.branch3(x)\n out = torch.cat((x0, x1, x2, x3), 1)\n return out\n\n\nclass Block35(nn.Module):\n\n def __init__(self, scale=1.0):\n super(Block35, self).__init__()\n\n self.scale = scale\n\n self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1)\n\n self.branch1 = nn.Sequential(\n BasicConv2d(320, 32, kernel_size=1, stride=1),\n BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)\n )\n\n self.branch2 = nn.Sequential(\n BasicConv2d(320, 32, kernel_size=1, stride=1),\n BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1),\n BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1)\n )\n\n self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1)\n self.relu = nn.ReLU(inplace=False)\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n x2 = self.branch2(x)\n out = torch.cat((x0, x1, x2), 1)\n out = self.conv2d(out)\n out = out * self.scale + x\n out = self.relu(out)\n return out\n\n\nclass Mixed_6a(nn.Module):\n\n def __init__(self):\n super(Mixed_6a, self).__init__()\n\n self.branch0 = BasicConv2d(320, 384, kernel_size=3, stride=2)\n\n self.branch1 = nn.Sequential(\n BasicConv2d(320, 256, kernel_size=1, stride=1),\n BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1),\n BasicConv2d(256, 384, kernel_size=3, stride=2)\n )\n\n self.branch2 = nn.MaxPool2d(3, stride=2)\n\n def forward(self, x):\n x0 = 
self.branch0(x)\n x1 = self.branch1(x)\n x2 = self.branch2(x)\n out = torch.cat((x0, x1, x2), 1)\n return out\n\n\nclass Block17(nn.Module):\n\n def __init__(self, scale=1.0):\n super(Block17, self).__init__()\n\n self.scale = scale\n\n self.branch0 = BasicConv2d(1088, 192, kernel_size=1, stride=1)\n\n self.branch1 = nn.Sequential(\n BasicConv2d(1088, 128, kernel_size=1, stride=1),\n BasicConv2d(\n 128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)\n ),\n BasicConv2d(\n 160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)\n )\n )\n\n self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1)\n self.relu = nn.ReLU(inplace=False)\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n out = torch.cat((x0, x1), 1)\n out = self.conv2d(out)\n out = out * self.scale + x\n out = self.relu(out)\n return out\n\n\nclass Mixed_7a(nn.Module):\n\n def __init__(self):\n super(Mixed_7a, self).__init__()\n\n self.branch0 = nn.Sequential(\n BasicConv2d(1088, 256, kernel_size=1, stride=1),\n BasicConv2d(256, 384, kernel_size=3, stride=2)\n )\n\n self.branch1 = nn.Sequential(\n BasicConv2d(1088, 256, kernel_size=1, stride=1),\n BasicConv2d(256, 288, kernel_size=3, stride=2)\n )\n\n self.branch2 = nn.Sequential(\n BasicConv2d(1088, 256, kernel_size=1, stride=1),\n BasicConv2d(256, 288, kernel_size=3, stride=1, padding=1),\n BasicConv2d(288, 320, kernel_size=3, stride=2)\n )\n\n self.branch3 = nn.MaxPool2d(3, stride=2)\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n x2 = self.branch2(x)\n x3 = self.branch3(x)\n out = torch.cat((x0, x1, x2, x3), 1)\n return out\n\n\nclass Block8(nn.Module):\n\n def __init__(self, scale=1.0, noReLU=False):\n super(Block8, self).__init__()\n\n self.scale = scale\n self.noReLU = noReLU\n\n self.branch0 = BasicConv2d(2080, 192, kernel_size=1, stride=1)\n\n self.branch1 = nn.Sequential(\n BasicConv2d(2080, 192, kernel_size=1, stride=1),\n BasicConv2d(\n 192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)\n ),\n BasicConv2d(\n 224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)\n )\n )\n\n self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1)\n if not self.noReLU:\n self.relu = nn.ReLU(inplace=False)\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n out = torch.cat((x0, x1), 1)\n out = self.conv2d(out)\n out = out * self.scale + x\n if not self.noReLU:\n out = self.relu(out)\n return out\n\n\n# ----------------\n# Model Definition\n# ----------------\nclass InceptionResNetV2(nn.Module):\n \"\"\"Inception-ResNet-V2.\n\n Reference:\n Szegedy et al. Inception-v4, Inception-ResNet and the Impact of Residual\n Connections on Learning. 
AAAI 2017.\n\n Public keys:\n - ``inceptionresnetv2``: Inception-ResNet-V2.\n \"\"\"\n\n def __init__(self, num_classes, loss='softmax', **kwargs):\n super(InceptionResNetV2, self).__init__()\n self.loss = loss\n\n # Modules\n self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)\n self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)\n self.conv2d_2b = BasicConv2d(\n 32, 64, kernel_size=3, stride=1, padding=1\n )\n self.maxpool_3a = nn.MaxPool2d(3, stride=2)\n self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)\n self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1)\n self.maxpool_5a = nn.MaxPool2d(3, stride=2)\n self.mixed_5b = Mixed_5b()\n self.repeat = nn.Sequential(\n Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17),\n Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17),\n Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17),\n Block35(scale=0.17)\n )\n self.mixed_6a = Mixed_6a()\n self.repeat_1 = nn.Sequential(\n Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),\n Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),\n Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),\n Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),\n Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),\n Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),\n Block17(scale=0.10), Block17(scale=0.10)\n )\n self.mixed_7a = Mixed_7a()\n self.repeat_2 = nn.Sequential(\n Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20),\n Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20),\n Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20)\n )\n\n self.block8 = Block8(noReLU=True)\n self.conv2d_7b = BasicConv2d(2080, 1536, kernel_size=1, stride=1)\n self.global_avgpool = nn.AdaptiveAvgPool2d(1)\n self.classifier = nn.Linear(1536, num_classes)\n\n def load_imagenet_weights(self):\n settings = pretrained_settings['inceptionresnetv2']['imagenet']\n pretrain_dict = model_zoo.load_url(settings['url'])\n model_dict = self.state_dict()\n pretrain_dict = {\n k: v\n for k, v in pretrain_dict.items()\n if k in model_dict and model_dict[k].size() == v.size()\n }\n model_dict.update(pretrain_dict)\n self.load_state_dict(model_dict)\n\n def featuremaps(self, x):\n x = self.conv2d_1a(x)\n x = self.conv2d_2a(x)\n x = self.conv2d_2b(x)\n x = self.maxpool_3a(x)\n x = self.conv2d_3b(x)\n x = self.conv2d_4a(x)\n x = self.maxpool_5a(x)\n x = self.mixed_5b(x)\n x = self.repeat(x)\n x = self.mixed_6a(x)\n x = self.repeat_1(x)\n x = self.mixed_7a(x)\n x = self.repeat_2(x)\n x = self.block8(x)\n x = self.conv2d_7b(x)\n return x\n\n def forward(self, x):\n f = self.featuremaps(x)\n v = self.global_avgpool(f)\n v = v.view(v.size(0), -1)\n\n if not self.training:\n return v\n\n y = self.classifier(v)\n\n if self.loss == 'softmax':\n return y\n elif self.loss == 'triplet':\n return y, v\n else:\n raise KeyError('Unsupported loss: {}'.format(self.loss))\n\n\ndef inceptionresnetv2(num_classes, loss='softmax', pretrained=True, **kwargs):\n model = InceptionResNetV2(num_classes=num_classes, loss=loss, **kwargs)\n if pretrained:\n model.load_imagenet_weights()\n return model\n"
] | [
[
"numpy.asarray"
],
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.utils.model_zoo.load_url",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d"
]
] |
zzzkk2009/anti-spoofing | [
"ac3992547c430619e236b338575109d7ecbba654"
] | [
"train-mtcnn-zq-mxnet/core/minibatch.py"
] | [
"import cv2\nimport threading\nfrom tools import image_processing\nimport numpy as np\nimport math\n\nclass MyThread(threading.Thread):\n def __init__(self, func, args=()):\n super(MyThread, self).__init__()\n self.func = func\n self.args = args\n def run(self):\n self.ims, self.labels, self.types, self.bboxes, self.landmarks = self.func(*self.args)\n def get_result(self):\n threading.Thread.join(self)\n try:\n return self.ims, self.labels, self.types, self.bboxes, self.landmarks\n except Exception:\n return None\n\ndef get_minibatch_thread(imdb, num_classes, im_size, with_type, with_cls, with_bbox, with_landmark):\n num_images = len(imdb)\n processed_ims = list()\n cls_label = list()\n type_label = list()\n bbox_reg_target = list()\n landmark_reg_target = list()\n #print(num_images)\n for i in range(num_images):\n filename = imdb[i]['image']\n #print(filename)\n im = cv2.imread(filename)\n h, w, c = im.shape\n if with_type:\n type = imdb[i]['type_label']\n type_label.append(type)\n if with_cls:\n cls = imdb[i]['label']\n cls_label.append(cls)\n if with_bbox:\n bbox_target = imdb[i]['bbox_target']\n bbox_reg_target.append(bbox_target)\n if with_landmark:\n landmark_target = imdb[i]['landmark_target']\n landmark_reg_target.append(landmark_target)\n\n assert h == w == im_size, \"image size wrong\"\n if imdb[i]['flipped']:\n im = im[:, ::-1, :]\n\n im_tensor = image_processing.transform(im,True)\n processed_ims.append(im_tensor)\n\n\n\n return processed_ims, cls_label, type_label, bbox_reg_target, landmark_reg_target\n\ndef get_minibatch(imdb, num_classes, im_size, with_type, with_cls, with_bbox, with_landmark, thread_num = 4):\n # im_size: 12, 24 or 48\n #flag = np.random.randint(3,size=1)\n num_images = len(imdb)\n thread_num = max(2,thread_num)\n num_per_thread = math.ceil(float(num_images)/thread_num)\n #print(num_per_thread)\n threads = []\n for t in range(thread_num):\n start_idx = int(num_per_thread*t)\n end_idx = int(min(num_per_thread*(t+1),num_images))\n cur_imdb = [imdb[i] for i in range(start_idx, end_idx)]\n cur_thread = MyThread(get_minibatch_thread,(cur_imdb,num_classes,im_size,with_type, with_cls, with_bbox, with_landmark))\n threads.append(cur_thread)\n for t in range(thread_num):\n threads[t].start()\n\n processed_ims = list()\n if with_type:\n type_label = list()\n if with_cls:\n cls_label = list()\n if with_bbox:\n bbox_reg_target = list()\n if with_landmark:\n landmark_reg_target = list()\n\n for t in range(thread_num):\n cur_process_ims, cur_cls_label, cur_type_label, cur_bbox_reg_target, cur_landmark_reg_target = threads[t].get_result()\n processed_ims = processed_ims + cur_process_ims\n if with_type:\n type_label = type_label + cur_type_label\n if with_cls:\n cls_label = cls_label + cur_cls_label\n if with_bbox:\n bbox_reg_target = bbox_reg_target + cur_bbox_reg_target\n if with_landmark:\n landmark_reg_target = landmark_reg_target + cur_landmark_reg_target \n \n im_array = np.vstack(processed_ims)\n if with_type:\n type_label_array = np.array(type_label)\n if with_cls:\n label_array = np.array(cls_label)\n if with_bbox:\n bbox_target_array = np.vstack(bbox_reg_target)\n if with_landmark:\n landmark_target_array = np.vstack(landmark_reg_target)\n \n data = {'data': im_array}\n label = {}\n if with_type:\n label['type_label'] = type_label_array\n if with_cls:\n label['label'] = label_array\n if with_bbox:\n label['bbox_target'] = bbox_target_array\n if with_landmark:\n label['landmark_target'] = landmark_target_array\n\n return data, label\n\ndef get_testbatch(imdb):\n 
assert len(imdb) == 1, \"Single batch only\"\n filename = imdb[0]['image']\n im = cv2.imread(filename)\n #print filename\n im_array = im\n data = {'data': im_array}\n label = {}\n return data, label\n"
] | [
[
"numpy.array",
"numpy.vstack"
]
] |
MQXB7/COMP0064 | [
"15b60d457ae9cda088000b65c78afe03ad5708fd"
] | [
"code/get_lat_long.py"
] | [
"import pandas as pd \nfrom pathlib import Path\n\nfolder = Path('{Path to directory}/Online-Ponzi-Schemes/data/getaddress/').rglob('*.csv')\nfiles = [x for x in folder]\n\nfor name in files:\n\tdf = pd.read_csv(name, usecols=[0,1], header=0)\n\tprint(name, \" \", df.head(1))"
] | [
[
"pandas.read_csv"
]
] |
bianzhenkun/IntelligentShip | [
"ea8a4c1cd0bed11be63d2d10bb7e4cb03e001ed3"
] | [
"vis_simulator/script/boat_simulator.py"
] | [
"#!/usr/bin/env python3\n\"\"\"\n\nBoat simulator\n\nauthor SheffieldWang\n\n\"\"\"\n#import basic\nimport math\nimport numpy as np\nimport bisect\n\n#import ROS\nimport rospy\nfrom nav_msgs.msg import Path\nfrom geometry_msgs.msg import PoseStamped\nfrom control.msg import Command\n\ndt = 0.1 # time tick[s]\nL = 0.5 # width length of the boat [m]\nmax_steer = np.deg2rad(90.0) # maximum steering angle[rad]\n\nclass State:\n\n def __init__(self, x=0.0, y=0.0, yaw=np.radians(90), v=0.0):\n self.x = x\n self.y = y\n self.yaw = yaw\n self.v = v\n\ndef normalize_angle(angle):\n \"\"\"\n Normalize an angle to [-pi, pi].\n \"\"\"\n while angle > np.pi:\n angle -= 2.0 * np.pi\n\n while angle < -np.pi:\n angle += 2.0 * np.pi\n\n return angle\n\ndef update(state, a, delta):\n\n if delta >= max_steer:\n delta = max_steer\n if delta <= - max_steer:\n delta = - max_steer\n\n state.x = state.x + state.v * math.cos(state.yaw) * dt\n state.y = state.y + state.v * math.sin(state.yaw) * dt\n state.yaw = state.yaw + state.v / L * math.tan(delta) * dt\n #state.yaw = np.clip(state.yaw,-np.radians(45),np.radians(45))\n print(\"yaw = \",180*state.yaw/3.14)\n \n state.v = 0.3\n\n return state\n\nclass BoatSimulator():\n def __init__(self):\n self.state_ = State(x=0.0, y=0.0, yaw=0.0, v=0.0)\n self.command_sub_ = rospy.Subscriber(\"control_command\",Command,self.callbackFromCommand)\n self.boat_path_pub_ = rospy.Publisher(\"boat_path\",Path,queue_size=10)\n self.pose_pub_ = rospy.Publisher(\"boat_pose\",PoseStamped,queue_size=10)\n self.a_ = 0.0\n self.dl_ = 0.0\n #Path\n self.pose_ = PoseStamped()\n self.path_ = Path()\n\n def run(self):\n print(\"yaw = \",180*self.state_.yaw/3.14)\n rospy.spin()\n\n def callbackFromCommand(self,command):\n self.a_ = command.a\n self.dl_ = command.steer\n self.state_ = update(self.state_,self.a_,self.dl_)\n #print(\"x = \",self.state_.x)\n #print(\"y = \",self.state_.y)\n self.pose_.pose.position.x = self.state_.x\n self.pose_.pose.position.y = self.state_.y\n self.pose_.pose.position.z = self.state_.v\n self.pose_.pose.orientation.z = self.state_.yaw\n \n pose_tmp = PoseStamped()\n pose_tmp.pose.position.x = self.state_.x\n pose_tmp.pose.position.y = self.state_.y\n self.path_.header.frame_id = \"slamware_map\"\n self.path_.header.stamp = rospy.Time.now()\n self.path_.poses.append(pose_tmp)\n \n self.pose_pub_.publish(self.pose_)\n self.boat_path_pub_.publish(self.path_)\n\n\n\n\nif __name__ == '__main__':\n print(\"Boat Simulator Start!\")\n rospy.init_node('boat_simulator', anonymous=True)\n bsn = BoatSimulator()\n bsn.run()\n"
] | [
[
"numpy.deg2rad",
"numpy.radians"
]
] |
Tom-Ryder/VIforSSMs | [
"eb96596c867afe79975e8e98a84cd159c32ca22d"
] | [
"lotka_volterra_partial.py"
] | [
"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport tensorflow as tf\n# python data types\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom tensorflow.python.client import timeline\nfrom tensorflow.python.ops import clip_ops\nfrom optimisers.adamax import AdamaxOptimizer\n\n\nDTYPE = tf.float32\nNP_DTYPE = np.float32\n\ntfd = tf.contrib.distributions\ntfb = tfd.bijectors\n\nnp.random.seed(1)\ntf.set_random_seed(1)\n\n\nclass init_dist(tfd.Normal):\n\n def __init__(self, loc, scale, batch_dims, target_dims):\n self.batch_dims = batch_dims\n self.target_dims = target_dims\n tfd.Normal.__init__(self, loc=loc, scale=scale)\n\n def slp(self, p):\n sample = self.sample(p)\n log_prob = tf.reduce_sum(self.log_prob(\n sample)[:, -self.batch_dims:], axis=1)\n return sample, log_prob\n\n\nclass Bivariate_Normal():\n '''\n bivariate batched normal dist\n '''\n\n def __init__(self, mu, chol):\n self.mu = tf.expand_dims(mu, 2)\n self.det = tf.reduce_prod(tf.matrix_diag_part(chol), axis=1) ** 2\n self.cov_inv = tf.matrix_inverse(chol @ tf.transpose(chol, [0, 2, 1]))\n\n def log_prob(self, x):\n x = tf.expand_dims(x, 2)\n log_prob = - (1 / 2) * tf.log(self.det) + tf.squeeze(-0.5 * tf.transpose((x - self.mu), [0, 2, 1]) @ self.cov_inv @ (x - self.mu)) - np.log(2 * np.pi)\n return log_prob\n\n\nclass IAF():\n\n \"\"\"\n single-stack local IAF with feature injection\n \"\"\"\n\n def __init__(self, network_dims, theta, ts_feats, feat_dims = 10):\n self.network_dims = network_dims\n self.num_layers = len(network_dims)\n self.theta = theta\n self.ts_feats = ts_feats\n self.feat_dims = feat_dims\n\n def _create_flow(self, base_dist, p, kernel_len, batch_dims, target_dims, first_flow = False):\n base_sample, self.base_logprob = base_dist.slp(p)\n\n feat_layers = [self.ts_feats[:, :-1, :]]\n for i in range(3):\n feat_layers.append(tf.layers.dense(\n inputs=feat_layers[-1], units=self.network_dims[0], activation=tf.nn.elu))\n feat_layers.append(tf.transpose(tf.layers.dense(\n inputs=feat_layers[-1], units=self.feat_dims, activation=tf.nn.elu), [0, 2, 1]))\n\n convnet_inp = tf.concat(\n [tf.expand_dims(base_sample[:, :-1], 2), feat_layers[-1]], axis=2)\n\n layer1A = tf.layers.conv1d(inputs=convnet_inp, filters=network_dims[0],\n kernel_size=kernel_len, strides=1, padding='valid', activation=None)\n layer1B1 = tf.layers.dense(\n inputs=self.theta, units=self.network_dims[0], activation=None)\n layer1B2 = tf.layers.dense(\n inputs=layer1B1, units=self.network_dims[0], activation=None)\n layer1B = tf.layers.dense(\n inputs=layer1B2, units=self.network_dims[0], activation=None)\n\n layer1C = layer1A + tf.expand_dims(layer1B, 1)\n layers = [tf.nn.elu(layer1C)]\n\n for i in range(1, self.num_layers - 1):\n layers.append(tf.layers.conv1d(\n inputs=layers[-1], filters=self.network_dims[i], kernel_size=1, strides=1, activation=tf.nn.elu))\n layers.append(tf.layers.batch_normalization(layers[-1]))\n layers.append(tf.layers.conv1d(inputs=layers[-1], filters=2, kernel_size=1, strides=2, activation=None))\n\n mu_temp, sigma_temp = tf.split(layers[-1], 2, axis=2)\n mu_aug = tf.reshape(tf.concat([tf.zeros(tf.shape(mu_temp)), mu_temp], axis = 2), [p, -1])\n sigma_aug = tf.reshape(tf.concat([tf.ones(tf.shape(sigma_temp)), tf.nn.softplus(sigma_temp) + 1e-10], axis = 2), [p, -1])\n\n self.sigma_log = tf.log(sigma_aug[:, -batch_dims:])\n self.output = base_sample[:, kernel_len:] * sigma_aug + mu_aug\n\n def slp(self, *args):\n logprob = 
self.base_logprob - tf.reduce_sum(self.sigma_log, axis=1)\n return self.output, logprob\n\n\nclass Flow_Stack():\n\n \"\"\"\n Create locally variant IAF stack\n \"\"\"\n\n def __init__(self, flows, kernel_len, batch_dims, target_dims):\n base_dims = kernel_len * no_flows + batch_dims + 2\n base_dist = init_dist(loc=[0.0] * base_dims, scale=[1e0] *\n base_dims, batch_dims=batch_dims, target_dims=target_dims)\n flows.insert(0, base_dist)\n\n for i in range(1, len(flows)):\n if i == 1:\n flows[i]._create_flow(\n flows[i - 1], p, kernel_len, batch_dims, target_dims, first_flow = True)\n else:\n flows[i]._create_flow(\n flows[i - 1], p, kernel_len, batch_dims, target_dims)\n\n self.output = flows[-1]\n\n def slp(self):\n return self.output.slp()\n\n\nclass Permute():\n '''\n class to permute IAF\n '''\n\n def __init__(self, permute_tensor):\n '''\n :params permute_index: permutations as list\n '''\n self.permute_tensor = permute_tensor\n\n def _create_flow(self, base_dist, *args):\n '''\n function to permute base dist order\n :params base_dist: base dist to permute\n '''\n\n sample, self.log_prob = base_dist.slp()\n shape = tf.shape(sample)\n self.sample = tf.scatter_nd(self.permute_tensor, sample, shape)\n\n def slp(self, *args):\n return self.sample, self.log_prob\n\n\nclass VI_SSM():\n\n def __init__(self, obs, obs_bin, time_till, x0, theta_dist, priors, dt, T, p, kernel_len, batch_dims, network_dims, target_dims, no_flows, feat_window, learn_rate = 1e-3, pre_train=True):\n # raw inputs -> class variables\n self.obs = obs\n self.obs_bin = obs_bin\n obs_flatten = np.reshape(obs, -1, 'F')\n self.flow_dims = 2\n self.theta_dist = theta_dist\n self.theta = theta_dist.sample(p)\n self.theta_log_prob = theta_dist.log_prob(self.theta)\n self.priors = priors\n self.dt = dt\n self.p = p\n self.kernel_len = kernel_len\n self.batch_dims = batch_dims\n self.network_dims = network_dims\n self.target_dims = target_dims\n self.no_flows = no_flows\n self.theta_eval = self._theta_strech()\n self.diffusivity = tf.placeholder(DTYPE, 1)\n self.pre_train = pre_train\n self.learn_rate = learn_rate\n self.kernel_ext = self.kernel_len * self.no_flows + \\\n self.flow_dims * self.batch_dims + 2\n\n # augementing raw inputs\n self.obs_pad_store = []\n for i in range(0, feat_window*5, 5):\n self.obs_pad_store.append(np.concatenate(\n (np.zeros(self.no_flows * self.kernel_len + self.flow_dims - i), obs_flatten, np.zeros(i)), axis=0))\n self.time_pad = np.concatenate((np.zeros(self.no_flows * self.kernel_len + self.flow_dims),\n np.repeat(np.arange(dt, T + dt, dt), self.flow_dims)), axis=0)\n time_till_pad = np.reshape(np.repeat(np.arange(np.round((self.no_flows * self.kernel_len + self.flow_dims) * (\n self.dt / self.flow_dims), 1), 0., -self.dt), self.flow_dims), (self.flow_dims, -1), 'F')\n self.time_till = np.reshape(np.concatenate(\n (time_till_pad, time_till), 1), -1, 'F')\n self.bin_feats = np.float32(np.concatenate(\n (np.zeros(self.no_flows * self.kernel_len + self.flow_dims), np.ones(self.target_dims * self.flow_dims)), axis=0))\n self.mask_vals = np.concatenate(\n (np.zeros((2, 1)), np.ones((self.flow_dims, self.target_dims))), axis=1)\n self.shift_vals = np.concatenate(\n (np.expand_dims(x0, 1), np.zeros((self.flow_dims, self.target_dims))), axis=1)\n\n # perm index\n perm_list = []\n for j in range(self.p):\n for i in range(1, self.kernel_ext - self.kernel_len, 2):\n perm_list.append([j, i])\n perm_list.append([j, i - 1])\n self.perm_index = tf.constant(np.reshape(\n np.array(perm_list), (self.p, -1, 2)), 
tf.int32)\n\n # model placeholders\n self.time_feats = tf.placeholder(\n shape=[self.p, self.kernel_len * self.no_flows + self.flow_dims * self.batch_dims + self.flow_dims, feat_window + 3], dtype=DTYPE)\n self.obs_eval = tf.transpose(tf.reshape(\n self.time_feats[:, -self.flow_dims * self.batch_dims:, 0], [p, -1, 2]), [0, 2, 1])\n self.mask = tf.placeholder(\n shape=[self.p, self.flow_dims, self.batch_dims + 1], dtype=DTYPE)\n self.shift = tf.placeholder(\n shape=[self.p, self.flow_dims, self.batch_dims + 1], dtype=DTYPE)\n self.bin_feed = tf.placeholder(\n shape=[self.p, self.flow_dims, self.batch_dims], dtype=DTYPE)\n\n def _theta_strech(self):\n slice_stash = []\n for i in range(len(self.priors)):\n slice_stash.append(tf.reshape(tf.tile(tf.expand_dims(\n tf.exp(self.theta[:, i]), 1), [1, self.batch_dims]), [-1]))\n return slice_stash\n\n def _ELBO(self):\n obs_log_prob = tf.reduce_sum(tf.reshape(tfd.Normal(loc=self.obs_eval, scale=1.).log_prob(self.lf_sample[:, :, 1:]) * self.bin_feed, [self.p, -1]), 1)\n\n flow_head = self.lf_sample[:, :, :-1]\n flow_tail = self.lf_sample[:, :, 1:]\n\n latent_flow_diff = flow_tail - flow_head\n latent_flow_diff_flat = tf.concat([tf.reshape(latent_flow_diff[:, 0, :], [-1, 1]),\n tf.reshape(latent_flow_diff[:, 1, :], [-1, 1])], 1)\n\n def alpha(x1, x2, theta):\n drift_vec = tf.concat([tf.reshape(theta[0] * x1 - theta[1] * x1 * x2, [-1, 1]),\n tf.reshape(theta[1] * x1 * x2 - theta[2] * x2, [-1, 1])], axis=1)\n return drift_vec\n\n def sqrt_beta(x1, x2, theta):\n a = tf.reshape(\n tf.sqrt(theta[0] * x1 + theta[1] * x1 * x2), [-1, 1, 1])\n b = tf.reshape(- theta[1] * x1 * x2, [-1, 1, 1]) / a\n c = tf.sqrt(tf.reshape(\n theta[1] * x1 * x2 + theta[2] * x2, [-1, 1, 1]) - b ** 2)\n zeros = tf.zeros(tf.shape(a))\n beta_chol = tf.concat(\n [tf.concat([a, zeros], 2), tf.concat([b, c], 2)], 1)\n return beta_chol\n\n SDE_log_prob = tf.reduce_sum(tf.reshape(Bivariate_Normal(mu=self.dt * alpha(tf.reshape(flow_head[:, 0, :], [-1]), tf.reshape(flow_head[:, 1, :], [-1]), self.theta_eval),\n chol=tf.sqrt(self.dt) * sqrt_beta(tf.reshape(flow_head[:, 0, :], [-1]), tf.reshape(flow_head[:, 1, :], [-1]), self.theta_eval)).log_prob(latent_flow_diff_flat), [self.p, -1]), 1)\n\n prior_mean = [item[0] for item in self.priors]\n prior_scale = [item[1] for item in self.priors]\n\n prior_log_prob = tfd.MultivariateNormalDiag(\n loc=prior_mean, scale_diag=prior_scale).log_prob(self.theta)\n\n ELBO = (self.target_dims / self.batch_dims) * (SDE_log_prob -\n self.lf_log_prob + obs_log_prob) + prior_log_prob - self.theta_log_prob\n\n # ELBO = (self.target_dims / self.batch_dims) * (SDE_log_prob -\n # self.lf_log_prob) + prior_log_prob - self.theta_log_prob\n\n return ELBO, SDE_log_prob, obs_log_prob, prior_log_prob\n\n def build_flow(self):\n flows = []\n for i in range(self.no_flows):\n flows.append(IAF(network_dims=self.network_dims, theta=self.theta,\n ts_feats=self.time_feats, feat_dims = self.kernel_ext - 1 - i * kernel_len))\n if i == 0:\n flows.append(Permute(permute_tensor=self.perm_index))\n else:\n flows.append(\n Permute(permute_tensor=self.perm_index[:, :-(i * self.kernel_len), :]))\n SSM = Flow_Stack(flows[:-1], self.kernel_len,\n self.batch_dims * self.flow_dims, self.target_dims)\n lf_sample_init, lf_log_prob_init = SSM.slp()\n lf_sample_neg = tf.transpose(tf.reshape(\n lf_sample_init, [p, -1, 2]), [0, 2, 1])\n self.lf_sample = tfb.Softplus(event_ndims=2).forward(\n lf_sample_neg) * self.mask + self.shift\n\n self.lf_log_prob = lf_log_prob_init + \\\n 
tfb.Softplus(event_ndims=2).inverse_log_det_jacobian(\n self.lf_sample[:, :, 1:])\n loss, self.sde_loss, self.obs_loss, prior_prob = self._ELBO()\n self.mean_loss = tf.reduce_mean(loss)\n\n self.t1 = AdamaxOptimizer(\n learning_rate=1e-3, beta1=0.9).minimize((self.lf_sample - 75) ** 2)\n\n self.t2 = AdamaxOptimizer(learning_rate=1e-3, beta1=0.9).minimize(\n (self.theta - tf.log(tf.tile([[.5, .0025, .3]], [self.p, 1]))) ** 2)\n\n # do something nicer with this!\n theta_pos_index = [True, True, True]\n with tf.name_scope('loss'):\n tf.summary.scalar('ELBO', self.mean_loss)\n tf.summary.scalar(\n 'SDE_log_prob', (self.target_dims / self.batch_dims) * tf.reduce_mean(self.sde_loss))\n tf.summary.scalar('theta_log_prob',\n tf.reduce_mean(self.theta_log_prob))\n tf.summary.scalar(\n 'obs_log_prob', (self.target_dims / self.batch_dims) * tf.reduce_mean(self.obs_loss))\n tf.summary.scalar(\n 'path_log_prob', (self.target_dims / self.batch_dims) * tf.reduce_mean(self.lf_log_prob))\n tf.summary.scalar('truth_log_prob', -self.theta_dist.log_prob(tf.log([[.5, .0025, .3]]))[0])\n # theta summaries\n\n for i in range(len(theta_pos_index)):\n if theta_pos_index[i]:\n tf.summary.histogram(str(i), tf.exp(\n self.theta[:, i]), family='parameters')\n else:\n tf.summary.histogram(\n str(i), self.theta[:, i], family='parameters')\n\n with tf.name_scope('optimize'):\n opt = AdamaxOptimizer(learning_rate=self.learn_rate, beta1=0.95)\n gradients, variables = zip(\n *opt.compute_gradients(-loss))\n global_norm = tf.global_norm(gradients)\n self.gradients, _ = tf.clip_by_global_norm(gradients, 1e9)\n self.train_step = opt.apply_gradients(\n zip(self.gradients, variables))\n tf.summary.scalar(\n 'global_norm', global_norm)\n\n self.merged = tf.summary.merge_all()\n self.loss = loss\n\n def train(self, tensorboard_path, save_path):\n writer = tf.summary.FileWriter(\n '%s/%s' % (tensorboard_path, datetime.now().strftime(\"%d:%m:%y-%H:%M:%S\")), sess.graph)\n\n min_glob_loss = (1e99, -1)\n run = 0\n converged = False\n\n if self.batch_dims * self.p >= self.target_dims:\n replace_bool = True\n else:\n replace_bool = False\n\n pre_train_count = 0\n\n while not converged:\n batch_select = np.random.choice(\n np.arange(0, self.target_dims, self.batch_dims), size=self.p, replace=replace_bool)\n\n # batch_select = np.random.choice(\n # np.arange(0, self.target_dims-self.batch_dims+1), size=self.p, replace=replace_bool)\n\n obs_pad_feats = []\n for item in self.obs_pad_store:\n obs_pad_feats.append(np.concatenate([np.reshape(\n item[index: index + self.kernel_ext], [1, -1, 1]) for index in self.flow_dims * batch_select], axis=0))\n feat1 = np.concatenate(obs_pad_feats, axis=2)\n feat2 = np.concatenate([np.reshape(\n self.bin_feats[index: index + self.kernel_ext], [1, -1, 1]) for index in self.flow_dims * batch_select], axis=0)\n feat3 = np.concatenate([np.reshape(\n self.time_pad[index: index + self.kernel_ext], [1, -1, 1]) for index in self.flow_dims * batch_select], axis=0)\n feat4 = np.concatenate([np.reshape(\n self.time_till[index: index + self.kernel_ext], [1, -1, 1]) for index in self.flow_dims * batch_select], axis=0)\n\n time_feats_feed = np.concatenate(\n [feat1, feat2, feat3, feat4], axis=2)\n\n mask_feed = np.concatenate([np.expand_dims(self.mask_vals[:, index:(\n index + self.batch_dims + 1)], 0) for index in batch_select], axis=0)\n shift_feed = np.concatenate([np.expand_dims(self.shift_vals[:, index:(\n index + self.batch_dims + 1)], 0) for index in batch_select], axis=0)\n bin_feed = 
np.concatenate([np.expand_dims(\n self.obs_bin[:, index:index + self.batch_dims], 0) for index in batch_select], 0)\n\n if self.pre_train:\n if run == 0:\n print(\"Initialising paths and parameters...\")\n _, test, lf_sample = sess.run([self.t1, self.lf_log_prob, self.lf_sample], feed_dict={\n self.time_feats: time_feats_feed, self.mask: mask_feed, self.shift: shift_feed, self.bin_feed: bin_feed, self.diffusivity: [0.0]})\n if np.sum(np.isinf(test)) == 0:\n pre_train_count += 1\n else:\n pre_train_count = 0\n if pre_train_count == 1000:\n self.pre_train = False\n print(\"Finished pre-training...\")\n run = 0\n\n else:\n _, summary, batch_loss = sess.run([self.train_step, self.merged, self.mean_loss], feed_dict={self.time_feats: time_feats_feed,\n self.mask: mask_feed, self.shift: shift_feed, self.bin_feed: bin_feed, self.diffusivity: [0.0]})\n writer.add_summary(summary, run)\n\n if run % 1000 == 0:\n self.save(save_path)\n\n run += 1\n\n def save(self, PATH):\n saver = tf.train.Saver()\n saver.save(sess, PATH)\n print(\"Model saved\")\n\n def load(self, PATH):\n self.pre_train = False\n saver = tf.train.Saver()\n saver.restore(sess, PATH)\n print(\"Model restored\")\n\n def save_paths(self, PATH_obs):\n\n path_store = []\n\n for index_temp in np.arange(0, self.target_dims, self.batch_dims):\n\n batch_select = np.tile(index_temp, self.p)\n\n obs_pad_feats = []\n for item in self.obs_pad_store:\n obs_pad_feats.append(np.concatenate([np.reshape(\n item[index: index + self.kernel_ext], [1, -1, 1]) for index in self.flow_dims * batch_select], axis=0))\n feat1 = np.concatenate(obs_pad_feats, axis=2)\n feat2 = np.concatenate([np.reshape(\n self.bin_feats[index: index + self.kernel_ext], [1, -1, 1]) for index in self.flow_dims * batch_select], axis=0)\n feat3 = np.concatenate([np.reshape(\n self.time_pad[index: index + self.kernel_ext], [1, -1, 1]) for index in self.flow_dims * batch_select], axis=0)\n feat4 = np.concatenate([np.reshape(\n self.time_till[index: index + self.kernel_ext], [1, -1, 1]) for index in self.flow_dims * batch_select], axis=0)\n\n time_feats_feed = np.concatenate(\n [feat1, feat2, feat3, feat4], axis=2)\n\n mask_feed = np.concatenate([np.expand_dims(self.mask_vals[:, index:(\n index + self.batch_dims + 1)], 0) for index in batch_select], axis=0)\n shift_feed = np.concatenate([np.expand_dims(self.shift_vals[:, index:(\n index + self.batch_dims + 1)], 0) for index in batch_select], axis=0)\n bin_feed = np.concatenate([np.expand_dims(\n self.obs_bin[:, index:index + self.batch_dims], 0) for index in batch_select], 0)\n\n path_out = sess.run(self.lf_sample, feed_dict={\n self.time_feats: time_feats_feed, self.mask: mask_feed, self.shift: shift_feed, self.bin_feed: bin_feed, self.diffusivity: [0.0]})\n\n path_store.append(path_out[:, :, 1:])\n\n paths = np.concatenate(path_store, axis=2)\n\n f = open(PATH_obs, 'w')\n np.savetxt(f, np.reshape(paths, (self.p, -1)))\n f.close()\n\n\n########### setting up the model ###########\n# hyperparams\np = 50\nkernel_len = 20\ndt = 0.1\nT = 50.\ntarget_dims = np.int32(T / dt)\nbatch_dims = 50\nnetwork_dims = [50] * 5\nno_flows = 3\n# priors = [(0., 3.0), (0.0, 3.0), (0.0, 3.0)]\npriors = [(np.log(4.428 / 10), 1e-4), (np.log(0.029 / 10), 1e-4), (np.log(2.957 / 10), 1e-4)]\nfeat_window = 10\n\n# obs and theta\nx0 = np.array([100., 100.])\nf1 = open('dat/LV_obs_partial.txt', 'r')\nf2 = open('dat/LV_obs_binary.txt', 'r')\nf3 = open('dat/LV_time_till.txt', 'r')\n# f1 = 
open(\"/Users/localadmin/Documents/PhD/autoregressive_flows/locally_variant/LV_obs_partial.txt\", 'r')\n# f2 = open(\"/Users/localadmin/Documents/PhD/autoregressive_flows/locally_variant/LV_obs_binary.txt\", 'r')\nobs = np.loadtxt(f1, NP_DTYPE)\nobs_bin = np.loadtxt(f2, NP_DTYPE)\ntime_till = np.loadtxt(f3, NP_DTYPE)\nf1.close()\nf2.close()\nf3.close()\n\n# theta dist\nbijectors = []\nnum_bijectors = 4\nfor i in range(num_bijectors):\n bijectors.append(tfb.Invert(tfb.MaskedAutoregressiveFlow(\n shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(\n hidden_layers=[5, 5, 5], activation=tf.nn.elu))))\n if i < (num_bijectors - 1):\n bijectors.append(tfb.Permute(\n permutation=np.random.permutation(np.arange(0, len(priors)))))\nflow_bijector = tfb.Chain(list(reversed(bijectors)))\n\ntheta_dist = tfd.TransformedDistribution(\n distribution=tfd.Normal(loc=0., scale=1.),\n bijector=flow_bijector,\n event_shape=[len(priors)])\n\n# theta_dist = tfd.MultivariateNormalDiag(loc = [tf.Variable(0.05), tf.Variable(.05), tf.Variable(0.05)], scale_diag= [tf.Variable(1.), tf.Variable(1.), tf.Variable(1.)])\n\n# buiding the model\nvar_model = VI_SSM(obs, obs_bin, time_till, x0, theta_dist, priors, dt, T, p,\n kernel_len, batch_dims, network_dims, target_dims, no_flows, feat_window, learn_rate = 1e-3, pre_train=True)\nvar_model.build_flow()\n\n# new session\nsess = tf.InteractiveSession()\nsess.run(tf.global_variables_initializer())\n\n# training!\n# var_model.load('model_saves/LV_model_%i_3.ckpt' % batch_dims)\n\n# var_model.ELBO_estimate(1e5, 'locally_variant/ELBO_%i' % batch_dims)\n\nvar_model.save_paths('locally_variant/LV_obs_paths.txt')\n# np.savetxt('/home/b2028663/scripts/arf/locally_variant/local_post.txt', sess.run(theta_dist.sample([100000])))\n\nvar_model.train(tensorboard_path='locally_variant/train/',\n save_path='model_saves/LV_model_%i_3.ckpt' % batch_dims)\n\n\n# var_model.save_paths('/home/b2028663/scripts/arf/locally_variant/obs_paths.txt')\n"
] | [
[
"tensorflow.exp",
"numpy.tile",
"tensorflow.reshape",
"tensorflow.scatter_nd",
"tensorflow.sqrt",
"tensorflow.tile",
"tensorflow.global_variables_initializer",
"tensorflow.InteractiveSession",
"tensorflow.set_random_seed",
"numpy.concatenate",
"tensorflow.shape",
"tensorflow.matrix_diag_part",
"tensorflow.concat",
"numpy.log",
"tensorflow.train.Saver",
"tensorflow.layers.conv1d",
"tensorflow.transpose",
"tensorflow.layers.batch_normalization",
"numpy.arange",
"tensorflow.nn.softplus",
"tensorflow.layers.dense",
"tensorflow.split",
"numpy.int32",
"numpy.expand_dims",
"numpy.array",
"numpy.reshape",
"numpy.zeros",
"tensorflow.expand_dims",
"tensorflow.summary.scalar",
"numpy.round",
"tensorflow.log",
"numpy.loadtxt",
"tensorflow.placeholder",
"tensorflow.reduce_sum",
"tensorflow.name_scope",
"tensorflow.global_norm",
"tensorflow.summary.merge_all",
"tensorflow.clip_by_global_norm",
"numpy.isinf",
"numpy.random.seed",
"numpy.ones",
"tensorflow.nn.elu",
"tensorflow.reduce_mean"
]
] |
PascalIversen/gluon-ts | [
"60f7d39a965d77d583883d3ddde75d6510c06737"
] | [
"test/trainer/test_model_averaging.py"
] | [
"# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Third-party imports\nimport mxnet as mx\nimport numpy as np\nimport pytest\n\n# First-party imports\nfrom gluonts.mx.trainer.model_averaging import (\n SelectNBestMean,\n SelectNBestSoftmax,\n)\n\n\[email protected](\"strategy\", [SelectNBestMean, SelectNBestSoftmax])\[email protected](\"num_models\", [1, 2])\ndef test_model_averaging(strategy, num_models):\n total_models = 2\n\n # model 1\n param_1 = {\n \"arg1\": mx.nd.array([[1, 2, 3], [1, 2, 3]]),\n \"arg2\": mx.nd.array([[1, 2], [1, 2]]),\n }\n loss_1 = 1\n\n # model 2\n param_2 = {\n \"arg1\": mx.nd.array([[1, 1, 1], [1, 1, 1]]),\n \"arg2\": mx.nd.array([[1, 1], [1, 1]]),\n }\n loss_2 = 1.5\n assert (\n loss_1 < loss_2\n ) # to keep it simple we assume that the first model has lower loss than the second\n\n # combine models\n all_arg_params = [param_1, param_2]\n dummy_checkpoints = [\n {\"params_path\": \"dummy_path\", \"epoch_no\": 0, \"score\": loss_1,},\n {\"params_path\": \"dummy_path\", \"epoch_no\": 0, \"score\": loss_2,},\n ]\n\n # compute expected weights\n avg = strategy(num_models=num_models)\n _, weights = avg.select_checkpoints(dummy_checkpoints)\n assert len(weights) == num_models\n\n if isinstance(avg, SelectNBestMean):\n exp_weights = [1 / num_models for _ in range(num_models)]\n assert weights == exp_weights\n elif isinstance(avg, SelectNBestSoftmax):\n losses = [c[\"score\"] for c in dummy_checkpoints]\n losses = sorted(losses)[:num_models]\n exp_weights = [np.exp(-l) for l in losses]\n exp_weights = [x / sum(exp_weights) for x in exp_weights]\n assert weights == exp_weights\n\n # compute expected output\n weights = weights + [0] * (\n total_models - num_models\n ) # include 0 weights for the models that are not averaged\n exp_output = {\n \"arg1\": weights[0] * mx.nd.array([[1, 2, 3], [1, 2, 3]])\n + weights[1] * mx.nd.array([[1, 1, 1], [1, 1, 1]]),\n \"arg2\": weights[0] * mx.nd.array([[1, 2], [1, 2]])\n + weights[1] * mx.nd.array([[1, 1], [1, 1]]),\n }\n\n avg_params = {}\n for k in all_arg_params[0]:\n arrays = [p[k] for p in all_arg_params]\n avg_params[k] = avg.average_arrays(arrays, weights)\n\n for k in all_arg_params[0]:\n assert all_arg_params[0][k].shape == exp_output[k].shape\n assert mx.nd.sum(avg_params[k] - exp_output[k]) < 1e-20\n"
] | [
[
"numpy.exp"
]
] |
seankmartin/nengo | [
"de345f6d201ac5063fc4c5a7e56c0b16c26785c1"
] | [
"nengo/networks/tests/test_circularconv.py"
] | [
"import numpy as np\nimport pytest\n\nimport nengo\nfrom nengo.networks.circularconvolution import circconv, transform_in, transform_out\nfrom nengo.utils.numpy import rms\n\n\[email protected](\"invert_a\", [True, False])\[email protected](\"invert_b\", [True, False])\ndef test_circularconv_transforms(invert_a, invert_b, rng, allclose):\n \"\"\"Test the circular convolution transforms\"\"\"\n dims = 100\n x = rng.randn(dims)\n y = rng.randn(dims)\n z0 = circconv(x, y, invert_a=invert_a, invert_b=invert_b)\n\n tr_a = transform_in(dims, \"A\", invert_a)\n tr_b = transform_in(dims, \"B\", invert_b)\n tr_out = transform_out(dims)\n XY = np.dot(tr_a, x) * np.dot(tr_b, y)\n z1 = np.dot(tr_out, XY)\n\n assert allclose(z0, z1)\n\n\ndef test_input_magnitude(Simulator, seed, rng, dims=16, magnitude=10):\n \"\"\"Test to make sure the magnitude scaling works.\n\n Builds two different CircularConvolution networks, one with the correct\n magnitude and one with 1.0 as the input_magnitude.\n \"\"\"\n neurons_per_product = 128\n\n a = rng.normal(scale=np.sqrt(1.0 / dims), size=dims) * magnitude\n b = rng.normal(scale=np.sqrt(1.0 / dims), size=dims) * magnitude\n result = circconv(a, b)\n\n model = nengo.Network(label=\"circular conv\", seed=seed)\n model.config[nengo.Ensemble].neuron_type = nengo.LIFRate()\n with model:\n input_a = nengo.Node(a)\n input_b = nengo.Node(b)\n cconv = nengo.networks.CircularConvolution(\n neurons_per_product, dimensions=dims, input_magnitude=magnitude\n )\n nengo.Connection(input_a, cconv.input_a, synapse=None)\n nengo.Connection(input_b, cconv.input_b, synapse=None)\n res_p = nengo.Probe(cconv.output)\n cconv_bad = nengo.networks.CircularConvolution(\n neurons_per_product, dimensions=dims, input_magnitude=1\n ) # incorrect magnitude\n nengo.Connection(input_a, cconv_bad.input_a, synapse=None)\n nengo.Connection(input_b, cconv_bad.input_b, synapse=None)\n res_p_bad = nengo.Probe(cconv_bad.output)\n with Simulator(model) as sim:\n sim.run(0.01)\n\n error = rms(result - sim.data[res_p][-1]) / (magnitude ** 2)\n error_bad = rms(result - sim.data[res_p_bad][-1]) / (magnitude ** 2)\n\n assert error < 0.1\n assert error_bad > 0.1\n\n\[email protected](\"dims\", [16, 32])\ndef test_neural_accuracy(Simulator, seed, rng, dims, neurons_per_product=128):\n a = rng.normal(scale=np.sqrt(1.0 / dims), size=dims)\n b = rng.normal(scale=np.sqrt(1.0 / dims), size=dims)\n result = circconv(a, b)\n\n model = nengo.Network(label=\"circular conv\", seed=seed)\n model.config[nengo.Ensemble].neuron_type = nengo.LIFRate()\n with model:\n input_a = nengo.Node(a)\n input_b = nengo.Node(b)\n cconv = nengo.networks.CircularConvolution(neurons_per_product, dimensions=dims)\n nengo.Connection(input_a, cconv.input_a, synapse=None)\n nengo.Connection(input_b, cconv.input_b, synapse=None)\n res_p = nengo.Probe(cconv.output)\n with Simulator(model) as sim:\n sim.run(0.01)\n\n error = rms(result - sim.data[res_p][-1])\n\n assert error < 0.1\n\n\ndef test_old_input_deprecation_warning():\n with nengo.Network():\n c = nengo.networks.CircularConvolution(n_neurons=10, dimensions=1)\n with pytest.warns(DeprecationWarning):\n assert c.A is c.input_a\n with pytest.warns(DeprecationWarning):\n assert c.B is c.input_b\n"
] | [
[
"numpy.dot",
"numpy.sqrt"
]
] |
noetits/ophelia | [
"49f4b1495bbe6c768806cf3f1b0415f73e06008c"
] | [
"perceptual_experiment/compute_score.py"
] | [
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n# df.iloc[:,-8:]\n\n\ndef idx_to_case_xy(idx):\n x=((idx/100).astype(int)/100*5).astype(int)\n y=((idx-100*(idx/100).astype(int))/100*5).astype(int)\n\n return x,y\n\n\n# df['Answer.selected_idx']\n# df['Input.ref_idx']\n\n\nX=np.load('X.npy')\n\nw=(X[:,0].max()-X[:,0].min())\nh=(X[:,1].max()-X[:,1].min())\n\ndef compute_scores(df):\n a_xs,a_ys=idx_to_case_xy(df['Answer.selected_idx'])\n i_xs,i_ys=idx_to_case_xy(df['Input.ref_idx'])\n dx=a_xs-i_xs\n dy=(a_ys-i_ys)*h/w\n scores=np.sqrt(dx**2+dy**2)\n return scores\n\ndef compute_score(df):\n return compute_scores(df).mean()\n\n\n\n# worst baseline\nri_xs=np.random.randint(5, size=100000)\nri_ys=np.random.randint(5, size=100000)\n\n# these should be equal with n>>>\nx_max_delta=np.max([4-ri_xs, ri_xs], axis=0)\ny_max_delta=np.max([4-ri_ys, ri_ys], axis=0)*h/w\n\nmax_score=np.sqrt(x_max_delta**2+y_max_delta**2).mean()\nprint('Worst baseline:', max_score)\n\n\n# //!!\\\\ for these I should also do the *h/w if I want to use them\n\n# other method for computing max_score: instead of random and max deltas, just each case and max deltas\nfrom itertools import product\nn_cases=5\na=np.arange(n_cases)\ncases=np.array(list(product(a, a*h/w)))\nmax_delta=np.max([[4,4*h/w]-cases, cases], axis=0)\n# max_score=np.sqrt(x_max_delta**2+y_max_delta**2).mean()\nmax_score=np.sqrt((max_delta**2).sum(axis=1)).mean()\n\n# baseline all in center\ncenter_score=np.sqrt(((cases-np.array([2,2*h/w]))**2).sum(axis=1)).mean()\n# equal : look at the 25 possible distances in the square from the center\ndef dist(a,b):\n return np.sqrt(a**2+b**2)\ny=h/w\n(1+2+2*dist(1,y)+2*dist(2,y)+2*dist(2,2*y)+2*dist(1, 2*y)+y+2*y)*2/25\n\n# random baseline\nri_xs=np.random.randint(5, size=100000)\nri_ys=np.random.randint(5, size=100000)\nra_xs=np.random.randint(5, size=100000)\nra_ys=np.random.randint(5, size=100000)\nr_dx=ra_xs-ri_xs\nr_dy=(ra_ys-ri_ys)*h/w\nr_scores=np.sqrt(r_dx**2+r_dy**2)\nr_score=r_scores.mean()\n\nfrom scipy.stats import t\n\nn=len(r_scores)\nts=t.ppf(0.975, n)\n\nCI_r=r_scores.std()*ts/np.sqrt(n)\n\nprint('Random baseline:', r_score, ' +-', CI_r)\n# print('center baseline:', center_score)\n\ndef print_scores_from_csv(df):\n score=compute_score(df)\n print('\\n\\n')\n print('Overall :', score)\n print('\\n')\n for usr in pd.unique(df['Turkle.Username']):\n df_usr=df[df['Turkle.Username']==usr]\n print(usr, ': ', compute_score(df_usr))\n\n# df=pd.read_csv('batch_25_same_sent-Batch_17_results_noe.csv')\n# score=compute_score(df)\n# print('noe:', score)\n\ncsv_path='batch_same_text_many-Batch_19_results.csv'\ndf=pd.read_csv(csv_path)\n# print_scores_from_csv(df)\n\ncsv_path='batch_diff_text_many-Batch_20_results.csv'\ndf=pd.read_csv(csv_path)\n# print_scores_from_csv(df)\n\ncsv_path='batch_isialab_same_text.csv'\ndf=pd.read_csv(csv_path)\n# print_scores_from_csv(df)\n\ncsv_path='batch_victor_same.csv'\ndf=pd.read_csv(csv_path)\n# print_scores_from_csv(df)\n\ncsv_path='batch_isialab_diff_text.csv'\ndf=pd.read_csv(csv_path)\n# print_scores_from_csv(df)\n\n\npaths_same_text=['batch_same_text_many-Batch_19_results.csv',\n'batch_isialab_same_text.csv',\n'batch_victor_same.csv']\n\npaths_diff_text=['batch_diff_text_many-Batch_20_results.csv',\n'batch_isialab_diff_text.csv']\n\n\n\ndfs_same=[pd.read_csv(csv_path) for csv_path in paths_same_text]\ndfs_diff=[pd.read_csv(csv_path) for csv_path in paths_diff_text]\n\ndf_same=pd.concat(dfs_same)\ndf_diff=pd.concat(dfs_diff)\n\n# 
print_scores_from_csv(df_same)\n# print_scores_from_csv(df_diff)\n\ndf_same['scores']=compute_scores(df_same)\ndf_diff['scores']=compute_scores(df_diff)\n\n# df=pd.read_csv('batch_isialab_same_text.csv')\n# score=compute_score(df_same)\n# print(score)\n\n# score=compute_score(df_diff)\n# print(score)\n\nfig, ax = plt.subplots()\nscores_list=[df_same['scores'], df_diff['scores'], r_scores]\nax.boxplot(scores_list,showmeans=True,meanline=True)\nmethods=[\"same text\", 'different text', 'Random']\nax.set_xticklabels(methods)\nfig.savefig('boxplot.png')\n\nplt.clf()\nax = sns.violinplot(df.iloc[:,0], y=df.iloc[:,1])\nplt.savefig('violinplot.png')\n\n\n# https://en.wikipedia.org/wiki/Confidence_interval\n# https://en.wikipedia.org/wiki/Student%27s_t-distribution\n# We assume a normal distribution. We compute sample mean. \n# We do not know the true std and have to estimate it. \n# Therefore we use student's t distrib to have the \"critical value\" t* to obtain the CI: t*s/sqrt(n)\n# the table is here: https://en.wikipedia.org/wiki/Student%27s_t-distribution#Table_of_selected_values\n\nfrom scipy.stats import t\n# this is equal to the number written in the table at 97.5% (one-sided) or 95% (two-sided)\nprint(t.ppf(0.975, 60))\n\nn=len(df_same['scores'])\nts=t.ppf(0.975, n)\nCI_same=df_same['scores'].std()*ts/np.sqrt(n)\n\nprint('score same:', df_same['scores'].mean(), '+-', CI_same)\n\nn=len(df_diff['scores'])\nts=t.ppf(0.975, n)\nCI_diff=df_diff['scores'].std()*ts/np.sqrt(n)\n\nprint('score same:', df_diff['scores'].mean(), '+-', CI_diff)\n\n\ndef df_by_usr(df, info='scores'):\n d_usr_same={}\n for usr in pd.unique(df['Turkle.Username']):\n df_usr=df[df['Turkle.Username']==usr]\n # print(usr, compute_score(df_usr))\n d_usr_same[usr]=df_usr[info].tolist()\n df_usr=pd.DataFrame.from_dict(d_usr_same,orient='index').transpose()\n return df_usr\n\n\ndf_usr_same=df_by_usr(df_same, info='scores')\nprint('\\n\\n')\ndf_usr_diff=df_by_usr(df_diff, info='scores')\n\n\ndf_usr_same_duration=df_by_usr(df_same, info='WorkTimeInSeconds')\nprint('\\n\\n')\ndf_usr_diff_duration=df_by_usr(df_diff, info='WorkTimeInSeconds')\n\n\n\n# https://en.wikipedia.org/wiki/Confidence_interval\n# https://en.wikipedia.org/wiki/Student%27s_t-distribution\n# We assume a normal distribution. We compute sample mean. \n# We do not know the true std and have to estimate it. 
\n# Therefore we use student's t distrib to have the \"critical value\" t* to obtain the CI: t*s/sqrt(n)\n# the table is here: https://en.wikipedia.org/wiki/Student%27s_t-distribution#Table_of_selected_values\n\nfrom scipy.stats import t\n# this is equal to the number written in the table at 97.5% (one-sided) or 95% (two-sided)\n# print(t.ppf(0.975, 60))\n\ndef CI(df_usr):\n n=df_usr.count(axis=1)\n ts=t.ppf(0.975, n)\n CI=df_usr.std(axis=1)*ts/np.sqrt(n)\n return CI\n\nCI(df_usr_diff)\n# to compute the mean \\pm CI by index, n=number of participants that responded (non nan) = df_usr_diff.count(axis=1)\ndf_usr_diff.mean(axis=1)\nn=df_usr_diff.count(axis=1)\nts=t.ppf(0.975, n)\nCI_diff=df_usr_diff.std(axis=1)*ts/np.sqrt(n)\n\ndf_usr_same.mean(axis=1)\nn=df_usr_same.count(axis=1)\nts=t.ppf(0.975, n)\nCI_same=df_usr_same.std(axis=1)*ts/np.sqrt(n)\n\nprint('total answers same: ',df_usr_same.count().sum())\nprint('total answers diff: ',df_usr_diff.count().sum())\n\n\nimport seaborn as sns\n\ndef violin_by_idx(df, name='same', showmeans=True, showoutlier=True):\n x=[]\n y=[]\n df=df[:15]\n for column in df:\n # print(df_usr_diff[column])\n x+=df[column].index.tolist()\n y+=df[column].tolist()\n data=pd.DataFrame([x,y]).T.dropna()\n\n scores_list=[]\n for i in range(len(df)):\n scores_list.append(data[data.iloc[:,0]==i].iloc[:,1].tolist())\n \n plt.clf()\n if showoutlier:\n result=plt.boxplot(scores_list,showmeans=showmeans,meanline=True)\n else:\n result=plt.boxplot(scores_list,showmeans=showmeans,meanline=True,sym='')\n # ax.set_xticklabels(range(df.shape[-1])\n plt.savefig('boxplot_by_idx_'+name+'.png')\n\n plt.clf()\n ax = sns.violinplot(x=data.iloc[:,0], y=data.iloc[:,1] , cut=0)\n plt.savefig('violinplot_'+name+'.png')\n\n return result\n\n\ndef stat_list(result, stat='means'):\n return [el._y[0] for el in result[stat]]\n\nresult_same=violin_by_idx(df_usr_same)\nresult_diff=violin_by_idx(df_usr_diff, name='diff')\n\nresult_same_duration=violin_by_idx(df_usr_same_duration, name='same_duration', showmeans=False, showoutlier=False)\nresult_diff_duration=violin_by_idx(df_usr_diff_duration, name='diff_duration', showmeans=False, showoutlier=False)\n\nmeans_same=stat_list(result_same, stat='means')\nmeans_diff=stat_list(result_diff, stat='means')\nmedians_same_duration=stat_list(result_same_duration, stat='medians')\nmedians_diff_duration=stat_list(result_diff_duration, stat='medians')\n\nboxes_diff_duration=[el._y for el in result_diff_duration['boxes']]\n\nfrom scipy.stats import linregress\n\n\n\nprint('means same:',linregress(range(len(means_same)),means_same))\nprint('means diff:',linregress(range(len(means_diff)),means_diff))\n\nprint('medians duration same:',linregress(range(len(medians_same_duration)),medians_same_duration))\nprint('medians duration diff:',linregress(range(len(medians_diff_duration)),medians_diff_duration))\n\n# plt.clf()\n# ax = sns.lineplot(x=x, y=y)\n# plt.savefig('lineplot.png')\n\n\n# plt.clf()\n# ax = sns.regplot(x=x, y=y)\n# plt.savefig('regplot.png')\n\n# df=pd.DataFrame([x,y]).T.dropna()\n# plt.clf()\n# ax = plt.hist2d(x=df.iloc[:,0], y=df.iloc[:,1], bins=(10, 5))\n# plt.savefig('hist2d.png')\n\n\n# t-test to see if means are different\n# https://towardsdatascience.com/inferential-statistics-series-t-test-using-numpy-2718f8f9bf2f"
] | [
[
"numpy.max",
"numpy.array",
"pandas.unique",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"numpy.load",
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.subplots",
"numpy.random.randint",
"numpy.arange",
"numpy.sqrt",
"pandas.concat",
"matplotlib.pyplot.clf",
"pandas.read_csv",
"scipy.stats.t.ppf"
]
] |
dianlujitao/WGAN-GP | [
"d50c26013aca2a6ca7d2c606125614f5bf2c2b01"
] | [
"utils.py"
] | [
"import torch\nimport matplotlib.pyplot as plt\nimport torchvision.utils as vutils\n\n\ndef visualize_data(dataloader):\n batch = next(iter(dataloader))\n plt.figure(figsize=(8, 8))\n plt.axis(\"off\")\n plt.title(\"Training images\")\n images = vutils.make_grid(batch[0][:64], normalize=True, range=(-1, 1))\n images = images.permute(1, 2, 0)\n plt.imshow(images)\n plt.show()\n\n\ndef visualize_batch(data, batches_done):\n plt.figure(figsize=(5, 5))\n plt.axis(\"off\")\n plt.title(\"Batches done %d\" % batches_done)\n images = vutils.make_grid(data.cpu().detach()[:25],\n 5,\n normalize=True,\n range=(-1, 1))\n images = images.permute(1, 2, 0)\n plt.imshow(images)\n plt.show()\n\n\ndef save_imgs(generator):\n r, c = 5, 5\n noise = torch.randn(r * c, 100)\n # gen_imgs should be shape (25, 64, 64, 3)\n gen_imgs = generator(noise)\n fig, axs = plt.subplots(r, c)\n cnt = 0\n for i in range(r):\n for j in range(c):\n axs[i, j].imshow(gen_imgs[cnt, :, :, :])\n axs[i, j].axis('off')\n cnt += 1\n fig.savefig(\"samples/output.png\")\n plt.close()\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"torch.randn",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.imshow"
]
] |
brettkoonce/datasets | [
"55bb2a80ab674c2f6254ac74d90bd6e5f478e895"
] | [
"tensorflow_datasets/core/file_format_adapter_test.py"
] | [
"# coding=utf-8\n# Copyright 2018 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tensorflow_datasets.core.file_format_adapter.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow_datasets.core import dataset_builder\nfrom tensorflow_datasets.core import dataset_info\nfrom tensorflow_datasets.core import features\nfrom tensorflow_datasets.core import file_format_adapter\nfrom tensorflow_datasets.core import splits\nfrom tensorflow_datasets.core import test_utils\n\ntf.enable_eager_execution()\n\n\nclass DummyTFRecordBuilder(dataset_builder.GeneratorBasedBuilder):\n\n def _split_generators(self, dl_manager):\n return [\n splits.SplitGenerator(\n name=[splits.Split.TRAIN, splits.Split.VALIDATION],\n num_shards=[2, 1],\n gen_kwargs={\"range_\": range(30)}),\n splits.SplitGenerator(\n name=splits.Split.TEST,\n num_shards=1,\n gen_kwargs={\"range_\": range(30, 40)}),\n ]\n\n def _generate_examples(self, range_):\n for i in range_:\n yield self.info.features.encode_example({\n \"x\": i,\n \"y\": np.array([-i]).astype(np.int64)[0],\n \"z\": tf.compat.as_text(str(i))\n })\n\n def _info(self):\n return dataset_info.DatasetInfo(\n features=features.FeaturesDict({\n \"x\": tf.int64,\n \"y\": tf.int64,\n \"z\": tf.string,\n }),\n )\n\n\nclass DummyCSVBuilder(DummyTFRecordBuilder):\n\n @property\n def _file_format_adapter(self):\n file_adapter_cls = file_format_adapter.CSVAdapter\n serialized_info = self.info.features.get_serialized_info()\n return file_adapter_cls(serialized_info)\n\n\nclass FileFormatAdapterTest(tf.test.TestCase):\n\n def _test_generator_based_builder(self, builder_cls):\n with test_utils.tmp_dir(self.get_temp_dir()) as tmp_dir:\n builder = builder_cls(data_dir=tmp_dir)\n builder.download_and_prepare()\n train_dataset = builder.as_dataset(split=splits.Split.TRAIN)\n valid_dataset = builder.as_dataset(split=splits.Split.VALIDATION)\n test_dataset = builder.as_dataset(split=splits.Split.TEST)\n\n def validate_dataset(dataset, min_val, max_val, test_range=False):\n els = []\n for el in dataset:\n x, y, z = el[\"x\"].numpy(), el[\"y\"].numpy(), el[\"z\"].numpy()\n self.assertEqual(-x, y)\n self.assertEqual(x, int(z))\n self.assertGreaterEqual(x, min_val)\n self.assertLess(x, max_val)\n els.append(x)\n if test_range:\n self.assertEqual(list(range(min_val, max_val)), sorted(els))\n\n validate_dataset(train_dataset, 0, 30)\n validate_dataset(valid_dataset, 0, 30)\n validate_dataset(test_dataset, 30, 40, True)\n\n def test_tfrecords(self):\n self._test_generator_based_builder(DummyTFRecordBuilder)\n\n def test_csv(self):\n self._test_generator_based_builder(DummyCSVBuilder)\n\n\nclass TFRecordUtilsTest(tf.test.TestCase):\n\n def test_dict_to_example(self):\n example = file_format_adapter._dict_to_tf_example({\n \"a\": 1,\n \"b\": [\"foo\", \"bar\"],\n \"c\": [2.0],\n })\n feature = example.features.feature\n 
self.assertEqual([1], list(feature[\"a\"].int64_list.value))\n self.assertEqual([b\"foo\", b\"bar\"], list(feature[\"b\"].bytes_list.value))\n self.assertEqual([2.0], list(feature[\"c\"].float_list.value))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"numpy.array",
"tensorflow.enable_eager_execution",
"tensorflow.test.main"
]
] |
zheng-xing/highresnet | [
"4c2e91f993dbdcb63f67837315eded5e5931518e"
] | [
"highresnet/modules/residual.py"
] | [
"import torch\nimport torch.nn as nn\n\nfrom .convolution import ConvolutionalBlock\n\n\nBATCH_DIM = 0\nCHANNELS_DIM = 1\n\n\nclass ResidualBlock(nn.Module):\n def __init__(\n self,\n in_channels,\n out_channels,\n num_layers,\n dilation,\n dimensions,\n batch_norm=True,\n instance_norm=False,\n residual=True,\n residual_type='pad',\n padding_mode='constant',\n ):\n assert residual_type in ('pad', 'project')\n super().__init__()\n self.residual = residual\n self.change_dimension = in_channels != out_channels\n self.residual_type = residual_type\n self.dimensions = dimensions\n if self.change_dimension:\n if residual_type == 'project':\n conv_class = nn.Conv2d if dimensions == 2 else nn.Conv3d\n self.change_dim_layer = conv_class(\n in_channels,\n out_channels,\n kernel_size=1,\n dilation=dilation,\n bias=False, # as in NiftyNet and PyTorch's ResNet model\n )\n\n conv_blocks = nn.ModuleList()\n for _ in range(num_layers):\n conv_block = ConvolutionalBlock(\n in_channels,\n out_channels,\n dilation,\n dimensions,\n batch_norm=batch_norm,\n instance_norm=instance_norm,\n padding_mode=padding_mode,\n )\n conv_blocks.append(conv_block)\n in_channels = out_channels\n self.residual_block = nn.Sequential(*conv_blocks)\n\n def forward(self, x):\n \"\"\"\n From the original ResNet paper, page 4:\n\n \"When the dimensions increase, we consider two options:\n\n (A) The shortcut still performs identity mapping,\n with extra zero entries padded for increasing dimensions.\n This option introduces no extra parameter\n\n (B) The projection shortcut in Eqn.(2) is used to\n match dimensions (done by 1x1 convolutions).\n\n For both options, when the shortcuts go across feature maps of\n two sizes, they are performed with a stride of 2.\"\n \"\"\"\n out = self.residual_block(x)\n if self.residual:\n if self.change_dimension:\n if self.residual_type == 'project':\n x = self.change_dim_layer(x)\n elif self.residual_type == 'pad':\n batch_size = x.shape[BATCH_DIM]\n x_channels = x.shape[CHANNELS_DIM]\n out_channels = out.shape[CHANNELS_DIM]\n spatial_dims = x.shape[2:]\n diff_channels = out_channels - x_channels\n zeros_half = x.new_zeros(\n batch_size, diff_channels // 2, *spatial_dims)\n x = torch.cat((zeros_half, x, zeros_half),\n dim=CHANNELS_DIM)\n out = x + out\n return out\n"
] | [
[
"torch.nn.Sequential",
"torch.cat",
"torch.nn.ModuleList"
]
] |
PawelA/DALI | [
"3a4fc3373b119075e81a55eeb5dcc92e1ab1315a"
] | [
"docs/examples/use_cases/tensorflow/resnet-n/nvutils/image_processing.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport tensorflow as tf\nimport sys\nimport os\nimport numpy as np\nfrom subprocess import call\nimport horovod.tensorflow.keras as hvd\n\nfrom nvidia.dali.pipeline import Pipeline\nimport nvidia.dali.fn as fn\nimport nvidia.dali.types as types\nimport nvidia.dali.tfrecord as tfrec\ntry:\n import nvidia.dali.plugin.tf as dali_tf\nexcept:\n pass\n\nNUM_CLASSES = 1000\n\n_R_MEAN = 123.68\n_G_MEAN = 116.78\n_B_MEAN = 103.94\nCHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]\n\ndef _deserialize_image_record(record):\n feature_map = {\n 'image/encoded': tf.io.FixedLenFeature([ ], tf.string, ''),\n 'image/class/label': tf.io.FixedLenFeature([1], tf.int64, -1),\n 'image/class/text': tf.io.FixedLenFeature([ ], tf.string, ''),\n 'image/object/bbox/xmin': tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymin': tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/xmax': tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymax': tf.io.VarLenFeature(dtype=tf.float32)\n }\n with tf.name_scope('deserialize_image_record'):\n obj = tf.io.parse_single_example(record, feature_map)\n imgdata = obj['image/encoded']\n label = tf.cast(obj['image/class/label'], tf.int32)\n bbox = tf.stack([obj['image/object/bbox/%s'%x].values\n for x in ['ymin', 'xmin', 'ymax', 'xmax']])\n bbox = tf.transpose(tf.expand_dims(bbox, 0), [0,2,1])\n text = obj['image/class/text']\n return imgdata, label, bbox, text\n\ndef _decode_jpeg(imgdata, channels=3):\n return tf.image.decode_jpeg(imgdata, channels=channels,\n fancy_upscaling=False,\n dct_method='INTEGER_FAST')\n\ndef _crop_and_resize_image(image, original_bbox, height, width, deterministic=False, random_crop=False):\n with tf.name_scope('random_crop_and_resize'):\n eval_crop_ratio = 0.8\n if random_crop:\n bbox_begin, bbox_size, bbox = \\\n tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=tf.zeros(shape=[1,0,4]), # No bounding boxes\n min_object_covered=0.1,\n aspect_ratio_range=[0.8, 1.25],\n area_range=[0.1, 1.0],\n max_attempts=100,\n seed=7 * (1+hvd.rank()) if deterministic else 0,\n use_image_if_no_bounding_boxes=True)\n image = tf.slice(image, bbox_begin, bbox_size)\n else:\n # Central crop\n image = tf.image.central_crop(image, eval_crop_ratio)\n image = tf.compat.v1.image.resize_images(\n image,\n [height, width],\n tf.image.ResizeMethod.BILINEAR,\n align_corners=False)\n image.set_shape([height, width, 3])\n return image\n\ndef _distort_image_color(image, order=0):\n with tf.name_scope('distort_color'):\n image = tf.math.multiply(image, 1. / 255.)\n brightness = lambda img: tf.image.random_brightness(img, max_delta=32. 
/ 255.)\n saturation = lambda img: tf.image.random_saturation(img, lower=0.5, upper=1.5)\n hue = lambda img: tf.image.random_hue(img, max_delta=0.2)\n contrast = lambda img: tf.image.random_contrast(img, lower=0.5, upper=1.5)\n if order == 0: ops = [brightness, saturation, hue, contrast]\n else: ops = [brightness, contrast, saturation, hue]\n for op in ops:\n image = op(image)\n # The random_* ops do not necessarily clamp the output range\n image = tf.clip_by_value(image, 0.0, 1.0)\n # Restore the original scaling\n image = tf.multiply(image, 255.)\n return image\n\ndef _parse_and_preprocess_image_record(record, height, width,\n deterministic=False, random_crop=False,\n distort_color=False):\n imgdata, label, bbox, text = _deserialize_image_record(record)\n label -= 1 # Change to 0-based (don't use background class)\n with tf.name_scope('preprocess_train'):\n try: image = _decode_jpeg(imgdata, channels=3)\n except: image = tf.image.decode_png(imgdata, channels=3)\n\n image = _crop_and_resize_image(image, bbox, height, width, deterministic, random_crop)\n\n # image comes out of crop as float32, which is what distort_color expects\n if distort_color:\n image = _distort_image_color(image)\n image = tf.cast(image, tf.float32)\n if random_crop:\n image = tf.image.random_flip_left_right(image,\n seed=11 * (1 + hvd.rank()) if deterministic else None)\n return image, label\n\n# Synthetic images are generated once, and the same batch is repeated again and\n# again. The H2D copy is also repeated.\ndef fake_image_set(batch_size, height, width, with_label=True):\n data_shape = [batch_size, height, width, 3] # 3 channels\n images = tf.random.truncated_normal(\n data_shape, dtype=tf.float32, mean=112, stddev=70,\n name='fake_images')\n images = tf.clip_by_value(images, 0.0, 255.0)\n images = tf.cast(images, tf.float32)\n if with_label:\n labels = tf.random.uniform(\n [batch_size], minval=0, maxval=1000-1, dtype=tf.int32,\n name='fake_labels')\n ds = tf.data.Dataset.from_tensor_slices(([images], [labels]))\n else:\n ds = tf.data.Dataset.from_tensor_slices(([images]))\n ds = ds.repeat()\n return ds\n\ndef get_dali_pipeline(\n tfrec_filenames,\n tfrec_idx_filenames,\n height, width,\n batch_size,\n num_threads,\n device_id,\n shard_id,\n num_gpus,\n deterministic=False,\n dali_cpu=True,\n training=True):\n\n kwargs = dict()\n\n if deterministic:\n kwargs['seed'] = 7 * (1 + hvd.rank())\n pipeline = Pipeline(batch_size, num_threads, device_id, **kwargs)\n with pipeline:\n inputs = fn.tfrecord_reader(\n path=tfrec_filenames,\n index_path=tfrec_idx_filenames,\n random_shuffle=training,\n shard_id=shard_id,\n num_shards=num_gpus,\n initial_fill=10000,\n features={\n 'image/encoded': tfrec.FixedLenFeature((), tfrec.string, \"\"),\n 'image/class/label': tfrec.FixedLenFeature([1], tfrec.int64, -1),\n 'image/class/text': tfrec.FixedLenFeature([ ], tfrec.string, ''),\n 'image/object/bbox/xmin': tfrec.VarLenFeature(tfrec.float32, 0.0),\n 'image/object/bbox/ymin': tfrec.VarLenFeature(tfrec.float32, 0.0),\n 'image/object/bbox/xmax': tfrec.VarLenFeature(tfrec.float32, 0.0),\n 'image/object/bbox/ymax': tfrec.VarLenFeature(tfrec.float32, 0.0)})\n\n decode_device = \"cpu\" if dali_cpu else \"mixed\"\n resize_device = \"cpu\" if dali_cpu else \"gpu\"\n if training:\n images = fn.image_decoder_random_crop(\n inputs[\"image/encoded\"],\n device=decode_device,\n output_type=types.RGB,\n random_aspect_ratio=[0.75, 1.25],\n random_area=[0.05, 1.0],\n num_attempts=100)\n images = fn.resize(images, device=resize_device, 
resize_x=width, resize_y=height)\n else:\n images = fn.image_decoder(\n inputs[\"image/encoded\"],\n device=decode_device,\n output_type=types.RGB)\n # Make sure that every image > 224 for CropMirrorNormalize\n images = fn.resize(images, device=resize_device, resize_shorter=256)\n\n images = fn.crop_mirror_normalize(\n images.gpu(),\n dtype=types.FLOAT,\n crop=(height, width),\n mean=[123.68, 116.78, 103.94],\n std=[58.4, 57.12, 57.3],\n output_layout=\"HWC\",\n mirror = fn.random.coin_flip())\n labels = inputs[\"image/class/label\"].gpu()\n\n labels -= 1 # Change to 0-based (don't use background class)\n pipeline.set_outputs(images, labels)\n return pipeline\n\nclass DALIPreprocessor(object):\n def __init__(self,\n filenames,\n idx_filenames,\n height, width,\n batch_size,\n num_threads,\n dtype=tf.uint8,\n dali_cpu=True,\n deterministic=False,\n training=False):\n device_id = hvd.local_rank()\n shard_id = hvd.rank()\n num_gpus = hvd.size()\n self.pipe = get_dali_pipeline(\n tfrec_filenames=filenames,\n tfrec_idx_filenames=idx_filenames,\n height=height,\n width=width,\n batch_size=batch_size,\n num_threads=num_threads,\n device_id=device_id,\n shard_id=shard_id,\n num_gpus=num_gpus,\n deterministic=deterministic,\n dali_cpu=dali_cpu,\n training=training)\n\n self.daliop = dali_tf.DALIIterator()\n\n self.batch_size = batch_size\n self.height = height\n self.width = width\n self.device_id = device_id\n\n self.dalidataset = dali_tf.DALIDataset(\n pipeline=self.pipe,\n output_shapes=((batch_size, height, width, 3), (batch_size)),\n batch_size=batch_size,\n output_dtypes=(tf.float32, tf.int64),\n device_id=device_id)\n\n def get_device_minibatches(self):\n with tf.device(\"/gpu:0\"):\n images, labels = self.daliop(\n pipeline=self.pipe,\n shapes=[(self.batch_size, self.height, self.width, 3), ()],\n dtypes=[tf.float32, tf.int64],\n device_id=self.device_id)\n return images, labels\n\n def get_device_dataset(self):\n return self.dalidataset\n\ndef image_set(filenames, batch_size, height, width, training=False,\n distort_color=False, num_threads=10, nsummary=10,\n deterministic=False, use_dali=None, idx_filenames=None):\n if use_dali:\n if idx_filenames is None:\n raise ValueError(\"Must provide idx_filenames if Dali is enabled\")\n\n preprocessor = DALIPreprocessor(\n filenames,\n idx_filenames,\n height, width,\n batch_size,\n num_threads,\n dali_cpu=True if use_dali == 'CPU' else False,\n deterministic=deterministic, training=training)\n return preprocessor\n else:\n shuffle_buffer_size = 10000\n num_readers = 10\n ds = tf.data.Dataset.from_tensor_slices(filenames)\n\n # AUTOTUNE can give better perf for non-horovod cases\n thread_config = num_threads\n\n # shard should be before any randomizing operations\n if training:\n ds = ds.shard(hvd.size(), hvd.rank())\n\n # read up to num_readers files and interleave their records\n ds = ds.interleave(\n tf.data.TFRecordDataset, cycle_length=num_readers)\n\n if training:\n # Improve training performance when training data is in remote storage and\n # can fit into worker memory.\n ds = ds.cache()\n\n if training:\n # shuffle data before repeating to respect epoch boundaries\n ds = ds.shuffle(shuffle_buffer_size)\n ds = ds.repeat()\n\n preproc_func = (lambda record:\n _parse_and_preprocess_image_record(record, height, width,\n deterministic=deterministic, random_crop=training,\n distort_color=distort_color))\n ds = ds.map(preproc_func,\n num_parallel_calls=thread_config)\n\n ds = ds.batch(batch_size, drop_remainder=True)\n\n # prefetching\n ds = 
ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n\n options = tf.data.Options()\n options.experimental_slack = True\n ds = ds.with_options(options)\n\n return ds\n\n"
] | [
[
"tensorflow.image.central_crop",
"tensorflow.image.random_hue",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.clip_by_value",
"tensorflow.stack",
"tensorflow.image.decode_jpeg",
"tensorflow.cast",
"tensorflow.shape",
"tensorflow.io.FixedLenFeature",
"tensorflow.image.random_brightness",
"tensorflow.data.Options",
"tensorflow.zeros",
"tensorflow.expand_dims",
"tensorflow.compat.v1.image.resize_images",
"tensorflow.random.uniform",
"tensorflow.name_scope",
"tensorflow.io.VarLenFeature",
"tensorflow.image.random_saturation",
"tensorflow.multiply",
"tensorflow.image.random_contrast",
"tensorflow.io.parse_single_example",
"tensorflow.math.multiply",
"tensorflow.image.decode_png",
"tensorflow.random.truncated_normal",
"tensorflow.device",
"tensorflow.slice"
]
] |
giovp/SingleCellOpenProblems | [
"8b4243a71f9e4553558b019a08eb46090cd8445e"
] | [
"openproblems/tasks/spatial_decomposition/methods/nnls.py"
] | [
"from ....tools.decorators import method\nfrom ....tools.utils import check_version\nfrom .._utils import normalize_coefficients\nfrom .._utils import obs_means\n\nimport numpy as np\nimport pandas as pd\n\n\n@method(\n method_name=\"NNLS\",\n paper_name=\"Solving Least Squares Problems\",\n paper_url=\"https://epubs.siam.org/doi/pdf/10.1137/1.9781611971217.bm\",\n paper_year=1987,\n code_url=\"https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.nnls.html\", # noqa: E501\n code_version=check_version(\"scipy\"),\n)\ndef nnls_scipy(adata):\n from scipy.optimize import nnls\n from scipy.sparse import issparse\n\n adata_sc = adata.uns[\"sc_reference\"].copy()\n labels = adata_sc.obs[\"label\"].cat.categories\n adata_means = obs_means(adata_sc, \"label\")\n\n if issparse(adata.X):\n X = adata_means.X.T.toarray()\n y = adata.X.T.toarray()\n else:\n X = adata_means.X.T\n y = adata.X.T\n res = np.zeros((y.shape[1], X.shape[1])) # (voxels,cells)\n for i in range(y.shape[1]):\n x, _ = nnls(X, y[:, i])\n res[i] = x\n\n res_prop = normalize_coefficients(res)\n\n adata.obsm[\"proportions_pred\"] = pd.DataFrame(\n res_prop, columns=labels, index=adata.obs_names\n )\n\n return adata\n"
] | [
[
"scipy.optimize.nnls",
"scipy.sparse.issparse",
"numpy.zeros",
"pandas.DataFrame"
]
] |
dancoombs/fastai | [
"762aa0847fa8a7cec13cceab7e50d1c9ace77ed0"
] | [
"nbs/vgg16.py"
] | [
"from __future__ import division, print_function\n\nimport os, json\nfrom glob import glob\nimport numpy as np\nfrom scipy import misc, ndimage\nfrom scipy.ndimage.interpolation import zoom\n\nfrom keras.utils.data_utils import get_file\nfrom keras import backend as K\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.utils.data_utils import get_file\nfrom keras.models import Sequential\nfrom keras.layers.core import Flatten, Dense, Dropout, Lambda\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D\nfrom keras.layers.pooling import GlobalAveragePooling2D\nfrom keras.optimizers import SGD, RMSprop, Adam\nfrom keras.preprocessing import image\n\n\nvgg_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape((3,1,1))\ndef vgg_preprocess(x):\n x = x - vgg_mean\n return x[:, ::-1] # reverse axis rgb->bgr\n\n\nclass Vgg16():\n \"\"\"The VGG 16 Imagenet model\"\"\"\n\n\n def __init__(self):\n self.FILE_PATH = 'http://www.platform.ai/models/'\n self.create()\n self.get_classes()\n\n\n def get_classes(self):\n fname = 'imagenet_class_index.json'\n fpath = get_file(fname, self.FILE_PATH+fname, cache_subdir='models')\n with open(fpath) as f:\n class_dict = json.load(f)\n self.classes = [class_dict[str(i)][1] for i in range(len(class_dict))]\n\n def predict(self, imgs, details=False):\n all_preds = self.model.predict(imgs)\n idxs = np.argmax(all_preds, axis=1)\n preds = [all_preds[i, idxs[i]] for i in range(len(idxs))]\n classes = [self.classes[idx] for idx in idxs]\n return np.array(preds), idxs, classes\n\n\n def ConvBlock(self, layers, filters):\n model = self.model\n for i in range(layers):\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(filters, 3, 3, activation='relu'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n\n def FCBlock(self):\n model = self.model\n model.add(Dense(4096, activation='relu'))\n model.add(Dropout(0.5))\n\n\n def create(self):\n model = self.model = Sequential()\n model.add(Lambda(vgg_preprocess, input_shape=(3,224,224)))\n\n self.ConvBlock(2, 64)\n self.ConvBlock(2, 128)\n self.ConvBlock(3, 256)\n self.ConvBlock(3, 512)\n self.ConvBlock(3, 512)\n\n model.add(Flatten())\n self.FCBlock()\n self.FCBlock()\n model.add(Dense(1000, activation='softmax'))\n\n fname = 'vgg16.h5'\n model.load_weights(get_file(fname, self.FILE_PATH+fname, cache_subdir='models'))\n\n\n def get_batches(self, path, gen=image.ImageDataGenerator(), shuffle=True, batch_size=8, class_mode='categorical'):\n return gen.flow_from_directory(path, target_size=(224,224),\n class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)\n\n\n def ft(self, num):\n model = self.model\n model.pop()\n for layer in model.layers: layer.trainable=False\n model.add(Dense(num, activation='softmax'))\n self.compile()\n\n def finetune(self, batches):\n model = self.model\n model.pop()\n for layer in model.layers: layer.trainable=False\n model.add(Dense(batches.nb_class, activation='softmax'))\n self.compile()\n\n\n def compile(self, lr=0.001):\n self.model.compile(optimizer=Adam(lr=lr),\n loss='categorical_crossentropy', metrics=['accuracy'])\n\n\n def fit_data(self, trn, labels, val, val_labels, nb_epoch=1, batch_size=64):\n self.model.fit(trn, labels, nb_epoch=nb_epoch,\n validation_data=(val, val_labels), batch_size=batch_size)\n\n\n def fit(self, batches, val_batches, nb_epoch=1):\n self.model.fit_generator(batches, samples_per_epoch=batches.nb_sample, nb_epoch=nb_epoch,\n validation_data=val_batches, 
nb_val_samples=val_batches.nb_sample)\n\n\n def test(self, path, batch_size=8):\n test_batches = self.get_batches(path, shuffle=False, batch_size=batch_size, class_mode=None)\n return test_batches, self.model.predict_generator(test_batches, test_batches.nb_sample)\n\n"
] | [
[
"numpy.array",
"numpy.argmax"
]
] |
sevimcaliskann/is_fid_score | [
"24d6b2844a9e85e66e7c35362e7eca9f67abde33"
] | [
"inception_score.py"
] | [
"import torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nimport torch.utils.data\n\nfrom torchvision.models.inception import inception_v3\n\nimport numpy as np\nfrom scipy.stats import entropy\n\nfrom inception import InceptionV3\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\nimport os\nimport pathlib\nfrom tqdm import tqdm\nfrom scipy.misc import imread, imresize\n\n\nparser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)\nparser.add_argument('path', type=str, nargs=2,\n help=('Path to the generated images or '\n 'to .npz statistic files'))\nparser.add_argument('--batch-size', type=int, default=50,\n help='Batch size to use')\nparser.add_argument('--dims', type=int, default=2048,\n choices=list(InceptionV3.BLOCK_INDEX_BY_DIM),\n help=('Dimensionality of Inception features to use. '\n 'By default, uses pool3 features'))\nparser.add_argument('-c', '--gpu', default='', type=str,\n help='GPU to use (leave blank for CPU only)')\n\n\n\ndef get_pred(x, model):\n tmp = model.model(x)\n tmp = model.emo_layer(tmp)\n return F.softmax(tmp).data.cpu().numpy()\n\n\ndef get_scores(files, model, batch_size=50, dims=8,\n cuda=False, verbose=False):\n \"\"\"Calculates the activations of the pool_3 layer for all images.\n\n Params:\n -- files : List of image files paths\n -- model : Instance of inception model\n -- batch_size : Batch size of images for the model to process at once.\n Make sure that the number of samples is a multiple of\n the batch size, otherwise some samples are ignored. This\n behavior is retained to match the original FID score\n implementation.\n -- dims : Dimensionality of features returned by Inception\n -- cuda : If set to True, use GPU\n -- verbose : If set to True and parameter out_step is given, the number\n of calculated batches is reported.\n Returns:\n -- A numpy array of dimension (num images, dims) that contains the\n activations of the given tensor when feeding inception with the\n query tensor.\n \"\"\"\n model.model.eval()\n model.emo_layer.eval()\n\n if len(files) % batch_size != 0:\n print(('Warning: number of images is not a multiple of the '\n 'batch size. Some samples are going to be ignored.'))\n if batch_size > len(files):\n print(('Warning: batch size is bigger than the data size. 
'\n 'Setting batch size to data size'))\n batch_size = len(files)\n\n n_batches = len(files) // batch_size\n n_used_imgs = n_batches * batch_size\n N = len(files)\n\n pred_arr = np.empty((n_used_imgs, dims))\n\n for i in tqdm(range(n_batches)):\n if verbose:\n print('\\rPropagating batch %d/%d' % (i + 1, n_batches))\n start = i * batch_size\n end = start + batch_size\n\n images = [imread(str(f)).astype(np.float32)\n for f in files[start:end]]\n\n single_channel_images = [np.stack((img,)*3, axis=-1)\n for img in images if len(img.shape)==2]\n images.extend(single_channel_images)\n\n images = np.array([imresize(img, (299, 299)).astype(np.float32)\n for img in images if len(img.shape)>2 and img.shape[2]==3])\n\n # Reshape to (n_images, 3, height, width)\n images = images.transpose((0, 3, 1, 2))\n images /= 255\n\n batch = torch.from_numpy(images).type(torch.FloatTensor)\n if cuda:\n batch = batch.cuda()\n\n pred = get_pred(batch, model)\n pred_arr[start:end] = pred.reshape(batch_size, -1)\n\n # Now compute the mean kl-div\n split_scores = []\n splits = 8\n\n for k in range(splits):\n part = pred_arr[k * (N // splits): (k+1) * (N // splits), :]\n py = np.mean(part, axis=0)\n scores = []\n for i in range(part.shape[0]):\n pyx = part[i, :]\n scores.append(entropy(pyx, py))\n split_scores.append(np.exp(np.mean(scores)))\n\n if verbose:\n print(' done')\n\n return np.mean(split_scores), np.std(split_scores)\n\nif __name__ == '__main__':\n\n args = parser.parse_args()\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\n model = InceptionV3()\n if args.gpu != '':\n model.cuda()\n\n for p in args.path:\n if not os.path.exists(p):\n raise RuntimeError('Invalid path: %s' % p)\n\n path = pathlib.Path(p)\n files = list(path.glob('*.jpg')) + list(path.glob('*.png'))\n m, s = get_scores(files, model, batch_size=50, dims=8,\n cuda=args.gpu != '', verbose=True)\n print('For path -> %s , the inception scores are : mean: %.3f, STD: %.3f ' % (p, m, s))\n"
] | [
[
"numpy.empty",
"scipy.stats.entropy",
"numpy.mean",
"torch.from_numpy",
"scipy.misc.imresize",
"numpy.std",
"numpy.stack",
"torch.nn.functional.softmax"
]
] |
yoavkt/causallib | [
"cd258bd8c7ff5b5323a1f649ee7c887dcecff991"
] | [
"causallib/survival/univariate_curve_fitter.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom typing import Optional\nfrom sklearn.base import BaseEstimator as SKLearnBaseEstimator\nfrom .survival_utils import safe_join\nfrom .regression_curve_fitter import RegressionCurveFitter\n\n\nclass UnivariateCurveFitter:\n def __init__(self, learner: Optional[SKLearnBaseEstimator] = None):\n \"\"\"\n Default implementation of a univariate survival curve fitter.\n Construct a curve fitter, either non-parametric (Kaplan-Meier) or parametric.\n API follows 'lifelines' convention for univariate models, see here for example:\n https://lifelines.readthedocs.io/en/latest/fitters/univariate/KaplanMeierFitter.html#lifelines.fitters.kaplan_meier_fitter.KaplanMeierFitter.fit\n Args:\n learner: optional scikit-learn estimator (needs to implement `predict_proba`). If provided, will\n compute parametric curve by fitting a time-varying hazards model. if None, will compute\n non-parametric Kaplan-Meier estimator.\n \"\"\"\n self.learner = learner\n\n def fit(self, durations, event_observed=None, weights=None):\n \"\"\"\n Fits a univariate survival curve (Kaplan-Meier or parametric, if a learner was provided in constructor)\n\n Args:\n durations (Iterable): Duration subject was observed\n event_observed (Optional[Iterable]): Boolean or 0/1 iterable, where True means 'outcome event' and False\n means 'right censoring'. If unspecified, assumes that all events are\n 'outcome' (no censoring).\n weights (Optional[Iterable]): Optional subject weights\n\n Returns:\n Self\n \"\"\"\n # If 'event_observed' is unspecified, assumes that all events are 'outcome' (no censoring).\n if event_observed is None:\n event_observed = pd.Series(data=1, index=durations.index)\n\n if weights is None:\n weights = pd.Series(data=1, index=durations.index, name='weights')\n else:\n weights = pd.Series(data=weights, index=durations.index, name='weights')\n self.timeline_ = np.sort(np.unique(durations))\n\n # If sklearn classifier is provided, fit parametric curve\n if self.learner is not None:\n self.curve_fitter_ = RegressionCurveFitter(learner=self.learner)\n fit_data, (duration_col_name, event_col_name, weights_col_name) = safe_join(\n df=None, list_of_series=[durations, event_observed, weights], return_series_names=True\n )\n self.curve_fitter_.fit(df=fit_data, duration_col=duration_col_name, event_col=event_col_name,\n weights_col=weights_col_name)\n\n # Else, compute Kaplan Meier estimator non parametrically\n else:\n # Code inspired by lifelines KaplanMeierFitter\n df = pd.DataFrame({\n 't': durations,\n 'removed': weights.to_numpy(),\n 'observed': weights.to_numpy() * (event_observed.to_numpy(dtype=bool))\n })\n\n death_table = df.groupby(\"t\").sum()\n death_table['censored'] = (death_table['removed'] - death_table['observed']).astype(int)\n\n births = pd.DataFrame(np.zeros(durations.shape[0]), columns=[\"t\"])\n births['entrance'] = np.asarray(weights)\n births_table = births.groupby(\"t\").sum()\n event_table = death_table.join(births_table, how=\"outer\", sort=True).fillna(\n 0) # http://wesmckinney.com/blog/?p=414\n event_table['at_risk'] = event_table['entrance'].cumsum() - event_table['removed'].cumsum().shift(1).fillna(\n 0)\n self.event_table_ = event_table\n\n return self\n\n def predict(self, times=None, interpolate=False):\n \"\"\"\n Compute survival curve for time points given in 'times' param.\n Args:\n times: sequence of time points for prediction\n interpolate: if True, linearly interpolate non-observed times. 
Otherwise, repeat last observed time point.\n\n Returns:\n pd.Series: with times index and survival values\n\n \"\"\"\n if times is None:\n times = self.timeline_\n else:\n times = sorted(times)\n\n if self.learner is not None:\n # Predict parametric survival curve\n survival = self.curve_fitter_.predict_survival_function(X=None, times=pd.Series(times))\n else:\n # Compute hazard at each time step\n hazard = self.event_table_['observed'] / self.event_table_['at_risk']\n timeline = hazard.index # if computed non-parametrically, timeline is all observed data points\n # Compute survival from hazards\n survival = pd.Series(data=np.cumprod(1 - hazard), index=timeline, name='survival')\n\n if interpolate:\n survival = pd.Series(data=np.interp(times, survival.index.values, survival.values),\n index=pd.Index(data=times, name='t'), name='survival')\n else:\n survival = survival.asof(times).squeeze()\n\n # Round near-zero values (may occur when using weights and all observed subjects \"died\" at some point)\n survival[np.abs(survival) < np.finfo(float).resolution] = 0\n\n return survival\n"
] | [
[
"pandas.Index",
"numpy.cumprod",
"numpy.asarray",
"numpy.zeros",
"numpy.interp",
"numpy.finfo",
"numpy.abs",
"pandas.Series",
"numpy.unique"
]
] |
aprasad16/text | [
"c1607c98c70534abc3c75eb231830ce6d87be645"
] | [
"tensorflow_text/python/ops/split_merge_from_logits_tokenizer.py"
] | [
"# coding=utf-8\n# Copyright 2021 TF.Text Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Ops to tokenize words into subwords.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.eager import monitoring\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor\nfrom tensorflow_text.python.ops.tokenization import TokenizerWithOffsets\n\n# pylint: disable=g-bad-import-order\nfrom tensorflow.python.framework import load_library\nfrom tensorflow.python.platform import resource_loader\ngen_split_merge_from_logits_tokenizer = load_library.load_op_library(resource_loader.get_path_to_datafile('_split_merge_from_logits_tokenizer.so'))\n\n\n_tf_text_split_merge_from_logits_tokenizer_op_create_counter = monitoring.Counter(\n '/nlx/api/python/split_merge_from_logits_tokenizer_create_counter',\n 'Counter for number of SplitMergeFromLogitsTokenizer instances '\n 'created in Python.')\n\n\nclass SplitMergeFromLogitsTokenizer(TokenizerWithOffsets):\n \"\"\"Tokenizes a tensor of UTF-8 string into words according to logits.\"\"\"\n\n def __init__(self, force_split_at_break_character=True):\n \"\"\"Initializes a new instance.\n\n Args:\n force_split_at_break_character: a bool that indicates whether to force\n start a new word after an ICU-defined whitespace character. Regardless\n of this parameter, we never include a whitespace into a token, and we\n always ignore the split/merge action for the whitespace character\n itself. 
This parameter indicates what happens after a whitespace.\n * if force_split_at_break_character is true, create a new word starting\n at the first non-space character, regardless of the 0/1 label for\n that character, for instance:\n\n ```python\n s = [2.0, 1.0] # sample pair of logits indicating a split action\n m = [1.0, 3.0] # sample pair of logits indicating a merge action\n\n strings=[\"New York\"]\n logits=[[s, m, m, s, m, m, m, m]]\n output tokens=[[\"New\", \"York\"]]\n\n strings=[\"New York\"]\n logits=[[s, m, m, m, m, m, m, m]]\n output tokens=[[\"New\", \"York\"]]\n\n strings=[\"New York\"],\n logits=[[s, m, m, m, s, m, m, m]]\n output tokens=[[\"New\", \"York\"]]\n ```\n * otherwise, create a new word / continue the current one depending on\n the action for the first non-whitespace character.\n\n ```python\n s = [2.0, 1.0] # sample pair of logits indicating a split action\n m = [1.0, 3.0] # sample pair of logits indicating a merge action\n\n strings=[\"New York\"],\n logits=[[s, m, m, s, m, m, m, m]]\n output tokens=[[\"NewYork\"]]\n\n strings=[\"New York\"],\n logits=[[s, m, m, m, m, m, m, m]]\n output tokens=[[\"NewYork\"]]\n\n strings=[\"New York\"],\n logits=[[s, m, m, m, s, m, m, m]]\n output tokens=[[\"New\", \"York\"]]\n ```\n \"\"\"\n super(SplitMergeFromLogitsTokenizer, self).__init__()\n self._force_split_at_break_character = force_split_at_break_character\n counter = _tf_text_split_merge_from_logits_tokenizer_op_create_counter\n counter.get_cell().increase_by(1)\n\n def tokenize(self, strings, logits):\n \"\"\"Tokenizes a tensor of UTF-8 strings according to logits.\n\n The logits refer to the split / merge action we should take for each\n character. For more info, see the doc for the logits argument below.\n\n ### Example:\n\n >>> strings = ['IloveFlume!', 'and tensorflow']\n >>> logits = [\n ... [\n ... # 'I'\n ... [5.0, -3.2], # I: split\n ... # 'love'\n ... [2.2, -1.0], # l: split\n ... [0.2, 12.0], # o: merge\n ... [0.0, 11.0], # v: merge\n ... [-3.0, 3.0], # e: merge\n ... # 'Flume'\n ... [10.0, 0.0], # F: split\n ... [0.0, 11.0], # l: merge\n ... [0.0, 11.0], # u: merge\n ... [0.0, 12.0], # m: merge\n ... [0.0, 12.0], # e: merge\n ... # '!'\n ... [5.2, -7.0], # !: split\n ... # padding:\n ... [1.0, 0.0], [1.0, 1.0], [1.0, 0.0],\n ... ], [\n ... # 'and'\n ... [2.0, 0.7], # a: split\n ... [0.2, 1.5], # n: merge\n ... [0.5, 2.3], # d: merge\n ... # ' '\n ... [1.7, 7.0], # <space>: merge\n ... # 'tensorflow'\n ... [2.2, 0.1], # t: split\n ... [0.2, 3.1], # e: merge\n ... [1.1, 2.5], # n: merge\n ... [0.7, 0.9], # s: merge\n ... [0.6, 1.0], # o: merge\n ... [0.3, 1.0], # r: merge\n ... [0.2, 2.2], # f: merge\n ... [0.7, 3.1], # l: merge\n ... [0.4, 5.0], # o: merge\n ... [0.8, 6.0], # w: merge\n ... ]]\n >>> tokenizer = SplitMergeFromLogitsTokenizer()\n >>> tokenizer.tokenize(strings, logits)\n <tf.RaggedTensor [[b'I', b'love', b'Flume', b'!'], [b'and', b'tensorflow']]>\n\n Args:\n strings: a 1D `Tensor` of UTF-8 strings.\n logits: 3D Tensor; logits[i,j,0] is the logit for the split action for\n j-th character of strings[i]. logits[i,j,1] is the logit for the merge\n action for that same character. For each character, we pick the action\n with the greatest logit. Split starts a new word at this character and\n merge adds this character to the previous word. The shape of this\n tensor should be (n, m, 2) where n is the number of strings, and m is\n greater or equal with the number of characters from each strings[i]. 
As\n the elements of the strings tensor may have different lengths (in UTF-8\n chars), padding may be required to get a dense vector; for each row, the\n extra (padding) pairs of logits are ignored.\n\n Returns:\n A `RaggedTensor` of strings where `tokens[i, k]` is the string\n content of the `k-th` token in `strings[i]`\n\n Raises:\n InvalidArgumentError: if one of the input Tensors has the wrong shape.\n E.g., if the logits tensor does not have enough elements for one of the\n strings.\n \"\"\"\n subword, _, _ = self.tokenize_with_offsets(strings, logits)\n return subword\n\n def tokenize_with_offsets(self, strings, logits):\n \"\"\"Tokenizes a tensor of UTF-8 strings into tokens with [start,end) offsets.\n\n ### Example:\n\n >>> strings = ['IloveFlume!', 'and tensorflow']\n >>> logits = [\n ... [\n ... # 'I'\n ... [5.0, -3.2], # I: split\n ... # 'love'\n ... [2.2, -1.0], # l: split\n ... [0.2, 12.0], # o: merge\n ... [0.0, 11.0], # v: merge\n ... [-3.0, 3.0], # e: merge\n ... # 'Flume'\n ... [10.0, 0.0], # F: split\n ... [0.0, 11.0], # l: merge\n ... [0.0, 11.0], # u: merge\n ... [0.0, 12.0], # m: merge\n ... [0.0, 12.0], # e: merge\n ... # '!'\n ... [5.2, -7.0], # !: split\n ... # padding:\n ... [1.0, 0.0], [1.0, 1.0], [1.0, 0.0],\n ... ], [\n ... # 'and'\n ... [2.0, 0.7], # a: split\n ... [0.2, 1.5], # n: merge\n ... [0.5, 2.3], # d: merge\n ... # ' '\n ... [1.7, 7.0], # <space>: merge\n ... # 'tensorflow'\n ... [2.2, 0.1], # t: split\n ... [0.2, 3.1], # e: merge\n ... [1.1, 2.5], # n: merge\n ... [0.7, 0.9], # s: merge\n ... [0.6, 1.0], # o: merge\n ... [0.3, 1.0], # r: merge\n ... [0.2, 2.2], # f: merge\n ... [0.7, 3.1], # l: merge\n ... [0.4, 5.0], # o: merge\n ... [0.8, 6.0], # w: merge\n ... ]]\n >>> tokenizer = SplitMergeFromLogitsTokenizer()\n >>> tokens, starts, ends = tokenizer.tokenize_with_offsets(strings, logits)\n >>> tokens\n <tf.RaggedTensor [[b'I', b'love', b'Flume', b'!'], [b'and', b'tensorflow']]>\n >>> starts\n <tf.RaggedTensor [[0, 1, 5, 10], [0, 4]]>\n >>> ends\n <tf.RaggedTensor [[1, 5, 10, 11], [3, 14]]>\n\n Args:\n strings: A 1D `Tensor` of UTF-8 strings.\n logits: 3D Tensor; logits[i,j,0] is the logit for the split action for\n j-th character of strings[i]. logits[i,j,1] is the logit for the merge\n action for that same character. For each character, we pick the action\n with the greatest logit. Split starts a new word at this character and\n merge adds this character to the previous word. The shape of this\n tensor should be (n, m, 2) where n is the number of strings, and m is\n greater or equal with the number of characters from each strings[i]. 
As\n the elements of the strings tensor may have different lengths (in UTF-8\n chars), padding may be required to get a dense vector; for each row, the\n extra (padding) pairs of logits are ignored.\n\n Returns:\n A tuple `(tokens, start_offsets, end_offsets)` where:\n * `tokens` is a `RaggedTensor` of strings where `tokens[i, k]` is\n the string content of the `k-th` token in `strings[i]`\n * `start_offsets` is a `RaggedTensor` of int64s where\n `start_offsets[i, k]` is the byte offset for the start of the\n `k-th` token in `strings[i]`.\n * `end_offsets` is a `RaggedTensor` of int64s where\n `end_offsets[i, k]` is the byte offset immediately after the\n end of the `k-th` token in `strings[i]`.\n\n Raises:\n InvalidArgumentError: if one of the input Tensors has the wrong shape.\n E.g., if the tensor logits does not have enough elements for one of the\n strings.\n \"\"\"\n name = None\n with ops.name_scope(name, 'SplitMergeFromLogitsTokenizer',\n [strings, logits]):\n # Tokenize the strings into tokens.\n force_split = self._force_split_at_break_character\n token_values, token_row_splits, start_values, end_values = (\n gen_split_merge_from_logits_tokenizer.tokenizer_from_logits(\n strings=strings,\n logits=logits,\n force_split_at_break_character=force_split))\n\n # Put token info into RaggedTensors, as indicated by token_row_splits.\n def put_token_info_into_ragged_tensor(token_info_values):\n return RaggedTensor.from_row_splits(\n token_info_values, token_row_splits, validate=False)\n\n tokens = put_token_info_into_ragged_tensor(token_values)\n start_offsets = put_token_info_into_ragged_tensor(start_values)\n end_offsets = put_token_info_into_ragged_tensor(end_values)\n return tokens, start_offsets, end_offsets\n"
] | [
[
"tensorflow.python.eager.monitoring.Counter",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_splits",
"tensorflow.python.platform.resource_loader.get_path_to_datafile"
]
] |
nhatminh46vn/transformers | [
"912f6881d2b69f180522172a5283702bd8c41d9c"
] | [
"src/transformers/models/bart/modeling_tf_bart.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"TF BART model, ported from the fairseq repo.\"\"\"\n\nimport math\nimport random\nimport warnings\nfrom typing import Dict, Optional, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ...activations_tf import ACT2FN\nfrom ...file_utils import (\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_tf_outputs import (\n TFBaseModelOutput,\n TFBaseModelOutputWithPast,\n TFSeq2SeqLMOutput,\n TFSeq2SeqModelOutput,\n)\n\n# Public API\nfrom ...modeling_tf_utils import (\n DUMMY_INPUTS,\n TFPreTrainedModel,\n TFSharedEmbeddings,\n TFWrappedEmbeddings,\n input_processing,\n keras_serializable,\n shape_list,\n)\nfrom ...utils import logging\nfrom .configuration_bart import BartConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"BartConfig\"\n_TOKENIZER_FOR_DOC = \"BartTokenizer\"\n\nLARGE_NEGATIVE = -1e8\n\n\ndef shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, eos_token_id: int):\n shifted_input_ids = tf.cast(input_ids, tf.int32)\n shifted_input_ids = tf.roll(shifted_input_ids, 1, axis=-1)\n start_tokens = tf.fill((shape_list(shifted_input_ids)[0], 1), eos_token_id)\n shifted_input_ids = tf.concat([start_tokens, shifted_input_ids[:, 1:]], -1)\n # replace possible -100 values in labels by `pad_token_id`\n shifted_input_ids = tf.where(\n shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids\n )\n\n # \"Verify that `labels` has only positive values and -100\"\n assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.cast(0, tf.int32))\n\n # Make sure the assertion op is called by wrapping the result in an identity no-op\n with tf.control_dependencies([assert_gte0]):\n shifted_input_ids = tf.identity(shifted_input_ids)\n\n return shifted_input_ids\n\n\ndef _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):\n \"\"\"\n Make causal mask used for bi-directional self-attention.\n \"\"\"\n bsz, tgt_len = input_ids_shape\n mask = tf.ones((tgt_len, tgt_len), dtype=tf.float32) * LARGE_NEGATIVE\n mask_cond = tf.range(shape_list(mask)[-1])\n\n mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)\n mask = tf.cast(mask, tf.float32)\n\n if past_key_values_length > 0:\n mask = tf.concat([tf.zeros((tgt_len, past_key_values_length), dtype=tf.float32), mask], axis=-1)\n return tf.broadcast_to(mask[None, None, :, :], (bsz, 1, tgt_len, tgt_len + past_key_values_length))\n\n\ndef _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):\n \"\"\"\n Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.\n \"\"\"\n bsz, src_len = shape_list(mask)\n tgt_len = tgt_len if tgt_len is not None else src_len\n\n expanded_mask = tf.cast(tf.broadcast_to(mask[:, None, None, 
:], (bsz, 1, tgt_len, src_len)), tf.float32)\n\n return (1.0 - expanded_mask) * LARGE_NEGATIVE\n\n\nclass TFBartLearnedPositionalEmbedding(TFSharedEmbeddings):\n \"\"\"\n This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting\n based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to\n the forward function.\n \"\"\"\n\n def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, offset, **kwargs):\n # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2\n # and adjust num_embeddings appropriately. Other models dont have this hack\n self.offset = offset\n assert padding_idx is not None, \"padding_idx cannot be None\"\n num_embeddings += offset\n super().__init__(num_embeddings, embedding_dim, **kwargs)\n\n def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n bsz, seq_len = input_shape[:2]\n\n positions = tf.range(\n past_key_values_length, seq_len + past_key_values_length, delta=1, dtype=tf.int32, name=\"range\"\n )\n return super().call(positions + self.offset) # super object is not callable for some reason\n\n\nclass TFBartSinusoidalPositionalEmbedding(tf.keras.layers.Embedding):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\"\"\"\n\n def __init__(self, num_positions: int, embedding_dim: int, **kwargs):\n\n if embedding_dim % 2 != 0:\n raise NotImplementedError(f\"odd embedding_dim {embedding_dim} not supported\")\n super().__init__(\n num_positions,\n embedding_dim,\n **kwargs,\n )\n\n def build(self, input_shape: tf.TensorShape):\n \"\"\"\n Build shared token embedding layer Shared weights logic adapted from\n https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24\n \"\"\"\n super().build(input_shape) # Instantiates self.weight so it can be loaded\n weight: np.ndarray = self._init_weight(self.input_dim, self.output_dim)\n self.set_weights([weight]) # overwrite self.weight to correct value\n\n @staticmethod\n def _init_weight(n_pos: int, dim: int):\n \"\"\"\n Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in\n the 2nd half of the vector. 
[dim // 2:]\n \"\"\"\n position_enc = np.array(\n [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]\n )\n # index 0 is all zero\n position_enc[:, 0 : dim // 2] = np.sin(position_enc[:, 0::2])\n position_enc[:, dim // 2 :] = np.cos(position_enc[:, 1::2])\n # convert to tensor\n table = tf.convert_to_tensor(position_enc, dtype=tf.float32)\n tf.stop_gradient(table)\n return table\n\n def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n bsz, seq_len = input_shape[:2]\n\n positions = tf.range(\n past_key_values_length, seq_len + past_key_values_length, delta=1, dtype=tf.int32, name=\"range\"\n )\n return super().call(positions)\n\n\nclass TFBartAttention(tf.keras.layers.Layer):\n \"\"\"Multi-headed attention from \"Attention Is All You Need\"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n num_heads: int,\n dropout: float = 0.0,\n is_decoder: bool = False,\n bias: bool = True,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.embed_dim = embed_dim\n\n self.num_heads = num_heads\n self.dropout = tf.keras.layers.Dropout(dropout)\n self.head_dim = embed_dim // num_heads\n assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim ** -0.5\n self.is_decoder = is_decoder\n\n self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name=\"k_proj\")\n self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name=\"q_proj\")\n self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name=\"v_proj\")\n self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name=\"out_proj\")\n\n def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):\n return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))\n\n def call(\n self,\n hidden_states: tf.Tensor,\n key_value_states: Optional[tf.Tensor] = None,\n past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,\n attention_mask: Optional[tf.Tensor] = None,\n training=False,\n ) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:\n \"\"\"Input shape: Batch x Time x Channel\"\"\"\n\n # if key_value_states are provided this layer is used as a cross-attention layer\n # for the decoder\n is_cross_attention = key_value_states is not None\n bsz, tgt_len, embed_dim = shape_list(hidden_states)\n\n # get query proj\n query_states = self.q_proj(hidden_states) * self.scaling\n # get key, value proj\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_states = past_key_value[0]\n value_states = past_key_value[1]\n elif is_cross_attention:\n # cross_attentions\n key_states = self._shape(self.k_proj(key_value_states), -1, bsz)\n value_states = self._shape(self.v_proj(key_value_states), -1, bsz)\n elif past_key_value is not None:\n # reuse k, v, self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n key_states = tf.concat([past_key_value[0], key_states], axis=2)\n value_states = tf.concat([past_key_value[1], value_states], axis=2)\n else:\n # self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n\n if self.is_decoder:\n # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # 
key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of\n # all previous decoder key/value_states. Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_states, value_states)\n\n proj_shape = (bsz * self.num_heads, -1, self.head_dim)\n query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)\n key_states = tf.reshape(key_states, proj_shape)\n value_states = tf.reshape(value_states, proj_shape)\n\n src_len = shape_list(key_states)[1]\n attn_weights = tf.matmul(query_states, key_states, transpose_b=True)\n\n tf.debugging.assert_equal(\n shape_list(attn_weights),\n [bsz * self.num_heads, tgt_len, src_len],\n message=f\"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}\",\n )\n\n if attention_mask is not None:\n tf.debugging.assert_equal(\n shape_list(attention_mask),\n [bsz, 1, tgt_len, src_len],\n message=f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}\",\n )\n attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask\n attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))\n\n attn_weights = tf.nn.softmax(attn_weights, axis=-1)\n\n attn_probs = self.dropout(attn_weights, training=training)\n\n attn_output = tf.matmul(attn_probs, value_states)\n\n tf.debugging.assert_equal(\n shape_list(attn_output),\n [bsz * self.num_heads, tgt_len, self.head_dim],\n message=f\"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}\",\n )\n\n attn_output = tf.transpose(\n tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)\n )\n attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))\n\n attn_output = self.out_proj(attn_output)\n attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))\n\n return attn_output, attn_weights, past_key_value\n\n\nclass TFBartEncoderLayer(tf.keras.layers.Layer):\n def __init__(self, config: BartConfig, **kwargs):\n super().__init__(**kwargs)\n self.embed_dim = config.d_model\n self.self_attn = TFBartAttention(\n self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name=\"self_attn\"\n )\n self.normalize_before = config.normalize_before\n self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"self_attn_layer_norm\")\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n self.activation_fn = ACT2FN[config.activation_function]\n self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)\n self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name=\"fc1\")\n self.fc2 = tf.keras.layers.Dense(self.embed_dim, name=\"fc2\")\n self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"final_layer_norm\")\n\n def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, training=False):\n \"\"\"\n Args:\n hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`\n attention_mask (:obj:`tf.Tensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n \"\"\"\n residual = 
hidden_states\n if self.normalize_before:\n hidden_states = self.self_attn_layer_norm(hidden_states)\n hidden_states, self_attn_weights, _ = self.self_attn(\n hidden_states=hidden_states, attention_mask=attention_mask\n )\n tf.debugging.assert_equal(\n shape_list(hidden_states),\n shape_list(residual),\n message=f\"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}\",\n )\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n if not self.normalize_before:\n hidden_states = self.self_attn_layer_norm(hidden_states)\n\n residual = hidden_states\n if self.normalize_before:\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = self.activation_dropout(hidden_states, training=training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n if not self.normalize_before:\n hidden_states = self.final_layer_norm(hidden_states)\n\n return hidden_states, self_attn_weights\n\n\nclass TFBartDecoderLayer(tf.keras.layers.Layer):\n def __init__(self, config: BartConfig, **kwargs):\n super().__init__(**kwargs)\n self.embed_dim = config.d_model\n self.self_attn = TFBartAttention(\n embed_dim=self.embed_dim,\n num_heads=config.decoder_attention_heads,\n dropout=config.attention_dropout,\n name=\"self_attn\",\n is_decoder=True,\n )\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n self.activation_fn = ACT2FN[config.activation_function]\n self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)\n self.normalize_before = config.normalize_before\n\n self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"self_attn_layer_norm\")\n self.encoder_attn = TFBartAttention(\n self.embed_dim,\n config.decoder_attention_heads,\n dropout=config.attention_dropout,\n name=\"encoder_attn\",\n is_decoder=True,\n )\n self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"encoder_attn_layer_norm\")\n self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name=\"fc1\")\n self.fc2 = tf.keras.layers.Dense(self.embed_dim, name=\"fc2\")\n self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"final_layer_norm\")\n\n def call(\n self,\n hidden_states,\n attention_mask: Optional[tf.Tensor] = None,\n encoder_hidden_states: Optional[tf.Tensor] = None,\n encoder_attention_mask: Optional[tf.Tensor] = None,\n past_key_value: Optional[Tuple[tf.Tensor]] = None,\n training=False,\n ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:\n \"\"\"\n Args:\n hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`\n attention_mask (:obj:`tf.Tensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n encoder_hidden_states (:obj:`tf.Tensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_attention_mask (:obj:`tf.Tensor`): encoder attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n past_key_value (:obj:`Tuple(tf.Tensor)`): cached past key and value projection states\n \"\"\"\n residual = hidden_states\n if self.normalize_before:\n hidden_states = self.self_attn_layer_norm(hidden_states)\n\n # Self Attention\n # decoder uni-directional 
self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n # add present self-attn cache to positions 1,2 of present_key_value tuple\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n hidden_states=hidden_states,\n past_key_value=self_attn_past_key_value,\n attention_mask=attention_mask,\n )\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n if not self.normalize_before:\n hidden_states = self.self_attn_layer_norm(hidden_states)\n\n # Cross-Attention Block\n cross_attn_present_key_value = None\n if encoder_hidden_states is not None:\n residual = hidden_states\n if self.normalize_before:\n hidden_states = self.encoder_attn_layer_norm(hidden_states)\n\n # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n hidden_states, _, cross_attn_present_key_value = self.encoder_attn(\n hidden_states=hidden_states,\n key_value_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n past_key_value=cross_attn_past_key_value,\n )\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n if not self.normalize_before:\n hidden_states = self.encoder_attn_layer_norm(hidden_states)\n\n # add cross-attn to positions 3,4 of present_key_value tuple\n present_key_value = present_key_value + cross_attn_present_key_value\n\n # Fully Connected\n residual = hidden_states\n if self.normalize_before:\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = self.activation_dropout(hidden_states, training=training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n\n if not self.normalize_before:\n hidden_states = self.final_layer_norm(hidden_states)\n\n return (\n hidden_states,\n self_attn_weights,\n present_key_value,\n )\n\n\nclass TFBartPretrainedModel(TFPreTrainedModel):\n config_class = BartConfig\n base_model_prefix = \"model\"\n\n @property\n def dummy_inputs(self):\n pad_token = 1\n input_ids = tf.cast(tf.constant(DUMMY_INPUTS), tf.int32)\n decoder_input_ids = tf.cast(tf.constant(DUMMY_INPUTS), tf.int32)\n dummy_inputs = {\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": tf.math.not_equal(input_ids, pad_token),\n \"input_ids\": input_ids,\n }\n return dummy_inputs\n\n\nclass TFPretrainedBartModel(TFBartPretrainedModel):\n def __init_subclass__(self):\n warnings.warn(\n \"The class `TFPretrainedBartModel` has been deprecated, please use `TFBartPretrainedModel` instead.\",\n FutureWarning,\n )\n\n\nBART_START_DOCSTRING = r\"\"\"\n This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the\n generic methods the library implements for all its model (such as downloading or saving, resizing the input\n embeddings, pruning heads etc.)\n\n This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use\n it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage\n and behavior.\n\n .. 
note::\n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all\n the tensors in the first argument of the model call function: :obj:`model(inputs)`.\n\n If you choose this second option, there are three possibilities you can use to gather all the input Tensors in\n the first positional argument :\n\n - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n :obj:`model({\"input_ids\": input_ids, \"token_type_ids\": token_type_ids})`\n\n Args:\n config (:class:`~transformers.BartConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.TFPreTrainedModel.from_pretrained` method to load the\n model weights.\n\"\"\"\n\nBART_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`tf.Tensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.BertTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n decoder_input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):\n Provide for translation and summarization training. By default, the model will create this tensor by\n shifting the input_ids right, following the paper.\n decoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`):\n will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.\n encoder_outputs (:obj:`tf.FloatTensor`, `optional`):\n hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n of shape :obj:`(batch_size, sequence_length, hidden_size)` is a sequence of\n past_key_values (:obj:`Tuple[Tuple[tf.Tensor]]` of length :obj:`config.n_layers`)\n contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`). 
Set to :obj:`False` during training, :obj:`True` during generation\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.TFModelOutput` instead of a plain tuple.\n training (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n\"\"\"\n\n\n@keras_serializable\nclass TFBartEncoder(tf.keras.layers.Layer):\n config_class = BartConfig\n \"\"\"\n Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a\n :class:`TFBartEncoderLayer`.\n\n Args:\n config: BartConfig\n \"\"\"\n\n def __init__(self, config: BartConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):\n super().__init__(**kwargs)\n self.config = config\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n self.layerdrop = config.encoder_layerdrop\n self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0\n self.padding_idx = config.pad_token_id\n self.max_source_positions = config.max_position_embeddings\n\n self.embed_tokens = embed_tokens\n if config.static_position_embeddings:\n self.embed_positions = TFBartSinusoidalPositionalEmbedding(\n config.max_position_embeddings,\n config.d_model,\n name=\"embed_positions\",\n )\n else:\n self.embed_positions = TFBartLearnedPositionalEmbedding(\n config.max_position_embeddings,\n config.d_model,\n self.padding_idx,\n config.extra_pos_embeddings,\n name=\"embed_positions\",\n )\n self.layers = [TFBartEncoderLayer(config, name=f\"layers.{i}\") for i in range(config.encoder_layers)]\n self.layernorm_embedding = (\n tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"layernorm_embedding\")\n if config.normalize_embedding\n else tf.keras.layers.Layer()\n )\n self.layer_norm = (\n tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"layer_norm\")\n if config.add_final_layer_norm\n else None\n )\n\n def call(\n self,\n input_ids=None,\n inputs_embeds=None,\n attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n **kwargs,\n ):\n \"\"\"\n Args:\n input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using :class:`~transformers.BartTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`\n for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? 
<../glossary.html#attention-mask>`__\n inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded\n representation. This is useful if you want more control over how to convert :obj:`input_ids` indices\n into associated vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors\n for more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n\n if inputs[\"input_ids\"] is not None and inputs[\"inputs_embeds\"] is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif inputs[\"input_ids\"] is not None:\n input_shape = shape_list(inputs[\"input_ids\"])\n elif inputs[\"inputs_embeds\"] is not None:\n input_shape = shape_list(inputs[\"inputs_embeds\"])[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if inputs[\"inputs_embeds\"] is None:\n inputs[\"inputs_embeds\"] = self.embed_tokens(inputs[\"input_ids\"])\n else:\n inputs[\"inputs_embeds\"] = inputs[\"inputs_embeds\"]\n\n inputs[\"inputs_embeds\"] = inputs[\"inputs_embeds\"] * self.embed_scale\n\n embed_pos = self.embed_positions(input_shape)\n hidden_states = inputs[\"inputs_embeds\"] + embed_pos\n hidden_states = self.layernorm_embedding(hidden_states)\n hidden_states = self.dropout(hidden_states, training=inputs[\"training\"])\n\n # check attention mask and invert\n if inputs[\"attention_mask\"] is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n inputs[\"attention_mask\"] = _expand_mask(inputs[\"attention_mask\"])\n\n encoder_states = () if inputs[\"output_hidden_states\"] else None\n all_attentions = () if inputs[\"output_attentions\"] else None\n\n # encoder layers\n for encoder_layer in self.layers:\n\n if inputs[\"output_hidden_states\"]:\n encoder_states = encoder_states + (hidden_states,)\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = random.uniform(0, 1)\n if inputs[\"training\"] and (dropout_probability < self.layerdrop): # skip the layer\n continue\n\n hidden_states, attn = encoder_layer(hidden_states, inputs[\"attention_mask\"])\n\n if inputs[\"output_attentions\"]:\n all_attentions += (attn,)\n if self.layer_norm:\n hidden_states = self.layer_norm(hidden_states)\n if inputs[\"output_hidden_states\"]:\n encoder_states = encoder_states + (hidden_states,)\n\n if not inputs[\"return_dict\"]:\n return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)\n return TFBaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions\n )\n\n\n@keras_serializable\nclass TFBartDecoder(tf.keras.layers.Layer):\n 
config_class = BartConfig\n \"\"\"\n Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`TFBartDecoderLayer`\n\n Args:\n config: BartConfig\n embed_tokens: output embedding\n \"\"\"\n\n def __init__(self, config: BartConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):\n super().__init__(**kwargs)\n self.config = config\n self.padding_idx = config.pad_token_id\n self.embed_tokens = embed_tokens\n self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0\n self.layerdrop = config.decoder_layerdrop\n if config.static_position_embeddings:\n self.embed_positions = TFBartSinusoidalPositionalEmbedding(\n config.max_position_embeddings,\n config.d_model,\n name=\"embed_positions\",\n )\n else:\n self.embed_positions = TFBartLearnedPositionalEmbedding(\n config.max_position_embeddings,\n config.d_model,\n self.padding_idx,\n config.extra_pos_embeddings,\n name=\"embed_positions\",\n )\n self.layers = [TFBartDecoderLayer(config, name=f\"layers.{i}\") for i in range(config.decoder_layers)]\n self.layernorm_embedding = (\n tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"layernorm_embedding\")\n if config.normalize_embedding\n else tf.keras.layers.Layer()\n )\n self.layer_norm = (\n tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"layer_norm\")\n if config.add_final_layer_norm\n else None\n )\n\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n self.do_blenderbot_90_layernorm = config.do_blenderbot_90_layernorm\n\n def call(\n self,\n input_ids=None,\n inputs_embeds=None,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n **kwargs,\n ):\n r\"\"\"\n Args:\n input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using :class:`~transformers.BartTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`\n for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n encoder_hidden_states (:obj:`tf.Tensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n of the decoder.\n encoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):\n Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values\n selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? 
<../glossary.html#attention-mask>`__\n past_key_values (:obj:`Tuple[Tuple[tf.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up\n decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last\n :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of\n shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size,\n sequence_length)`.\n inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded\n representation. This is useful if you want more control over how to convert :obj:`input_ids` indices\n into associated vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors\n for more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n inputs_embeds=inputs_embeds,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n\n if inputs[\"input_ids\"] is not None and inputs[\"inputs_embeds\"] is not None:\n raise ValueError(\"You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time\")\n elif inputs[\"input_ids\"] is not None:\n input_shape = shape_list(inputs[\"input_ids\"])\n elif inputs[\"inputs_embeds\"] is not None:\n input_shape = shape_list(inputs[\"inputs_embeds\"])[:-1]\n else:\n raise ValueError(\"You have to specify either decoder_input_ids or decoder_inputs_embeds\")\n\n past_key_values_length = (\n inputs[\"past_key_values\"][0][0].shape[2] if inputs[\"past_key_values\"] is not None else 0\n )\n\n # embed positions\n positions = self.embed_positions(input_shape, past_key_values_length)\n\n if inputs[\"inputs_embeds\"] is None:\n inputs[\"inputs_embeds\"] = self.embed_tokens(inputs[\"input_ids\"])\n\n hidden_states = inputs[\"inputs_embeds\"] * self.embed_scale\n\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n if input_shape[-1] > 1:\n combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)\n else:\n combined_attention_mask = _expand_mask(\n tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]\n )\n\n if inputs[\"attention_mask\"] is None and inputs[\"input_ids\"] is not None and input_shape[-1] > 1:\n inputs[\"attention_mask\"] = tf.cast(\n tf.math.not_equal(inputs[\"input_ids\"], self.config.pad_token_id), inputs[\"input_ids\"].dtype\n )\n inputs[\"attention_mask\"] = 
tf.concat(\n [\n tf.ones((input_shape[0], past_key_values_length), dtype=inputs[\"attention_mask\"].dtype),\n inputs[\"attention_mask\"],\n ],\n axis=-1,\n )\n else:\n inputs[\"attention_mask\"] = tf.ones(\n (input_shape[0], input_shape[1] + past_key_values_length), dtype=tf.int32\n )\n\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n combined_attention_mask = combined_attention_mask + _expand_mask(\n inputs[\"attention_mask\"], tgt_len=input_shape[-1]\n )\n\n if inputs[\"encoder_hidden_states\"] is not None and inputs[\"encoder_attention_mask\"] is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n inputs[\"encoder_attention_mask\"] = _expand_mask(inputs[\"encoder_attention_mask\"], tgt_len=input_shape[-1])\n\n if self.do_blenderbot_90_layernorm:\n hidden_states = self.layernorm_embedding(hidden_states) + positions\n else:\n hidden_states = self.layernorm_embedding(hidden_states + positions)\n hidden_states = self.dropout(hidden_states, training=inputs[\"training\"])\n\n # decoder layers\n all_hidden_states = ()\n all_self_attns = ()\n present_key_values = ()\n for idx, decoder_layer in enumerate(self.layers):\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n if inputs[\"output_hidden_states\"]:\n all_hidden_states += (hidden_states,)\n dropout_probability = random.uniform(0, 1)\n\n if inputs[\"training\"] and (dropout_probability < self.layerdrop):\n continue\n\n past_key_value = inputs[\"past_key_values\"][idx] if inputs[\"past_key_values\"] is not None else None\n\n hidden_states, layer_self_attn, present_key_value = decoder_layer(\n hidden_states,\n attention_mask=combined_attention_mask,\n encoder_hidden_states=inputs[\"encoder_hidden_states\"],\n encoder_attention_mask=inputs[\"encoder_attention_mask\"],\n past_key_value=past_key_value,\n )\n\n if inputs[\"use_cache\"]:\n present_key_values += (present_key_value,)\n\n if inputs[\"output_attentions\"]:\n all_self_attns += (layer_self_attn,)\n\n if self.layer_norm is not None: # same as if config.add_final_layer_norm\n hidden_states = self.layer_norm(hidden_states)\n\n # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)\n if inputs[\"output_hidden_states\"]:\n all_hidden_states += (hidden_states,)\n else:\n all_hidden_states = None\n\n all_self_attns = list(all_self_attns) if inputs[\"output_attentions\"] else None\n\n present_key_values = (inputs[\"encoder_hidden_states\"], present_key_values) if inputs[\"use_cache\"] else None\n\n if not inputs[\"return_dict\"]:\n return hidden_states, present_key_values, all_hidden_states, all_self_attns\n else:\n return TFBaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n past_key_values=present_key_values,\n hidden_states=all_hidden_states,\n attentions=all_self_attns,\n )\n\n\n@add_start_docstrings(\n \"The bare BART Model outputting raw hidden-states without any specific head on top.\",\n BART_START_DOCSTRING,\n)\n@keras_serializable\nclass TFBartModel(TFBartPretrainedModel):\n base_model_prefix = \"model\"\n\n def __init__(self, config: BartConfig, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.shared = TFSharedEmbeddings(config.vocab_size, config.d_model, config.pad_token_id, name=\"model.shared\")\n\n with tf.compat.v1.variable_scope(\"model.shared\") as shared_abs_scope_name:\n pass\n\n # Wraps layer to avoid problems with weight restoring and ensuring we're in the correct TF scope.\n embed_tokens = TFWrappedEmbeddings(self.shared, 
abs_scope_name=shared_abs_scope_name)\n embed_tokens.vocab_size = self.shared.vocab_size\n embed_tokens.hidden_size = self.shared.hidden_size\n\n self.encoder = TFBartEncoder(config, embed_tokens, name=\"encoder\")\n self.decoder = TFBartDecoder(config, embed_tokens, name=\"decoder\")\n\n def get_decoder(self):\n return self.decoder\n\n @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"facebook/bart-large\",\n output_type=TFSeq2SeqModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,\n past_key_values=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n **kwargs\n ):\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n encoder_outputs=encoder_outputs,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n\n if inputs[\"decoder_input_ids\"] is None and inputs[\"decoder_inputs_embeds\"] is None:\n inputs[\"use_cache\"] = False\n\n inputs[\"output_hidden_states\"] = (\n inputs[\"output_hidden_states\"]\n if inputs[\"output_hidden_states\"] is not None\n else self.config.output_hidden_states\n )\n\n if inputs[\"decoder_input_ids\"] is None and inputs[\"input_ids\"] is not None:\n inputs[\"decoder_input_ids\"] = shift_tokens_right(\n inputs[\"input_ids\"], self.config.pad_token_id, self.config.eos_token_id\n )\n\n if inputs[\"encoder_outputs\"] is None:\n inputs[\"encoder_outputs\"] = self.encoder(\n input_ids=inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True\n elif inputs[\"return_dict\"] and not isinstance(inputs[\"encoder_outputs\"], TFBaseModelOutput):\n inputs[\"encoder_outputs\"] = TFBaseModelOutput(\n last_hidden_state=inputs[\"encoder_outputs\"][0],\n hidden_states=inputs[\"encoder_outputs\"][1] if len(inputs[\"encoder_outputs\"]) > 1 else None,\n attentions=inputs[\"encoder_outputs\"][2] if len(inputs[\"encoder_outputs\"]) > 2 else None,\n )\n # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False\n elif not inputs[\"return_dict\"] and not isinstance(inputs[\"encoder_outputs\"], tuple):\n inputs[\"encoder_outputs\"] = inputs[\"encoder_outputs\"].to_tuple()\n\n decoder_outputs = self.decoder(\n inputs[\"decoder_input_ids\"],\n attention_mask=inputs[\"decoder_attention_mask\"],\n encoder_hidden_states=inputs[\"encoder_outputs\"][0],\n encoder_attention_mask=inputs[\"attention_mask\"],\n past_key_values=inputs[\"past_key_values\"],\n 
inputs_embeds=inputs[\"decoder_inputs_embeds\"],\n use_cache=inputs[\"use_cache\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n\n if not inputs[\"return_dict\"]:\n return decoder_outputs + inputs[\"encoder_outputs\"]\n\n return TFSeq2SeqModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n encoder_last_hidden_state=inputs[\"encoder_outputs\"].last_hidden_state,\n encoder_hidden_states=inputs[\"encoder_outputs\"].hidden_states,\n encoder_attentions=inputs[\"encoder_outputs\"].attentions,\n )\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, value):\n self.shared = value\n\n def get_output_embeddings(self):\n return self.shared\n\n\n@add_start_docstrings(\n \"The BART Model with a language modeling head. Can be used for summarization.\",\n BART_START_DOCSTRING,\n)\nclass TFBartForConditionalGeneration(TFBartPretrainedModel):\n _keys_to_ignore_on_load_unexpected = [\n r\"model.encoder.embed_tokens.weight\",\n r\"model.decoder.embed_tokens.weight\",\n ]\n\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.model = TFBartModel(config, name=\"model\")\n self.use_cache = config.use_cache\n # final_bias_logits is registered as a buffer in pytorch, so not trainable for the the sake of consistency.\n self.final_logits_bias = self.add_weight(\n name=\"final_logits_bias\", shape=[1, config.vocab_size], initializer=\"zeros\", trainable=False\n )\n\n def get_decoder(self):\n return self.model.decoder\n\n def resize_token_embeddings(self, new_num_tokens):\n super().resize_token_embeddings(new_num_tokens=new_num_tokens)\n\n # BART is a special case where the bias has two dimensions\n # and not named just `bias`\n if new_num_tokens is not None:\n num_tokens_to_copy = min(self.final_logits_bias.shape[0], new_num_tokens)\n init_bias = tf.zeros((new_num_tokens,))\n init_bias[:num_tokens_to_copy] = self.final_logits_bias.value()[:num_tokens_to_copy]\n self.final_logits_bias = self.add_weight(\n shape=(1, new_num_tokens),\n initializer=\"zeros\",\n trainable=False,\n name=\"final_logits_bias\",\n )\n self.final_logits_bias.assign(init_bias)\n\n @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n def call(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs: Optional[TFBaseModelOutput] = None,\n past_key_values=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n training=False,\n **kwargs,\n ):\n \"\"\"\n Returns:\n\n Examples::\n\n # Mask filling only works for bart-large\n from transformers import BartTokenizer, TFBartForConditionalGeneration\n import tensorflow as tf\n mname = 'facebook/bart-large'\n tokenizer = BartTokenizer.from_pretrained(mname)\n TXT = \"My friends are <mask> but they eat too many carbs.\"\n model = TFBartForConditionalGeneration.from_pretrained(mname)\n batch = tokenizer([TXT], return_tensors='tf')\n logits = model(inputs=batch.input_ids).logits\n probs = tf.nn.softmax(logits[0])\n # probs[5] is associated 
with the mask token\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n encoder_outputs=encoder_outputs,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n labels=labels,\n training=training,\n kwargs_call=kwargs,\n )\n\n if inputs[\"labels\"] is not None:\n inputs[\"use_cache\"] = False\n if inputs[\"decoder_input_ids\"] is None:\n inputs[\"decoder_input_ids\"] = shift_tokens_right(\n inputs[\"labels\"], self.config.pad_token_id, self.config.eos_token_id\n )\n\n outputs = self.model(\n inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n decoder_input_ids=inputs[\"decoder_input_ids\"],\n encoder_outputs=inputs[\"encoder_outputs\"],\n decoder_attention_mask=inputs[\"decoder_attention_mask\"],\n past_key_values=inputs[\"past_key_values\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n decoder_inputs_embeds=inputs[\"decoder_inputs_embeds\"],\n use_cache=inputs[\"use_cache\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n lm_logits = self.model.shared(outputs[0], mode=\"linear\")\n lm_logits = lm_logits + self.final_logits_bias\n masked_lm_loss = None if inputs[\"labels\"] is None else self.compute_loss(inputs[\"labels\"], lm_logits)\n\n if not inputs[\"return_dict\"]:\n output = (lm_logits,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n return TFSeq2SeqLMOutput(\n loss=masked_lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values, # index 1 of d outputs\n decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs\n decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs\n encoder_last_hidden_state=outputs.last_hidden_state, # index 0 of encoder outputs\n encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out\n encoder_attentions=outputs.encoder_attentions, # 2 of e out\n )\n\n def prepare_inputs_for_generation(self, decoder_input_ids, past, attention_mask, use_cache, **kwargs) -> Dict:\n assert past is not None and len(past) in {1, 2}, f\"past has to be an iterable of length 1,2 got {past}\"\n if len(past) == 1:\n assert isinstance(past[0], tf.Tensor), f\"`past[0]` has to be of type `tf.Tensor`, but is {type(past[0])}\"\n encoder_outputs = TFBaseModelOutput(last_hidden_state=past[0])\n past_key_values = None\n else:\n assert (\n len(past) == 2\n ), \"`past` has to be of length 2 with the encoder_outputs at the first position and past_key_values at the second position.\"\n encoder_outputs, past_key_values = past\n if isinstance(encoder_outputs, tuple):\n assert isinstance(\n encoder_outputs[0], tf.Tensor\n ), f\"`encoder_outputs[0]` has to be of type `tf.Tensor`, but is {type(encoder_outputs[0])}\"\n encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs[0])\n elif isinstance(encoder_outputs, tf.Tensor):\n encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs)\n assert (\n past_key_values\n ), f\"decoder cached states must be truthy. 
got {past_key_values} from the 2nd element of past\"\n decoder_input_ids = decoder_input_ids[:, -1:]\n\n assert isinstance(\n encoder_outputs, TFBaseModelOutput\n ), f\"encoder_outputs should be a TFBaseModelOutput, Instead got {type(encoder_outputs)}.\"\n return {\n \"input_ids\": None, # encoder_outputs is defined. input_ids not needed\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": past_key_values,\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": attention_mask,\n \"use_cache\": use_cache, # change this to avoid caching (presumably for debugging)\n }\n\n @staticmethod\n def _reorder_cache(past, beam_idx):\n if len(past) == 1:\n return past\n\n past_key_values = past[1]\n\n reordered_past = ()\n for layer_past_key_values in past_key_values:\n reordered_past += (\n tuple(tf.gather(layer_past_key_value, beam_idx) for layer_past_key_value in layer_past_key_values),\n )\n return (past[0], reordered_past)\n\n def adjust_logits_during_generation(self, logits, cur_len, max_length):\n if cur_len == 1 and self.config.force_bos_token_to_be_generated:\n vocab_range = tf.constant(range(self.config.vocab_size))\n return tf.where(vocab_range != self.config.bos_token_id, LARGE_NEGATIVE, logits)\n elif cur_len == max_length - 1:\n vocab_range = tf.constant(range(self.config.vocab_size))\n return tf.where(vocab_range != self.config.eos_token_id, LARGE_NEGATIVE, logits)\n else:\n return logits\n\n def get_output_embeddings(self):\n return self.model.shared\n\n def get_encoder(self):\n return self.model.encoder\n\n def compute_loss(self, labels, logits):\n \"\"\"CrossEntropyLoss that ignores pad tokens\"\"\"\n loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True,\n reduction=tf.keras.losses.Reduction.NONE,\n )\n melted_labels = tf.reshape(labels, (-1,))\n active_loss = tf.not_equal(melted_labels, self.config.pad_token_id)\n reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)\n labels = tf.boolean_mask(melted_labels, active_loss)\n return loss_fn(labels, reduced_logits)\n"
] | [
[
"tensorflow.ones",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.keras.layers.Dense",
"tensorflow.math.not_equal",
"tensorflow.broadcast_to",
"numpy.cos",
"tensorflow.control_dependencies",
"tensorflow.nn.softmax",
"tensorflow.identity",
"tensorflow.cast",
"numpy.sin",
"tensorflow.concat",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.keras.layers.Layer",
"tensorflow.zeros",
"tensorflow.where",
"tensorflow.compat.v1.variable_scope",
"tensorflow.keras.layers.Dropout",
"numpy.power",
"tensorflow.boolean_mask",
"tensorflow.convert_to_tensor",
"tensorflow.roll",
"tensorflow.not_equal",
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.gather",
"tensorflow.stop_gradient"
]
] |
robertosousa1/streamlit-app | [
"9722d784b70658c712a71246079f03ce0a90c2ec"
] | [
"display_dataframe.py"
] | [
"import streamlit as st\nimport pandas as pd\n\ndef main():\n st.title('Hello, Streamlit!')\n st.header('A simple repository containing some steps to get started with the Stream library.')\n st.text('\\n')\n st.subheader('For more details see the documentation:')\n st.subheader('https://docs.streamlit.io/api.html')\n st.text('\\n\\n')\n st.subheader('Examples:')\n st.text('\\n')\n st.markdown('1. Display a dataframe:')\n file = st.file_uploader('Choose your file', type='csv')\n if file is not None:\n slider = st.slider('Values', 1, 100)\n df = pd.read_csv(file)\n st.dataframe(df.head(slider))\n st.text('\\n')\n st.table(df.head(slider))\n st.text('\\n')\n st.write(df.columns)\n st.table(df.groupby('species')['petal_width'].mean())\n\n\nif __name__ == '__main__':\n\tmain()"
] | [
[
"pandas.read_csv"
]
] |
YangRui2015/rlkit_pro | [
"fccde0877a0da043525f7239bf703995107a8e80"
] | [
"rlkit/core/eval_util.py"
] | [
"\"\"\"\nCommon evaluation utilities.\n\"\"\"\n\nfrom collections import OrderedDict\nfrom numbers import Number\n\nimport numpy as np\n\nimport rlkit.pythonplusplus as ppp\n\n\ndef get_generic_path_information(paths, stat_prefix=''):\n \"\"\"\n Get an OrderedDict with a bunch of statistic names and values.\n \"\"\"\n statistics = OrderedDict()\n returns = [sum(path[\"rewards\"]) for path in paths]\n\n rewards = np.vstack([path[\"rewards\"] for path in paths])\n statistics.update(create_stats_ordered_dict('Rewards', rewards,\n stat_prefix=stat_prefix))\n statistics.update(create_stats_ordered_dict('Returns', returns,\n stat_prefix=stat_prefix))\n actions = [path[\"actions\"] for path in paths]\n if len(actions[0].shape) == 1:\n actions = np.hstack([path[\"actions\"] for path in paths])\n else:\n actions = np.vstack([path[\"actions\"] for path in paths])\n statistics.update(create_stats_ordered_dict(\n 'Actions', actions, stat_prefix=stat_prefix\n ))\n statistics['Num Paths'] = len(paths)\n statistics[stat_prefix + 'Average Returns'] = get_average_returns(paths)\n\n for info_key in ['env_infos', 'agent_infos']:\n if info_key in paths[0] and paths[0][info_key][0] != {}:\n all_env_infos = [\n ppp.list_of_dicts__to__dict_of_lists(p[info_key])\n for p in paths\n ]\n for k in all_env_infos[0].keys():\n final_ks = np.array([info[k][-1] for info in all_env_infos])\n first_ks = np.array([info[k][0] for info in all_env_infos])\n all_ks = np.concatenate([info[k] for info in all_env_infos])\n statistics.update(create_stats_ordered_dict(\n stat_prefix + k,\n final_ks,\n stat_prefix='{}/final/'.format(info_key),\n ))\n statistics.update(create_stats_ordered_dict(\n stat_prefix + k,\n first_ks,\n stat_prefix='{}/initial/'.format(info_key),\n ))\n statistics.update(create_stats_ordered_dict(\n stat_prefix + k,\n all_ks,\n stat_prefix='{}/'.format(info_key),\n ))\n\n return statistics\n\n\ndef get_average_returns(paths):\n returns = [sum(path[\"rewards\"]) for path in paths]\n return np.mean(returns)\n\n\ndef create_stats_ordered_dict(\n name,\n data,\n stat_prefix=None,\n always_show_all_stats=True,\n exclude_max_min=False,\n):\n if stat_prefix is not None:\n name = \"{}{}\".format(stat_prefix, name)\n if isinstance(data, Number):\n return OrderedDict({name: data})\n\n if len(data) == 0:\n return OrderedDict()\n\n if isinstance(data, tuple):\n ordered_dict = OrderedDict()\n for number, d in enumerate(data):\n sub_dict = create_stats_ordered_dict(\n \"{0}_{1}\".format(name, number),\n d,\n )\n ordered_dict.update(sub_dict)\n return ordered_dict\n\n if isinstance(data, list):\n try:\n iter(data[0])\n except TypeError:\n pass\n else:\n data = np.concatenate(data)\n\n if (isinstance(data, np.ndarray) and data.size == 1\n and not always_show_all_stats):\n return OrderedDict({name: float(data)})\n\n stats = OrderedDict([\n (name + ' Mean', np.mean(data)),\n (name + ' Std', np.std(data)),\n ])\n if not exclude_max_min:\n stats[name + ' Max'] = np.max(data)\n stats[name + ' Min'] = np.min(data)\n return stats\n"
] | [
[
"numpy.max",
"numpy.concatenate",
"numpy.array",
"numpy.min",
"numpy.mean",
"numpy.std",
"numpy.hstack",
"numpy.vstack"
]
] |
qipanyang/DQN-tensorflow | [
"6514dfdb01f9b3dbfd53029f19aa35d5188fba2f"
] | [
"main.py"
] | [
"from __future__ import print_function\nimport random\nimport tensorflow as tf\n\nfrom dqn.agent import Agent\nfrom dqn.environment import GymEnvironment, SimpleGymEnvironment\nfrom config import get_config\n\nflags = tf.app.flags\n\n# Model\nflags.DEFINE_string('model', 'm1', 'Type of model')\nflags.DEFINE_boolean('dueling', False, 'Whether to use dueling deep q-network')\nflags.DEFINE_boolean('double_q', False, 'Whether to use double q-learning')\n\n# Environment\nflags.DEFINE_string('env_name', 'Breakout-v0', 'The name of gym environment to use')\nflags.DEFINE_integer('action_repeat', 4, 'The number of action to be repeated')\n\n# Etc\nflags.DEFINE_boolean('use_gpu', True, 'Whether to use gpu or not')\nflags.DEFINE_string('gpu_fraction', '1/1', 'idx / # of gpu fraction e.g. 1/3, 2/3, 3/3')\nflags.DEFINE_boolean('display', False, 'Whether to do display the game screen or not')\nflags.DEFINE_boolean('is_train', True, 'Whether to do training or testing')\nflags.DEFINE_integer('random_seed', 123, 'Value of random seed')\nflags.DEFINE_boolean('poison', False, 'Whether retrain with poisoned training set')\n\nFLAGS = flags.FLAGS\n\n# Set random seed\ntf.set_random_seed(FLAGS.random_seed)\nrandom.seed(FLAGS.random_seed)\n\nif FLAGS.gpu_fraction == '':\n raise ValueError(\"--gpu_fraction should be defined\")\nelse:\n print(FLAGS.gpu_fraction)\n\ndef calc_gpu_fraction(fraction_string):\n idx, num = fraction_string.split('/')\n idx, num = float(idx), float(num)\n\n fraction = 1 / (num - idx + 1)\n print(\" [*] GPU : %.4f\" % fraction)\n return fraction\n\ndef main(_):\n if FLAGS.gpu_fraction == \"1/1\":\n FLAGS.gpu_fraction = \"0.999/1.0\"\n gpu_options = tf.GPUOptions(\n per_process_gpu_memory_fraction=calc_gpu_fraction(FLAGS.gpu_fraction))\n\n with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:\n #Set ratio of usage for GPU or tensorflow would report error\n\n #config = tf.ConfigProto()\n #config.gpu_options.allow_growth = True\n #with tf.Session(config=config) as sess:\n\n config = get_config(FLAGS) or FLAGS\n\n if config.env_type == 'simple':\n env = SimpleGymEnvironment(config)\n else:\n env = GymEnvironment(config)\n\n if FLAGS.poison:\n config.poison_line = input(\"input the number of poison line:\")\n\n\n\n\n if not tf.test.is_gpu_available() and FLAGS.use_gpu:\n raise Exception(\"use_gpu flag is true when no GPUs are available\")\n\n if not FLAGS.use_gpu:\n config.cnn_format = 'NHWC'\n\n agent = Agent(config, env, sess)\n\n if FLAGS.is_train:\n if FLAGS.poison:\n \tagent.train_poison()\n else:\n \tagent.train()\n else:\n if FLAGS.poison:\n \tagent.play_poison()\n else:\n \tagent.play()\n \n\nif __name__ == '__main__':\n tf.app.run()\n"
] | [
[
"tensorflow.set_random_seed",
"tensorflow.ConfigProto",
"tensorflow.app.run",
"tensorflow.test.is_gpu_available"
]
] |
bor9/estudiando_el_kay | [
"6e07908b8b0b5a5166dadce30001e6100e8304c3"
] | [
"figuras/problem_8_28_code.py"
] | [
"import numpy as np\nfrom scipy import signal, linalg, optimize\n\n# hd es la señal deseada dada, con hd.shape = (N, )\n# el filtro a diseñar tiene función de transferencia con q coeficientes\n# en el numerador y p coeficientes en el denominador, con p = q + 1.\n\n# función no lineal en a a optimizar: hd^T @ A @ (A^T @ A)^{-1} @ A^T @ hd\ndef ja(a):\n c = np.concatenate(([a[0]], np.zeros((N - (p + 1), ))))\n r = np.concatenate((a, [1], np.zeros((N - (p + 1), ))))\n AT = linalg.toeplitz(c, r)\n return hd @ AT.T @ linalg.inv(AT @ AT.T) @ AT @ hd\n\n## cálculo de a empleando scipy.optimize.minimize\n# a0: valor inicial para el algoritmo de optimización\na0 = np.zeros((p, ))\na0[0] = 0.2\nres = optimize.minimize(ja, a0)\na = res.x\na = np.concatenate(([1], a[::-1]))\n\n## cálculo de b\n# cálculo de la matriz G empleando el valor de a obtenido en la optimización\ndelta = np.zeros((N, ))\ndelta[0] = 1\ng = signal.lfilter([1], a, delta)\nG = linalg.toeplitz(g, np.concatenate(([g[0]], np.zeros((p - 1, )))))\nb = linalg.inv(G.T @ G) @ G.T @ hd\n"
] | [
[
"numpy.concatenate",
"scipy.linalg.toeplitz",
"numpy.zeros",
"scipy.signal.lfilter",
"scipy.linalg.inv",
"scipy.optimize.minimize"
]
] |
sekunder/ergm | [
"b60bc2b1cb64d2969bcab2dbe75511eb732a113d"
] | [
"util.py"
] | [
"\"\"\"\nutility functions for ergm\n\"\"\"\nimport numpy as np\nimport datetime\nimport sys\nimport networkx as nx\nfrom itertools import combinations\n\n\n# from scipy import sparse\n\ndef log_msg(*args, out=sys.stdout, **kwargs):\n \"\"\"Print message m with a timestamp if out is not None.\"\"\"\n if out:\n print(datetime.datetime.now().strftime(\"%Y %m %d %H:%M:%S \"), *args, **kwargs, file=out)\n\n\ndef index_to_edge(idx, n, directed=True, order=\"columns\"):\n \"\"\"\n Returns the ordered pair `(e0,e1)` for the edge which has linear index `idx`. This is essentially the linear\n index of an entry in a matrix, except shifts are included so the diagonal entries don't get indexed.\n\n :param idx: an integer between 0 and n*(n-1) (inclusive) for directed graphs, or 0 and n*(n-1)/2 for undirected.\n :param n: the number of nodes in the graph\n :param directed: whether to find the index for a directed (all off-diagonal entries used) or undirected\n (upper triangle only). Default: true\n :param order: Whether matrix entries are indexed in column order or row order. Default columns, so 0 maps to (1,0),\n and so on down the 0th column before moving to the 1th column. Options are \"columns\" (default)\n or \"rows\".\n :return: tuple of integers, the indices in the adjacency matrix.\n \"\"\"\n\n if directed:\n e1 = idx // (n - 1)\n e0 = idx % (n - 1) + (idx % (n - 1) >= e1)\n else:\n e1 = np.ceil(triangular_root(idx + 1)).astype(int)\n e0 = idx - (e1 - 1) * e1 // 2\n\n if order == \"columns\":\n return np.array([e0, e1])\n else:\n return np.array([e1, e0])\n\n\ndef triangular_root(x):\n \"\"\"Returns the triangular root of x. If this returns an integer, x is a triangular number; otherwise, it lies between two triangular numbers.\n\n See https://en.wikipedia.org/wiki/Triangular_number\"\"\"\n return (np.sqrt(8 * x + 1) - 1) / 2\n\n\ndef triangular_number(n):\n \"\"\"Returns the `n`th triangular number `n * (n + 1) // 2`\"\"\"\n return n * (n + 1) // 2\n\n\ndef ellipse(center, v1, v2, resolution=10):\n \"\"\"\n Returns two arrays, `x`, `y`, such that `plt.plot(x,y)` will be an ellipse with specified center and axes.\n The `resolution` parameter adjusts the number of points around the ellipse\n :param center: coordinates of center\n :param v1: 1st axis\n :param v2: 2nd axis\n :param resolution: number of samples points around ellipse\n :return: Two arrays\n \"\"\"\n ls = np.linspace(0, 2 * np.pi, num=resolution)\n x = center[0] + np.cos(ls) * v1[0] + np.sin(ls) * v2[0]\n y = center[1] + np.cos(ls) * v1[1] + np.sin(ls) * v2[1]\n return x, y\n\n\ndef networkx_graph_to_sparse_array(g):\n \"\"\"\n Convert networkx graph g into a binary matrix with 0s on the diagonal. Entry `i,j` indicates whether there is an\n edge from node `i` to node `j`. 
Indices are the order in which nodes are returned from `g.nodes`.\n\n This will generate a warning as it involves manually setting the diagonal to 0, which changes the sparsity\n structure of the matrix.\n\n :param g: `networkx` graph\n :return: a CSR sparse matrix\n \"\"\"\n sg = nx.convert_matrix.to_scipy_sparse_matrix(g, dtype=int, weight=None)\n sg.data = np.ones_like(sg.data)\n sg.setdiag(sg.diagonal() * 0)\n return sg\n\n\ndef flatten(t):\n return [item for sublist in t for item in sublist]\n\n\ndef directed_triplet_motif_index(G):\n \"\"\"Return the directed motif index of three-node graph G (G is a nx graph type)\n\n The motif index is then computed as follows:\n Each possible (undirected) edge on the nodes of G is sorted in lexicographic order.\n For each pair of vertices, two bits encode, in order, the presence of the edge from\n lower index to higher index and the edge from higher index to lower index. These bits\n are reversed and concatenated to form a single integer\n\n Example: G has three nodes, labeled i,j,k in sorted order. It has edge ij, ik, ki, and kj.\n The lex order for the pairs is ij, ik, jk. Pair ij has edge ij (low-high) but not ji (high-low),\n so the least significant bit is 1 and the second-least significant bit is 0. For pair ik, we have both\n directed edges so those bits are 11. Lastly, pair jk has only the high-low edge, so the higher-order\n bit is 1 while the lower bit is 0. Putting these bits together from right to left we get 101101,\n which is 45 in decimal.\n\n !!! Note that the order of vertice s in G has nothing to do with numerical order!\n See networkx documentation about classes OrderedGraph and OrderedDiGraph.\n\n Returns an integer between 0 and 63 (inclusive)\n \"\"\"\n bit_selector = np.array([[0, 1, 4], [2, 0, 16], [8, 32, 0]])\n return np.sum(np.multiply(bit_selector, nx.to_numpy_matrix(G).astype(int)))\n\n\ndef directed_triplet_motif_index_from_matrix(M):\n \"\"\"Same as directed_triplet_motif_index but accepts a numpy matrix as its argument\"\"\"\n bit_selector = np.array([[0, 1, 4], [2, 0, 16], [8, 32, 0]])\n return np.sum(np.multiply(bit_selector, M))\n\n\ndef binary_digits(n, d): # numpy-optimized\n \"\"\"Returns an n x d array of the binary digits of each entry of array n\n Parameters:\n n : array_like\n Integer values to be represented as binary digits\n d : the number of digits; zero padding and/or truncation if necessary\n Returns:\n digits : an n x d binary array; each row is the digits of the corresponding entry of n. Least significant bit has index 0.\n \"\"\"\n return ((n[:, None] & (1 << np.arange(d))) > 0).astype(int)\n\n\ndef index_to_directed_triplet_motif_matrix(n):\n \"\"\"Return the adjacency matrix corresponding to motif with index n, as defined by the function\n directed_triplet_motif_index\"\"\"\n digs = binary_digits(np.array([n]), 6)\n A = np.zeros((3, 3), dtype=int)\n A[tuple([[0, 1, 0, 2, 1, 2], [1, 0, 2, 0, 2, 1]])] = digs\n return A\n\n\ndef subsets(itr):\n \"\"\"\n Iterator over subsets of `itr`, including both empty set and all of `itr`.\n \"\"\"\n for r in range(len(itr) + 1):\n for c in combinations(itr, r):\n yield c\n\n\ndef bin_subsets(vec):\n \"\"\"Iterate over the subsets of the given binary tuple (i.e. 
over all vectors with support contained in the support of vec)\"\"\"\n for s in subsets(np.where(vec)[0]):\n v = [0] * len(vec)\n for idx in s:\n v[idx] = 1\n yield tuple(v)\n\n\ndef basis_vector(i, n=3, dtype=int):\n v = np.zeros(n, dtype=dtype)\n v[i] = 1\n return v\n\n\ndef last_sorted_index(itr, n):\n \"\"\"The greatest index `i` such that `itr[i] < n` is true\"\"\"\n for i, v in enumerate(itr):\n if v > n:\n return i - 1\n return len(itr)\n\n\ndef random_partition(n, p):\n \"\"\"Returns an array of the form [0, k_1, k_2, ..., k_p] with 0 <= k_1 <= k_2 <= ... <= k_p = n.\n This corresponds to a partition of n things into p parts.\"\"\"\n parts = np.random.choice(range(n + p - 1), size=p - 1, replace=False)\n parts.sort()\n full_parts = np.hstack([[-1], parts, [n + p - 1]])\n between_bars = np.diff(full_parts)\n partition = np.cumsum(np.hstack([[0], between_bars - np.ones_like(between_bars)]))\n return partition\n"
] | [
[
"numpy.array",
"numpy.ones_like",
"numpy.sin",
"numpy.zeros",
"numpy.diff",
"numpy.multiply",
"numpy.where",
"numpy.arange",
"numpy.sqrt",
"numpy.cos",
"numpy.hstack",
"numpy.linspace"
]
] |
kosho2013/pixelfly-master | [
"345db4fb9a4c5f36a85ff4a65434762545cca23c"
] | [
"src/models/modules/attention/blocksparse_matmul.py"
] | [
"# This is a copy of https://github.com/openai/triton/blob/master/python/triton/ops/blocksparse/matmul.py\r\n# with a one-line fix the bug https://github.com/openai/triton/issues/266\r\nimport triton\r\nimport triton.language as tl\r\nimport triton._C.libtriton as libtriton\r\nimport torch\r\n\r\n\r\[email protected]\r\ndef _kernel(\r\n A, B, C, stride_za, stride_ha, stride_ma, stride_ka, stride_zb, stride_hb, stride_kb, stride_nb, stride_zc, stride_hc,\r\n stride_mc, stride_nc, DS0, DS1, SDD_K, SDD_off_width, lut, locks, nlocks, **meta\r\n):\r\n TM = meta['TM']\r\n TN = meta['TN']\r\n TK = meta['TK']\r\n TZ = meta['TZ']\r\n BLOCK = meta['BLOCK']\r\n #------------#\r\n #- Prologue -#\r\n #------------#\r\n pid0 = tl.program_id(0)\r\n pid1 = tl.program_id(1)\r\n pidz = tl.program_id(2)\r\n if meta['SDD']:\r\n pid1 = pid1 + SDD_off_width\r\n blockidm = tl.arange(0, TM) // BLOCK\r\n blockidn = tl.arange(0, TN) // BLOCK\r\n offlutm = blockidm * (TN // BLOCK) * 4\r\n offlutn = blockidn * 4\r\n header = lut + pid1 * (TM // BLOCK) * (TN // BLOCK) * 4\r\n z = tl.load(header + 0)\r\n i = tl.load(header + 1 + offlutm)\r\n j = tl.load(header + 2 + offlutn)\r\n AS1 = SDD_K // TZ\r\n lockid = tl.where(TZ > 1, 1, 0)\r\n offka = pid0 * AS1\r\n offkb = pid0 * AS1\r\n offmc = 0\r\n offnc = 0\r\n offpa = 0\r\n offpb = 0\r\n maxid = TZ\r\n offhc = 0\r\n offha = z\r\n offhb = z\r\n ram = i * BLOCK + (tl.arange(0, TM) % BLOCK)\r\n rbn = j * BLOCK + (tl.arange(0, TN) % BLOCK)\r\n else:\r\n header = lut + pid0 * 6\r\n offset = tl.load(header + 0)\r\n AS1 = tl.load(header + 1)\r\n column = tl.load(header + 2)\r\n depth = tl.load(header + 3)\r\n lockid = tl.load(header + 4)\r\n maxid = tl.load(header + 5)\r\n pinc = lut + offset\r\n offhc = depth\r\n if meta['DSD']:\r\n # output offset\r\n offnc = pid1 * TN\r\n offmc = column * TM\r\n offpc = 0\r\n # dense input offset\r\n offnb = pid1 * TN\r\n offkb = tl.load(pinc)\r\n offkb = tl.multiple_of(offkb, 8) # compiler hint\r\n offpb = 0\r\n # sparse input offset\r\n offma = 0\r\n offka = 0\r\n offpa = tl.load(pinc + 1)\r\n offpa = tl.multiple_of(offpa, 8) # compiler hint\r\n offpa = offpa * BLOCK * BLOCK\r\n offha = 0\r\n offhb = depth\r\n else:\r\n # output offset\r\n offmc = pid1 * TM\r\n offnc = column * TN\r\n offpc = 0\r\n # dense input offset\r\n offma = pid1 * TM\r\n offka = tl.load(pinc)\r\n offka = tl.multiple_of(offka, 8) # compiler hint\r\n offpa = 0\r\n # sparse input offset\r\n offnb = 0\r\n offkb = 0\r\n offpb = tl.load(pinc + 1)\r\n offpb = tl.multiple_of(offpb, 8) # compiler hint\r\n offpb = offpb * BLOCK * BLOCK\r\n offha = depth\r\n offhb = 0\r\n ram = offma + tl.arange(0, TM)\r\n rbn = offnb + tl.arange(0, TN)\r\n\r\n # initialize a, b pointers\r\n rka = offka + tl.arange(0, TK)\r\n rkb = offkb + tl.arange(0, TK)\r\n pa = A + pidz * stride_za + offha * stride_ha + offpa + ram[:, None] * stride_ma + rka[None, :] * stride_ka\r\n pb = B + pidz * stride_zb + offhb * stride_hb + offpb + rbn[None, :] * stride_nb + rkb[:, None] * stride_kb\r\n if meta['DDS']:\r\n checkam = ram[:, None] < DS0\r\n else:\r\n checkam = AS1 > 0\r\n if meta['DSD']:\r\n checkbn = rbn[None, :] < DS0\r\n else:\r\n checkbn = AS1 > 0\r\n a = tl.load(pa, mask=checkam, other=0.)\r\n b = tl.load(pb, mask=checkbn, other=0.)\r\n\r\n ## ---------------- ##\r\n ## Inner Loop ##\r\n ## ---------------- ##\r\n acc = tl.zeros((TM, TN), dtype=tl.float32)\r\n for k in range(AS1, 0, -TK):\r\n acc += tl.dot(a, b)\r\n if meta['SDD']:\r\n inc_a = TK * stride_ka\r\n inc_b = TK * stride_kb\r\n 
else:\r\n pinc += 2\r\n if meta['DSD']:\r\n inc_b = tl.load(pinc)\r\n inc_a = tl.load(pinc + 1)\r\n inc_b = tl.multiple_of(inc_b, 8)\r\n inc_a = tl.multiple_of(inc_a, 8)\r\n inc_b = inc_b * stride_kb\r\n if meta['DDS']:\r\n inc_a = tl.load(pinc)\r\n inc_b = tl.load(pinc + 1)\r\n inc_a = tl.multiple_of(inc_a, 8)\r\n inc_b = tl.multiple_of(inc_b, 8)\r\n inc_a = inc_a * stride_ka\r\n pa += inc_a\r\n pb += inc_b\r\n # pre-fetch\r\n checkak = k > TK\r\n checkbk = k > TK\r\n checka = checkam & checkak\r\n checkb = checkbn & checkbk\r\n a = tl.load(pa, mask=checka)\r\n b = tl.load(pb, mask=checkb)\r\n c = acc.to(C.dtype.element_ty)\r\n\r\n if meta['SDD']:\r\n checkc = True\r\n rr_blockidm = tl.arange(0, TM) // BLOCK\r\n rr_blockidn = tl.arange(0, TN) // BLOCK\r\n rr_offlutm = rr_blockidm * (TN // BLOCK) * 4\r\n rr_offlutn = rr_blockidn * 4\r\n off_bkid = 3 + rr_offlutm[:, None] + rr_offlutn[None, :]\r\n bkid = tl.load(header + off_bkid)\r\n offpc = bkid * BLOCK * BLOCK\r\n rcm = tl.arange(0, TM) % BLOCK\r\n rcn = tl.arange(0, TN) % BLOCK\r\n else:\r\n rcm = offmc + tl.arange(0, TM)\r\n rcn = offnc + tl.arange(0, TN)\r\n if meta['DSD']:\r\n checkc = rcn[None, :] < DS0\r\n if meta['DDS']:\r\n checkc = rcm[:, None] < DS0\r\n\r\n pc = C + offpc + offhc * stride_hc + pidz * stride_zc + rcm[:, None] * stride_mc + rcn[None, :] * stride_nc\r\n # write-back directly\r\n if lockid == 0:\r\n tl.store(pc, c, mask=checkc)\r\n # accumulate partial results using spin-locks\r\n else:\r\n plock = locks + tl.program_id(2) * nlocks * tl.num_programs(1) + tl.program_id(1) * nlocks + lockid - 1\r\n pcount = plock + tl.num_programs(2) * tl.num_programs(1) * nlocks\r\n while tl.atomic_cas(plock, 0, 1) == 1:\r\n pass\r\n count = tl.load(pcount)\r\n if count == 0:\r\n tl.store(pc, c, mask=checkc)\r\n else:\r\n d = tl.load(pc, mask=checkc)\r\n tl.store(pc, d + c, mask=checkc)\r\n tl.atomic_xchg(pcount, (count + 1) % maxid)\r\n tl.atomic_xchg(plock, 0)\r\n\r\n\r\n##############\r\n# MAIN API #\r\n##############\r\nclass _matmul(torch.autograd.Function):\r\n\r\n sdd_cache = dict()\r\n dsd_cache = dict()\r\n dds_cache = dict()\r\n locks = dict()\r\n\r\n # Given an array sizes representing reduction size for each\r\n # column of a block-mode matrix multiplication,\r\n # performs load-balancing to achieve more smaller reductions\r\n # between `seg_size` elements\r\n @staticmethod\r\n def load_balance(sizes):\r\n # segment size\r\n # heuristics taken from OpenAI blocksparse code\r\n # https://github.com/openai/blocksparse/blob/master/blocksparse/matmul.py#L95\r\n max_size = sizes.max()\r\n min_size = sizes[sizes != 0].min()\r\n #if max_size > min_size * 2.0:\r\n # seg_max = max(triton.cdiv(max_size, 4), min_size*2)\r\n #else:\r\n # seg_max = max_size\r\n seg_max = max_size\r\n seg_min = max(triton.cdiv(seg_max, 4), 4)\r\n # split reduction into segments\r\n div = sizes // seg_max\r\n rem = sizes % seg_max\r\n packs = div + (sizes < seg_min).long() + (rem >= seg_min).long()\r\n width = packs.sum()\r\n segments = torch.empty(width, dtype=sizes.dtype)\r\n column = torch.empty_like(segments)\r\n lockid = torch.zeros_like(segments)\r\n maxid = torch.zeros_like(segments)\r\n nlocks = 0\r\n current = 0\r\n col_idx = 0\r\n for i in range(len(sizes)):\r\n d, r = div[i], rem[i]\r\n isempty = sizes[i] < seg_min\r\n last = current + d + (r >= seg_min) + isempty\r\n # column id\r\n column[current:last] = col_idx\r\n # lock id\r\n if d > 1 or (d == 1 and r >= seg_min):\r\n nlocks += 1\r\n lockid[current:last] = nlocks\r\n maxid[current:last] 
= last - current\r\n # segment size\r\n segments[current:current + d] = seg_max\r\n if r < seg_min and not isempty:\r\n segments[current + d - 1] += r\r\n if r >= seg_min or isempty:\r\n segments[current + d] = r\r\n current = last\r\n col_idx += 1\r\n offsets = torch.zeros_like(segments)\r\n offsets[1:] = torch.cumsum(segments[:-1], dim=0)\r\n return segments, column, lockid, maxid, offsets\r\n\r\n @staticmethod\r\n def get_locks(size, dev):\r\n if dev not in _matmul.locks or \\\r\n size > _matmul.locks[dev].size(0):\r\n _matmul.locks[dev] = torch.zeros(size, dtype=torch.int32, device=dev)\r\n return _matmul.locks[dev]\r\n\r\n ##########################\r\n # SPARSE = DENSE x DENSE #\r\n ##########################\r\n\r\n @staticmethod\r\n def make_sdd_lut(layout, block, device):\r\n # start_width = 128 // block\r\n # [2021-09-23] TD: This seems to produce the wrong shape for certain cases\r\n start_width = 1\r\n layout = layout.type(torch.int32)\r\n superblocks = libtriton.superblock(layout.data_ptr(), layout.shape[0], layout.shape[1], layout.shape[2], start_width)\r\n luts, widths, packs = [], [], []\r\n for size, nnz in superblocks:\r\n nnz = nnz.reshape(-1, 4)\r\n width = nnz.shape[0] // (size * size)\r\n luts.append(torch.from_numpy(nnz).type(torch.int32).to(device))\r\n widths.append(width)\r\n packs.append(size)\r\n\r\n # create locks\r\n return luts, None, widths, packs\r\n\r\n @staticmethod\r\n def _sdd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, luts, num_locks, widths, packs):\r\n # (A * B)^T = (B^T * A^T)\r\n if trans_c:\r\n a, b = b, a\r\n trans_a, trans_b = not trans_b, not trans_a\r\n\r\n # Shape check\r\n a_dim = -2 if trans_a else -1\r\n b_dim = -1 if trans_b else -2\r\n a_inner, b_inner = a.shape[a_dim], b.shape[b_dim]\r\n if a_inner != b_inner:\r\n raise ValueError(f\"Size of tensor A along the {_dim_to_name(a_dim)} dim ({a_inner}) must match size \"\r\n f\"of tensor B along the {_dim_to_name(b_dim)} dim ({b_inner})\")\r\n if a_inner % 16 != 0:\r\n raise ValueError('Reduction size for SDD must be a multiple of 16')\r\n\r\n batch_size = a.size(0)\r\n a_outer = a.size(3 if trans_a else 2)\r\n dtype = a.dtype\r\n device = a.device\r\n\r\n # create kernel\r\n total_width = sum([width * pack * pack for width, pack in zip(widths, packs)])\r\n c = torch.zeros((batch_size, total_width, block, block), dtype=dtype, device=device)\r\n for lut, width, pack in zip(luts, widths, packs):\r\n num_lock = 1\r\n # [2021-09-06] TD: This line is the fix for the bug where the result is wrong if\r\n # block == 16 and the inner dimension is an odd multiple of 16.\r\n # https://github.com/openai/triton/issues/266\r\n TK = 16 if block == 16 and (a_inner // 16) % 2 == 1 else 32\r\n meta = {'TM': block * pack, 'TN': block * pack, 'BLOCK': block, 'TK': TK, 'TZ': 1,\r\n 'SDD': True, 'DSD': False, 'DDS': False}\r\n # create output\r\n locks = _matmul.get_locks(2 * width * batch_size * num_lock, a.device)\r\n # maximum grid size is 65535\r\n # so operation might be decomposed into multiple\r\n # kernel calls\r\n max_width = 49152\r\n for off_width in range(0, width, max_width):\r\n grid = lambda meta: [meta['TZ'], min(max_width, width - off_width), batch_size]\r\n _kernel[grid](\r\n a,\r\n b,\r\n c,\r\n a.stride(0),\r\n a.stride(1),\r\n a.stride(3 if trans_a else 2),\r\n a.stride(2 if trans_a else 3),\r\n b.stride(0),\r\n b.stride(1),\r\n b.stride(3 if trans_b else 2),\r\n b.stride(2 if trans_b else 3),\r\n c.stride(0),\r\n c.stride(0),\r\n c.stride(2),\r\n c.stride(3),\r\n a_outer,\r\n 
a_outer,\r\n a_inner,\r\n off_width,\r\n lut,\r\n locks,\r\n num_lock,\r\n num_warps=4,\r\n **meta\r\n )\r\n # save for backward pass\r\n return c\r\n\r\n ##########################\r\n # DENSE = DENSE x SPARSE #\r\n # DENSE = SPARSE x DENSE #\r\n ##########################\r\n\r\n # Given a binary layout of 0s and 1s,\r\n # Construct look-up table for efficient execution on GPUs\r\n @staticmethod\r\n def make_dxx_lut(layout, block, step, trans, device, transform=lambda idx: idx):\r\n # load-balancing\r\n _empty = torch.tensor([], dtype=torch.int64, device=layout.device)\r\n segments = _empty.clone()\r\n column = _empty.clone()\r\n depth = _empty.clone()\r\n lockid = _empty.clone()\r\n maxid = _empty.clone()\r\n offsets = _empty.clone()\r\n current_offset = 0\r\n current_maxid = 0\r\n for z in range(layout.size(0)):\r\n if trans:\r\n sizes = torch.sum(layout[z, :, :], 1)\r\n else:\r\n sizes = torch.sum(layout[z, :, :], 0)\r\n z_segments, z_column, z_lockid, z_maxid, z_offsets = _matmul.load_balance(sizes)\r\n z_depth = z * torch.ones_like(z_segments)\r\n z_lockid[z_lockid > 0] += current_maxid\r\n current_maxid = z_lockid.max()\r\n # concatenate depth\r\n segments = torch.cat((segments, z_segments))\r\n column = torch.cat((column, z_column))\r\n depth = torch.cat((depth, z_depth))\r\n maxid = torch.cat((maxid, z_maxid))\r\n offsets = torch.cat((offsets, current_offset + z_offsets))\r\n lockid = torch.cat((lockid, z_lockid))\r\n current_offset += layout[z, :, :].sum()\r\n segments *= step\r\n # pointer increments\r\n if trans:\r\n nnz = layout.nonzero(as_tuple=False)\r\n else:\r\n nnz = layout.transpose(1, 2).nonzero(as_tuple=False)\r\n num_blocks = nnz.size(0)\r\n offsets = torch.min(offsets, (num_blocks - 1) * torch.ones_like(offsets))\r\n idx = transform(nnz[:, 2] * block)\r\n xincs = idx.clone()\r\n xincs[1:] -= idx[:-1]\r\n # divide block into multiple steps\r\n div = block // step\r\n xincs = xincs.view(-1, 1).repeat(1, div)\r\n xincs[:, 1:] = step\r\n xincs[:, 0] -= (div - 1) * step\r\n # first increment for each reduction is actually the offset\r\n xincs[offsets[segments > 0], 0] = idx[offsets[segments > 0]]\r\n xincs = xincs.view(-1)\r\n # block-mode input increments\r\n if trans:\r\n widx = torch.arange(num_blocks)\r\n else:\r\n widx = _empty.clone()\r\n current_offset = 0\r\n for z in range(layout.size(0)):\r\n layoutw = layout[z, :, :].clone()\r\n msum = layoutw.sum()\r\n layoutw[layoutw > 0] = 1 + torch.arange(msum)\r\n widx = torch.cat((widx, current_offset + layoutw.T[layoutw.T > 0] - 1))\r\n current_offset += msum\r\n widx = widx\r\n wincs = widx * block * block\r\n wincs[1:] -= widx[:-1] * block * block\r\n wincs = wincs.view(-1, 1).repeat(1, div)\r\n if trans:\r\n wincs[:, 1:] = step\r\n wincs[:, 0] -= (div - 1) * step\r\n else:\r\n wincs[:, 1:] = step * block\r\n wincs[:, 0] -= (div - 1) * step * block\r\n wincs[offsets[segments > 0], 0] = widx[offsets[segments > 0]]\r\n wincs = wincs.view(-1)\r\n # adjust offset and segment size\r\n offsets *= 2 * div\r\n segments *= div\r\n # create header\r\n width = column.size(0)\r\n offsets += 6 * width\r\n header = torch.stack((offsets, segments, column, depth, lockid, maxid), dim=1).view(-1).contiguous()\r\n incs = torch.stack((xincs, wincs), dim=1).view(-1).contiguous()\r\n incs = torch.cat((incs, torch.zeros(2, device=incs.device, dtype=incs.dtype)))\r\n # create lut\r\n lut = torch.cat((header, incs))\r\n lut = lut.type(torch.int32).to(device)\r\n # create locks\r\n num_locks = max(1, lockid.max())\r\n return lut, num_locks, 
width, None\r\n\r\n @staticmethod\r\n def _dds_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs):\r\n # shapes / dtypes\r\n AS0 = a.size(0)\r\n AS1 = a.size(1)\r\n AS2 = a.size(3 if trans_a else 2)\r\n BS2 = block * spdims[1 if trans_b else 2]\r\n dtype = a.dtype\r\n # kernel\r\n meta = {'TN': block, 'TM': 128, 'TK': 16, 'BLOCK': block, 'TZ': 1,\r\n 'SDD': False, 'DSD': False, 'DDS': True}\r\n # output\r\n CS0 = AS0\r\n CS1 = AS1\r\n CS2 = BS2 if trans_c else AS2\r\n CS3 = AS2 if trans_c else BS2\r\n locks = _matmul.get_locks(2 * AS0 * AS2 // 32 * num_locks, a.device)\r\n c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)\r\n grid = lambda meta: [width, triton.cdiv(AS2, meta['TM']), AS0]\r\n _kernel[grid](\r\n a,\r\n b,\r\n c,\r\n a.stride(0),\r\n a.stride(1),\r\n a.stride(3 if trans_a else 2),\r\n a.stride(2 if trans_a else 3),\r\n b.stride(0),\r\n b.stride(1),\r\n b.stride(3 if trans_b else 2),\r\n b.stride(2 if trans_b else 3),\r\n c.stride(0),\r\n c.stride(1),\r\n c.stride(3 if trans_c else 2),\r\n c.stride(2 if trans_c else 3),\r\n AS2,\r\n BS2,\r\n 0,\r\n 0,\r\n lut,\r\n locks,\r\n num_locks,\r\n num_warps=4,\r\n **meta\r\n )\r\n return c\r\n\r\n @staticmethod\r\n def _dsd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs):\r\n # shapes / dtypes\r\n AS1 = block * spdims[2 if trans_a else 1]\r\n BS0 = b.size(0)\r\n BS1 = b.size(1)\r\n BS3 = b.size(2 if trans_b else 3)\r\n dtype = a.dtype\r\n # kernel\r\n meta = {'TM': block, 'TN': 128, 'TK': 16, 'BLOCK': block, 'TZ': 1,\r\n 'SDD': False, 'DSD': True, 'DDS': False}\r\n # output\r\n CS0 = BS0\r\n CS1 = BS1\r\n CS2 = BS3 if trans_c else AS1\r\n CS3 = AS1 if trans_c else BS3\r\n locks = _matmul.get_locks(2 * BS0 * BS3 // 32 * num_locks, a.device)\r\n c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)\r\n grid = lambda meta: [width, triton.cdiv(BS3, meta['TN']), BS0]\r\n _kernel[grid](\r\n a,\r\n b,\r\n c,\r\n a.stride(0),\r\n a.stride(1),\r\n a.stride(3 if trans_a else 2),\r\n a.stride(2 if trans_a else 3),\r\n b.stride(0),\r\n b.stride(1),\r\n b.stride(3 if trans_b else 2),\r\n b.stride(2 if trans_b else 3),\r\n c.stride(0),\r\n c.stride(1),\r\n c.stride(3 if trans_c else 2),\r\n c.stride(2 if trans_c else 3),\r\n BS3,\r\n AS1,\r\n 0,\r\n 0,\r\n lut,\r\n locks,\r\n num_locks,\r\n num_warps=4,\r\n **meta\r\n )\r\n return c\r\n\r\n fn = {'sdd': _sdd_matmul.__get__(object), 'dsd': _dsd_matmul.__get__(object), 'dds': _dds_matmul.__get__(object)}\r\n\r\n @staticmethod\r\n def forward(\r\n ctx, a, b, trans_a, trans_b, trans_c, mode, spdims, block, c_lut, c_num_locks, c_width, c_packs, da_lut, da_num_locks,\r\n da_width, da_packs, db_lut, db_num_locks, db_width, db_packs\r\n ):\r\n c = _matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block, c_lut, c_num_locks, c_width, c_packs)\r\n # save for backward\r\n ctx.save_for_backward(a, b)\r\n ctx.da_num_locks = da_num_locks\r\n ctx.da_lut = da_lut\r\n ctx.da_width = da_width\r\n ctx.da_packs = da_packs\r\n ctx.db_lut = db_lut\r\n ctx.db_num_locks = db_num_locks\r\n ctx.db_width = db_width\r\n ctx.db_packs = db_packs\r\n ctx.mode = mode\r\n ctx.spdims = spdims\r\n ctx.block = block\r\n ctx.trans_a = trans_a\r\n ctx.trans_b = trans_b\r\n return c\r\n\r\n @staticmethod\r\n def backward(ctx, dc):\r\n # saved for backward\r\n a, b = ctx.saved_tensors\r\n da, db = None, None\r\n mode = ctx.mode\r\n\r\n # gradients w.r.t. 
a\r\n if ctx.needs_input_grad[0]:\r\n mode_da = mode[1] + mode[0] + mode[2]\r\n da = _matmul.fn[mode_da](\r\n dc, b, False, not ctx.trans_b, ctx.trans_a, ctx.spdims, ctx.block, ctx.da_lut, ctx.da_num_locks, ctx.da_width,\r\n ctx.da_packs\r\n )\r\n # gradients w.r.t. b\r\n if ctx.needs_input_grad[1]:\r\n mode_db = mode[2] + mode[1] + mode[0]\r\n db = _matmul.fn[mode_db](\r\n a, dc, not ctx.trans_a, False, ctx.trans_b, ctx.spdims, ctx.block, ctx.db_lut, ctx.db_num_locks, ctx.db_width,\r\n ctx.db_packs\r\n )\r\n return da, db, None, None, None,\\\r\n None, None, None, None,\\\r\n None, None, None, None, None, None,\\\r\n None, None, None, None, None, None,\\\r\n None, None, None, None, None, None\r\n\r\n\r\nclass matmul:\r\n def make_lut(self, dtype, device):\r\n key = (dtype, device)\r\n if key in self.lut_cache:\r\n return self.lut_cache[key]\r\n # C look-up table\r\n layout, block = self.layout, self.block\r\n step = 16\r\n if self.mode == 'sdd':\r\n c_lut, c_num_locks, c_width, c_packs = _matmul.make_sdd_lut(layout, block, device)\r\n elif self.mode == 'dsd':\r\n c_lut, c_num_locks, c_width, c_packs = _matmul.make_dxx_lut(layout, block, step, not self.trans_a, device)\r\n elif self.mode == 'dds':\r\n c_lut, c_num_locks, c_width, c_packs = _matmul.make_dxx_lut(layout, block, step, self.trans_b, device)\r\n # DA look-up table\r\n if self.mode == 'sdd':\r\n da_lut, da_num_locks, da_width, da_packs = _matmul.make_dxx_lut(layout, block, step, True, device)\r\n elif self.mode == 'dsd':\r\n da_lut, da_num_locks, da_width, da_packs = _matmul.make_sdd_lut(layout, block, device)\r\n elif self.mode == 'dds':\r\n da_lut, da_num_locks, da_width, da_packs = _matmul.make_dxx_lut(layout, block, step, not self.trans_b, device)\r\n # DB look-up table\r\n if self.mode == 'sdd':\r\n db_lut, db_num_locks, db_width, db_packs = _matmul.make_dxx_lut(layout, block, step, False, device)\r\n elif self.mode == 'dsd':\r\n db_lut, db_num_locks, db_width, db_packs = _matmul.make_dxx_lut(layout, block, step, self.trans_a, device)\r\n elif self.mode == 'dds':\r\n db_lut, db_num_locks, db_width, db_packs = _matmul.make_sdd_lut(layout, block, device)\r\n self.lut_cache[key] = (c_lut, c_num_locks, c_width, c_packs,\r\n da_lut, da_num_locks, da_width, da_packs,\r\n db_lut, db_num_locks, db_width, db_packs)\r\n return self.lut_cache[key]\r\n\r\n def __init__(self, layout, block, mode, trans_a=False, trans_b=False):\r\n if mode not in ['sdd', 'dsd', 'dds']:\r\n raise NotImplementedError('Supported modes are: sdd, dsd, dds')\r\n # look-up table cache\r\n self.lut_cache = dict()\r\n # attributes\r\n self.block = block\r\n self.mode = mode\r\n self.trans_a = trans_a\r\n self.trans_b = trans_b\r\n\r\n layout_dim = layout.ndim\r\n assert layout_dim in (2, 3), \"Layout should be a 2 or 3 dimensional tensor of 0s and 1s\"\r\n\r\n if not mode == 'sdd':\r\n # Dims to be reduced on the 'inside' of the matmul, either -1 or -2\r\n trans_dense, trans_sparse, sparse_inner = (trans_b, trans_a, -1) if mode == 'dsd' else (trans_a, trans_b, -2)\r\n self.dense_inner_dim = -((sparse_inner % 2) + 1) if not trans_dense else sparse_inner\r\n sparse_inner = sparse_inner if not trans_sparse else -((sparse_inner % 2) + 1)\r\n\r\n # Inner dim of the dense input should be equal to the inner dim of the sparse input\r\n self.dense_inner_size = layout.shape[sparse_inner] * block\r\n # Expected shape for sparse inputs\r\n self.sparse_shape = (layout.sum().item(), block, block)\r\n\r\n # Support using the same layout across attention heads etc.\r\n if 
layout_dim == 2:\r\n layout = layout.unsqueeze(0)\r\n\r\n layout = layout.long() # Above code assumes the layout tensor is an integral type\r\n self.layout = layout\r\n self.spdims = layout.shape\r\n\r\n def __call__(self, a, b):\r\n c_lut, c_num_locks, c_width, c_packs,\\\r\n da_lut, da_num_locks, da_width, da_packs,\\\r\n db_lut, db_num_locks, db_width, db_packs = self.make_lut(a.dtype, a.device)\r\n\r\n # If we don't check for invalid shapes, devices, & dtypes here, they will lead to undefined behavior\r\n # and potential illegal memory accesses\r\n original_dims = max(a.ndim, b.ndim)\r\n a, b = self._validate_inputs(a, b)\r\n\r\n # execute\r\n c = _matmul.apply(\r\n a, b, self.trans_a, self.trans_b, False, self.mode, self.spdims, self.block, c_lut, c_num_locks, c_width,\r\n c_packs, da_lut, da_num_locks, da_width, da_packs, db_lut, db_num_locks, db_width, db_packs\r\n )\r\n # This removes any leading singleton dimensions we may have added to the tensor that weren't in the input\r\n dims_to_trim = c.ndim - original_dims\r\n for _ in range(dims_to_trim):\r\n c = c.squeeze(0)\r\n\r\n return c\r\n\r\n def _validate_inputs(self, a, b):\r\n if a.device != b.device:\r\n raise ValueError(f\"Inputs must be on the same device; got {a.device} for tensor A \"\r\n f\"and {b.device} for tensor B\")\r\n if not a.is_cuda:\r\n raise ValueError(\"Only GPU devices are supported for now\")\r\n\r\n # When autocast is enabled, torch.matmul autocasts to float16, so we do the same here\r\n if torch.is_autocast_enabled():\r\n a, b = a.half(), b.half()\r\n elif a.dtype != b.dtype:\r\n raise ValueError(f\"Inputs must be the same dtype; got {a.dtype} for A and {b.dtype} for B\")\r\n\r\n mode, trans_a, trans_b = self.mode, self.trans_a, self.trans_b\r\n if mode != 'sdd':\r\n # One input is sparse\r\n dense, dense_name, sparse, sparse_name = (a, 'A', b, 'B') if mode == 'dds' else (b, 'B', a, 'A')\r\n dense_inner = dense.shape[self.dense_inner_dim]\r\n if dense_inner != self.dense_inner_size:\r\n raise ValueError(f\"Expected tensor {dense_name} to have size {self.dense_inner_size} at dim \"\r\n f\"{self.dense_inner_dim % dense.ndim}, got {dense_inner}.\")\r\n\r\n if sparse.shape[-len(self.sparse_shape):] != self.sparse_shape:\r\n raise ValueError(f\"Expected tensor with trailing dimensions of shape {self.sparse_shape} for argument \"\r\n f\"{sparse_name}, got {sparse.shape}\")\r\n\r\n def add_extra_dims(x):\r\n # Add extra leading singleton dimensions if needed\r\n dims_needed = 4 - x.ndim\r\n if dims_needed > 0:\r\n singletons = [1] * dims_needed\r\n x = x.view(*singletons, *x.shape)\r\n elif dims_needed < 0:\r\n raise ValueError(\"Tensors with more than 4 dimensions are not currently supported\")\r\n\r\n return x\r\n\r\n # Pad shapes with leading singleton dimensions\r\n a = add_extra_dims(a)\r\n b = add_extra_dims(b)\r\n\r\n return a, b\r\n\r\ndef _dim_to_name(x):\r\n # assert x in (-1, -2)\r\n return \"last\" if x == -1 else \"second to last\""
] | [
[
"torch.zeros",
"torch.cat",
"torch.stack",
"torch.arange",
"torch.empty_like",
"torch.from_numpy",
"torch.sum",
"torch.is_autocast_enabled",
"torch.tensor",
"torch.ones_like",
"torch.zeros_like",
"torch.empty",
"torch.cumsum"
]
] |
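The Triton block-sparse record above ends here. Before the next record, a minimal standalone sketch of the bookkeeping that _matmul.load_balance and make_dxx_lut perform: per-row block counts taken from a binary layout and their cumulative-sum start offsets. The toy layout and all variable names are illustrative and not taken from the repo; only the torch calls come from the record's API column.

import torch

# Toy single-head binary layout of shape [heads, blocks_m, blocks_k]; 1 marks a stored block.
layout = torch.tensor([[[1, 0, 1],
                        [0, 1, 1],
                        [1, 1, 0]]])
sizes = torch.sum(layout[0, :, :], 1)                 # blocks to reduce per row: tensor([2, 2, 2])
segments = sizes.clone()
offsets = torch.zeros_like(segments)
offsets[1:] = torch.cumsum(segments[:-1], dim=0)      # start offset of each reduction: tensor([0, 2, 4])
print(sizes.tolist(), offsets.tolist())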
shiquanyang/MINERVA | [
"6cabd380a9e7114c26c10ef3fd74050ec036d547"
] | [
"code/model/trainer.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom tqdm import tqdm\nimport json\nimport time\nimport os\nimport logging\nimport numpy as np\n# import tensorflow as tf\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nfrom code.model.agent import Agent\nfrom code.options import read_options\nfrom code.model.environment import env\nimport codecs\nfrom collections import defaultdict\nimport gc\nimport resource\nimport sys\nfrom code.model.baseline import ReactiveBaseline\nfrom code.model.nell_eval import nell_eval\nfrom scipy.special import logsumexp as lse\n\nlogger = logging.getLogger()\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\n\nclass Trainer(object):\n def __init__(self, params):\n\n # transfer parameters to self\n for key, val in params.items(): setattr(self, key, val);\n\n self.agent = Agent(params)\n self.save_path = None\n self.train_environment = env(params, 'train')\n self.dev_test_environment = env(params, 'dev')\n self.test_test_environment = env(params, 'test')\n self.test_environment = self.dev_test_environment\n self.rev_relation_vocab = self.train_environment.grapher.rev_relation_vocab\n self.rev_entity_vocab = self.train_environment.grapher.rev_entity_vocab\n self.max_hits_at_10 = 0\n self.ePAD = self.entity_vocab['PAD']\n self.rPAD = self.relation_vocab['PAD']\n # optimize\n self.baseline = ReactiveBaseline(l=self.Lambda)\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n\n\n def calc_reinforce_loss(self):\n loss = tf.stack(self.per_example_loss, axis=1) # [B, T]\n\n self.tf_baseline = self.baseline.get_baseline_value()\n # self.pp = tf.Print(self.tf_baseline)\n # multiply with rewards\n final_reward = self.cum_discounted_reward - self.tf_baseline\n # reward_std = tf.sqrt(tf.reduce_mean(tf.square(final_reward))) + 1e-5 # constant addded for numerical stability\n reward_mean, reward_var = tf.nn.moments(final_reward, axes=[0, 1])\n # Constant added for numerical stability\n reward_std = tf.sqrt(reward_var) + 1e-6\n final_reward = tf.div(final_reward - reward_mean, reward_std)\n\n loss = tf.multiply(loss, final_reward) # [B, T]\n self.loss_before_reg = loss\n\n total_loss = tf.reduce_mean(loss) - self.decaying_beta * self.entropy_reg_loss(self.per_example_logits) # scalar\n\n return total_loss\n\n def entropy_reg_loss(self, all_logits):\n all_logits = tf.stack(all_logits, axis=2) # [B, MAX_NUM_ACTIONS, T]\n entropy_policy = - tf.reduce_mean(tf.reduce_sum(tf.multiply(tf.exp(all_logits), all_logits), axis=1)) # scalar\n return entropy_policy\n\n def initialize(self, restore=None, sess=None):\n\n logger.info(\"Creating TF graph...\")\n self.candidate_relation_sequence = []\n self.candidate_entity_sequence = []\n self.input_path = []\n self.first_state_of_test = tf.placeholder(tf.bool, name=\"is_first_state_of_test\")\n self.query_relation = tf.placeholder(tf.int32, [None], name=\"query_relation\")\n self.range_arr = tf.placeholder(tf.int32, shape=[None, ])\n self.global_step = tf.Variable(0, trainable=False)\n self.decaying_beta = tf.train.exponential_decay(self.beta, self.global_step,\n 200, 0.90, staircase=False)\n self.entity_sequence = []\n\n # to feed in the discounted reward tensor\n self.cum_discounted_reward = tf.placeholder(tf.float32, [None, self.path_length],\n name=\"cumulative_discounted_reward\")\n\n\n\n for t in range(self.path_length):\n next_possible_relations = tf.placeholder(tf.int32, [None, self.max_num_actions],\n name=\"next_relations_{}\".format(t))\n next_possible_entities = 
tf.placeholder(tf.int32, [None, self.max_num_actions],\n name=\"next_entities_{}\".format(t))\n input_label_relation = tf.placeholder(tf.int32, [None], name=\"input_label_relation_{}\".format(t))\n start_entities = tf.placeholder(tf.int32, [None, ])\n self.input_path.append(input_label_relation)\n self.candidate_relation_sequence.append(next_possible_relations)\n self.candidate_entity_sequence.append(next_possible_entities)\n self.entity_sequence.append(start_entities)\n self.loss_before_reg = tf.constant(0.0)\n self.per_example_loss, self.per_example_logits, self.action_idx = self.agent(\n self.candidate_relation_sequence,\n self.candidate_entity_sequence, self.entity_sequence,\n self.input_path,\n self.query_relation, self.range_arr, self.first_state_of_test, self.path_length)\n\n\n self.loss_op = self.calc_reinforce_loss()\n\n # backprop\n self.train_op = self.bp(self.loss_op)\n\n # Building the test graph\n self.prev_state = tf.placeholder(tf.float32, self.agent.get_mem_shape(), name=\"memory_of_agent\")\n self.prev_relation = tf.placeholder(tf.int32, [None, ], name=\"previous_relation\")\n self.query_embedding = tf.nn.embedding_lookup(self.agent.relation_lookup_table, self.query_relation) # [B, 2D]\n layer_state = tf.unstack(self.prev_state, self.LSTM_layers)\n formated_state = [tf.unstack(s, 2) for s in layer_state]\n self.next_relations = tf.placeholder(tf.int32, shape=[None, self.max_num_actions])\n self.next_entities = tf.placeholder(tf.int32, shape=[None, self.max_num_actions])\n\n self.current_entities = tf.placeholder(tf.int32, shape=[None,])\n\n\n\n with tf.variable_scope(\"policy_steps_unroll\") as scope:\n scope.reuse_variables()\n self.test_loss, test_state, self.test_logits, self.test_action_idx, self.chosen_relation = self.agent.step(\n self.next_relations, self.next_entities, formated_state, self.prev_relation, self.query_embedding,\n self.current_entities, self.input_path[0], self.range_arr, self.first_state_of_test)\n self.test_state = tf.stack(test_state)\n\n logger.info('TF Graph creation done..')\n self.model_saver = tf.train.Saver(max_to_keep=2)\n\n # return the variable initializer Op.\n if not restore:\n return tf.global_variables_initializer()\n else:\n return self.model_saver.restore(sess, restore)\n\n\n\n def initialize_pretrained_embeddings(self, sess):\n if self.pretrained_embeddings_action != '':\n embeddings = np.loadtxt(open(self.pretrained_embeddings_action))\n _ = sess.run((self.agent.relation_embedding_init),\n feed_dict={self.agent.action_embedding_placeholder: embeddings})\n if self.pretrained_embeddings_entity != '':\n embeddings = np.loadtxt(open(self.pretrained_embeddings_entity))\n _ = sess.run((self.agent.entity_embedding_init),\n feed_dict={self.agent.entity_embedding_placeholder: embeddings})\n\n def bp(self, cost):\n self.baseline.update(tf.reduce_mean(self.cum_discounted_reward))\n tvars = tf.trainable_variables()\n grads = tf.gradients(cost, tvars)\n grads, _ = tf.clip_by_global_norm(grads, self.grad_clip_norm)\n train_op = self.optimizer.apply_gradients(zip(grads, tvars))\n with tf.control_dependencies([train_op]): # see https://github.com/tensorflow/tensorflow/issues/1899\n self.dummy = tf.constant(0)\n return train_op\n\n\n def calc_cum_discounted_reward(self, rewards):\n \"\"\"\n calculates the cumulative discounted reward.\n :param rewards:\n :param T:\n :param gamma:\n :return:\n \"\"\"\n running_add = np.zeros([rewards.shape[0]]) # [B]\n cum_disc_reward = np.zeros([rewards.shape[0], self.path_length]) # [B, T]\n cum_disc_reward[:,\n 
self.path_length - 1] = rewards # set the last time step to the reward received at the last state\n for t in reversed(range(self.path_length)):\n running_add = self.gamma * running_add + cum_disc_reward[:, t]\n cum_disc_reward[:, t] = running_add\n return cum_disc_reward\n\n def gpu_io_setup(self):\n # create fetches for partial_run_setup\n fetches = self.per_example_loss + self.action_idx + [self.loss_op] + self.per_example_logits + [self.dummy]\n feeds = [self.first_state_of_test] + self.candidate_relation_sequence+ self.candidate_entity_sequence + self.input_path + \\\n [self.query_relation] + [self.cum_discounted_reward] + [self.range_arr] + self.entity_sequence\n\n\n feed_dict = [{} for _ in range(self.path_length)]\n\n feed_dict[0][self.first_state_of_test] = False\n feed_dict[0][self.query_relation] = None\n feed_dict[0][self.range_arr] = np.arange(self.batch_size*self.num_rollouts)\n for i in range(self.path_length):\n feed_dict[i][self.input_path[i]] = np.zeros(self.batch_size * self.num_rollouts) # placebo\n feed_dict[i][self.candidate_relation_sequence[i]] = None\n feed_dict[i][self.candidate_entity_sequence[i]] = None\n feed_dict[i][self.entity_sequence[i]] = None\n\n return fetches, feeds, feed_dict\n\n def train(self, sess):\n # import pdb\n # pdb.set_trace()\n fetches, feeds, feed_dict = self.gpu_io_setup()\n\n train_loss = 0.0\n start_time = time.time()\n self.batch_counter = 0\n for episode in self.train_environment.get_episodes():\n\n self.batch_counter += 1\n h = sess.partial_run_setup(fetches=fetches, feeds=feeds)\n feed_dict[0][self.query_relation] = episode.get_query_relation()\n\n # get initial state\n state = episode.get_state()\n # for each time step\n loss_before_regularization = []\n logits = []\n for i in range(self.path_length):\n feed_dict[i][self.candidate_relation_sequence[i]] = state['next_relations']\n feed_dict[i][self.candidate_entity_sequence[i]] = state['next_entities']\n feed_dict[i][self.entity_sequence[i]] = state['current_entities']\n per_example_loss, per_example_logits, idx = sess.partial_run(h, [self.per_example_loss[i], self.per_example_logits[i], self.action_idx[i]],\n feed_dict=feed_dict[i])\n loss_before_regularization.append(per_example_loss)\n logits.append(per_example_logits)\n # action = np.squeeze(action, axis=1) # [B,]\n state = episode(idx)\n loss_before_regularization = np.stack(loss_before_regularization, axis=1)\n\n # get the final reward from the environment\n rewards = episode.get_reward()\n\n # computed cumulative discounted reward\n cum_discounted_reward = self.calc_cum_discounted_reward(rewards) # [B, T]\n\n\n # backprop\n batch_total_loss, _ = sess.partial_run(h, [self.loss_op, self.dummy],\n feed_dict={self.cum_discounted_reward: cum_discounted_reward})\n\n # print statistics\n train_loss = 0.98 * train_loss + 0.02 * batch_total_loss\n avg_reward = np.mean(rewards)\n # now reshape the reward to [orig_batch_size, num_rollouts], I want to calculate for how many of the\n # entity pair, atleast one of the path get to the right answer\n reward_reshape = np.reshape(rewards, (self.batch_size, self.num_rollouts)) # [orig_batch, num_rollouts]\n reward_reshape = np.sum(reward_reshape, axis=1) # [orig_batch]\n reward_reshape = (reward_reshape > 0)\n num_ep_correct = np.sum(reward_reshape)\n if np.isnan(train_loss):\n raise ArithmeticError(\"Error in computing loss\")\n\n logger.info(\"batch_counter: {0:4d}, num_hits: {1:7.4f}, avg. 
reward per batch {2:7.4f}, \"\n \"num_ep_correct {3:4d}, avg_ep_correct {4:7.4f}, train loss {5:7.4f}\".\n format(self.batch_counter, np.sum(rewards), avg_reward, num_ep_correct,\n (num_ep_correct / self.batch_size),\n train_loss))\n\n if self.batch_counter%self.eval_every == 0:\n with open(self.output_dir + '/scores.txt', 'a') as score_file:\n score_file.write(\"Score for iteration \" + str(self.batch_counter) + \"\\n\")\n os.mkdir(self.path_logger_file + \"/\" + str(self.batch_counter))\n self.path_logger_file_ = self.path_logger_file + \"/\" + str(self.batch_counter) + \"/paths\"\n\n\n\n self.test(sess, beam=True, print_paths=False)\n\n logger.info('Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n\n gc.collect()\n if self.batch_counter >= self.total_iterations:\n break\n\n def test(self, sess, beam=False, print_paths=False, save_model = True, auc = False):\n batch_counter = 0\n paths = defaultdict(list)\n answers = []\n feed_dict = {}\n all_final_reward_1 = 0\n all_final_reward_3 = 0\n all_final_reward_5 = 0\n all_final_reward_10 = 0\n all_final_reward_20 = 0\n auc = 0\n\n total_examples = self.test_environment.total_no_examples\n for episode in tqdm(self.test_environment.get_episodes()):\n batch_counter += 1\n\n temp_batch_size = episode.no_examples\n\n self.qr = episode.get_query_relation()\n feed_dict[self.query_relation] = self.qr\n # set initial beam probs\n beam_probs = np.zeros((temp_batch_size * self.test_rollouts, 1))\n # get initial state\n state = episode.get_state()\n mem = self.agent.get_mem_shape()\n agent_mem = np.zeros((mem[0], mem[1], temp_batch_size*self.test_rollouts, mem[3]) ).astype('float32')\n previous_relation = np.ones((temp_batch_size * self.test_rollouts, ), dtype='int64') * self.relation_vocab[\n 'DUMMY_START_RELATION']\n feed_dict[self.range_arr] = np.arange(temp_batch_size * self.test_rollouts)\n feed_dict[self.input_path[0]] = np.zeros(temp_batch_size * self.test_rollouts)\n\n ####logger code####\n if print_paths:\n self.entity_trajectory = []\n self.relation_trajectory = []\n ####################\n\n self.log_probs = np.zeros((temp_batch_size*self.test_rollouts,)) * 1.0\n\n # for each time step\n for i in range(self.path_length):\n if i == 0:\n feed_dict[self.first_state_of_test] = True\n feed_dict[self.next_relations] = state['next_relations']\n feed_dict[self.next_entities] = state['next_entities']\n feed_dict[self.current_entities] = state['current_entities']\n feed_dict[self.prev_state] = agent_mem\n feed_dict[self.prev_relation] = previous_relation\n\n loss, agent_mem, test_scores, test_action_idx, chosen_relation = sess.run(\n [ self.test_loss, self.test_state, self.test_logits, self.test_action_idx, self.chosen_relation],\n feed_dict=feed_dict)\n\n\n if beam:\n k = self.test_rollouts\n new_scores = test_scores + beam_probs\n if i == 0:\n idx = np.argsort(new_scores)\n idx = idx[:, -k:]\n ranged_idx = np.tile([b for b in range(k)], temp_batch_size)\n idx = idx[np.arange(k*temp_batch_size), ranged_idx]\n else:\n idx = self.top_k(new_scores, k)\n\n y = idx//self.max_num_actions\n x = idx%self.max_num_actions\n\n y += np.repeat([b*k for b in range(temp_batch_size)], k)\n state['current_entities'] = state['current_entities'][y]\n state['next_relations'] = state['next_relations'][y,:]\n state['next_entities'] = state['next_entities'][y, :]\n agent_mem = agent_mem[:, :, y, :]\n test_action_idx = x\n chosen_relation = state['next_relations'][np.arange(temp_batch_size*k), x]\n beam_probs = new_scores[y, x]\n beam_probs = 
beam_probs.reshape((-1, 1))\n if print_paths:\n for j in range(i):\n self.entity_trajectory[j] = self.entity_trajectory[j][y]\n self.relation_trajectory[j] = self.relation_trajectory[j][y]\n previous_relation = chosen_relation\n\n ####logger code####\n if print_paths:\n self.entity_trajectory.append(state['current_entities'])\n self.relation_trajectory.append(chosen_relation)\n ####################\n state = episode(test_action_idx)\n self.log_probs += test_scores[np.arange(self.log_probs.shape[0]), test_action_idx]\n if beam:\n self.log_probs = beam_probs\n\n ####Logger code####\n\n if print_paths:\n self.entity_trajectory.append(\n state['current_entities'])\n\n\n # ask environment for final reward\n rewards = episode.get_reward() # [B*test_rollouts]\n reward_reshape = np.reshape(rewards, (temp_batch_size, self.test_rollouts)) # [orig_batch, test_rollouts]\n self.log_probs = np.reshape(self.log_probs, (temp_batch_size, self.test_rollouts))\n sorted_indx = np.argsort(-self.log_probs)\n final_reward_1 = 0\n final_reward_3 = 0\n final_reward_5 = 0\n final_reward_10 = 0\n final_reward_20 = 0\n AP = 0\n ce = episode.state['current_entities'].reshape((temp_batch_size, self.test_rollouts))\n se = episode.start_entities.reshape((temp_batch_size, self.test_rollouts))\n for b in range(temp_batch_size):\n answer_pos = None\n seen = set()\n pos=0\n if self.pool == 'max':\n for r in sorted_indx[b]:\n if reward_reshape[b,r] == self.positive_reward:\n answer_pos = pos\n break\n if ce[b, r] not in seen:\n seen.add(ce[b, r])\n pos += 1\n if self.pool == 'sum':\n scores = defaultdict(list)\n answer = ''\n for r in sorted_indx[b]:\n scores[ce[b,r]].append(self.log_probs[b,r])\n if reward_reshape[b,r] == self.positive_reward:\n answer = ce[b,r]\n final_scores = defaultdict(float)\n for e in scores:\n final_scores[e] = lse(scores[e])\n sorted_answers = sorted(final_scores, key=final_scores.get, reverse=True)\n if answer in sorted_answers:\n answer_pos = sorted_answers.index(answer)\n else:\n answer_pos = None\n\n\n if answer_pos != None:\n if answer_pos < 20:\n final_reward_20 += 1\n if answer_pos < 10:\n final_reward_10 += 1\n if answer_pos < 5:\n final_reward_5 += 1\n if answer_pos < 3:\n final_reward_3 += 1\n if answer_pos < 1:\n final_reward_1 += 1\n if answer_pos == None:\n AP += 0\n else:\n AP += 1.0/((answer_pos+1))\n if print_paths:\n qr = self.train_environment.grapher.rev_relation_vocab[self.qr[b * self.test_rollouts]]\n start_e = self.rev_entity_vocab[episode.start_entities[b * self.test_rollouts]]\n end_e = self.rev_entity_vocab[episode.end_entities[b * self.test_rollouts]]\n paths[str(qr)].append(str(start_e) + \"\\t\" + str(end_e) + \"\\n\")\n paths[str(qr)].append(\"Reward:\" + str(1 if answer_pos != None and answer_pos < 10 else 0) + \"\\n\")\n for r in sorted_indx[b]:\n indx = b * self.test_rollouts + r\n if rewards[indx] == self.positive_reward:\n rev = 1\n else:\n rev = -1\n answers.append(self.rev_entity_vocab[se[b,r]]+'\\t'+ self.rev_entity_vocab[ce[b,r]]+'\\t'+ str(self.log_probs[b,r])+'\\n')\n paths[str(qr)].append(\n '\\t'.join([str(self.rev_entity_vocab[e[indx]]) for e in\n self.entity_trajectory]) + '\\n' + '\\t'.join(\n [str(self.rev_relation_vocab[re[indx]]) for re in self.relation_trajectory]) + '\\n' + str(\n rev) + '\\n' + str(\n self.log_probs[b, r]) + '\\n___' + '\\n')\n paths[str(qr)].append(\"#####################\\n\")\n\n all_final_reward_1 += final_reward_1\n all_final_reward_3 += final_reward_3\n all_final_reward_5 += final_reward_5\n all_final_reward_10 += 
final_reward_10\n all_final_reward_20 += final_reward_20\n auc += AP\n\n all_final_reward_1 /= total_examples\n all_final_reward_3 /= total_examples\n all_final_reward_5 /= total_examples\n all_final_reward_10 /= total_examples\n all_final_reward_20 /= total_examples\n auc /= total_examples\n if save_model:\n if all_final_reward_10 >= self.max_hits_at_10:\n self.max_hits_at_10 = all_final_reward_10\n self.save_path = self.model_saver.save(sess, self.model_dir + \"model\" + '.ckpt')\n\n if print_paths:\n logger.info(\"[ printing paths at {} ]\".format(self.output_dir+'/test_beam/'))\n for q in paths:\n j = q.replace('/', '-')\n with codecs.open(self.path_logger_file_ + '_' + j, 'a', 'utf-8') as pos_file:\n for p in paths[q]:\n pos_file.write(p)\n with open(self.path_logger_file_ + 'answers', 'w') as answer_file:\n for a in answers:\n answer_file.write(a)\n\n with open(self.output_dir + '/scores.txt', 'a') as score_file:\n score_file.write(\"Hits@1: {0:7.4f}\".format(all_final_reward_1))\n score_file.write(\"\\n\")\n score_file.write(\"Hits@3: {0:7.4f}\".format(all_final_reward_3))\n score_file.write(\"\\n\")\n score_file.write(\"Hits@5: {0:7.4f}\".format(all_final_reward_5))\n score_file.write(\"\\n\")\n score_file.write(\"Hits@10: {0:7.4f}\".format(all_final_reward_10))\n score_file.write(\"\\n\")\n score_file.write(\"Hits@20: {0:7.4f}\".format(all_final_reward_20))\n score_file.write(\"\\n\")\n score_file.write(\"auc: {0:7.4f}\".format(auc))\n score_file.write(\"\\n\")\n score_file.write(\"\\n\")\n\n logger.info(\"Hits@1: {0:7.4f}\".format(all_final_reward_1))\n logger.info(\"Hits@3: {0:7.4f}\".format(all_final_reward_3))\n logger.info(\"Hits@5: {0:7.4f}\".format(all_final_reward_5))\n logger.info(\"Hits@10: {0:7.4f}\".format(all_final_reward_10))\n logger.info(\"Hits@20: {0:7.4f}\".format(all_final_reward_20))\n logger.info(\"auc: {0:7.4f}\".format(auc))\n\n def top_k(self, scores, k):\n scores = scores.reshape(-1, k * self.max_num_actions) # [B, (k*max_num_actions)]\n idx = np.argsort(scores, axis=1)\n idx = idx[:, -k:] # take the last k highest indices # [B , k]\n return idx.reshape((-1))\n\nif __name__ == '__main__':\n\n # read command line options\n options = read_options()\n # Set logging\n logger.setLevel(logging.INFO)\n fmt = logging.Formatter('%(asctime)s: [ %(message)s ]',\n '%m/%d/%Y %I:%M:%S %p')\n console = logging.StreamHandler()\n console.setFormatter(fmt)\n logger.addHandler(console)\n logfile = logging.FileHandler(options['log_file_name'], 'w')\n logfile.setFormatter(fmt)\n logger.addHandler(logfile)\n # read the vocab files, it will be used by many classes hence global scope\n logger.info('reading vocab files...')\n options['relation_vocab'] = json.load(open(options['vocab_dir'] + '/relation_vocab.json'))\n options['entity_vocab'] = json.load(open(options['vocab_dir'] + '/entity_vocab.json'))\n logger.info('Reading mid to name map')\n mid_to_word = {}\n # with open('/iesl/canvas/rajarshi/data/RL-Path-RNN/FB15k-237/fb15k_names', 'r') as f:\n # mid_to_word = json.load(f)\n logger.info('Done..')\n logger.info('Total number of entities {}'.format(len(options['entity_vocab'])))\n logger.info('Total number of relations {}'.format(len(options['relation_vocab'])))\n save_path = ''\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = False\n config.log_device_placement = False\n\n\n #Training\n if not options['load_model']:\n trainer = Trainer(options)\n with tf.Session(config=config) as sess:\n sess.run(trainer.initialize())\n 
trainer.initialize_pretrained_embeddings(sess=sess)\n\n trainer.train(sess)\n save_path = trainer.save_path\n path_logger_file = trainer.path_logger_file\n output_dir = trainer.output_dir\n\n tf.reset_default_graph()\n #Testing on test with best model\n else:\n logger.info(\"Skipping training\")\n logger.info(\"Loading model from {}\".format(options[\"model_load_dir\"]))\n\n trainer = Trainer(options)\n if options['load_model']:\n save_path = options['model_load_dir']\n path_logger_file = trainer.path_logger_file\n output_dir = trainer.output_dir\n with tf.Session(config=config) as sess:\n trainer.initialize(restore=save_path, sess=sess)\n\n trainer.test_rollouts = 100\n\n os.mkdir(path_logger_file + \"/\" + \"test_beam\")\n trainer.path_logger_file_ = path_logger_file + \"/\" + \"test_beam\" + \"/paths\"\n with open(output_dir + '/scores.txt', 'a') as score_file:\n score_file.write(\"Test (beam) scores with best model from \" + save_path + \"\\n\")\n trainer.test_environment = trainer.test_test_environment\n trainer.test_environment.test_rollouts = 100\n\n trainer.test(sess, beam=True, print_paths=True, save_model=False)\n\n\n print(options['nell_evaluation'])\n if options['nell_evaluation'] == 1:\n nell_eval(path_logger_file + \"/\" + \"test_beam/\" + \"pathsanswers\", trainer.data_input_dir+'/sort_test.pairs' )\n\n"
] | [
[
"tensorflow.compat.v1.nn.moments",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.clip_by_global_norm",
"numpy.mean",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.reset_default_graph",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.exp",
"tensorflow.compat.v1.global_variables_initializer",
"scipy.special.logsumexp",
"tensorflow.compat.v1.gradients",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.unstack",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.sqrt",
"numpy.arange",
"tensorflow.compat.v1.multiply",
"tensorflow.compat.v1.reduce_mean",
"numpy.reshape",
"numpy.zeros",
"tensorflow.compat.v1.train.Saver",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.control_dependencies",
"numpy.stack",
"numpy.argsort",
"tensorflow.compat.v1.div",
"numpy.isnan",
"numpy.sum",
"tensorflow.compat.v1.nn.embedding_lookup",
"numpy.ones",
"tensorflow.compat.v1.train.exponential_decay",
"tensorflow.compat.v1.Variable"
]
] |
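As a quick companion to the MINERVA record above, here is a standalone NumPy sketch of the backward pass that Trainer.calc_cum_discounted_reward performs when only the final step carries a reward. The batch size, gamma, and path length are toy values chosen for illustration, not taken from the repo's configuration.

import numpy as np

gamma, path_length = 0.99, 3
rewards = np.array([1.0, 0.0])                               # [B], reward received at the last step
cum_disc_reward = np.zeros((rewards.shape[0], path_length))  # [B, T]
cum_disc_reward[:, path_length - 1] = rewards
running_add = np.zeros(rewards.shape[0])
for t in reversed(range(path_length)):
    running_add = gamma * running_add + cum_disc_reward[:, t]
    cum_disc_reward[:, t] = running_add
print(cum_disc_reward)                                       # [[0.9801, 0.99, 1.0], [0.0, 0.0, 0.0]]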
ZJU-Fangyin/KCL | [
"004f5681b77e4e75c791c909696fdb8a208501a2"
] | [
"code/initial/dataloader.py"
] | [
"#!/usr/bin/python3\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport torch\n\nfrom torch.utils.data import Dataset\n\n\nclass TrainDataset(Dataset):\n def __init__(self, triples, nentity, nrelation, negative_sample_size, mode):\n self.len = len(triples)\n self.triples = triples\n self.triple_set = set(triples)\n self.nentity = nentity\n self.nrelation = nrelation\n self.negative_sample_size = negative_sample_size\n self.mode = mode\n self.count = self.count_frequency(triples)\n self.true_head, self.true_tail = self.get_true_head_and_tail(self.triples)\n\n def __len__(self):\n return self.len\n\n def __getitem__(self, idx):\n positive_sample = self.triples[idx]\n\n head, relation, tail = positive_sample\n\n subsampling_weight = self.count[(head, relation)] + self.count[(tail, -relation - 1)]\n subsampling_weight = torch.sqrt(1 / torch.Tensor([subsampling_weight]))\n\n negative_sample_list = []\n negative_sample_size = 0\n\n while negative_sample_size < self.negative_sample_size:\n negative_sample = np.random.randint(self.nentity, size=self.negative_sample_size * 2)\n if self.mode == 'head-batch':\n mask = np.in1d(\n negative_sample,\n self.true_head[(relation, tail)],\n assume_unique=True,\n invert=True\n )\n elif self.mode == 'tail-batch':\n mask = np.in1d(\n negative_sample,\n self.true_tail[(head, relation)],\n assume_unique=True,\n invert=True\n )\n else:\n raise ValueError('Training batch mode %s not supported' % self.mode)\n negative_sample = negative_sample[mask]\n negative_sample_list.append(negative_sample)\n negative_sample_size += negative_sample.size\n\n negative_sample = np.concatenate(negative_sample_list)[:self.negative_sample_size]\n\n negative_sample = torch.LongTensor(negative_sample)\n\n positive_sample = torch.LongTensor(positive_sample)\n\n return positive_sample, negative_sample, subsampling_weight, self.mode\n\n @staticmethod\n def collate_fn(data):\n positive_sample = torch.stack([_[0] for _ in data], dim=0)\n negative_sample = torch.stack([_[1] for _ in data], dim=0)\n subsample_weight = torch.cat([_[2] for _ in data], dim=0)\n mode = data[0][3]\n return positive_sample, negative_sample, subsample_weight, mode\n\n @staticmethod\n def count_frequency(triples, start=4):\n '''\n Get frequency of a partial triple like (head, relation) or (relation, tail)\n The frequency will be used for subsampling like word2vec\n '''\n count = {}\n for head, relation, tail in triples:\n if (head, relation) not in count:\n count[(head, relation)] = start\n else:\n count[(head, relation)] += 1\n\n if (tail, -relation - 1) not in count:\n count[(tail, -relation - 1)] = start\n else:\n count[(tail, -relation - 1)] += 1\n return count\n\n @staticmethod\n def get_true_head_and_tail(triples):\n '''\n Build a dictionary of true triples that will\n be used to filter these true triples for negative sampling\n '''\n\n true_head = {}\n true_tail = {}\n\n for head, relation, tail in triples:\n if (head, relation) not in true_tail:\n true_tail[(head, relation)] = []\n true_tail[(head, relation)].append(tail)\n if (relation, tail) not in true_head:\n true_head[(relation, tail)] = []\n true_head[(relation, tail)].append(head)\n\n for relation, tail in true_head:\n true_head[(relation, tail)] = np.array(list(set(true_head[(relation, tail)])))\n for head, relation in true_tail:\n true_tail[(head, relation)] = np.array(list(set(true_tail[(head, relation)])))\n\n return true_head, true_tail\n\n\nclass 
TestDataset(Dataset):\n def __init__(self, triples, all_true_triples, nentity, nrelation, mode):\n self.len = len(triples)\n self.triple_set = set(all_true_triples)\n self.triples = triples\n self.nentity = nentity\n self.nrelation = nrelation\n self.mode = mode\n\n def __len__(self):\n return self.len\n\n def __getitem__(self, idx):\n head, relation, tail = self.triples[idx]\n\n if self.mode == 'head-batch':\n tmp = [(0, rand_head) if (rand_head, relation, tail) not in self.triple_set\n else (-1, head) for rand_head in range(self.nentity)]\n tmp[head] = (0, head)\n elif self.mode == 'tail-batch':\n tmp = [(0, rand_tail) if (head, relation, rand_tail) not in self.triple_set\n else (-1, tail) for rand_tail in range(self.nentity)]\n tmp[tail] = (0, tail)\n else:\n raise ValueError('negative batch mode %s not supported' % self.mode)\n\n tmp = torch.LongTensor(tmp)\n filter_bias = tmp[:, 0].float()\n negative_sample = tmp[:, 1]\n\n positive_sample = torch.LongTensor((head, relation, tail))\n\n return positive_sample, negative_sample, filter_bias, self.mode\n\n @staticmethod\n def collate_fn(data):\n positive_sample = torch.stack([_[0] for _ in data], dim=0)\n negative_sample = torch.stack([_[1] for _ in data], dim=0)\n filter_bias = torch.stack([_[2] for _ in data], dim=0)\n mode = data[0][3]\n return positive_sample, negative_sample, filter_bias, mode\n\n\nclass BidirectionalOneShotIterator(object):\n def __init__(self, dataloader_head, dataloader_tail):\n self.iterator_head = self.one_shot_iterator(dataloader_head)\n self.iterator_tail = self.one_shot_iterator(dataloader_tail)\n self.step = 0\n\n def __next__(self):\n self.step += 1\n if self.step % 2 == 0:\n data = next(self.iterator_head)\n else:\n data = next(self.iterator_tail)\n return data\n\n @staticmethod\n def one_shot_iterator(dataloader):\n '''\n Transform a PyTorch Dataloader into python iterator\n '''\n while True:\n for data in dataloader:\n yield data"
] | [
[
"numpy.concatenate",
"torch.cat",
"torch.stack",
"numpy.random.randint",
"torch.LongTensor",
"numpy.in1d",
"torch.Tensor"
]
] |
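A small self-contained sketch of the rejection-style negative sampling used in TrainDataset.__getitem__ above: oversample candidate entities, drop the known-true ones with np.in1d, and repeat until enough negatives are collected. The entity count and excluded tails are toy values; the repo additionally passes assume_unique=True to np.in1d for speed, which is omitted here.

import numpy as np
import torch

nentity, negative_sample_size = 10, 4
true_tails = np.array([2, 5, 7])                      # known-true entities to exclude
chunks, collected = [], 0
while collected < negative_sample_size:
    cand = np.random.randint(nentity, size=negative_sample_size * 2)
    mask = np.in1d(cand, true_tails, invert=True)     # keep only corrupting entities
    cand = cand[mask]
    chunks.append(cand)
    collected += cand.size
negative_sample = torch.LongTensor(np.concatenate(chunks)[:negative_sample_size])
print(negative_sample)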
HsinYiHung/HARK_HY | [
"086c46af5bd037fe1ced6906c6ea917ed58b134f"
] | [
"CGMPortfolio/Code/Python/Simulations/AgeMeans.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 10 15:10:36 2019\n\n@author: mateo\n\"\"\"\n\nimport HARK.ConsumptionSaving.ConsPortfolioModel as cpm\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n# %% Set up figure path\nimport sys,os\n\n# Determine if this is being run as a standalone script\nif __name__ == '__main__':\n # Running as a script\n my_file_path = os.path.abspath(\"../\")\nelse:\n # Running from do_ALL\n my_file_path = os.path.dirname(os.path.abspath(\"do_ALL.py\"))\n my_file_path = os.path.join(my_file_path,\"Code/Python/\")\n\nFigPath = os.path.join(my_file_path,\"Figures/\")\n\n# %% Calibration and solution\nsys.path.append(my_file_path)\n# Loading the parameters from the ../Code/Calibration/params.py script\nfrom Calibration.params import dict_portfolio, time_params\n\nagent = cpm.PortfolioConsumerType(**dict_portfolio)\nagent.solve()\n\n# %% Run simulation and store results in a data frame\n\n# Number of agents and periods in the simulation.\nagent.AgentCount = 50 # Number of instances of the class to be simulated.\n# Since agents can die, they are replaced by a new agent whenever they do.\n\n# Number of periods to be simulated\nagent.T_sim = agent.T_cycle*50\n\n# Set up the variables we want to keep track of.\nagent.track_vars = ['aNrmNow','cNrmNow', 'pLvlNow',\n 't_age', 'ShareNow','mNrmNow']\n\n\n# Run the simulations\nagent.initializeSim()\nagent.simulate()\n\nraw_data = {'Age': agent.history['t_age'].flatten()+time_params['Age_born'] - 1,\n 'pIncome': agent.history['pLvlNow'].flatten(),\n 'rShare': agent.history['ShareNow'].flatten(),\n 'nrmM': agent.history['mNrmNow'].flatten(),\n 'nrmC': agent.history['cNrmNow'].flatten()}\n\nData = pd.DataFrame(raw_data)\nData['Cons'] = Data.nrmC * Data.pIncome\nData['M'] = Data.nrmM * Data.pIncome\n\n# Find the mean of each variable at every age\nAgeMeans = Data.groupby(['Age']).mean().reset_index()\n\n# %% Wealth income and consumption\n\nplt.figure()\nplt.plot(AgeMeans.Age, AgeMeans.pIncome,\n label = 'Income')\nplt.plot(AgeMeans.Age, AgeMeans.M,\n label = 'Market resources')\nplt.plot(AgeMeans.Age, AgeMeans.Cons,\n label = 'Consumption')\nplt.legend()\nplt.xlabel('Age')\nplt.title('Variable Means Conditional on Survival')\nplt.grid()\n\n# Save figure\nfigname = 'YMC_Means'\nplt.savefig(os.path.join(FigPath, figname + '.png'))\nplt.savefig(os.path.join(FigPath, figname + '.jpg'))\nplt.savefig(os.path.join(FigPath, figname + '.pdf'))\nplt.savefig(os.path.join(FigPath, figname + '.svg'))\n\nplt.ioff()\nplt.draw()\nplt.pause(1)\n\n# %% Risky Share\n\n# Find age percentiles\nAgePC5 = Data.groupby(['Age']).quantile(0.05).reset_index()\nAgePC95 = Data.groupby(['Age']).quantile(0.95).reset_index()\n\n# plot till death - 1 \nage_1 = time_params['Age_death'] - time_params['Age_born']\n\nplt.figure()\nplt.ylim([0, 1.1])\nplt.plot(AgeMeans.Age[:age_1], AgeMeans.rShare[:age_1], label = 'Mean')\nplt.plot(AgePC5.Age[:age_1], AgePC5.rShare[:age_1], '--r', label='Perc. 5')\nplt.plot(AgePC95.Age[:age_1], AgePC95.rShare[:age_1], '--g', label = 'Perc. 
95')\nplt.legend()\n\nplt.xlabel('Age')\nplt.ylabel('Risky Share')\nplt.title('Risky Portfolio Share Mean Conditional on Survival')\nplt.grid()\n\n# Save figure\nfigname = 'RShare_Means'\nplt.savefig(os.path.join(FigPath, figname + '.png'))\nplt.savefig(os.path.join(FigPath, figname + '.jpg'))\nplt.savefig(os.path.join(FigPath, figname + '.pdf'))\nplt.savefig(os.path.join(FigPath, figname + '.svg'))\n\nplt.ioff()\nplt.draw()\nplt.pause(1)\n\n\n# %% Risky Share with 100-age rule\n\n# Find age percentiles\nAgePC5 = Data.groupby(['Age']).quantile(0.05).reset_index()\nAgePC95 = Data.groupby(['Age']).quantile(0.95).reset_index()\n\nplt.figure()\nplt.ylim([0, 1.1])\nplt.plot(AgeMeans.Age[:age_1], AgeMeans.rShare[:age_1], label = 'Mean')\nplt.plot(AgePC5.Age[:age_1], AgePC5.rShare[:age_1], '--r', label='Perc. 5')\nplt.plot(AgePC95.Age[:age_1], AgePC95.rShare[:age_1], '--g', label = 'Perc. 95')\n# 100 age rule\nx = range(time_params['Age_born'], time_params['Age_death'])\ny = range(100 - time_params['Age_death'] + 1, 100 - time_params['Age_born'] + 1)[::-1]\ny = np.array(y)/100\nplt.plot(x, y, '--', color='orange', label = '100-age rule')\nplt.legend()\n\nplt.xlabel('Age')\nplt.ylabel('Risky Share')\nplt.title('Risky Portfolio Share Mean Conditional on Survival')\nplt.grid()\n\n# Save figure\nfigname = 'RShare_Means_100_age'\nplt.savefig(os.path.join(FigPath, figname + '.png'))\nplt.savefig(os.path.join(FigPath, figname + '.jpg'))\nplt.savefig(os.path.join(FigPath, figname + '.pdf'))\nplt.savefig(os.path.join(FigPath, figname + '.svg'))\n\nplt.ioff()\nplt.draw()\nplt.pause(1)\n\n"
] | [
[
"numpy.array",
"pandas.DataFrame",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ioff"
]
] |
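Before the next record, a synthetic-data sketch of the pandas/matplotlib age-profile pattern that AgeMeans.py above builds from groupby means and percentiles. The random ages and shares stand in for the HARK simulation output; no HARK objects are used here.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
data = pd.DataFrame({"Age": rng.integers(20, 100, size=500),
                     "rShare": rng.uniform(0.0, 1.0, size=500)})
age_means = data.groupby(["Age"]).mean().reset_index()
age_pc5 = data.groupby(["Age"]).quantile(0.05).reset_index()

plt.figure()
plt.plot(age_means.Age, age_means.rShare, label="Mean")
plt.plot(age_pc5.Age, age_pc5.rShare, "--r", label="Perc. 5")
plt.legend(); plt.xlabel("Age"); plt.ylabel("Risky Share"); plt.grid()
plt.show()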
stephen-derosa/opencv-velocity-prediction | [
"96c5738d6424334def0ce9c4ea06fac9dfbecc27"
] | [
"old/vel_calc_sample.py"
] | [
"import numpy as np\nimport cv2 as cv\nimport argparse\nimport os\nfrom time import sleep\n\ncount = 0\ntotal_x = 0\ntotal_y = 0\n\nfps = 60\nin_per_pixel = (11+8*12)/(805-455)\n\nmax_value = 255\n\nlow_H = 0\nlow_S = 0\nlow_V = 0\nhigh_H = 255\nhigh_S = 255\nhigh_V = 255\nframe_num = 0\n\nwindow_capture_name = 'Frame Capture'\nwindow_detection_name = 'Thresholding Bar'\n\nlow_H_name = 'Low H'\nlow_S_name = 'Low S'\nlow_V_name = 'Low V'\nhigh_H_name = 'High H'\nhigh_S_name = 'High S'\nhigh_V_name = 'High V'\n\ndef on_low_H_thresh_trackbar(val):\n global low_H\n global high_H\n low_H = val\n low_H = min(high_H - 1, low_H)\n cv.setTrackbarPos(low_H_name,window_detection_name,low_H)\n return(low_H)\n\ndef on_high_H_thresh_trackbar(val):\n global low_H\n global high_H\n high_H = val\n high_H = max(high_H, low_H+1)\n cv.setTrackbarPos(high_H_name, window_detection_name, high_H)\n return(high_H)\n\ndef on_low_S_thresh_trackbar(val):\n global low_S\n global high_S\n low_S = val\n low_S = min(high_S-1, low_S)\n cv.setTrackbarPos(low_S_name, window_detection_name, low_S)\n return(low_S)\n\ndef on_high_S_thresh_trackbar(val):\n global low_S\n global high_S\n high_S = val\n high_S = max(high_S, low_S+1)\n cv.setTrackbarPos(high_S_name, window_detection_name, high_S)\n return(high_S)\n\n\ndef on_low_V_thresh_trackbar(val):\n global low_V\n global high_V\n low_V = val\n low_V = min(high_V-1, low_V)\n cv.setTrackbarPos(low_V_name, window_detection_name, low_V)\n return(low_V)\n \ndef on_high_V_thresh_trackbar(val):\n global low_V\n global high_V\n high_V = val\n high_V = max(high_V, low_V+1)\n cv.setTrackbarPos(high_V_name, window_detection_name, high_V)\n return(high_V)\n\n#parser = argparse.ArgumentParser(description='Code for Thresholding Operations using inRange tutorial.')\n#parser.add_argument('--camera', help='Camera devide number.', default=0, type=int)\n#args = parser.parse_args()\n\n#cap = cv.VideoCapture(args.camera)\ncap = cv.VideoCapture(\"lin_vel_2/lin_vel_2.mp4\")\n\n#cv.namedWindow(window_capture_name)\ncv.namedWindow(window_detection_name)\n\ncv.createTrackbar(low_H_name, window_detection_name , low_H, max_value, on_low_H_thresh_trackbar)\ncv.createTrackbar(high_H_name, window_detection_name , high_H, max_value, on_high_H_thresh_trackbar)\ncv.createTrackbar(low_S_name, window_detection_name , low_S, max_value, on_low_S_thresh_trackbar)\ncv.createTrackbar(high_S_name, window_detection_name , high_S, max_value, on_high_S_thresh_trackbar)\ncv.createTrackbar(low_V_name, window_detection_name , low_V, max_value, on_low_V_thresh_trackbar)\ncv.createTrackbar(high_V_name, window_detection_name , high_V, max_value, on_high_V_thresh_trackbar)\n\nwhile True:\n low_H = 150\n low_S = 200\n low_V = 110\n # high_H = 236\n # high_S = 255\n # high_V = 255\n ret, frame = cap.read()\n cv.imshow(\"real\",frame)\n cv.line(frame,(455,0),(455,1080),(255,0,0),5)\n cv.line(frame,(805,0),(805,1080),(255,0,0),5)\n cv.line(frame,(1168,0),(1168,1080),(255,0,0),5)\n cv.imshow(\"real\",frame)\n if frame is None:\n break\n frame = frame[400:650, 150:1300]\n hsv= cv.cvtColor(frame, cv.COLOR_BGR2HSV)\n mask = cv.inRange(hsv, (low_H, low_S, low_V), (high_H, high_S, high_V))\n res = cv.bitwise_and(frame, frame, mask=mask)\n locs = np.argwhere(res==255)\n #print(mask.tolist())\n \n #print(mask[0]) #(height,width)\n\n #res= cv.cvtColor(res, cv.COLOR_BGR2HSV)\n\n kernel = np.ones((5,5), np.uint8)\n #erosion = cv.erode(hsv,kernel,iterations=1)\n blur = cv.medianBlur(mask,25)\n dilation = 
cv.dilate(blur,kernel,iterations=1)\n\n #opening - goal false positives and false negatives from image\n opening = cv.morphologyEx(mask,cv.MORPH_OPEN,kernel)\n #closing = cv.morphologyEx(mask,cv.MORPH_CLOSE,kernel)\n \n res = cv.bitwise_and(frame, frame, mask=opening)\n\n #rect,gray = cv.cvtColor(res, cv.COLOR_BGR2GRAY)\n \n M = cv.moments(dilation)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n # print(cX)\n # print(cY)\n cv.circle(res, (cX,cY), 10, (255, 255, 255), -1)\n cv.imshow(\"result\",res)\n\n #speed, calculated every .10 second\n frame_num = frame_num +1\n\n\n\n #smoothed = cv.filter2D(res, -1, kernel)\n #opening = cv.morphologyEx(smoothed,cv.MORPH_OPEN,kernel)\n # for x in erosion[0]:\n # for y in erosion[1]:\n # print(erosion[x,y])\n # if erosion[x,y]>255:\n # total_x = x + x\n # total_y = y + y\n # count = count + 1 \n # print(erosion[x,y])\n\n # for c in cnts:\n # M = cv.moments(c)\n # cX = int(M[\"m10\"] / M[\"m00\"])\n # cY = int(M[\"m01\"] / M[\"m00\"])\n # print(cX)\n # print(cY)\n #avg_x = total_x/count\n #avg_y = total_y/count\n\n #cv.circle(erosion, (avg_x,avg_y), 30, (255,255,255), thickness=4, lineType=8, shift=0)\n\n #cv.imshow(\"result\",res)\n\n #gray = cv.cvtColor(res, cv.COLOR_BGR2GRAY)\n\n #cv.imshow(\"gray\", gray)\n #circles = cv.HoughCircles(opening, cv.HOUGH_GRADIENT, 1, 50)\n #median = cv.medianBlur(res,5)\n\n # circles = np.uint16(np.around(circles))\n # if circles is not None:\n # for i in circles[0,:]:\n\t# #draw the circle in the output image, then draw a rectangle\n\t# #corresponding to the center of the circle\n\t# cv.circle(opening,(i[0],i[1]),i[2],(255,255,255),3)\n # # print(circles)\n # cv.imshow(\"result\",opening)\n # #cv.imshow(window_capture_name, hsv)\n #cv.imshow(window_detection_name, mask)\n #cv.imshow(\"frame\", frame)\n #cv.imshow(\"median\", median)\n #cv.imshow(\"smoothed\", smoothed)\n #cv.imshow(\"blurred\", blur)\n\n\n #cv.imshow(\"erosion\", erosion)\n #cv.imshow(\"dilation\", dilation)\n #cv.imshow(\"open\", opening)\n #cv.imshow(\"closing\", closing)\n\n #sleep(.5)\\\n\n key = cv.waitKey(5)\n if key == ord('s'):\n if count == 0:\n f= open(\"HSV_vals.txt\",\"w+\")\n f.write(\"[Low H, Low S, Low V] - [High H, High S, High V] \\r\\n\")\n f.write(\"[%d,%d,%d]-\" % (low_H, low_S, low_V))\n f.write(\"[%d,%d,%d]\\n\" % (high_H, high_S, high_V))\n f.close()\n print(\"File saved!\")\n count = count + 1\n else:\n f= open(\"HSV_vals.txt\",\"a\")\n f.write(\"[%d,%d,%d]-\" % (low_H, low_S, low_V))\n f.write(\"[%d,%d,%d]\\n\" % (high_H, high_S, high_V))\n f.close()\n count = count + 1\n print(\"File saved!\")\n elif key == ord('p'):\n print(low_H, low_S,low_V, \"\\n\")\n print(high_H, high_S, high_V,\"\\n\")\n elif key == ord('d'):\n os.remove(\"HSV_vals.txt\")\n elif key == ord('q') or key == 27:\n break\n "
] | [
[
"numpy.ones",
"numpy.argwhere"
]
] |
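The script in the row above thresholds video frames in HSV, cleans the mask morphologically, and locates the tracked marker with image moments, but it divides by `M["m00"]` without checking for an empty mask. A minimal self-contained sketch of that centroid step, using a synthetic frame and made-up HSV bounds (the original reads frames from a video and bounds from trackbars), with the zero-division guard added:

```python
import numpy as np
import cv2 as cv

# synthetic 240x320 frame with one magenta blob standing in for the tracked marker
frame = np.zeros((240, 320, 3), np.uint8)
cv.circle(frame, (160, 120), 30, (200, 0, 255), -1)          # BGR fill colour

hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv, (150, 200, 110), (180, 255, 255))     # assumed low/high HSV bounds

kernel = np.ones((5, 5), np.uint8)
mask = cv.morphologyEx(mask, cv.MORPH_OPEN, kernel)          # drop speckle noise

M = cv.moments(mask)
if M["m00"] > 0:                                             # guard the division the script omits
    cX, cY = int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])
    print("centroid:", cX, cY)                               # roughly (160, 120)
else:
    print("nothing passed the threshold")
```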
Srijay-lab/segment2tissue | [
"d3cd837f4381eba58df798800bdc5503bdf6db22"
] | [
"backup_files/segment2tissue_9d1g.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nimport os\nimport json\nimport glob\nimport random\nimport collections\nimport math\nimport time\n# visualize image\nimport matplotlib.pyplot as plt\n\n# enable eager execution\n# tf.compat.v1.enable_eager_execution()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--input_dir\", help=\"path to folder containing images\")\nparser.add_argument(\"--mode\", required=True, choices=[\"train\", \"test\", \"export\"])\nparser.add_argument(\"--output_dir\", required=True, help=\"where to put output files\")\nparser.add_argument(\"--seed\", type=int)\nparser.add_argument(\"--checkpoint\", default=None,\n help=\"directory with checkpoint to resume training from or use for testing\")\n\nparser.add_argument(\"--max_steps\", type=int, help=\"number of training steps (0 to disable)\")\nparser.add_argument(\"--max_epochs\", type=int, help=\"number of training epochs\")\nparser.add_argument(\"--summary_freq\", type=int, default=10000, help=\"update summaries every summary_freq steps\")\nparser.add_argument(\"--progress_freq\", type=int, default=50, help=\"display progress every progress_freq steps\")\nparser.add_argument(\"--trace_freq\", type=int, default=0, help=\"trace execution every trace_freq steps\")\nparser.add_argument(\"--display_freq\", type=int, default=1000, help=\"write current training images every display_freq steps\")\nparser.add_argument(\"--save_freq\", type=int, default=1000, help=\"save model every save_freq steps, 0 to disable\")\nparser.add_argument(\"--separable_conv\", action=\"store_true\", help=\"use separable convolutions in the generator\")\nparser.add_argument(\"--aspect_ratio\", type=float, default=1.0, help=\"aspect ratio of output images (width/height)\")\nparser.add_argument(\"--lab_colorization\", action=\"store_true\", help=\"split input image into brightness (A) and color (B)\")\nparser.add_argument(\"--batch_size\", type=int, default=1, help=\"number of images in batch\")\nparser.add_argument(\"--which_direction\", type=str, default=\"BtoA\", choices=[\"AtoB\", \"BtoA\"])\nparser.add_argument(\"--ngf\", type=int, default=64, help=\"number of generator filters in first conv layer\")\nparser.add_argument(\"--ndf\", type=int, default=64, help=\"number of discriminator filters in first conv layer\")\nparser.add_argument(\"--scale_size\", type=int, default=728, help=\"scale images to this size before cropping to 256x256\")\nparser.add_argument(\"--flip\", dest=\"flip\", action=\"store_true\", help=\"flip images horizontally\")\nparser.add_argument(\"--no_flip\", dest=\"flip\", action=\"store_false\", help=\"don't flip images horizontally\")\nparser.set_defaults(flip=True)\nparser.add_argument(\"--lr\", type=float, default=0.0002, help=\"initial learning rate for adam\")\nparser.add_argument(\"--beta1\", type=float, default=0.5, help=\"momentum term of adam\")\nparser.add_argument(\"--l1_weight\", type=float, default=100.0, help=\"weight on L1 term for generator gradient\")\nparser.add_argument(\"--gan_weight\", type=float, default=1.0, help=\"weight on GAN term for generator gradient\")\n\n# export options\nparser.add_argument(\"--output_filetype\", default=\"png\", choices=[\"png\", \"jpeg\"])\na = parser.parse_args()\n\nEPS = 1e-12\nCROP_SIZE = 256\n\nExamples = collections.namedtuple(\"Examples\", \"paths, inputs, targets, count, steps_per_epoch\")\nModel = 
collections.namedtuple(\"Model\",\n \"outputs, predict_real, predict_fake, discrim_loss, discrim_grads_and_vars, gen_loss_GAN, gen_loss_L1, gen_grads_and_vars, train\")\n\ndef preprocess(image):\n with tf.name_scope(\"preprocess\"):\n # [0, 1] => [-1, 1]\n return image * 2 - 1\n\n\ndef deprocess(image):\n with tf.name_scope(\"deprocess\"):\n # [-1, 1] => [0, 1]\n return (image + 1) / 2\n\n\ndef preprocess_lab(lab):\n with tf.name_scope(\"preprocess_lab\"):\n L_chan, a_chan, b_chan = tf.unstack(lab, axis=2)\n # L_chan: black and white with input range [0, 100]\n # a_chan/b_chan: color channels with input range ~[-110, 110], not exact\n # [0, 100] => [-1, 1], ~[-110, 110] => [-1, 1]\n return [L_chan / 50 - 1, a_chan / 110, b_chan / 110]\n\n\ndef deprocess_lab(L_chan, a_chan, b_chan):\n with tf.name_scope(\"deprocess_lab\"):\n # this is axis=3 instead of axis=2 because we process individual images but deprocess batches\n return tf.stack([(L_chan + 1) / 2 * 100, a_chan * 110, b_chan * 110], axis=3)\n\n\ndef augment(image, brightness):\n # (a, b) color channels, combine with L channel and convert to rgb\n a_chan, b_chan = tf.unstack(image, axis=3)\n L_chan = tf.squeeze(brightness, axis=3)\n lab = deprocess_lab(L_chan, a_chan, b_chan)\n rgb = lab_to_rgb(lab)\n return rgb\n\n\ndef discrim_conv(batch_input, out_channels, stride):\n padded_input = tf.pad(batch_input, [[0, 0], [1, 1], [1, 1], [0, 0]], mode=\"CONSTANT\")\n return tf.layers.conv2d(padded_input, out_channels, kernel_size=4, strides=(stride, stride), padding=\"valid\",\n kernel_initializer=tf.random_normal_initializer(0, 0.02))\n\n\ndef gen_conv(batch_input, out_channels):\n # [batch, in_height, in_width, in_channels] => [batch, out_height, out_width, out_channels]\n initializer = tf.random_normal_initializer(0, 0.02)\n if a.separable_conv:\n return tf.layers.separable_conv2d(batch_input, out_channels, kernel_size=4, strides=(2, 2), padding=\"same\",\n depthwise_initializer=initializer, pointwise_initializer=initializer)\n else:\n return tf.layers.conv2d(batch_input, out_channels, kernel_size=4, strides=(2, 2), padding=\"same\",\n kernel_initializer=initializer)\n\n\ndef gen_deconv(batch_input, out_channels):\n # [batch, in_height, in_width, in_channels] => [batch, out_height, out_width, out_channels]\n initializer = tf.random_normal_initializer(0, 0.02)\n if a.separable_conv:\n _b, h, w, _c = batch_input.shape\n resized_input = tf.image.resize_images(batch_input, [h * 2, w * 2],\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n return tf.layers.separable_conv2d(resized_input, out_channels, kernel_size=4, strides=(1, 1), padding=\"same\",\n depthwise_initializer=initializer, pointwise_initializer=initializer)\n else:\n return tf.layers.conv2d_transpose(batch_input, out_channels, kernel_size=4, strides=(2, 2), padding=\"same\",\n kernel_initializer=initializer)\n\n\ndef lrelu(x, a):\n with tf.name_scope(\"lrelu\"):\n # adding these together creates the leak part and linear part\n # then cancels them out by subtracting/adding an absolute value term\n # leak: a*x/2 - a*abs(x)/2\n # linear: x/2 + abs(x)/2\n\n # this block looks like it has 2 inputs on the graph unless we do this\n x = tf.identity(x)\n return (0.5 * (1 + a)) * x + (0.5 * (1 - a)) * tf.abs(x)\n\n\ndef batchnorm(inputs):\n return tf.layers.batch_normalization(inputs, axis=3, epsilon=1e-5, momentum=0.1, training=True,\n gamma_initializer=tf.random_normal_initializer(1.0, 0.02))\n\n\ndef check_image(image):\n assertion = tf.assert_equal(tf.shape(image)[-1], 3, message=\"image 
must have 3 color channels\")\n with tf.control_dependencies([assertion]):\n image = tf.identity(image)\n\n if image.get_shape().ndims not in (3, 4):\n raise ValueError(\"image must be either 3 or 4 dimensions\")\n\n # make the last dimension 3 so that you can unstack the colors\n shape = list(image.get_shape())\n shape[-1] = 3\n image.set_shape(shape)\n return image\n\n\ndef load_examples():\n\n if a.input_dir is None or not os.path.exists(a.input_dir):\n raise Exception(\"input_dir does not exist\")\n\n input_paths = glob.glob(os.path.join(a.input_dir, \"*.jpg\"))\n decode = tf.image.decode_jpeg\n if len(input_paths) == 0:\n input_paths = glob.glob(os.path.join(a.input_dir, \"*.png\"))\n decode = tf.image.decode_png\n\n if len(input_paths) == 0:\n raise Exception(\"input_dir contains no image files\")\n\n def get_name(path):\n name, _ = os.path.splitext(os.path.basename(path))\n return name\n\n # if the image names are numbers, sort by the value rather than asciibetically\n # having sorted inputs means that the outputs are sorted in test mode\n if all(get_name(path).isdigit() for path in input_paths):\n input_paths = sorted(input_paths, key=lambda path: int(get_name(path)))\n else:\n input_paths = sorted(input_paths)\n\n with tf.name_scope(\"load_images\"):\n path_queue = tf.train.string_input_producer(input_paths, shuffle=a.mode == \"train\")\n reader = tf.WholeFileReader()\n paths, contents = reader.read(path_queue)\n raw_input = decode(contents)\n raw_input = tf.image.convert_image_dtype(raw_input, dtype=tf.float32)\n\n assertion = tf.assert_equal(tf.shape(raw_input)[2], 3, message=\"image does not have 3 channels\")\n\n with tf.control_dependencies([assertion]):\n raw_input = tf.identity(raw_input)\n\n raw_input.set_shape([None, None, 3])\n\n # break apart image pair and move to range [-1, 1]\n width = tf.shape(raw_input)[1] # [height, width, channels]\n a_images = preprocess(raw_input[:, :width // 2, :])\n b_images = preprocess(raw_input[:, width // 2:, :])\n\n if a.which_direction == \"AtoB\":\n inputs, targets = [a_images, b_images]\n elif a.which_direction == \"BtoA\":\n inputs, targets = [b_images, a_images]\n else:\n raise Exception(\"invalid direction\")\n\n # synchronize seed for image operations so that we do the same operations to both\n # input and output images\n seed = random.randint(0, 2 ** 31 - 1)\n\n def transform(image):\n r = image\n if a.flip:\n r = tf.image.random_flip_left_right(r, seed=seed)\n r = tf.image.resize_images(r, [a.scale_size, a.scale_size], method=tf.image.ResizeMethod.AREA)\n return r\n\n with tf.name_scope(\"input_images\"):\n input_images = transform(inputs)\n\n with tf.name_scope(\"target_images\"):\n target_images = transform(targets)\n\n paths_batch, inputs_batch, targets_batch = tf.train.batch([paths, input_images, target_images],\n batch_size=a.batch_size)\n\n steps_per_epoch = int(math.ceil(len(input_paths) / a.batch_size))\n\n return Examples(\n paths=paths_batch,\n inputs=inputs_batch,\n targets=targets_batch,\n count=len(input_paths),\n steps_per_epoch=steps_per_epoch,\n )\n\n\ndef create_generator(generator_inputs, generator_outputs_channels):\n layers = []\n\n #Add Filter to detect edges\n filter_shape = [41,41,3,a.ngf]\n with tf.variable_scope(\"encoder_1\"):\n filter = tf.get_variable('edge_detector', filter_shape, initializer=tf.random_normal_initializer(stddev=0.02))\n strides = [1, 1, 1, 1]\n output = tf.nn.conv2d(generator_inputs, filter, strides=strides, padding='VALID')\n output = lrelu(output, 0.2)\n layers.append(output)\n\n # 
encoder_1: [batch, 256, 256, in_channels] => [batch, 128, 128, ngf]\n with tf.variable_scope(\"encoder_1\"):\n output = gen_conv(output, a.ngf)\n layers.append(output)\n\n layer_specs = [\n a.ngf * 2, # encoder_2: [batch, 128, 128, ngf] => [batch, 64, 64, ngf * 2]\n a.ngf * 4, # encoder_3: [batch, 64, 64, ngf * 2] => [batch, 32, 32, ngf * 4]\n a.ngf * 8, # encoder_4: [batch, 32, 32, ngf * 4] => [batch, 16, 16, ngf * 8]\n a.ngf * 8, # encoder_5: [batch, 16, 16, ngf * 8] => [batch, 8, 8, ngf * 8]\n a.ngf * 8, # encoder_6: [batch, 8, 8, ngf * 8] => [batch, 4, 4, ngf * 8]\n a.ngf * 8, # encoder_7: [batch, 4, 4, ngf * 8] => [batch, 2, 2, ngf * 8]\n a.ngf * 8, # encoder_8: [batch, 2, 2, ngf * 8] => [batch, 1, 1, ngf * 8]\n ]\n\n for out_channels in layer_specs:\n with tf.variable_scope(\"encoder_%d\" % (len(layers) + 1)):\n rectified = lrelu(layers[-1], 0.2)\n # [batch, in_height, in_width, in_channels] => [batch, in_height/2, in_width/2, out_channels]\n convolved = gen_conv(rectified, out_channels)\n output = batchnorm(convolved)\n layers.append(output)\n\n layer_specs = [\n (a.ngf * 8, 0.5), # decoder_8: [batch, 1, 1, ngf * 8] => [batch, 2, 2, ngf * 8 * 2]\n (a.ngf * 8, 0.5), # decoder_7: [batch, 2, 2, ngf * 8 * 2] => [batch, 4, 4, ngf * 8 * 2]\n (a.ngf * 8, 0.5), # decoder_6: [batch, 4, 4, ngf * 8 * 2] => [batch, 8, 8, ngf * 8 * 2]\n (a.ngf * 8, 0.0), # decoder_5: [batch, 8, 8, ngf * 8 * 2] => [batch, 16, 16, ngf * 8 * 2]\n (a.ngf * 4, 0.0), # decoder_4: [batch, 16, 16, ngf * 8 * 2] => [batch, 32, 32, ngf * 4 * 2]\n (a.ngf * 2, 0.0), # decoder_3: [batch, 32, 32, ngf * 4 * 2] => [batch, 64, 64, ngf * 2 * 2]\n (a.ngf, 0.0), # decoder_2: [batch, 64, 64, ngf * 2 * 2] => [batch, 128, 128, ngf * 2]\n ]\n\n num_encoder_layers = len(layers)\n for decoder_layer, (out_channels, dropout) in enumerate(layer_specs):\n skip_layer = num_encoder_layers - decoder_layer - 1\n with tf.variable_scope(\"decoder_%d\" % (skip_layer + 1)):\n if decoder_layer == 0:\n # first decoder layer doesn't have skip connections\n # since it is directly connected to the skip_layer\n input = layers[-1]\n else:\n input = tf.concat([layers[-1], layers[skip_layer]], axis=3)\n\n rectified = tf.nn.relu(input)\n # [batch, in_height, in_width, in_channels] => [batch, in_height*2, in_width*2, out_channels]\n output = gen_deconv(rectified, out_channels)\n output = batchnorm(output)\n\n if dropout > 0.0:\n output = tf.nn.dropout(output, keep_prob=1 - dropout)\n\n layers.append(output)\n\n # decoder_1: [batch, 128, 128, ngf * 2] => [batch, 256, 256, generator_outputs_channels]\n with tf.variable_scope(\"decoder_1\"):\n input = tf.concat([layers[-1], layers[1]], axis=3)\n rectified = tf.nn.relu(input)\n output = gen_deconv(rectified, generator_outputs_channels)\n output = tf.tanh(output)\n layers.append(output)\n\n return layers[-1]\n\n\nksize_rows = 296\nksize_cols = 296\nstrides_rows = 236\nstrides_cols = 236\nnum_patches = int(a.scale_size/strides_rows)\n#num_patches = 3 #int(a.scale_size/200)\nksizes = [1, ksize_rows, ksize_cols, 1]\nksizes_output = [1, 256, 256, 1]\nstrides = [1, strides_rows, strides_cols, 1]\nrates = [1, 1, 1, 1]\npadding='VALID'\n\n\ndef extract_patches(x, ksizes, strides, rates):\n return tf.extract_image_patches(\n x,\n ksizes, strides, rates,\n padding=\"VALID\"\n )\n\n\ndef extract_patches_inverse(x, y):\n _x = tf.zeros_like(x)\n _y = extract_patches(_x, ksizes_output, strides, rates)\n grad = tf.gradients(_y, _x)[0]\n # Divide by grad, to \"average\" together the overlapping patches\n # otherwise they would 
simply sum up\n return tf.gradients(_y, _x, grad_ys=y)[0] / grad\n\n\ndef create_model(inputs, targets):\n\n #Pad inputs to make shape to handle edge patches, so as we can give context as input to generator (context around target patch)\n inputs_bounded = tf.image.pad_to_bounding_box(inputs, 20, 20, a.scale_size+40, a.scale_size+40)\n\n out_channels = int(targets.get_shape()[-1])\n\n def create_discriminator(discrim_inputs, discrim_targets):\n n_layers = 5\n layers = []\n\n # 2x [batch, height, width, in_channels] => [batch, height, width, in_channels * 2]\n input = tf.concat([discrim_inputs, discrim_targets], axis=3)\n\n # layer_1: [batch, 256, 256, in_channels * 2] => [batch, 128, 128, ndf]\n with tf.variable_scope(\"layer_1\"):\n convolved = discrim_conv(input, a.ndf, stride=2)\n rectified = lrelu(convolved, 0.2)\n layers.append(rectified)\n\n # layer_2: [batch, 128, 128, ndf] => [batch, 64, 64, ndf * 2]\n # layer_3: [batch, 64, 64, ndf * 2] => [batch, 32, 32, ndf * 4]\n # layer_4: [batch, 32, 32, ndf * 4] => [batch, 31, 31, ndf * 8]\n for i in range(n_layers):\n with tf.variable_scope(\"layer_%d\" % (len(layers) + 1)):\n out_channels = a.ndf * min(2 ** (i + 1), 8)\n stride = 1 if i == n_layers - 1 else 2 # last layer here has stride 1\n convolved = discrim_conv(layers[-1], out_channels, stride=stride)\n normalized = batchnorm(convolved)\n rectified = lrelu(normalized, 0.2)\n layers.append(rectified)\n\n # layer_5: [batch, 31, 31, ndf * 8] => [batch, 30, 30, 1]\n with tf.variable_scope(\"layer_%d\" % (len(layers) + 1)):\n convolved = discrim_conv(rectified, out_channels=1, stride=1)\n output = tf.sigmoid(convolved)\n layers.append(output)\n print(layers)\n return layers[-1]\n\n #Extract Patches\n with tf.variable_scope(\"extract_patches\"):\n inputs_patches = extract_patches(inputs_bounded, ksizes, strides, rates)\n\n with tf.name_scope(\"patches_generator\"):\n with tf.variable_scope(\"generator\", reuse=tf.AUTO_REUSE):\n output_patches = []\n patch_size_len = 256*256*3\n #Get output from each patch via same generator\n for i in range(0, num_patches):\n for j in range(0, num_patches):\n patch = inputs_patches[0, i, j,]\n #patch_size_len = int(patch.get_shape()[0]) #defined above\n # reshape\n patch = tf.reshape(patch, [ksize_rows, ksize_cols, 3])\n patch = tf.expand_dims(patch,0)\n patch_output = create_generator(patch, out_channels)\n output_patches.append(tf.reshape(patch_output,[patch_size_len]))\n output_patches = tf.stack(output_patches)\n output_patches = tf.reshape(output_patches, [1, num_patches, num_patches, patch_size_len])\n\n #Join all patches back\n k = tf.constant(0.1, shape=[1, a.scale_size, a.scale_size, 3])\n outputs = extract_patches_inverse(k, output_patches)\n\n # create two copies of discriminator, one for real pairs and one for fake pairs\n # they share the same underlying variables\n with tf.name_scope(\"real_discriminator\"):\n with tf.variable_scope(\"discriminator\"):\n # 2x [batch, height, width, channels] => [batch, 30, 30, 1]\n predict_real = create_discriminator(inputs, targets)\n\n with tf.name_scope(\"fake_discriminator\"):\n with tf.variable_scope(\"discriminator\", reuse=True):\n # 2x [batch, height, width, channels] => [batch, 30, 30, 1]\n predict_fake = create_discriminator(inputs, outputs)\n\n with tf.name_scope(\"discriminator_loss\"):\n # minimizing -tf.log will try to get inputs to 1\n # predict_real => 1\n # predict_fake => 0\n discrim_loss = tf.reduce_mean(-(tf.log(tf.clip_by_value((predict_real + EPS),1e-12,1.0)) + tf.log(tf.clip_by_value((1 - 
predict_fake + EPS),1e-12,1.0))))\n\n with tf.name_scope(\"generator_loss\"):\n # predict_fake => 1\n # abs(targets - outputs) => 0\n gen_loss_GAN = tf.reduce_mean(-tf.log(tf.clip_by_value((predict_fake + EPS),1e-12,1.0)))\n gen_loss_L1 = tf.reduce_mean(tf.abs(targets - outputs))\n gen_loss = gen_loss_GAN * a.gan_weight + gen_loss_L1 * a.l1_weight\n\n with tf.name_scope(\"discriminator_train\"):\n discrim_tvars = [var for var in tf.trainable_variables() if var.name.startswith(\"discriminator\")]\n discrim_optim = tf.train.AdamOptimizer(a.lr, a.beta1)\n discrim_grads_and_vars = discrim_optim.compute_gradients(discrim_loss, var_list=discrim_tvars)\n discrim_train = discrim_optim.apply_gradients(discrim_grads_and_vars)\n\n with tf.name_scope(\"generator_train\"):\n with tf.control_dependencies([discrim_train]):\n gen_tvars = [var for var in tf.trainable_variables() if var.name.startswith(\"generator\")]\n gen_optim = tf.train.AdamOptimizer(a.lr, a.beta1)\n gen_grads_and_vars = gen_optim.compute_gradients(gen_loss, var_list=gen_tvars)\n gen_train = gen_optim.apply_gradients(gen_grads_and_vars)\n\n ema = tf.train.ExponentialMovingAverage(decay=0.99)\n update_losses = ema.apply([discrim_loss, gen_loss_GAN, gen_loss_L1] + 8*[discrim_loss])\n global_step = tf.train.get_or_create_global_step()\n incr_global_step = tf.assign(global_step, global_step + 1)\n\n return Model(\n predict_real=predict_real,\n predict_fake=predict_fake,\n discrim_loss=ema.average(discrim_loss),\n discrim_grads_and_vars=discrim_grads_and_vars,\n gen_loss_GAN=ema.average(gen_loss_GAN),\n gen_loss_L1=ema.average(gen_loss_L1),\n gen_grads_and_vars=gen_grads_and_vars,\n outputs=outputs,\n train=tf.group(update_losses, incr_global_step, gen_train),\n )\n\n\ndef save_images(fetches, step=None):\n image_dir = os.path.join(a.output_dir, \"images\")\n if not os.path.exists(image_dir):\n os.makedirs(image_dir)\n\n filesets = []\n k=1\n for i, in_path in enumerate(fetches[\"paths\"]):\n k+=1\n if(k>10):\n break\n name, _ = os.path.splitext(os.path.basename(in_path.decode(\"utf8\")))\n fileset = {\"name\": name, \"step\": step}\n for kind in [\"inputs\", \"outputs\", \"targets\"]:\n filename = name + \"-\" + kind + \".png\"\n if step is not None:\n filename = \"%08d-%s\" % (step, filename)\n fileset[kind] = filename\n out_path = os.path.join(image_dir, filename)\n contents = fetches[kind][i]\n with open(out_path, \"wb\") as f:\n f.write(contents)\n filesets.append(fileset)\n return filesets\n\n\ndef append_index(filesets, step=False):\n index_path = os.path.join(a.output_dir, \"index.html\")\n if os.path.exists(index_path):\n index = open(index_path, \"a\")\n else:\n index = open(index_path, \"w\")\n index.write(\"<html><body><table><tr>\")\n if step:\n index.write(\"<th>step</th>\")\n index.write(\"<th>name</th><th>input</th><th>output</th><th>target</th></tr>\")\n\n for fileset in filesets:\n index.write(\"<tr>\")\n\n if step:\n index.write(\"<td>%d</td>\" % fileset[\"step\"])\n index.write(\"<td>%s</td>\" % fileset[\"name\"])\n\n for kind in [\"inputs\", \"outputs\", \"targets\"]:\n index.write(\"<td><img src='images/%s'></td>\" % fileset[kind])\n\n index.write(\"</tr>\")\n return index_path\n\n\ndef main():\n if a.seed is None:\n a.seed = random.randint(0, 2 ** 31 - 1)\n\n tf.set_random_seed(a.seed)\n np.random.seed(a.seed)\n random.seed(a.seed)\n\n if not os.path.exists(a.output_dir):\n os.makedirs(a.output_dir)\n\n if a.mode == \"test\" or a.mode == \"export\":\n if a.checkpoint is None:\n raise Exception(\"checkpoint required for 
test mode\")\n\n # load some options from the checkpoint\n options = {\"which_direction\", \"ngf\", \"ndf\", \"lab_colorization\"}\n with open(os.path.join(a.checkpoint, \"options.json\")) as f:\n for key, val in json.loads(f.read()).items():\n if key in options:\n print(\"loaded\", key, \"=\", val)\n setattr(a, key, val)\n # disable these features in test mode\n #a.scale_size = CROP_SIZE\n a.flip = False\n\n for k, v in a._get_kwargs():\n print(k, \"=\", v)\n\n with open(os.path.join(a.output_dir, \"options.json\"), \"w\") as f:\n f.write(json.dumps(vars(a), sort_keys=True, indent=4))\n\n if a.mode == \"export\":\n # export the generator to a meta graph that can be imported later for standalone generation\n if a.lab_colorization:\n raise Exception(\"export not supported for lab_colorization\")\n\n input = tf.placeholder(tf.string, shape=[1])\n input_data = tf.decode_base64(input[0])\n input_image = tf.image.decode_png(input_data)\n\n # remove alpha channel if present\n input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 4), lambda: input_image[:, :, :3], lambda: input_image)\n # convert grayscale to RGB\n input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 1), lambda: tf.image.grayscale_to_rgb(input_image),\n lambda: input_image)\n\n input_image = tf.image.convert_image_dtype(input_image, dtype=tf.float32)\n input_image.set_shape([CROP_SIZE, CROP_SIZE, 3])\n batch_input = tf.expand_dims(input_image, axis=0)\n\n with tf.variable_scope(\"generator\"):\n batch_output = deprocess(create_generator(preprocess(batch_input), 3))\n\n output_image = tf.image.convert_image_dtype(batch_output, dtype=tf.uint8)[0]\n if a.output_filetype == \"png\":\n output_data = tf.image.encode_png(output_image)\n elif a.output_filetype == \"jpeg\":\n output_data = tf.image.encode_jpeg(output_image, quality=80)\n else:\n raise Exception(\"invalid filetype\")\n output = tf.convert_to_tensor([tf.encode_base64(output_data)])\n\n key = tf.placeholder(tf.string, shape=[1])\n inputs = {\n \"key\": key.name,\n \"input\": input.name\n }\n tf.add_to_collection(\"inputs\", json.dumps(inputs))\n outputs = {\n \"key\": tf.identity(key).name,\n \"output\": output.name,\n }\n tf.add_to_collection(\"outputs\", json.dumps(outputs))\n\n init_op = tf.global_variables_initializer()\n restore_saver = tf.train.Saver()\n export_saver = tf.train.Saver()\n\n with tf.Session() as sess:\n sess.run(init_op)\n print(\"loading model from checkpoint\")\n checkpoint = tf.train.latest_checkpoint(a.checkpoint)\n restore_saver.restore(sess, checkpoint)\n print(\"exporting model\")\n export_saver.export_meta_graph(filename=os.path.join(a.output_dir, \"export.meta\"))\n export_saver.save(sess, os.path.join(a.output_dir, \"export\"), write_meta_graph=False)\n\n return\n\n examples = load_examples()\n\n # inputs and targets are [batch_size, height, width, channels]\n model = create_model(examples.inputs, examples.targets)\n\n # undo colorization splitting on images that we use for display/output\n if a.lab_colorization:\n if a.which_direction == \"AtoB\":\n # inputs is brightness, this will be handled fine as a grayscale image\n # need to augment targets and outputs with brightness\n targets = augment(examples.targets, examples.inputs)\n outputs = augment(model.outputs, examples.inputs)\n # inputs can be deprocessed normally and handled as if they are single channel\n # grayscale images\n inputs = deprocess(examples.inputs)\n elif a.which_direction == \"BtoA\":\n # inputs will be color channels only, get brightness from targets\n inputs = 
augment(examples.inputs, examples.targets)\n targets = deprocess(examples.targets)\n outputs = deprocess(model.outputs)\n else:\n raise Exception(\"invalid direction\")\n else:\n inputs = deprocess(examples.inputs)\n targets = deprocess(examples.targets)\n if(a.mode == \"train\"):\n outputs = deprocess(model.outputs)\n else:\n outputs = deprocess(model.outputs)\n\n\n def convert(image):\n if a.aspect_ratio != 1.0:\n # upscale to correct aspect ratio\n size = [CROP_SIZE, int(round(CROP_SIZE * a.aspect_ratio))]\n image = tf.image.resize_images(image, size=size, method=tf.image.ResizeMethod.BICUBIC)\n\n return tf.image.convert_image_dtype(image, dtype=tf.uint8, saturate=True)\n\n # reverse any processing on images so they can be written to disk or displayed to user\n with tf.name_scope(\"convert_inputs\"):\n converted_inputs = convert(inputs)\n\n with tf.name_scope(\"convert_targets\"):\n converted_targets = convert(targets)\n\n with tf.name_scope(\"convert_outputs\"):\n converted_outputs = convert(outputs)\n\n with tf.name_scope(\"encode_images\"):\n display_fetches = {\n \"paths\": examples.paths,\n \"inputs\": tf.map_fn(tf.image.encode_png, converted_inputs, dtype=tf.string, name=\"input_pngs\"),\n \"targets\": tf.map_fn(tf.image.encode_png, converted_targets, dtype=tf.string, name=\"target_pngs\"),\n \"outputs\": tf.map_fn(tf.image.encode_png, converted_outputs, dtype=tf.string, name=\"output_pngs\"),\n }\n\n # summaries\n '''\n with tf.name_scope(\"inputs_summary\"):\n tf.summary.image(\"inputs\", converted_inputs)\n\n with tf.name_scope(\"targets_summary\"):\n tf.summary.image(\"targets\", converted_targets)\n\n with tf.name_scope(\"outputs_summary\"):\n tf.summary.image(\"outputs\", converted_outputs)\n\n with tf.name_scope(\"predict_real_summary\"):\n tf.summary.image(\"predict_real\", tf.image.convert_image_dtype(model.predict_real, dtype=tf.uint8))\n\n with tf.name_scope(\"predict_fake_summary\"):\n tf.summary.image(\"predict_fake\", tf.image.convert_image_dtype(model.predict_fake, dtype=tf.uint8))\n '''\n\n tf.summary.scalar(\"discriminator_loss\", model.discrim_loss)\n tf.summary.scalar(\"generator_loss_GAN\", model.gen_loss_GAN)\n tf.summary.scalar(\"generator_loss_L1\", model.gen_loss_L1)\n\n for var in tf.trainable_variables():\n tf.summary.histogram(var.op.name + \"/values\", var)\n\n for grad, var in model.discrim_grads_and_vars + model.gen_grads_and_vars:\n tf.summary.histogram(var.op.name + \"/gradients\", grad)\n\n with tf.name_scope(\"parameter_count\"):\n parameter_count = tf.reduce_sum([tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])\n\n saver = tf.train.Saver(max_to_keep=1)\n\n logdir = a.output_dir if (a.trace_freq > 0 or a.summary_freq > 0) else None\n sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=0, saver=None)\n with sv.managed_session() as sess:\n print(\"parameter_count =\", sess.run(parameter_count))\n\n if a.checkpoint is not None:\n print(\"loading model from checkpoint\")\n print(a.checkpoint)\n checkpoint = tf.train.latest_checkpoint(a.checkpoint)\n saver.restore(sess, checkpoint)\n\n max_steps = 2 ** 32\n\n if a.max_epochs is not None:\n max_steps = examples.steps_per_epoch * a.max_epochs\n if a.max_steps is not None:\n max_steps = a.max_steps\n\n if a.mode == \"test\":\n # testing\n # at most, process the test data once\n start = time.time()\n max_steps = min(examples.steps_per_epoch, max_steps)\n for step in range(max_steps):\n results = sess.run(display_fetches)\n filesets = save_images(results)\n for i, f in 
enumerate(filesets):\n print(\"evaluated image\", f[\"name\"])\n index_path = append_index(filesets)\n print(\"wrote index at\", index_path)\n print(\"rate\", (time.time() - start) / max_steps)\n else:\n # training\n start = time.time()\n\n for step in range(max_steps):\n def should(freq):\n return freq > 0 and ((step + 1) % freq == 0 or step == max_steps - 1)\n\n options = None\n run_metadata = None\n if should(a.trace_freq):\n options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n\n fetches = {\n \"train\": model.train,\n \"global_step\": sv.global_step,\n }\n\n if should(a.progress_freq):\n fetches[\"discrim_loss\"] = model.discrim_loss\n fetches[\"gen_loss_GAN\"] = model.gen_loss_GAN\n fetches[\"gen_loss_L1\"] = model.gen_loss_L1\n\n if should(a.summary_freq):\n fetches[\"summary\"] = sv.summary_op\n\n if should(a.display_freq):\n fetches[\"display\"] = display_fetches\n\n results = sess.run(fetches, options=options, run_metadata=run_metadata)\n\n if should(a.summary_freq):\n print(\"recording summary\")\n sv.summary_writer.add_summary(results[\"summary\"], results[\"global_step\"])\n\n if should(a.display_freq):\n print(\"saving display images\")\n filesets = save_images(results[\"display\"], step=results[\"global_step\"])\n append_index(filesets, step=True)\n\n if should(a.trace_freq):\n print(\"recording trace\")\n sv.summary_writer.add_run_metadata(run_metadata, \"step_%d\" % results[\"global_step\"])\n\n if should(a.progress_freq):\n # global_step will have the correct step count if we resume from a checkpoint\n train_epoch = math.ceil(results[\"global_step\"] / examples.steps_per_epoch)\n train_step = (results[\"global_step\"] - 1) % examples.steps_per_epoch + 1\n rate = (step + 1) * a.batch_size / (time.time() - start)\n remaining = (max_steps - step) * a.batch_size / rate\n print(\"progress epoch %d step %d image/sec %0.1f remaining %dm\" % (\n train_epoch, train_step, rate, remaining / 60))\n print(\"discrim_loss\", results[\"discrim_loss\"])\n print(\"gen_loss_GAN\", results[\"gen_loss_GAN\"])\n print(\"gen_loss_L1\", results[\"gen_loss_L1\"])\n\n if should(a.save_freq):\n print(\"saving model\")\n saver.save(sess, os.path.join(a.output_dir, \"model\"), global_step=sv.global_step)\n\n if sv.should_stop():\n break\n\n\nmain()"
] | [
[
"tensorflow.nn.conv2d",
"tensorflow.decode_base64",
"tensorflow.image.random_flip_left_right",
"tensorflow.group",
"tensorflow.encode_base64",
"tensorflow.extract_image_patches",
"tensorflow.gradients",
"tensorflow.reshape",
"tensorflow.zeros_like",
"tensorflow.clip_by_value",
"tensorflow.stack",
"tensorflow.control_dependencies",
"tensorflow.tanh",
"tensorflow.global_variables_initializer",
"tensorflow.identity",
"tensorflow.random_normal_initializer",
"tensorflow.set_random_seed",
"tensorflow.trainable_variables",
"tensorflow.shape",
"tensorflow.train.latest_checkpoint",
"tensorflow.concat",
"tensorflow.sigmoid",
"tensorflow.summary.histogram",
"tensorflow.train.Saver",
"tensorflow.constant",
"tensorflow.variable_scope",
"tensorflow.squeeze",
"tensorflow.image.pad_to_bounding_box",
"tensorflow.pad",
"tensorflow.train.Supervisor",
"tensorflow.layers.conv2d_transpose",
"tensorflow.nn.dropout",
"tensorflow.abs",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.relu",
"tensorflow.train.batch",
"tensorflow.summary.scalar",
"tensorflow.expand_dims",
"tensorflow.image.convert_image_dtype",
"tensorflow.Session",
"tensorflow.layers.separable_conv2d",
"tensorflow.image.encode_jpeg",
"tensorflow.train.string_input_producer",
"tensorflow.map_fn",
"tensorflow.layers.conv2d",
"tensorflow.name_scope",
"tensorflow.placeholder",
"tensorflow.image.encode_png",
"tensorflow.RunOptions",
"tensorflow.image.resize_images",
"tensorflow.unstack",
"tensorflow.assign",
"numpy.random.seed",
"tensorflow.WholeFileReader",
"tensorflow.RunMetadata",
"tensorflow.image.decode_png",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.train.get_or_create_global_step",
"tensorflow.image.grayscale_to_rgb"
]
] |
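The generator in the row above runs on overlapping 296x296 patches and stitches the 256x256 outputs back together by dividing through a gradient of `tf.extract_image_patches`, which amounts to averaging every pixel over the patches that cover it. A small numpy sketch of that averaging idea, with toy sizes rather than the 296/256/236 values used in the script:

```python
import numpy as np

def reassemble(patches, positions, out_shape):
    """Average overlapping patches back into one image."""
    canvas = np.zeros(out_shape, dtype=np.float64)
    coverage = np.zeros(out_shape, dtype=np.float64)
    for patch, (r, c) in zip(patches, positions):
        h, w = patch.shape[:2]
        canvas[r:r + h, c:c + w] += patch        # sum contributions
        coverage[r:r + h, c:c + w] += 1.0        # count how many patches touch each pixel
    coverage[coverage == 0] = 1.0                # leave uncovered pixels at zero
    return canvas / coverage

image = np.arange(36, dtype=np.float64).reshape(6, 6)
positions = [(0, 0), (0, 2), (2, 0), (2, 2)]     # 4x4 patches with stride 2 -> overlaps
patches = [image[r:r + 4, c:c + 4] for r, c in positions]
restored = reassemble(patches, positions, image.shape)
print(np.allclose(restored, image))              # True: averaging recovers the original
```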
hsspratt/Nott-Hawkeye1 | [
"178f4f0fef62e8699f6057d9d50adfd61a851047"
] | [
"VectorFunctions.py"
] | [
"# %% Imports\n\nfrom sympy import Matrix, init_printing\nimport sympy as sym\nimport sympy.printing as printing\nfrom sympy import Integral, Matrix, pi, pprint\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nimport functions as f\n\n# %% All Functions and Definitions relating to working with vectors\n\ndef division(n1, n2):\n try:\n return n1/n2\n except ZeroDivisionError:\n return 0\n\ndef normalised(v):\n \"\"\"Normalises vector - ensure vector direction is of unit length\n\n Parameters\n ----------\n v : np.array (N,1)\n np.array of vector direction\n\n Returns\n -------\n norm_v : np.array (N,1)\n Normalised vector - of unit length 1\n \"\"\" \n\n norm_v = v / (np.sqrt(np.sum(v**2)))\n\n return norm_v\n\n\ndef EqOfLine(r0,r1):\n \"\"\"Creates a vector equation of the line in the form r = vt + r_0\n\n Parameters\n ----------\n r0 : np.array (N,1)\n Numpy array of a origin point (can also be a point which lies on the line)\n r1 : np.array (N,1)\n Numpy array of a point which lies on the line\n \n - N is the dimensions of the vector\n\n Returns\n -------\n vector : np.array (N,1)\n vector that indicates the direction of the line - which is normalised\n r0 : np.array (N,1)\n Rereturns r0, so that in can be easily found\n \"\"\" \n vector = normalised(r1 - r0)\n r0 = r0\n\n return vector, r0\n\ndef FindShortestDistance(r1, r2, v1, v2):\n \"\"\"Finds the shortest distance between two vectors\n\n Parameters\n ----------\n r1 : np.array (N,1)\n Numpy array of a point which lies on the line 1\n r2 : np.array (N,1)\n Numpy array of a point which lies on the line 2\n v1 : np.array (N,1)\n vector that indicates the direction of the line 1\n v2 : np.array (N,1)\n vector that indicates the direction of the line 2\n\n Returns\n -------\n distance : float\n simply the shortest distance between the two lines\n \"\"\" \n n = np.cross(v1,v2)\n\n if n.all == 0:\n # folling eq only works IF parallel\n distance = np.sqrt((np.cross((r2-r1),v1))**2/(np.linalg.norm(v2,axis=0))**2)\n print(\"The two lines are parallel\")\n else:\n # folling eq only works if NOT parallel\n distance = (np.dot(n,(r1-r2)))/(np.linalg.norm(n,axis=0))\n print(\"The two lines are not parallel\")\n\n return distance\n\ndef LocShortestDistance(r1, r2, v1, v2):\n \"\"\"Finds the location of the shortest distance between two lines on the respective lines\n\n Parameters\n ----------\n r1 : np.array (N,1)\n Numpy array of a point which lies on the line 1\n r2 : np.array (N,1)\n Numpy array of a point which lies on the line 2\n v1 : np.array (N,1)\n vector that indicates the direction of the line 1\n v2 : np.array (N,1)\n vector that indicates the direction of the line 2\n\n Returns\n -------\n c1 : np.array (N,1)\n Location on line 1 where it is at a shortest distance to line 2\n c2 : np.array (N,1)\n Location on line 2 where it is at a shortest distance to line 1\n dist : float\n simply the shortest distance between the two lines \n\n - should be noted dist is less accurate then dedicated function FindShortestDistance\n\n \"\"\" \n\n n = np.cross(v1,v2)\n\n n1 = np.cross(v1, n)\n n2 = np.cross(v2, n)\n\n c1 = r1 + v1*(division(np.dot((r2-r1),n2),(np.dot(v1,n2))))\n c2 = r2 + v2*(division(np.dot((r1-r2),n1),(np.dot(v2,n1))))\n\n dist = np.linalg.norm(c1-c2,axis=0)\n\n return [c1, c2, dist]\n\ndef Polar2Vector(r0, angle, axis, camera):\n \"\"\"Converts a polar line - 1D line in 2D space (a plane) into a 1D vector line in 3D space\n\n Parameters\n ----------\n r0 : np.array (N,1)\n Numpy array of a 
origin point (can also be a point which lies on the line)\n angle : radians (-π ≤ θ ≤ π)\n Angle from a line normal to the lens to the angle made with the pixel\n axis : string (\"xz\" or \"yz\")\n Specify the axis in question - \"xs\" is the horizontal axis and \"yz\" is the vertical\n camera : string (\"1\" or \"2\")\n Specify the camera in question, \"1\" is at the orgin and \"2\" is at (x1, 0, z1)\n\n Returns\n -------\n r0 : np.array (N,1)\n Numpy array of a origin point (can also be a point which lies on the line)\n vector : np.array (N,1)\n vector that indicates the direction of the line\n \"\"\" \n if np.any(angle) == 0:\n angle = 0.001\n\n if camera == \"1\":\n if axis == \"xz\":\n vector = np.array([[1, 0, np.tan(angle)]])\n return r0, vector\n if axis == \"yz\":\n vector = np.array([[0, 1, np.tan(angle)]])\n return r0, vector\n else:\n print(\"Axis not specified correctly\")\n if camera==\"2\":\n if axis == \"xz\":\n vector = np.array([[1, 0, np.tan(angle)]])\n return r0, vector\n if axis == \"yz\":\n vector = np.array([[0, 1, np.tan(angle)]])\n return r0, vector\n else:\n print(\"Axis not specified correctly\")\n\n\ndef sph2cart(r, theta, phi, cameras_r0):\n \"\"\"Converts spherical co-ordinates (r,θ,φ) into cartesian (x,y,z)\n\n Parameters\n ----------\n r : float\n radial distance\n theta : radians\n Angle from a line normal to the lens to the angle made with the pixel (xz plane)\n phi : radians\n Angle from a line normal to the lens to the angle made with the pixel (yz plane)\n\n Returns\n -------\n [x,y,z] : np.array of floats\n Denotes the x, y and z co-ordinate\n \"\"\" \n x0, y0, z0 = cameras_r0\n\n x = r * np.sin(theta) * np.cos(phi) + x0\n y = r * np.sin(theta) * np.sin(phi) + y0\n z = r * np.cos(theta) + z0\n\n return x,y,z\n\ndef new_sph2cart(r, theta, phi, cameras_r0):\n \"\"\"Converts spherical co-ordinates (r,θ,φ) into cartesian (x,y,z)\n\n Parameters\n ----------\n r : float\n radial distance\n theta : radians\n Angle from a line normal to the lens to the angle made with the pixel (xz plane)\n phi : radians\n Angle from a line normal to the lens to the angle made with the pixel (yz plane)\n\n Returns\n -------\n [x,y,z] : np.array of floats\n Denotes the x, y and z co-ordinate\n \"\"\" \n x0, y0, z0 = np.array([0,0,0]) # cameras_r0\n\n x = r * np.cos(theta) * np.sin(phi) + x0\n z = r * np.sin(theta) * np.sin(phi) + z0\n y = r * np.cos(theta) + y0\n\n return x,y,z\n\ndef Find3DPosition(cameras_r0, cameras_angles, delta, args):\n\n camera1_r0 = cameras_r0[0]\n camera2_r0 = cameras_r0[1]\n\n dt_A, dt_B, dp_B = delta\n\n camera1_theta = (np.pi/2 + dt_A) - cameras_angles[0]\n camera1_phi = (np.pi/2) - cameras_angles[1]\n camera2_theta = (np.pi/2 + dt_B) - cameras_angles[2]\n camera2_phi = cameras_angles[3] - dp_B\n\n _,camera1_vector_xz = Polar2Vector(camera1_r0, camera1_theta, axis=\"xz\", camera=\"1\")\n _,camera1_vector_yz = Polar2Vector(camera1_r0, camera1_phi, axis=\"yz\", camera=\"1\")\n _,camera2_vector_xz = Polar2Vector(camera2_r0, camera2_theta, axis=\"xz\", camera=\"2\")\n _,camera2_vector_yz = Polar2Vector(camera2_r0, camera2_phi, axis=\"yz\", camera=\"2\")\n\n camera1_cart = np.array(new_sph2cart(1, camera1_theta, camera1_phi, camera1_r0))\n camera2_cart = np.array(new_sph2cart(1, camera2_theta, camera2_phi, camera2_r0))\n\n camera1_vector = normalised(camera1_cart - camera1_r0)\n camera2_vector = normalised(camera2_cart - camera2_r0)\n\n tlim = np.max(cameras_r0)*2\n t = np.linspace(0,tlim,5000)\n \n camera1_line = t*np.array([camera1_vector]).T[0]\n 
camera2_line = t*np.array([camera2_vector]).T[0]\n\n if args == \"Line\":\n return [camera1_line, camera2_line], [camera1_vector, camera2_vector]\n\n c1, c2, dist = LocShortestDistance(camera1_r0, camera2_r0, camera1_vector, camera2_vector)\n # rel_position_shortest = np.vstack((c1, c2))\n cart_position = (c1 + c2) / 2.0\n print(cart_position)\n #print(rel_position_shortest)\n\n \"\"\"Takes the inputs from the cameras in terms of positions and angles\n generate a 3D position of the ball\n\n Parameters\n ----------\n cameras_r0 : turple of np.arrays\n Two numpy arrays for the two initial positions of the cameras\n cameras_angles : turple of np.arrays\n Four angles theta and phi from camera 1 and the same from camera 2\n args : string\n if \"lines\" chosen it returns the whole line instead of the individual point\n\n Returns\n -------\n cart_position\n Returns the 3D position of the ball in cartesian co-ordinates\n \"\"\" \n\n return camera1_vector, camera2_vector, c1, c2, cart_position\n\n# def Camera2_Camera1_axis(phi,z1,x1):\n# \"\"\"Converts the co-ordinates from that of camera 2 to camera 1 - therefore one co-ordinate system\n\n# Parameters\n# ----------\n# phi : radian\n# Angle from a line normal to the lens to the angle made with the pixel\n# z1 : float\n# z position of camera 2\n# x1 : [type]\n# x position of camera 2\n\n# Returns\n# -------\n# r0 : np.array\n# [description]\n# \"\"\" \n# r0 = np.array([z1 - np.tan(phi)*x1, 0])\n# vector = np.array([[np.tan(phi), 1]])\n# return r0, vector\n\n# def Camera1toCamera2_1(phi,z1,x1):\n# r0 = np.array([x1, z1])\n# vector = np.array([[np.tan(phi), 1]])\n# return r0, vector\n\n\n\n# %%\n\n# %%\n\ndef p4(p1, p2, p3):\n x1, y1 = p1\n x2, y2 = p2\n x3, y3 = p3\n dx, dy = x2-x1, y2-y1\n det = dx*dx + dy*dy\n a = (dy*(y3-y1)+dx*(x3-x1))/det\n x= x1+a*dx, y1+a*dy\n # print(x)\n if x[0]<x1 or x[1]<y1:\n return p1\n elif x[0]>x2 or x[1]>y2:\n return p2\n else:\n return x\n"
] | [
[
"numpy.max",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"numpy.sin",
"numpy.sum",
"numpy.tan",
"numpy.any",
"numpy.cos",
"numpy.linspace",
"numpy.cross"
]
] |
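`FindShortestDistance` and `LocShortestDistance` in the row above implement the standard skew-line construction: with n = v1 x v2, the shortest distance is |(r2 - r1) . n| / |n|, and the closest points follow from projections onto v1 x n and v2 x n. A compact numpy sketch of the same construction with a worked example (the two input lines are arbitrary):

```python
import numpy as np

def closest_points(r1, v1, r2, v2):
    n = np.cross(v1, v2)
    if np.allclose(n, 0):                         # parallel lines: distance only
        d = np.linalg.norm(np.cross(r2 - r1, v1)) / np.linalg.norm(v1)
        return None, None, d
    n1, n2 = np.cross(v1, n), np.cross(v2, n)
    c1 = r1 + v1 * np.dot(r2 - r1, n2) / np.dot(v1, n2)
    c2 = r2 + v2 * np.dot(r1 - r2, n1) / np.dot(v2, n1)
    return c1, c2, np.linalg.norm(c1 - c2)

r1, v1 = np.array([0., 0., 0.]), np.array([1., 0., 0.])   # the x-axis
r2, v2 = np.array([0., 1., 0.]), np.array([0., 0., 1.])   # vertical line through (0, 1, 0)
print(closest_points(r1, v1, r2, v2))                     # (0,0,0), (0,1,0), distance 1.0
```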
mengban/traffic-src-spyder | [
"7517e850469daa524228201cf14e56fa2fa3885a"
] | [
"src/data_pro.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 31 14:53:58 2018\n\n@author: cadu\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport os\nimport time\ndef loaddata(Filename):\n data = pd.read_csv(Filename,sep=',',header = None)\n return np.array(data)\ndef get_tt():\n# dataset\n print(\"Data loading...\")\n startT=-time.time()\n abspath=os.path.abspath(\"..\")\n data1 = loaddata(abspath+\"/data/dataout2018-05-03_win16_0_1.csv\")\n data2 = loaddata(abspath+\"/data/dataout2018-05-03_win11_1_1.csv\")\n data3 = loaddata(abspath+\"/data/dataout2018-04-04_win16_2_1.csv\")\n data4 = loaddata(abspath+\"/data/dataoutcapture_win11_3_1.csv\")\n data5 = loaddata(abspath+\"/data/dataout2018-03-01_win11_4_1.csv\") \n #data6 = loaddata(abspath+\"/data/dataoutcapture_win15_5_1.csv\")\n data7 = loaddata(abspath+\"/data/dataoutcapture_win12_6_1.csv\")\n data8 = loaddata(abspath+\"/data/dataout2018-01-30_win10_7_1.csv\")\n data9 = loaddata(abspath+\"/data/dataout2018-01-30_win9_8_1.csv\")\n data10 = loaddata(abspath+\"/data/dataout2018-01-29_win7_9_1.csv\")\n \n print(\"src data is:\",data1.shape,data2.shape,data3.shape,data4.shape,data5.shape)\n print(\"src data is:\",data5.shape,data7.shape,data8.shape,data9.shape,data10.shape)\n data_train = np.vstack((data1[:len(data1)-2],data2[:len(data2)-2]))\n data_train = np.vstack((data_train,data3[:len(data3)-2]))\n data_train = np.vstack((data_train,data4[:len(data4)-2]))\n data_train = np.vstack((data_train,data5[:len(data5)-2]))\n #data_train = np.vstack((data_train,data6[:len(data6)-1]))\n data_train = np.vstack((data_train,data7[:len(data7)-2]))\n data_train = np.vstack((data_train,data8[:len(data8)-2]))\n data_train = np.vstack((data_train,data9[:len(data9)-2]))\n data_train = np.vstack((data_train,data10[:len(data10)-2]))\n \n print('This is data_train',type(data_train),data_train.shape)\n #label\n data1_ = loaddata(abspath+\"/data/labelout2018-05-03_win16_0_1.csv\")\n data2_ = loaddata(abspath+\"/data/labelout2018-05-03_win11_1_1.csv\")\n data3_ = loaddata(abspath+\"/data/labelout2018-04-04_win16_2_1.csv\")\n data4_ = loaddata(abspath+\"/data/labeloutcapture_win11_3_1.csv\")\n data5_ = loaddata(abspath+\"/data/labelout2018-03-01_win11_4_1.csv\")\n #data6_ = loaddata(abspath+\"/data/labeloutcapture_win15_5_1.csv\")\n data7_ = loaddata(abspath+\"/data/labeloutcapture_win12_6_1.csv\")\n data8_ = loaddata(abspath+\"/data/labelout2018-01-30_win10_7_1.csv\")\n data9_ = loaddata(abspath+\"/data/labelout2018-01-30_win9_8_1.csv\")\n data10_ = loaddata(abspath+\"/data/labelout2018-01-29_win7_9_1.csv\")\n #print(data1_.shape,data2_.shape,data3_.shape,data4_.shape,data5_.shape)\n \n \n label_train = np.vstack((data1_[:len(data1_)-2],data2_[:len(data2_)-2]))\n label_train = np.vstack((label_train,data3_[:len(data3_)-2]))\n label_train = np.vstack((label_train,data4_[:len(data4_)-2]))\n label_train = np.vstack((label_train,data5_[:len(data5_)-2]))\n #label_train = np.vstack((label_train,data6_[:len(data6)-1]))\n label_train = np.vstack((label_train,data7_[:len(data7)-2]-1))\n label_train = np.vstack((label_train,data8_[:len(data8)-2]-1))\n label_train = np.vstack((label_train,data9_[:len(data9)-2]-1))\n label_train = np.vstack((label_train,data10_[:len(data10)-2]-1))\n print(\"Data loading is ...OK.\")\n print(\"The total time is :\",time.time()+startT)\n print(label_train.shape)\n return data_train,label_train\nif __name__=='__main__':\n X,Y=get_tt()\n print(X.shape,Y.shape)"
] | [
[
"numpy.array",
"pandas.read_csv"
]
] |
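`get_tt` in the row above repeats the same read-and-stack pattern nine times for data and again for labels. A hedged sketch of that pattern written as a loop; the two file names are taken from the script, the rest would follow the same naming convention, and the per-class label offsets the original applies are left out:

```python
import os
import numpy as np
import pandas as pd

def stack_csvs(paths):
    """Read each CSV, drop its last two rows, and stack the arrays vertically."""
    arrays = [pd.read_csv(p, sep=',', header=None).to_numpy()[:-2] for p in paths]
    return np.vstack(arrays)

base = os.path.abspath("..")
data_paths = [
    base + "/data/dataout2018-05-03_win16_0_1.csv",
    base + "/data/dataout2018-05-03_win11_1_1.csv",
    # ... the remaining window files follow the same pattern
]
# data_train = stack_csvs(data_paths)   # enable once the CSV files are available
```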
ZerounNet/envpool | [
"49780e7caceda5f781072d3ef0cbb8aae082595f"
] | [
"envpool/atari/api_test.py"
] | [
"# Copyright 2021 Garena Online Private Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Unit test for atari envpool and speed benchmark.\"\"\"\n\nimport dm_env\nimport gym\nimport numpy as np\nfrom absl import logging\nfrom absl.testing import absltest\n\nfrom envpool.atari import AtariDMEnvPool, AtariEnvSpec, AtariGymEnvPool\n\n\nclass _SpecTest(absltest.TestCase):\n\n def test_spec(self) -> None:\n action_nums = {\"pong\": 6, \"breakout\": 4}\n for task in [\"pong\", \"breakout\"]:\n action_num = action_nums[task]\n config = AtariEnvSpec.gen_config(task=task)\n spec = AtariEnvSpec(config)\n logging.info(spec)\n self.assertEqual(\n spec.action_array_spec[\"action\"].maximum + 1, action_num\n )\n # check dm spec\n dm_obs_spec: dm_env.specs.BoundedArray = spec.observation_spec(\n ).obs # type: ignore\n dm_act_spec: dm_env.specs.DiscreteArray = spec.action_spec()\n self.assertEqual(len(spec.action_array_spec), 3)\n self.assertIsInstance(dm_obs_spec, dm_env.specs.BoundedArray)\n self.assertEqual(dm_obs_spec.dtype, np.uint8)\n self.assertEqual(dm_obs_spec.maximum, 255)\n self.assertIsInstance(dm_act_spec, dm_env.specs.DiscreteArray)\n self.assertEqual(dm_act_spec.num_values, action_num)\n # check gym space\n gym_obs_space: gym.spaces.Box = spec.observation_space\n gym_act_space: gym.spaces.Discrete = spec.action_space\n self.assertEqual(len(spec.action_array_spec), 3)\n self.assertIsInstance(gym_obs_space, gym.spaces.Box)\n self.assertEqual(gym_obs_space.dtype, np.uint8)\n np.testing.assert_allclose(gym_obs_space.high, 255)\n self.assertIsInstance(gym_act_space, gym.spaces.Discrete)\n self.assertEqual(gym_act_space.n, action_num)\n\n\nclass _DMSyncTest(absltest.TestCase):\n\n def test_spec(self) -> None:\n action_nums = {\"pong\": 6, \"breakout\": 4}\n for task in [\"pong\", \"breakout\"]:\n action_num = action_nums[task]\n config = AtariEnvSpec.gen_config(task=task)\n spec = AtariEnvSpec(config)\n env = AtariDMEnvPool(spec)\n self.assertIsInstance(env, dm_env.Environment)\n logging.info(env)\n # check dm spec\n dm_obs_spec: dm_env.specs.BoundedArray = env.observation_spec(\n ).obs # type: ignore\n dm_act_spec: dm_env.specs.DiscreteArray = env.action_spec()\n self.assertIsInstance(dm_obs_spec, dm_env.specs.BoundedArray)\n self.assertEqual(dm_obs_spec.dtype, np.uint8)\n self.assertEqual(dm_obs_spec.maximum, 255)\n self.assertIsInstance(dm_act_spec, dm_env.specs.DiscreteArray)\n self.assertEqual(dm_act_spec.num_values, action_num)\n\n def test_lowlevel_step(self) -> None:\n num_envs = 4\n config = AtariEnvSpec.gen_config(task=\"pong\", num_envs=num_envs)\n spec = AtariEnvSpec(config)\n env = AtariDMEnvPool(spec)\n logging.info(env)\n env.async_reset()\n ts: dm_env.TimeStep = env.recv()\n # check ts structure\n self.assertTrue(np.all(ts.first()))\n np.testing.assert_allclose(ts.step_type.shape, (num_envs,))\n np.testing.assert_allclose(ts.reward.shape, (num_envs,))\n self.assertEqual(ts.reward.dtype, np.float32)\n np.testing.assert_allclose(ts.discount.shape, (num_envs,))\n 
self.assertEqual(ts.discount.dtype, np.float32)\n np.testing.assert_allclose(ts.observation.obs.shape, (num_envs, 4, 84, 84))\n self.assertEqual(ts.observation.obs.dtype, np.uint8)\n np.testing.assert_allclose(ts.observation.lives.shape, (num_envs,))\n self.assertEqual(ts.observation.lives.dtype, np.int32)\n np.testing.assert_allclose(ts.observation.env_id, np.arange(num_envs))\n self.assertEqual(ts.observation.env_id.dtype, np.int32)\n np.testing.assert_allclose(\n ts.observation.players.env_id.shape, (num_envs,)\n )\n self.assertEqual(ts.observation.players.env_id.dtype, np.int32)\n action = {\n \"env_id\": np.arange(num_envs),\n \"players.env_id\": np.arange(num_envs),\n \"action\": np.ones(num_envs, int)\n }\n # because in c++ side we define action is int32 instead of int64\n self.assertRaises(RuntimeError, env.send, action)\n action = {\n \"env_id\": np.arange(num_envs, dtype=np.int32),\n \"players.env_id\": np.arange(num_envs, dtype=np.int32),\n \"action\": np.ones(num_envs, np.int32)\n }\n env.send(action)\n ts1: dm_env.TimeStep = env.recv()\n self.assertTrue(np.all(ts1.mid()))\n action = np.ones(num_envs)\n env.send(action)\n ts2: dm_env.TimeStep = env.recv()\n self.assertTrue(np.all(ts2.mid()))\n while np.all(ts2.mid()):\n env.send(np.random.randint(6, size=num_envs))\n ts2 = env.recv()\n env.send(np.random.randint(6, size=num_envs))\n tsp1: dm_env.TimeStep = env.recv()\n index = np.where(ts2.last())\n np.testing.assert_allclose(ts2.discount[index], 0)\n np.testing.assert_allclose(tsp1.step_type[index], dm_env.StepType.FIRST)\n np.testing.assert_allclose(tsp1.discount[index], 1)\n\n def test_highlevel_step(self) -> None:\n num_envs = 4\n # defender game hangs infinitely in gym.make(\"Defender-v0\")\n config = AtariEnvSpec.gen_config(task=\"defender\", num_envs=num_envs)\n spec = AtariEnvSpec(config)\n env = AtariDMEnvPool(spec)\n logging.info(env)\n ts: dm_env.TimeStep = env.reset()\n # check ts structure\n self.assertTrue(np.all(ts.first()))\n np.testing.assert_allclose(ts.step_type.shape, (num_envs,))\n np.testing.assert_allclose(ts.reward.shape, (num_envs,))\n self.assertEqual(ts.reward.dtype, np.float32)\n np.testing.assert_allclose(ts.discount.shape, (num_envs,))\n self.assertEqual(ts.discount.dtype, np.float32)\n np.testing.assert_allclose(ts.observation.obs.shape, (num_envs, 4, 84, 84))\n self.assertEqual(ts.observation.obs.dtype, np.uint8)\n np.testing.assert_allclose(ts.observation.lives.shape, (num_envs,))\n self.assertEqual(ts.observation.lives.dtype, np.int32)\n np.testing.assert_allclose(ts.observation.env_id, np.arange(num_envs))\n self.assertEqual(ts.observation.env_id.dtype, np.int32)\n np.testing.assert_allclose(\n ts.observation.players.env_id.shape, (num_envs,)\n )\n self.assertEqual(ts.observation.players.env_id.dtype, np.int32)\n action = {\n \"env_id\": np.arange(num_envs),\n \"players.env_id\": np.arange(num_envs),\n \"action\": np.ones(num_envs, int)\n }\n # because in c++ side we define action is int32 instead of int64\n self.assertRaises(RuntimeError, env.step, action)\n action = {\n \"env_id\": np.arange(num_envs, dtype=np.int32),\n \"players.env_id\": np.arange(num_envs, dtype=np.int32),\n \"action\": np.ones(num_envs, np.int32)\n }\n ts1: dm_env.TimeStep = env.step(action)\n self.assertTrue(np.all(ts1.mid()))\n action = np.ones(num_envs)\n ts2: dm_env.TimeStep = env.step(action)\n self.assertTrue(np.all(ts2.mid()))\n while np.all(ts2.mid()):\n ts2 = env.step(np.random.randint(18, size=num_envs))\n tsp1: dm_env.TimeStep = env.step(np.random.randint(18, 
size=num_envs))\n index = np.where(ts2.last())\n np.testing.assert_allclose(ts2.discount[index], 0)\n np.testing.assert_allclose(tsp1.step_type[index], dm_env.StepType.FIRST)\n np.testing.assert_allclose(tsp1.discount[index], 1)\n\n\nclass _GymSyncTest(absltest.TestCase):\n\n def test_spec(self) -> None:\n action_nums = {\"pong\": 6, \"breakout\": 4}\n for task in [\"pong\", \"breakout\"]:\n action_num = action_nums[task]\n config = AtariEnvSpec.gen_config(task=task)\n spec = AtariEnvSpec(config)\n env = AtariGymEnvPool(spec)\n self.assertIsInstance(env, gym.Env)\n logging.info(env)\n # check gym space\n gym_obs_space: gym.spaces.Box = env.observation_space\n gym_act_space: gym.spaces.Discrete = env.action_space\n self.assertEqual(len(spec.action_array_spec), 3)\n self.assertIsInstance(gym_obs_space, gym.spaces.Box)\n self.assertEqual(gym_obs_space.dtype, np.uint8)\n np.testing.assert_allclose(gym_obs_space.high, 255)\n self.assertIsInstance(gym_act_space, gym.spaces.Discrete)\n self.assertEqual(gym_act_space.n, action_num)\n\n def test_lowlevel_step(self) -> None:\n num_envs = 4\n config = AtariEnvSpec.gen_config(task=\"breakout\", num_envs=num_envs)\n spec = AtariEnvSpec(config)\n env = AtariGymEnvPool(spec)\n self.assertTrue(isinstance(env, gym.Env))\n logging.info(env)\n env.async_reset()\n obs, rew, done, info = env.recv()\n # check shape\n self.assertIsInstance(obs, np.ndarray)\n self.assertEqual(obs.dtype, np.uint8)\n np.testing.assert_allclose(rew.shape, (num_envs,))\n self.assertEqual(rew.dtype, np.float32)\n np.testing.assert_allclose(done.shape, (num_envs,))\n self.assertEqual(done.dtype, np.bool_)\n self.assertIsInstance(info, dict)\n self.assertEqual(len(info), 4)\n self.assertEqual(info[\"env_id\"].dtype, np.int32)\n self.assertEqual(info[\"lives\"].dtype, np.int32)\n self.assertEqual(info[\"players\"][\"env_id\"].dtype, np.int32)\n self.assertEqual(info[\"TimeLimit.truncated\"].dtype, np.bool_)\n np.testing.assert_allclose(info[\"env_id\"], np.arange(num_envs))\n np.testing.assert_allclose(info[\"lives\"].shape, (num_envs,))\n np.testing.assert_allclose(info[\"players\"][\"env_id\"].shape, (num_envs,))\n np.testing.assert_allclose(info[\"TimeLimit.truncated\"].shape, (num_envs,))\n while not np.any(done):\n env.send(np.random.randint(6, size=num_envs))\n obs, rew, done, info = env.recv()\n env.send(np.random.randint(6, size=num_envs))\n obs1, rew1, done1, info1 = env.recv()\n index = np.where(done)[0]\n self.assertTrue(np.all(~done1[index]))\n\n def test_highlevel_step(self) -> None:\n num_envs = 4\n config = AtariEnvSpec.gen_config(task=\"pong\", num_envs=num_envs)\n spec = AtariEnvSpec(config)\n env = AtariGymEnvPool(spec)\n self.assertTrue(isinstance(env, gym.Env))\n logging.info(env)\n obs = env.reset()\n # check shape\n self.assertIsInstance(obs, np.ndarray)\n self.assertEqual(obs.dtype, np.uint8) # type: ignore\n obs, rew, done, info = env.step(np.random.randint(6, size=num_envs))\n self.assertIsInstance(obs, np.ndarray)\n self.assertEqual(obs.dtype, np.uint8)\n np.testing.assert_allclose(rew.shape, (num_envs,))\n self.assertEqual(rew.dtype, np.float32)\n np.testing.assert_allclose(done.shape, (num_envs,))\n self.assertEqual(done.dtype, np.bool_)\n self.assertIsInstance(info, dict)\n self.assertEqual(len(info), 4)\n self.assertEqual(info[\"env_id\"].dtype, np.int32)\n self.assertEqual(info[\"lives\"].dtype, np.int32)\n self.assertEqual(info[\"players\"][\"env_id\"].dtype, np.int32)\n self.assertEqual(info[\"TimeLimit.truncated\"].dtype, np.bool_)\n 
np.testing.assert_allclose(info[\"env_id\"], np.arange(num_envs))\n np.testing.assert_allclose(info[\"lives\"].shape, (num_envs,))\n np.testing.assert_allclose(info[\"players\"][\"env_id\"].shape, (num_envs,))\n np.testing.assert_allclose(info[\"TimeLimit.truncated\"].shape, (num_envs,))\n while not np.any(done):\n obs, rew, done, info = env.step(np.random.randint(6, size=num_envs))\n obs1, rew1, done1, info1 = env.step(np.random.randint(6, size=num_envs))\n index = np.where(done)[0]\n self.assertTrue(np.all(~done1[index]))\n\n\nif __name__ == \"__main__\":\n absltest.main()\n"
] | [
[
"numpy.testing.assert_allclose",
"numpy.ones",
"numpy.any",
"numpy.where",
"numpy.random.randint",
"numpy.arange",
"numpy.all"
]
] |
minrk/ggplot | [
"c90ab65b959172c4a3488893e395dc3749dd1830"
] | [
"ggplot/scales/scale_colour_gradient.py"
] | [
"from .scale import scale\nfrom copy import deepcopy\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LinearSegmentedColormap, rgb2hex, ColorConverter\nimport numpy as np\n\n\ndef colors_at_breaks(cmap, breaks=[0, 0.25, 0.5, 0.75, 1.]):\n return [rgb2hex(cmap(bb)[:3]) for bb in breaks]\n\n\nclass scale_colour_gradient(scale):\n VALID_SCALES = ['name', 'limits', 'low', 'mid', 'high']\n\n def __radd__(self, gg):\n gg = deepcopy(gg)\n if self.name:\n gg.color_label = self.name\n if self.limits:\n gg.color_limits = self.limits\n color_spectrum = []\n if self.low:\n color_spectrum.append(self.low)\n if self.mid:\n color_spectrum.append(self.mid)\n if self.high:\n color_spectrum.append(self.high)\n\n if self.low and self.high:\n gradient2n = LinearSegmentedColormap.from_list('gradient2n', color_spectrum)\n plt.cm.register_cmap(cmap=gradient2n)\n # add them back to ggplot\n gg.color_scale = colors_at_breaks(gradient2n)\n gg.colormap = gradient2n\n\n return gg\n\n"
] | [
[
"matplotlib.pyplot.cm.register_cmap",
"matplotlib.colors.LinearSegmentedColormap.from_list"
]
] |
firmiana/sl-quant | [
"2ef962244f66eba8ebab62aeee8f0df694f12c55"
] | [
"ex1-self_learning_quant.py"
] | [
"\"\"\"\nName: The Self Learning Quant, Example 1\n\nAuthor: Daniel Zakrisson\n\nCreated: 30/03/2016\nCopyright: (c) Daniel Zakrisson 2016\nLicence: BSD\n\nRequirements:\nNumpy\nPandas\nMatplotLib\nscikit-learn\nKeras, https://keras.io/\nbacktest.py from the TWP library. Download backtest.py and put in the same folder\n\n/plt create a subfolder in the same directory where plot files will be saved\n\n\"\"\"\nfrom __future__ import print_function\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom sklearn import preprocessing\n\nimport backtest as twp\n\nnp.random.seed(1335) # for reproducibility\nnp.set_printoptions(precision=5, suppress=True, linewidth=150)\n\n\n\n# Load data\ndef load_data():\n price = np.arange(200 / 10.0) # linearly increasing prices\n return price\n\n\n# Initialize first state, all items are placed deterministically\ndef init_state(data):\n \"\"\"init state\"\"\"\n close = data\n diff = np.diff(data)\n diff = np.insert(diff, 0, 0)\n\n # --- Preprocess data\n xdata = np.column_stack((close, diff / close))\n xdata = np.nan_to_num(xdata)\n scaler = preprocessing.StandardScaler()\n xdata = scaler.fit_transform(xdata)\n\n state = xdata[0:1, :]\n return state, xdata\n\n\n# Take Action\ndef take_action(state, xdata, action, signal, time_step):\n # this should generate a list of trade signals that at evaluation time are fed to the backtester\n # the backtester should get a list of trade signals and a list of price data for the assett\n\n # make necessary adjustments to state and then return it\n time_step += 1\n\n # if the current iteration is the last state (\"terminal state\") then set terminal_state to 1\n if time_step == xdata.shape[0]:\n state = xdata[time_step - 1:time_step, :]\n terminal_state = True\n signal.loc[time_step] = 0\n return state, time_step, signal, terminal_state\n\n # move the market data window one step forward\n state = xdata[time_step - 1:time_step, :]\n # take action\n if action != 0:\n if action == 1:\n signal.loc[time_step] = 100\n elif action == 2:\n signal.loc[time_step] = -100\n elif action == 3:\n signal.loc[time_step] = 0\n terminal_state = False\n\n return state, time_step, signal, terminal_state\n\n\n# Get Reward, the reward is returned at the end of an episode\ndef get_reward(new_state, time_step, action, xdata, signal, terminal_state, epoch=0):\n reward = 0\n signal.fillna(value=0, inplace=True)\n if not terminal_state:\n # get reward for the most current action\n if signal[time_step] != signal[time_step - 1]:\n i = 1\n while signal[time_step - i] == signal[time_step - 1 - i] and time_step - 1 - i > 0:\n i += 1\n reward = (xdata[time_step - 1, 0] - xdata[time_step - i - 1, 0]) * signal[\n time_step - 1] * -100 + i * np.abs(signal[time_step - 1]) / 10.0\n if signal[time_step] == 0 and signal[time_step - 1] == 0:\n reward -= 10\n\n # calculate the reward for all actions if the last iteration in set\n if terminal_state:\n # run backtest, send list of trade signals and asset data to backtest function\n bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata]), signal, signalType='shares')\n reward = bt.pnl.iloc[-1]\n\n return reward\n\n\ndef evaluate_Q(eval_data, eval_model):\n # This function is used to evaluate the perofrmance of the system each epoch, without the influence of epsilon and random actions\n signal = pd.Series(index=np.arange(len(eval_data)))\n state, xdata = init_state(eval_data)\n # status = 1\n terminal_state = False\n time_step = 1\n while not terminal_state:\n # We start in state S\n # Run 
the Q function on S to get predicted reward values on all the possible actions\n qval = eval_model.predict(state.reshape(1, 2), batch_size=1)\n action = (np.argmax(qval))\n # Take action, observe new state S'\n new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)\n # Observe reward\n eval_reward = get_reward(new_state, time_step, action, xdata, signal, terminal_state, i)\n state = new_state\n return eval_reward\n\n\n# This neural network is the the Q-function, run it like this:\n# model.predict(state.reshape(1,64), batch_size=1)\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Activation\nfrom tensorflow.keras.optimizers import RMSprop\n\nmodel = Sequential()\nmodel.add(Dense(4, kernel_initializer='lecun_uniform', input_shape=(2,)))\nmodel.add(Activation('relu'))\n# model.add(Dropout(0.2)) I'm not using dropout in this example\n\nmodel.add(Dense(4, kernel_initializer='lecun_uniform'))\nmodel.add(Activation('relu'))\n# model.add(Dropout(0.2))\n\nmodel.add(Dense(4, kernel_initializer='lecun_uniform'))\nmodel.add(Activation('linear')) # linear output so we can have range of real-valued outputs\n\nrms = RMSprop()\nmodel.compile(loss='mse', optimizer=rms)\n\nimport random, timeit\n\nstart_time = timeit.default_timer()\n\nindata = load_data()\nepochs = 10\ngamma = 0.9 # a high gamma makes a long term reward more valuable\nalpha = 0.9\nepsilon = 1\nlearning_progress = []\n# stores tuples of (S, A, R, S')\nh = 0\nsignal = pd.Series(index=np.arange(len(indata)))\nfor i in range(epochs):\n\n state, xdata = init_state(indata)\n print(\"state\", state)\n print(\"xdata\", xdata[:5])\n status = 1\n terminal = False\n time_step = 1\n update = 0\n # while learning is still in progress\n while not terminal:\n # We start in state S\n # Run the Q function on S to get predicted reward values on all the possible actions\n qval = model.predict(state.reshape(1, 2), batch_size=1)\n if (random.random() < epsilon) and i != epochs - 1: # maybe choose random action if not the last epoch\n action = np.random.randint(0, 4) # assumes 4 different actions\n else: # choose best action from Q(s,a) values\n action = (np.argmax(qval))\n # Take action, observe new state S'\n new_state, time_step, signal, terminal = take_action(state, xdata, action, signal, time_step)\n # Observe reward\n reward = get_reward(new_state, time_step, action, xdata, signal, terminal, i)\n # Get max_Q(S',a)\n newQ = model.predict(new_state.reshape(1, 2), batch_size=1)\n maxQ = np.max(newQ)\n y = np.zeros((1, 4))\n y[:] = qval[:]\n if not terminal: # non-terminal state\n # update = (reward + (gamma * maxQ))\n update = (1 - alpha) * update + alpha * (reward + (gamma * maxQ))\n else: # terminal state (means that it is the last state)\n update = reward\n y[0][action] = update # target output\n model.fit(state.reshape(1, 2), y, batch_size=1, epochs=1, verbose=0)\n state = new_state\n eval_reward = evaluate_Q(indata, model)\n print(\"Epoch #: %s Reward: %f Epsilon: %f\" % (i, eval_reward, epsilon))\n learning_progress.append((eval_reward))\n if epsilon > 0.1:\n epsilon -= (1.0 / epochs)\n\nelapsed = np.round(timeit.default_timer() - start_time, decimals=2)\nprint(\"Completed in %f\" % (elapsed,))\n\n# plot results\nbt = twp.Backtest(pd.Series(data=[x[0] for x in xdata]), signal, signalType='shares')\nbt.data['delta'] = bt.data['shares'].diff().fillna(0)\n\nprint(bt.data)\n\nplt.figure()\nbt.plotTrades()\nplt.suptitle('epoch' + str(i))\nplt.savefig('plt/final_trades' 
+ '.png', bbox_inches='tight', pad_inches=1, dpi=72) # assumes there is a ./plt dir\nplt.close('all')\n\nplt.figure()\nplt.subplot(3, 1, 1)\nbt.plotTrades()\nplt.subplot(3, 1, 2)\nbt.pnl.plot(style='x-')\nplt.subplot(3, 1, 3)\nplt.plot(learning_progress)\n\nplt.show()\n"
] | [
[
"numpy.set_printoptions",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.Sequential",
"numpy.max",
"numpy.nan_to_num",
"matplotlib.pyplot.savefig",
"numpy.arange",
"numpy.argmax",
"numpy.random.randint",
"numpy.column_stack",
"matplotlib.pyplot.subplot",
"numpy.zeros",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.diff",
"tensorflow.keras.optimizers.RMSprop",
"matplotlib.pyplot.show",
"numpy.insert",
"sklearn.preprocessing.StandardScaler",
"numpy.random.seed",
"matplotlib.pyplot.plot",
"numpy.abs",
"pandas.Series"
]
] |