repo_name: stringlengths 6-130
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
PiotrowskiD/tensor2tensor
[ "2451614b930c73b2b8dd891b4fc5838d99a151a6" ]
[ "tensor2tensor/data_generators/problem.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base class for problem/dataset definitions.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport functools\nimport multiprocessing\nimport os\nimport random\nimport six\n\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.utils import data_reader\nfrom tensor2tensor.utils import metrics\nfrom tensor2tensor.utils import mlperf_log\n\nimport tensorflow as tf\nfrom tensorflow.contrib.tpu.python.tpu import tpu_config\n\n\n\nclass DatasetSplit(object):\n TRAIN = tf.estimator.ModeKeys.TRAIN\n EVAL = tf.estimator.ModeKeys.EVAL\n TEST = \"test\"\n\n\nclass SpaceID(object):\n \"\"\"Input and target space ids. Add more as needed.\"\"\"\n # Generic / unknown output space (default)\n GENERIC = 0\n # Image labels\n IMAGE_LABEL = 1\n # English characters\n EN_CHR = 2\n # English tokens\n EN_TOK = 3\n # English bpe tokens\n EN_BPE_TOK = 4\n # French characters\n FR_CHR = 5\n # French tokens\n FR_TOK = 6\n # German characters\n DE_CHR = 7\n # German tokens\n DE_TOK = 8\n # German bpe tokens\n DE_BPE_TOK = 9\n # Digit cipher lexicon 0\n DIGIT_0 = 10\n # Digit cipher lexicon 1\n DIGIT_1 = 11\n # Audio waveform domain\n AUDIO_WAV = 12\n # Audio spectral domain\n AUDIO_SPECTRAL = 13\n # Parse characters\n PARSE_CHR = 14\n # Parse tokens\n PARSE_TOK = 15\n # Chinese tokens\n ZH_TOK = 16\n # Icelandic characters\n ICE_CHAR = 17\n # Icelandic tokens\n ICE_TOK = 18\n # Icelandic parse tokens\n ICE_PARSE_TOK = 19\n # Macedonian tokens\n MK_TOK = 20\n # Czech tokens\n CS_TOK = 21\n # Czech characters\n CS_CHR = 22\n # Genetic bases (ACTG)\n DNA = 23\n # Real numbers\n REAL = 24\n # Images\n IMAGE = 25\n # Peptide\n PEPTIDE = 26\n # Python\n PY_TOK = 27\n # C++\n CPP_TOK = 28\n # Strokes\n STROKES = 29\n # Pickled Python\n PICKLED_PYTHON = 30\n\n\nclass TaskID(object):\n \"\"\"Problem specific task ids. 
Add more as needed.\"\"\"\n # English characters\n EN_CHR = 2\n # English characters sentiment\n EN_CHR_SENT = 3\n # English Premise Hypothesis pair\n EN_PR_HYP = 4\n # English NLI\n EN_NLI = 5\n # COLA\n COLA = 6\n # Enligh Question Context pair\n EN_Q_CONT = 7\n # English similarity task\n EN_SIM = 8\n # English sentence pair\n EN_SENT_PAIR = 9\n # 3 class NLI\n THREE_CL_NLI = 10\n\n\ndef default_model_hparams():\n return tf.contrib.training.HParams(\n max_input_seq_length=0,\n max_target_seq_length=0,\n prepend_mode=\"none\",\n split_to_length=0,\n data_dir=None)\n\n\ndef preprocess_example_common(example, hparams, mode):\n \"\"\"Preprocessing steps common to all models.\"\"\"\n if hparams.max_input_seq_length > 0:\n example[\"inputs\"] = example[\"inputs\"][:hparams.max_input_seq_length]\n if hparams.prepend_mode != \"none\":\n if mode == tf.estimator.ModeKeys.PREDICT:\n example[\"partial_targets\"] = tf.concat([example[\"inputs\"], [0]], 0)\n else:\n example[\"targets\"] = tf.concat(\n [example[\"inputs\"], [0], example[\"targets\"]], 0)\n if hparams.max_target_seq_length > 0:\n example[\"targets\"] = example[\"targets\"][:hparams.max_target_seq_length]\n if hparams.split_to_length:\n new_example = {}\n for k, v in six.iteritems(example):\n if k == \"targets\" or k == \"inputs\":\n new_example[k] = tf.reshape(v, [-1, hparams.split_to_length, 1, 1])\n else:\n tf.logging.warning(\"Dropping feature %s\" % k)\n return tf.data.Dataset.from_tensor_slices(new_example)\n return example\n\n\ndef _file_num_records_cached(filename):\n \"\"\"Return the number of TFRecords in a file.\"\"\"\n # Cache the result, as this is expensive to compute\n if filename in _file_num_records_cache:\n return _file_num_records_cache[filename]\n ret = 0\n for _ in tf.python_io.tf_record_iterator(filename):\n ret += 1\n _file_num_records_cache[filename] = ret\n return ret\n\n\n_file_num_records_cache = {}\n\n\ndef cpu_count():\n \"\"\"Return the number of available cores.\"\"\"\n num_available_cores = multiprocessing.cpu_count()\n return num_available_cores\n\n\nclass Problem(object):\n \"\"\"Problem base class. Specifies a T2T problem.\n\n Problems unify the specification of a problem for data generation, training,\n and inference.\n\n New problems are specified by the following methods:\n\n Data generation:\n * generate_data(data_dir, tmp_dir)\n - Generate training and dev datasets into data_dir.\n - Additional files, e.g. vocabulary files, should also be written to\n data_dir. Vocab files are newline-separated files with each line\n containing a token. The standard convention for the filename is to\n set it to be\n ${Problem.vocab_filename}.${Problem.targeted_vocab_size}\n - Downloads and other files can be written to tmp_dir\n - If you have a training and dev generator, you can generate the\n training and dev datasets with\n generator_utils.generate_dataset_and_shuffle.\n - Use the self.training_filepaths and self.dev_filepaths functions to\n get sharded filenames. 
If shuffled=False, the filenames will contain\n an \"unshuffled\" suffix; you should then shuffle the data\n shard-by-shard with generator_utils.shuffle_dataset.\n - Allows to specify the number of shards, optionally (can be omitted).\n - Subclasses must override\n * dataset_filename()\n - Base filename for problem.\n - Defaults to registered name (self.name).\n\n Training:\n * hparams(defaults, model_hparams)\n - Specify the problem hyperparameters (see _default_hparams)\n - Mutate defaults as needed\n * example_reading_spec\n - Specify the names and types of the features on disk.\n - Specify tf.contrib.slim.tfexample_decoder\n * preprocess_example(example, mode)\n - Preprocess the example feature dict from feature name to Tensor or\n SparseTensor.\n - Used in training, eval, and inference (specified by mode).\n\n Eval:\n * eval_metrics\n - Specify the set of evaluation metrics for this problem.\n * eval_hooks\n - Specify the set of evalueation hooks for this problem.\n\n Inference:\n * feature_encoders(data_dir)\n - Return a dict of <feature name, TextEncoder> for encoding and decoding\n inference input/output.\n - Defaults to TextEncoder for inputs and targets.\n \"\"\"\n\n # ============================================================================\n # BEGIN SUBCLASS INTERFACE\n # ============================================================================\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n raise NotImplementedError()\n\n @property\n def multiprocess_generate(self):\n \"\"\"Whether to generate the data in multiple parallel processes.\"\"\"\n return False\n\n @property\n def num_generate_tasks(self):\n \"\"\"Needed if multiprocess_generate is True.\"\"\"\n raise NotImplementedError()\n\n def prepare_to_generate(self, data_dir, tmp_dir):\n \"\"\"Prepare to generate data in parallel on different processes.\n\n This function is called if multiprocess_generate is True.\n\n Some things that might need to be done once are downloading the data\n if it is not yet downloaded, and building the vocabulary.\n\n Args:\n data_dir: a string\n tmp_dir: a string\n \"\"\"\n raise NotImplementedError()\n\n def hparams(self, defaults, model_hparams):\n pass\n\n def max_length(self, model_hparams):\n \"\"\"Maximum sequence length.\n\n Problems with fixed length should override.\n\n Args:\n model_hparams: model hyperparameters\n Returns:\n an integer\n \"\"\"\n return (model_hparams.split_to_length or model_hparams.max_length or\n model_hparams.batch_size)\n\n def tpu_batch_size_per_shard(self, model_hparams):\n \"\"\"Batch size in examples per TPU core.\n\n Args:\n model_hparams: model hyperparameters\n Returns:\n an integer\n \"\"\"\n if self.batch_size_means_tokens and not model_hparams.use_fixed_batch_size:\n return model_hparams.batch_size // self.max_length(model_hparams)\n else:\n return model_hparams.batch_size\n\n @property\n def batch_size_means_tokens(self):\n \"\"\"Do we specify hparams.batch_size in tokens per datashard per batch.\n\n This is generally done for text problems.\n\n If False, we assume that batch sizes are specified in examples per\n datashard per batch.\n\n TODO(noam): we should be more explicit and replace the hyperparameter\n batch size with two hyperparameters:\n hparams.examples_per_batch_per_datashard\n hparams.tokens_per_batch_per_datashard\n\n Returns:\n a boolean\n \"\"\"\n return False\n\n @property\n def skip_random_fraction_when_training(self):\n \"\"\"Skip a random number of examples at the beginning of training.\"\"\"\n # Skip a random 
fraction at the beginning of the stream. The skip is\n # essential for synchronous highly-parallel training to avoid multiple\n # replicas reading the same data in lock-step. So keep this true unless\n # you have a very specific setting in which it needs to be turned off.\n return True\n\n def dataset_filename(self):\n return self.name\n\n def feature_encoders(self, data_dir):\n del data_dir\n return {\n \"inputs\": text_encoder.TextEncoder(),\n \"targets\": text_encoder.TextEncoder()\n }\n\n def example_reading_spec(self):\n data_fields = {\n \"inputs\": tf.VarLenFeature(tf.int64),\n \"targets\": tf.VarLenFeature(tf.int64)\n }\n data_items_to_decoders = None\n return (data_fields, data_items_to_decoders)\n\n def preprocess_example(self, example, mode, hparams):\n \"\"\"Runtime preprocessing.\n\n Return a dict or a tf.Data.Datset.from_tensor_slices (if you want each\n example to turn into multiple).\n\n Args:\n example: dict, features\n mode: tf.estimator.ModeKeys\n hparams: HParams, model hyperparameters\n\n Returns:\n dict or Dataset\n \"\"\"\n return preprocess_example_common(example, hparams, mode)\n\n def eval_metrics(self):\n return [\n metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,\n metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY\n ]\n\n def eval_hooks(self, features, logits, hparams):\n del features, logits, hparams\n return []\n\n @property\n def task_id(self):\n if self._task_id == -1 and hasattr(self, \"global_task_id\"):\n self._task_id = self.global_task_id()\n return self._task_id\n\n def set_task_id(self, new_task_id):\n self._task_id = new_task_id\n\n # ============================================================================\n # END SUBCLASS INTERFACE\n # ============================================================================\n\n def preprocess(self, dataset, mode, hparams, interleave=True):\n \"\"\"Runtime preprocessing on the whole dataset.\n\n Return a tf.data.Datset -- the preprocessed version of the given one.\n By default this function calls preprocess_example.\n\n Args:\n dataset: the Dataset of already decoded but not yet preprocessed features.\n mode: tf.estimator.ModeKeys\n hparams: HParams, model hyperparameters\n interleave: bool, whether to use parallel_interleave, which is faster\n but will alter the order of samples non-deterministically, or flat_map,\n which is slower but will preserve the sample order.\n\n Returns:\n a Dataset\n \"\"\"\n def _preprocess(example):\n examples = self.preprocess_example(example, mode, hparams)\n if not isinstance(examples, tf.data.Dataset):\n examples = tf.data.Dataset.from_tensors(examples)\n return examples\n\n if interleave:\n dataset = dataset.apply(\n tf.contrib.data.parallel_interleave(\n _preprocess, sloppy=True, cycle_length=8))\n else:\n dataset = dataset.flat_map(_preprocess)\n\n return dataset\n\n def training_filepaths(self, data_dir, num_shards, shuffled):\n file_basename = self.dataset_filename()\n if not shuffled:\n file_basename += generator_utils.UNSHUFFLED_SUFFIX\n return generator_utils.train_data_filenames(file_basename, data_dir,\n num_shards)\n\n def dev_filepaths(self, data_dir, num_shards, shuffled):\n file_basename = self.dataset_filename()\n if not shuffled:\n file_basename += generator_utils.UNSHUFFLED_SUFFIX\n return generator_utils.dev_data_filenames(file_basename, data_dir,\n num_shards)\n\n def test_filepaths(self, data_dir, num_shards, shuffled):\n file_basename = self.dataset_filename()\n if not shuffled:\n file_basename += generator_utils.UNSHUFFLED_SUFFIX\n return 
generator_utils.test_data_filenames(file_basename, data_dir,\n num_shards)\n\n def filepattern(self, data_dir, mode, shard=None):\n \"\"\"Get filepattern for data files for mode.\n\n Matches mode to a suffix.\n * DatasetSplit.TRAIN: train\n * DatasetSplit.EVAL: dev\n * DatasetSplit.TEST: test\n * tf.estimator.ModeKeys.PREDICT: dev\n\n Args:\n data_dir: str, data directory.\n mode: DatasetSplit\n shard: int, if provided, will only read data from the specified shard.\n\n Returns:\n filepattern str\n \"\"\"\n path = os.path.join(data_dir, self.dataset_filename())\n shard_str = \"-%05d\" % shard if shard is not None else \"\"\n if mode == DatasetSplit.TRAIN:\n suffix = \"train\"\n elif mode in [DatasetSplit.EVAL, tf.estimator.ModeKeys.PREDICT]:\n suffix = \"dev\"\n else:\n assert mode == DatasetSplit.TEST\n suffix = \"test\"\n\n return \"%s-%s%s*\" % (path, suffix, shard_str)\n\n def __init__(self, was_reversed=False, was_copy=False):\n \"\"\"Create a Problem.\n\n Args:\n was_reversed: bool, whether to reverse inputs and targets.\n was_copy: bool, whether to copy inputs to targets. Can be composed with\n was_reversed so that if both are true, the targets become the inputs,\n which are then copied to targets so that the task is targets->targets.\n \"\"\"\n self._was_reversed = was_reversed\n self._was_copy = was_copy\n self._encoders = None\n self._hparams = None\n self._feature_info = None\n self._task_id = -1\n\n @property\n def was_reversed(self):\n \"\"\"Whether the problem was reversed.\"\"\"\n return self._was_reversed\n\n def get_feature_encoders(self, data_dir=None):\n if self._encoders is None:\n self._encoders = self.feature_encoders(data_dir)\n return self._encoders\n\n def get_hparams(self, model_hparams=None):\n \"\"\"Returns problem_hparams.\"\"\"\n if model_hparams is None:\n model_hparams = default_model_hparams()\n if self._hparams is not None:\n return self._hparams\n\n if self._encoders is None:\n data_dir = (model_hparams and hasattr(model_hparams, \"data_dir\") and\n model_hparams.data_dir) or None\n self.get_feature_encoders(data_dir)\n\n hp = _default_hparams()\n ret = self.hparams(hp, model_hparams)\n if ret is not None:\n raise ValueError(\"The Problem subclass hparams function should mutate \"\n \"the defaults passed in and return None.\")\n\n hp.add_hparam(\"vocabulary\", self._encoders)\n hp.add_hparam(\"was_reversed\", self._was_reversed)\n hp.add_hparam(\"was_copy\", self._was_copy)\n\n if self._was_reversed:\n _reverse_problem_hparams(hp)\n if self._was_copy:\n _copy_problem_hparams(hp)\n\n model_hparams = copy.copy(model_hparams)\n _create_modalities(hp, model_hparams)\n\n self._hparams = hp\n return self._hparams\n\n def maybe_reverse_features(self, feature_map):\n \"\"\"Reverse features between inputs and targets if the problem is '_rev'.\"\"\"\n if not self._was_reversed:\n return\n inputs = feature_map.pop(\"inputs\", None)\n targets = feature_map.pop(\"targets\", None)\n inputs_seg = feature_map.pop(\"inputs_segmentation\", None)\n targets_seg = feature_map.pop(\"targets_segmentation\", None)\n inputs_pos = feature_map.pop(\"inputs_position\", None)\n targets_pos = feature_map.pop(\"targets_position\", None)\n if inputs is not None:\n feature_map[\"targets\"] = inputs\n if targets is not None:\n feature_map[\"inputs\"] = targets\n if inputs_seg is not None:\n feature_map[\"targets_segmentation\"] = inputs_seg\n if targets_seg is not None:\n feature_map[\"inputs_segmentation\"] = targets_seg\n if inputs_pos is not None:\n feature_map[\"targets_position\"] = 
inputs_pos\n if targets_pos is not None:\n feature_map[\"inputs_position\"] = targets_pos\n\n def maybe_copy_features(self, feature_map):\n if not self._was_copy:\n return\n feature_map[\"targets\"] = feature_map[\"inputs\"]\n if (\"inputs_segmentation\" in feature_map and\n \"targets_segmentation\" not in feature_map):\n feature_map[\"targets_segmentation\"] = feature_map[\"inputs_segmentation\"]\n if (\"inputs_position\" in feature_map and\n \"targets_position\" not in feature_map):\n feature_map[\"targets_position\"] = feature_map[\"inputs_position\"]\n\n def maybe_reverse_and_copy(self, example):\n self.maybe_reverse_features(example)\n self.maybe_copy_features(example)\n return example\n\n def dataset(self,\n mode,\n data_dir=None,\n num_threads=None,\n output_buffer_size=None,\n shuffle_files=None,\n hparams=None,\n preprocess=True,\n dataset_split=None,\n shard=None,\n partition_id=0,\n num_partitions=1,\n shuffle_buffer_size=1024,\n max_records=-1):\n \"\"\"Build a Dataset for this problem.\n\n Args:\n mode: tf.estimator.ModeKeys; determines which files to read from.\n data_dir: directory that contains data files.\n num_threads: int, number of threads to use for decode and preprocess\n Dataset.map calls.\n output_buffer_size: int, how many elements to prefetch at end of pipeline.\n shuffle_files: whether to shuffle input files. Default behavior (i.e. when\n shuffle_files=None) is to shuffle if mode == TRAIN.\n hparams: tf.contrib.training.HParams; hparams to be passed to\n Problem.preprocess_example and Problem.hparams. If None, will use a\n default set that is a no-op.\n preprocess: bool, whether to map the Dataset through\n Problem.preprocess_example.\n dataset_split: DatasetSplit, which split to read data\n from (TRAIN:\"-train\", EVAL:\"-dev\", \"test\":\"-test\"). Defaults to mode.\n shard: int, if provided, will only read data from the specified shard.\n partition_id: integer - which partition of the dataset to read from\n num_partitions: how many partitions in the dataset\n shuffle_buffer_size: if shuffle_files is True, this is the buffer size\n used to shuffle records.\n max_records: int, number of records to truncate to.\n\n Returns:\n Dataset containing dict<feature name, Tensor>.\n\n Raises:\n ValueError: if num_partitions is greater than the number of data files.\n \"\"\"\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n shuffle_files = shuffle_files or shuffle_files is None and is_training\n\n dataset_split = dataset_split or mode\n assert data_dir\n\n if hparams is None:\n hparams = default_model_hparams()\n\n if not hasattr(hparams, \"data_dir\"):\n hparams.add_hparam(\"data_dir\", data_dir)\n if not hparams.data_dir:\n hparams.data_dir = data_dir\n # Construct the Problem's hparams so that items within it are accessible\n _ = self.get_hparams(hparams)\n\n data_filepattern = self.filepattern(data_dir, dataset_split, shard=shard)\n tf.logging.info(\"Reading data files from %s\", data_filepattern)\n data_files = sorted(tf.contrib.slim.parallel_reader.get_data_files(\n data_filepattern))\n\n # Functions used in dataset transforms below. 
`filenames` can be either a\n # `tf.string` tensor or `tf.data.Dataset` containing one or more filenames.\n def _load_records_and_preprocess(filenames):\n \"\"\"Reads files from a string tensor or a dataset of filenames.\"\"\"\n # Load records from file(s) with an 8MiB read buffer.\n dataset = tf.data.TFRecordDataset(filenames, buffer_size=8 * 1024 * 1024)\n # Decode.\n dataset = dataset.map(self.decode_example, num_parallel_calls=num_threads)\n # Preprocess if requested.\n # Note that preprocessing should happen per-file as order may matter.\n if preprocess:\n dataset = self.preprocess(dataset, mode, hparams,\n interleave=shuffle_files)\n return dataset\n\n if len(data_files) < num_partitions:\n raise ValueError(\n \"number of data files (%d) must be at least the number of hosts (%d)\"\n % (len(data_files), num_partitions))\n data_files = [f for (i, f) in enumerate(data_files)\n if i % num_partitions == partition_id]\n tf.logging.info(\n \"partition: %d num_data_files: %d\" % (partition_id, len(data_files)))\n if shuffle_files:\n mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER)\n random.shuffle(data_files)\n\n dataset = tf.data.Dataset.from_tensor_slices(tf.constant(data_files))\n # Create data-set from files by parsing, pre-processing and interleaving.\n if shuffle_files:\n dataset = dataset.apply(\n tf.contrib.data.parallel_interleave(\n _load_records_and_preprocess, sloppy=True, cycle_length=8))\n else:\n dataset = _load_records_and_preprocess(dataset)\n\n dataset = dataset.map(\n self.maybe_reverse_and_copy, num_parallel_calls=num_threads)\n dataset = dataset.take(max_records)\n\n ## Shuffle records only for training examples.\n if shuffle_files and is_training:\n dataset = dataset.shuffle(shuffle_buffer_size)\n if output_buffer_size:\n dataset = dataset.prefetch(output_buffer_size)\n\n return dataset\n\n def decode_example(self, serialized_example):\n \"\"\"Return a dict of Tensors from a serialized tensorflow.Example.\"\"\"\n data_fields, data_items_to_decoders = self.example_reading_spec()\n # Necessary to rejoin examples in the correct order with the Cloud ML Engine\n # batch prediction API.\n data_fields[\"batch_prediction_key\"] = tf.FixedLenFeature([1], tf.int64, 0)\n if data_items_to_decoders is None:\n data_items_to_decoders = {\n field: tf.contrib.slim.tfexample_decoder.Tensor(field)\n for field in data_fields\n }\n\n decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(\n data_fields, data_items_to_decoders)\n\n decode_items = list(sorted(data_items_to_decoders))\n decoded = decoder.decode(serialized_example, items=decode_items)\n return dict(zip(decode_items, decoded))\n\n @property\n def decode_hooks(self):\n \"\"\"List of functions to be run after full decodes have been produced.\n\n Returns:\n List of functions. 
Each function should expect a single argument, an\n instance of decoding.DecodeHookArgs and optionally return a list of\n tf.Summary.Value objects.\n \"\"\"\n return []\n\n @property\n def has_inputs(self):\n return \"inputs\" in self.get_feature_encoders()\n\n @property\n def feature_info(self):\n \"\"\"Retrieve dict<feature name, FeatureInfo>.\n\n Must first call Problem.get_hparams or Problem.dataset to have the problem's\n internal hparams already constructed.\n\n Returns:\n dict<feature name, FeatureInfo>\n \"\"\"\n if self._feature_info is not None:\n return self._feature_info\n\n assert self._hparams is not None\n\n hp = self.get_hparams()\n if self.has_inputs:\n in_id = hp.input_space_id\n out_id = hp.target_space_id\n\n features = collections.defaultdict(FeatureInfo)\n for feature_name, modality_cls in six.iteritems(hp.modality):\n finfo = features[feature_name]\n finfo.modality = modality_cls\n finfo.vocab_size = modality_cls.top_dimensionality\n\n vocabs = hp.vocabulary\n for name, encoder in six.iteritems(vocabs):\n features[name].encoder = encoder\n\n if self.has_inputs:\n features[\"inputs\"].space_id = in_id\n features[\"targets\"].space_id = out_id\n\n self._feature_info = features\n return features\n\n def make_estimator_input_fn(self,\n mode,\n hparams,\n data_dir=None,\n force_repeat=False,\n prevent_repeat=False,\n dataset_kwargs=None):\n \"\"\"Return input_fn wrapped for Estimator.\"\"\"\n\n def estimator_input_fn(params, config):\n return self.input_fn(\n mode,\n hparams,\n data_dir=data_dir,\n params=params,\n config=config,\n force_repeat=force_repeat,\n prevent_repeat=prevent_repeat,\n dataset_kwargs=dataset_kwargs)\n\n return estimator_input_fn\n\n def _dataset_partition(self, mode, config):\n \"\"\"Which part of the training data to read.\n\n If there are multiple parallel calls to input_fn (multiple TPU hosts),\n then we want each one to read from a separate partition of the training\n data.\n\n Args:\n mode: tf.estimator.ModeKeys\n config: RunConfig\n Returns:\n partition_id: an integer\n num_partitions: an integer\n \"\"\"\n if mode != tf.estimator.ModeKeys.TRAIN or not hasattr(config, \"tpu_config\"):\n # Reset in the case when using TPU but alternating TRAIN and EVAL.\n self._next_partition_id = 0\n return 0, 1\n phift = config.tpu_config.per_host_input_for_training\n # This is the mesh-tensorflow case.\n if (hasattr(tpu_config.InputPipelineConfig, \"BROADCAST\") and\n phift == tpu_config.InputPipelineConfig.BROADCAST):\n return 0, 1\n if phift:\n num_partitions = max(config.tpu_config.num_shards // 8, 1)\n else:\n num_partitions = config.tpu_config.num_shards\n partition_id = getattr(self, \"_next_partition_id\", 0)\n self._next_partition_id = partition_id + 1\n tf.logging.info(\"num_partitions = %d partition_id = %d\" %\n (num_partitions, partition_id))\n assert partition_id < num_partitions\n return partition_id, num_partitions\n\n def input_fn(self,\n mode,\n hparams,\n data_dir=None,\n params=None,\n config=None,\n force_repeat=False,\n prevent_repeat=False,\n dataset_kwargs=None):\n \"\"\"Builds input pipeline for problem.\n\n Args:\n mode: tf.estimator.ModeKeys\n hparams: HParams, model hparams\n data_dir: str, data directory; if None, will use hparams.data_dir\n params: dict, may include \"batch_size\"\n config: RunConfig; should have the data_parallelism attribute if not using\n TPU\n force_repeat: bool, whether to repeat the data even if not training\n prevent_repeat: bool, whether to not repeat when in training mode.\n Overrides force_repeat.\n 
dataset_kwargs: dict, if passed, will pass as kwargs to self.dataset\n method when called\n\n Returns:\n (features_dict<str name, Tensor feature>, Tensor targets)\n \"\"\"\n partition_id, num_partitions = self._dataset_partition(mode, config)\n\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n if config and config.use_tpu:\n num_threads = 64\n else:\n num_threads = cpu_count() if is_training else 1\n\n if config and hasattr(config,\n \"data_parallelism\") and config.data_parallelism:\n num_shards = config.data_parallelism.n\n else:\n num_shards = 1\n\n max_length = self.max_length(hparams)\n mlperf_log.transformer_print(\n key=mlperf_log.INPUT_MAX_LENGTH, value=max_length)\n\n def tpu_valid_size(example):\n return data_reader.example_valid_size(example, hparams.min_length,\n max_length)\n\n def gpu_valid_size(example):\n drop_long_sequences = is_training or hparams.eval_drop_long_sequences\n max_validate_length = max_length if drop_long_sequences else 10**9\n return data_reader.example_valid_size(example, hparams.min_length,\n max_validate_length)\n\n def define_shapes(example):\n batch_size = config and config.use_tpu and params[\"batch_size\"]\n return standardize_shapes(example, batch_size=batch_size)\n\n # Read and preprocess\n data_dir = data_dir or (hasattr(hparams, \"data_dir\") and hparams.data_dir)\n\n dataset_kwargs = dataset_kwargs or {}\n dataset_kwargs.update({\n \"mode\": mode,\n \"data_dir\": data_dir,\n \"num_threads\": num_threads,\n \"hparams\": hparams,\n \"partition_id\": partition_id,\n \"num_partitions\": num_partitions,\n })\n\n dataset = self.dataset(**dataset_kwargs)\n if (force_repeat or is_training) and not prevent_repeat:\n # Repeat and skip a random number of records\n dataset = dataset.repeat()\n\n if is_training and self.skip_random_fraction_when_training:\n data_files = tf.contrib.slim.parallel_reader.get_data_files(\n self.filepattern(data_dir, mode))\n # In continuous_train_and_eval when switching between train and\n # eval, this input_fn method gets called multiple times and it\n # would give you the exact same samples from the last call\n # (because the Graph seed is set). So this skip gives you some\n # shuffling.\n dataset = skip_random_fraction(dataset, data_files[0])\n\n dataset = dataset.map(\n data_reader.cast_ints_to_int32, num_parallel_calls=num_threads)\n\n if self.batch_size_means_tokens:\n batch_size_means_tokens = True\n else:\n if _are_shapes_fully_defined(dataset.output_shapes):\n batch_size_means_tokens = False\n else:\n tf.logging.warning(\n \"Shapes are not fully defined. Assuming batch_size means tokens.\")\n batch_size_means_tokens = True\n\n # Batching\n if not batch_size_means_tokens:\n # Batch size means examples per datashard.\n if config and config.use_tpu:\n # on TPU, we use params[\"batch_size\"], which specifies the number of\n # examples across all datashards\n batch_size = params[\"batch_size\"]\n dataset = dataset.batch(batch_size, drop_remainder=True)\n else:\n batch_size = hparams.batch_size * num_shards\n dataset = dataset.batch(batch_size)\n else:\n # batch_size means tokens per datashard\n if config and config.use_tpu:\n dataset = dataset.filter(tpu_valid_size)\n padded_shapes = self._pad_for_tpu(dataset.output_shapes, hparams)\n # on TPU, we use params[\"batch_size\"], which specifies the number of\n # examples across all datashards\n batch_size = params[\"batch_size\"]\n if hparams.pad_batch:\n tf.logging.warn(\n \"Padding the batch to ensure that remainder eval batches are \"\n \"processed. 
This may lead to incorrect metrics for \"\n \"non-zero-padded features, e.g. images. Use a smaller batch \"\n \"size that has no remainder in that case.\")\n dataset = dataset.padded_batch(\n batch_size, padded_shapes, drop_remainder=False)\n dataset = dataset.map(\n functools.partial(pad_batch, batch_multiple=batch_size),\n num_parallel_calls=num_threads)\n else:\n dataset = dataset.padded_batch(\n batch_size, padded_shapes, drop_remainder=True)\n else:\n # On GPU, bucket by length\n dataset = dataset.filter(gpu_valid_size)\n batching_scheme = data_reader.hparams_to_batching_scheme(\n hparams,\n shard_multiplier=num_shards,\n length_multiplier=self.get_hparams().batch_size_multiplier)\n if hparams.use_fixed_batch_size:\n # Here batch_size really means examples per datashard.\n batching_scheme[\"batch_sizes\"] = [hparams.batch_size]\n batching_scheme[\"boundaries\"] = []\n dataset = dataset.apply(\n tf.contrib.data.bucket_by_sequence_length(\n data_reader.example_length, batching_scheme[\"boundaries\"],\n batching_scheme[\"batch_sizes\"]))\n\n if not is_training:\n batch_multiple = num_shards\n if hparams.use_fixed_batch_size:\n # Make sure the last batch has the same fixed size as the rest.\n batch_multiple *= hparams.batch_size\n if batch_multiple > 1:\n tf.logging.warn(\n \"Padding the batch to ensure that remainder eval batches have \"\n \"a batch size divisible by the number of data shards. This may \"\n \"lead to incorrect metrics for non-zero-padded features, e.g. \"\n \"images. Use a single datashard (i.e. 1 GPU) in that case.\")\n dataset = dataset.map(\n functools.partial(pad_batch, batch_multiple=batch_multiple),\n num_parallel_calls=num_threads)\n\n dataset = dataset.map(define_shapes, num_parallel_calls=num_threads)\n\n # Add shuffling for training batches. This is necessary along with record\n # level shuffling in the dataset generation. Record shuffling will shuffle\n # the examples. However, in some cases, it's possible that the shuffle\n # buffer size for record shuffling is smaller than the batch size. In such\n # cases, adding batch shuffling ensures that the data is in random order\n # during training\n if (is_training and hasattr(hparams, \"batch_shuffle_size\") and\n hparams.batch_shuffle_size):\n dataset = dataset.shuffle(hparams.batch_shuffle_size)\n\n def prepare_for_output(example):\n if not config or not config.use_tpu:\n _summarize_features(example, num_shards)\n if mode == tf.estimator.ModeKeys.PREDICT:\n example[\"infer_targets\"] = example.pop(\"targets\")\n return example\n else:\n return example, example[\"targets\"]\n\n dataset = dataset.map(prepare_for_output, num_parallel_calls=num_threads)\n dataset = dataset.prefetch(2)\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n # This is because of a bug in the Estimator that short-circuits prediction\n # if it doesn't see a QueueRunner. 
DummyQueueRunner implements the\n # minimal expected interface but does nothing.\n tf.add_to_collection(tf.GraphKeys.QUEUE_RUNNERS,\n data_reader.DummyQueueRunner())\n\n return dataset\n\n @property\n def export_assets(self):\n \"\"\"Assets to export with the model.\n\n This property contains a dictionary of assets, such as vocabulary files,\n that should be exported together with the model, or None if no assets\n are needed.\n \"\"\"\n\n return None\n\n def serving_input_fn(self, hparams):\n \"\"\"Input fn for serving export, starting from serialized example.\"\"\"\n mode = tf.estimator.ModeKeys.PREDICT\n serialized_example = tf.placeholder(\n dtype=tf.string, shape=[None], name=\"serialized_example\")\n dataset = tf.data.Dataset.from_tensor_slices(serialized_example)\n dataset = dataset.map(self.decode_example)\n dataset = dataset.map(lambda ex: self.preprocess_example(ex, mode, hparams))\n dataset = dataset.map(self.maybe_reverse_and_copy)\n dataset = dataset.map(data_reader.cast_ints_to_int32)\n dataset = dataset.padded_batch(\n tf.shape(serialized_example, out_type=tf.int64)[0],\n dataset.output_shapes)\n dataset = dataset.map(standardize_shapes)\n features = tf.contrib.data.get_single_element(dataset)\n\n if self.has_inputs:\n features.pop(\"targets\", None)\n\n return tf.estimator.export.ServingInputReceiver(\n features=features, receiver_tensors=serialized_example)\n\n def _pad_for_tpu(self, shapes_dict, hparams):\n \"\"\"Pads unknown features' dimensions for TPU.\"\"\"\n max_length = self.max_length(hparams)\n padded_shapes = {}\n\n def get_filler(specified_max_length):\n if not specified_max_length:\n return max_length\n return min(specified_max_length, max_length)\n\n inputs_none_filler = get_filler(hparams.max_input_seq_length)\n targets_none_filler = get_filler(hparams.max_target_seq_length)\n\n def pad_one_shape(shape, none_filler):\n return [\n (dim if dim is not None else none_filler) for dim in shape.as_list()\n ]\n\n for key, shape in six.iteritems(shapes_dict):\n if key == \"inputs\":\n padded_shapes[key] = pad_one_shape(shape, inputs_none_filler)\n elif key == \"targets\":\n padded_shapes[key] = pad_one_shape(shape, targets_none_filler)\n else:\n padded_shapes[key] = pad_one_shape(shape, max_length)\n return padded_shapes\n\n\nclass FeatureInfo(object):\n \"\"\"Encapsulates information about a feature.\"\"\"\n\n def __init__(self,\n encoder=None,\n modality=None,\n vocab_size=None,\n space_id=None):\n self.encoder = encoder\n self.modality = modality\n self.vocab_size = vocab_size\n self.space_id = space_id\n\n\ndef _copy_problem_hparams(p_hparams):\n \"\"\"Use input modality, vocab, and space id for target.\"\"\"\n p = p_hparams\n # Duplicate input modality.\n p.modality[\"targets\"] = p.modality[\"inputs\"]\n # Duplicate input vocab size.\n p.vocab_size[\"targets\"] = p.vocab_size[\"inputs\"]\n # Duplicate input vocabulary.\n p.vocabulary[\"targets\"] = p.vocabulary[\"inputs\"]\n # Duplicate input space ids.\n p.target_space_id = p.input_space_id\n # Mark that p was reversed.\n p.was_copy = True\n\n\ndef _reverse_problem_hparams(p_hparams):\n \"\"\"Swap input/output modalities, vocab, and space ids.\"\"\"\n p = p_hparams\n\n # Swap modalities.\n # TODO(trandustin): Note this assumes target modalities have feature name\n # 'target', and each intended feature to swap has feature name 'input'.\n # In the future, remove need for this behavior.\n reversed_modality = {}\n for feature_name in six.iterkeys(p.modality):\n reversed_feature_name = 
feature_name.replace(\"target\", \"input\")\n if \"target\" in feature_name and reversed_feature_name in p.modality:\n reversed_modality[feature_name] = p.modality[reversed_feature_name]\n reversed_modality[reversed_feature_name] = p.modality[feature_name]\n else:\n reversed_modality[feature_name] = p.modality[feature_name]\n\n p.modality = reversed_modality\n\n # Swap vocab sizes.\n reversed_vocab_size = {}\n for feature_name in six.iterkeys(p.vocab_size):\n reversed_feature_name = feature_name.replace(\"target\", \"input\")\n if \"target\" in feature_name and reversed_feature_name in p.vocab_size:\n reversed_vocab_size[feature_name] = p.vocab_size[reversed_feature_name]\n reversed_vocab_size[reversed_feature_name] = p.vocab_size[feature_name]\n else:\n reversed_vocab_size[feature_name] = p.vocab_size[feature_name]\n\n p.vocab_size = reversed_vocab_size\n\n # Swap vocabularies.\n input_vocabulary = p.vocabulary.pop(\"inputs\", None)\n target_vocabulary = p.vocabulary.pop(\"targets\", None)\n if input_vocabulary is not None:\n p.vocabulary[\"targets\"] = input_vocabulary\n if target_vocabulary is not None:\n p.vocabulary[\"inputs\"] = target_vocabulary\n\n # Swap input/target space ids.\n input_space_id = p.input_space_id\n target_space_id = p.target_space_id\n if input_space_id is not None:\n p.target_space_id = input_space_id\n else:\n p.target_space_id = SpaceID.GENERIC\n if target_space_id is not None:\n p.input_space_id = target_space_id\n else:\n p.input_space_id = SpaceID.GENERIC\n\n # Mark that p was reversed.\n p.was_reversed = True\n\n\ndef _create_modalities(problem_hparams, hparams):\n \"\"\"Creates modalities and overrides any according to model hparams.\n\n Args:\n problem_hparams: tf.contrib.training.HParams for the Problem. It must have\n modality which is a dict of strings to Modality classes.\n hparams: tf.contrib.training.HParams for the model. It may have\n input_modalities and target_modality, which will override\n problem_hparams' modality input and target keys.\n\n Returns:\n None\n \"\"\"\n modality_overrides = getattr(hparams, \"modality\", {})\n modality = {}\n for feature_name, modality_cls in six.iteritems(problem_hparams.modality):\n vocab_size = problem_hparams.vocab_size[feature_name]\n # If needed for using a pre-trained model's vocabulary where extra indices\n # were allocated for adding new tasks with unique task ids.\n if (hasattr(hparams, \"multiproblem_vocab_size\") and\n hparams.multiproblem_vocab_size > 0):\n vocab_size = hparams.multiproblem_vocab_size\n modality_cls = modality_overrides.get(feature_name, modality_cls)\n modality[feature_name] = modality_cls(hparams, vocab_size)\n problem_hparams.modality = modality\n\n\ndef _default_hparams():\n \"\"\"A set of basic model hyperparameters.\"\"\"\n return tf.contrib.training.HParams(\n # Use this parameter to get comparable perplexity numbers with different\n # tokenizations. This value should be set to the ratio of the number of\n # tokens in the test set according to the tokenization used to the number\n # of tokens in the test set in the \"official\" tokenization. For\n # example, if we are using a word-piece based model and we want to\n # compute per-word perplexity, then we set loss_multiplier to the number\n # of wordpieces per word in the test set.\n loss_multiplier=1.0,\n\n # Use this parameter to allow for larger sequences in the batch. 
Without\n # the use of this parameter, the size of the inner two dimensions will\n # be used to judge the sequence length.\n batch_size_multiplier=1,\n\n # During inference for autoregressive problems, if the batch_size is 1,\n # the inference will stop when the model predict a text_encoder.EOS_ID\n # token.\n stop_at_eos=False,\n\n # Modalities used to map from features to a space compatible with\n # chosen model architecture. It comprises key-value pairs of a feature\n # name (str) and its modality class.\n modality={},\n\n # Identifiers used to tell the model which input/target space will be\n # expected. For example, it can tell that we expect French as characters\n # as output, or Spanish as sound. Spaces defined as constants in SpaceID\n # class.\n input_space_id=SpaceID.GENERIC,\n target_space_id=SpaceID.GENERIC)\n\n\ndef _are_shapes_fully_defined(shapes_dict):\n for shape in shapes_dict.values():\n if not shape.is_fully_defined():\n return False\n return True\n\n\ndef _summarize_features(features, num_shards=1):\n with tf.name_scope(\"input_stats\"):\n for (k, v) in six.iteritems(features):\n if isinstance(v, tf.Tensor) and v.get_shape().ndims > 1:\n tf.summary.scalar(\"%s_batch\" % k, tf.shape(v)[0] // num_shards)\n tf.summary.scalar(\"%s_length\" % k, tf.shape(v)[1])\n nonpadding = tf.to_float(tf.not_equal(v, 0))\n nonpadding_tokens = tf.reduce_sum(nonpadding)\n tf.summary.scalar(\"%s_nonpadding_tokens\" % k, nonpadding_tokens)\n tf.summary.scalar(\"%s_nonpadding_fraction\" % k,\n tf.reduce_mean(nonpadding))\n\n\ndef standardize_shapes(features, batch_size=None):\n \"\"\"Set the right shapes for the features.\"\"\"\n\n for fname in [\"inputs\", \"targets\"]:\n if fname not in features:\n continue\n\n f = features[fname]\n while len(f.get_shape()) < 4:\n f = tf.expand_dims(f, axis=-1)\n\n features[fname] = f\n\n if batch_size:\n # Ensure batch size is set on all features\n for _, t in six.iteritems(features):\n shape = t.get_shape().as_list()\n shape[0] = batch_size\n t.set_shape(t.get_shape().merge_with(shape))\n # Assert shapes are fully known\n t.get_shape().assert_is_fully_defined()\n\n return features\n\n\ndef pad_batch(features, batch_multiple):\n \"\"\"Pad batch dim of features to nearest multiple of batch_multiple.\"\"\"\n feature = list(features.items())[0][1]\n batch_size = tf.shape(feature)[0]\n mod = batch_size % batch_multiple\n has_mod = tf.cast(tf.cast(mod, tf.bool), tf.int32)\n batch_padding = batch_multiple * has_mod - mod\n\n padded_features = {}\n for k, feature in features.items():\n rank = len(feature.shape)\n paddings = []\n for _ in range(rank):\n paddings.append([0, 0])\n paddings[0][1] = batch_padding\n padded_feature = tf.pad(feature, paddings)\n padded_features[k] = padded_feature\n return padded_features\n\n\ndef problem_hparams_to_features(problem_hparams):\n input_space_id, target_space_id = 0, 0\n if problem_hparams:\n input_space_id = problem_hparams.input_space_id\n target_space_id = problem_hparams.target_space_id\n return {\n \"input_space_id\": input_space_id,\n \"target_space_id\": target_space_id,\n }\n\n\ndef skip_random_fraction(dataset, data_file):\n # Skip a random fraction at the beginning of the stream. The skip is\n # essential for synchronous highly-parallel training to avoid multiple\n # replicas reading the same data in lock-step.\n num_skip = random.randint(0, _file_num_records_cached(data_file))\n return dataset.skip(num_skip)\n" ]
[ [ "tensorflow.data.TFRecordDataset", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.reshape", "tensorflow.estimator.export.ServingInputReceiver", "tensorflow.contrib.slim.parallel_reader.get_data_files", "tensorflow.logging.warning", "tensorflow.cast", "tensorflow.shape", "tensorflow.concat", "tensorflow.FixedLenFeature", "tensorflow.contrib.slim.tfexample_decoder.Tensor", "tensorflow.logging.info", "tensorflow.constant", "tensorflow.contrib.data.bucket_by_sequence_length", "tensorflow.pad", "tensorflow.contrib.training.HParams", "tensorflow.data.Dataset.from_tensors", "tensorflow.logging.warn", "tensorflow.python_io.tf_record_iterator", "tensorflow.expand_dims", "tensorflow.summary.scalar", "tensorflow.contrib.slim.tfexample_decoder.TFExampleDecoder", "tensorflow.placeholder", "tensorflow.contrib.data.get_single_element", "tensorflow.name_scope", "tensorflow.reduce_sum", "tensorflow.not_equal", "tensorflow.contrib.data.parallel_interleave", "tensorflow.VarLenFeature", "tensorflow.reduce_mean" ] ]
A1berttt/pytorch-original-transformer
[ "b8924f20f7e76bd15a3061193843a8b6f6205629" ]
[ "utils/data_utils.py" ]
[ "import time\nimport os\nimport enum\n\n\nimport torch\nfrom torchtext.legacy.data import Dataset, BucketIterator, Field, Example\nfrom torchtext.data.utils import interleave_keys\nfrom torchtext.legacy import datasets\nimport spacy\n\n\nfrom .constants import BOS_TOKEN, EOS_TOKEN, PAD_TOKEN, DATA_DIR_PATH\n\n\nclass DatasetType(enum.Enum):\n IWSLT = 0,\n WMT14 = 1\n\n\nclass LanguageDirection(enum.Enum):\n E2G = 0,\n G2E = 1\n\n\n#\n# Caching mechanism datasets and functions (you don't need this but it makes things a lot faster!)\n#\n\n\nclass FastTranslationDataset(Dataset):\n \"\"\"\n After understanding the source code of torch text's IWSLT, TranslationDataset and Dataset I realized how I\n can make data preparation much faster (tokenization was taking a lot of time and there is no need to redo it\n every time) by using a simple caching mechanism.\n\n This dataset leverages that caching mechanism which reduced loading time from ~70s -> 2.5s (massive!)\n\n \"\"\"\n\n @staticmethod\n def sort_key(ex):\n # What this does is basically it takes a 16-bit binary representation of lengths and interleaves them.\n # Example: lengths len(ex.src)=5 and len(ex.trg)=3 result in f(101, 011)=100111, 7 and 1 in f(111, 001)=101011\n # It's basically a heuristic that helps the BucketIterator sort bigger batches first\n return interleave_keys(len(ex.src), len(ex.trg))\n\n def __init__(self, cache_path, fields, **kwargs):\n # save_cache interleaves src and trg examples so here we read the cache file having that format in mind\n cached_data = [line.split() for line in open(cache_path, encoding='utf-8')]\n\n cached_data_src = cached_data[0::2] # Even lines contain source examples\n cached_data_trg = cached_data[1::2] # Odd lines contain target examples\n\n assert len(cached_data_src) == len(cached_data_trg), f'Source and target data should be of the same length.'\n\n examples = []\n src_dataset_total_number_of_tokens = 0\n trg_dataset_total_number_of_tokens = 0\n for src_tokenized_data, trg_tokenized_data in zip(cached_data_src, cached_data_trg):\n ex = Example()\n\n setattr(ex, 'src', src_tokenized_data)\n setattr(ex, 'trg', trg_tokenized_data)\n\n examples.append(ex)\n\n # Update the number of tokens\n src_dataset_total_number_of_tokens += len(src_tokenized_data)\n trg_dataset_total_number_of_tokens += len(trg_tokenized_data)\n\n # Print relevant information about the dataset (parsing the cache file name)\n filename_parts = os.path.split(cache_path)[1].split('_')\n src_language, trg_language = ('English', 'German') if filename_parts[0] == 'en' else ('German', 'English')\n dataset_name = 'IWSLT' if filename_parts[2] == 'iwslt' else 'WMT-14'\n dataset_type = 'train' if filename_parts[3] == 'train' else 'val'\n print(f'{dataset_type} dataset ({dataset_name}) has {src_dataset_total_number_of_tokens} tokens in the source language ({src_language}) corpus.')\n print(f'{dataset_type} dataset ({dataset_name}) has {trg_dataset_total_number_of_tokens} tokens in the target language ({trg_language}) corpus.')\n\n # Call the parent class Dataset's constructor\n super().__init__(examples, fields, **kwargs)\n\n\nclass DatasetWrapper(FastTranslationDataset):\n \"\"\"\n Just a wrapper around the FastTranslationDataset.\n\n \"\"\"\n\n @classmethod\n def get_train_and_val_datasets(cls, train_cache_path, val_cache_path, fields, **kwargs):\n\n train_dataset = cls(train_cache_path, fields, **kwargs)\n val_dataset = cls(val_cache_path, fields, **kwargs)\n\n return train_dataset, val_dataset\n\n\ndef save_cache(cache_path, 
dataset):\n with open(cache_path, 'w', encoding='utf-8') as cache_file:\n # Interleave source and target tokenized examples, source is on even lines, target is on odd lines\n for ex in dataset.examples:\n cache_file.write(' '.join(ex.src) + '\\n')\n cache_file.write(' '.join(ex.trg) + '\\n')\n\n\n#\n# End of caching mechanism utilities\n#\n\n\ndef get_datasets_and_vocabs(dataset_path, language_direction, use_iwslt=True, use_caching_mechanism=True):\n german_to_english = language_direction == LanguageDirection.G2E.name\n spacy_de = spacy.load('de_core_news_sm')\n spacy_en = spacy.load('en_core_web_sm')\n\n def tokenize_de(text):\n return [tok.text for tok in spacy_de.tokenizer(text)]\n\n def tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]\n\n # batch first set to true as my transformer is expecting that format (that's consistent with the format\n # used in computer vision), namely (B, C, H, W) -> batch size, number of channels, height and width\n src_tokenizer = tokenize_de if german_to_english else tokenize_en\n trg_tokenizer = tokenize_en if german_to_english else tokenize_de\n src_field_processor = Field(tokenize=src_tokenizer, pad_token=PAD_TOKEN, batch_first=True)\n trg_field_processor = Field(tokenize=trg_tokenizer, init_token=BOS_TOKEN, eos_token=EOS_TOKEN, pad_token=PAD_TOKEN, batch_first=True)\n\n fields = [('src', src_field_processor), ('trg', trg_field_processor)]\n MAX_LEN = 100 # filter out examples that have more than MAX_LEN tokens\n filter_pred = lambda x: len(x.src) <= MAX_LEN and len(x.trg) <= MAX_LEN\n\n # Only call once the splits function it is super slow as it constantly has to redo the tokenization\n prefix = 'de_en' if german_to_english else 'en_de'\n prefix += '_iwslt' if use_iwslt else '_wmt14'\n train_cache_path = os.path.join(dataset_path, f'{prefix}_train_cache.csv')\n val_cache_path = os.path.join(dataset_path, f'{prefix}_val_cache.csv')\n test_cache_path = os.path.join(dataset_path, f'{prefix}_test_cache.csv')\n\n # This simple caching mechanism gave me ~30x speedup on my machine! 
From ~70s -> ~2.5s!\n ts = time.time()\n if not use_caching_mechanism or not (os.path.exists(train_cache_path) and os.path.exists(val_cache_path)):\n # dataset objects have a list of examples where example is simply an empty Python Object that has\n # .src and .trg attributes which contain a tokenized list of strings (created by tokenize_en and tokenize_de).\n # It's that simple, we can consider our datasets as a table with 2 columns 'src' and 'trg'\n # each containing fields with tokenized strings from source and target languages\n src_ext = '.de' if german_to_english else '.en'\n trg_ext = '.en' if german_to_english else '.de'\n dataset_split_fn = datasets.IWSLT.splits if use_iwslt else datasets.WMT14.splits\n train_dataset, val_dataset, test_dataset = dataset_split_fn(\n exts=(src_ext, trg_ext),\n fields=fields,\n root=dataset_path,\n filter_pred=filter_pred\n )\n\n save_cache(train_cache_path, train_dataset)\n save_cache(val_cache_path, val_dataset)\n save_cache(test_cache_path, test_dataset)\n else:\n # it's actually better to load from cache as we'll get rid of '\\xa0', '\\xa0 ' and '\\x85' unicode characters\n # which we don't need and which SpaCy unfortunately includes as tokens.\n train_dataset, val_dataset = DatasetWrapper.get_train_and_val_datasets(\n train_cache_path,\n val_cache_path,\n fields,\n filter_pred=filter_pred\n )\n\n print(f'Time it took to prepare the data: {time.time() - ts:3f} seconds.')\n\n MIN_FREQ = 2\n # __getattr__ implementation in the base Dataset class enables us to call .src on Dataset objects even though\n # we only have a list of examples in the Dataset object and the example itself had .src attribute.\n # Implementation will yield examples and call .src/.trg attributes on them (and those contain tokenized lists)\n src_field_processor.build_vocab(train_dataset.src, min_freq=MIN_FREQ)\n trg_field_processor.build_vocab(train_dataset.trg, min_freq=MIN_FREQ)\n\n return train_dataset, val_dataset, src_field_processor, trg_field_processor\n\n\nglobal longest_src_sentence, longest_trg_sentence\n\n\ndef batch_size_fn(new_example, count, sofar):\n \"\"\"\n If we use this function in the BucketIterator the batch_size is no longer the number of examples/sentences\n in a batch but a number of tokens in a batch - which allows us to max out VRAM on a given GPU.\n\n Example: if we don't use this function and we set batch size to say 10 we will sometimes end up with\n a tensor of size (10, 100) because the longest sentence had a size of 100 tokens but other times we'll end\n up with a size of (10, 5) because the longest sentence had only 5 tokens!\n\n With this function what we do is we specify that source and target tensors can't go over a certain number\n of tokens like 1000. So usually either source or target tensors will contain around 1000 tokens and\n in worst case both will be really close to a 1000 tokens each. If that is still below max VRAM availabe on\n the system we're using the max potential of our GPU w.r.t. 
VRAM.\n\n Note: to understand this function you unfortunately would probably have to dig deeper into torch text's\n source code.\n\n \"\"\"\n global longest_src_sentence, longest_trg_sentence\n\n if count == 1:\n longest_src_sentence = 0\n longest_trg_sentence = 0\n\n longest_src_sentence = max(longest_src_sentence, len(new_example.src))\n # 2 because of start/end of sentence tokens (<s> and </s>)\n longest_trg_sentence = max(longest_trg_sentence, len(new_example.trg) + 2)\n\n num_of_tokens_in_src_tensor = count * longest_src_sentence\n num_of_tokens_in_trg_tensor = count * longest_trg_sentence\n\n return max(num_of_tokens_in_src_tensor, num_of_tokens_in_trg_tensor)\n\n\n# https://github.com/pytorch/text/issues/536#issuecomment-719945594 <- there is a \"bug\" in BucketIterator i.e. it's\n# description is misleading as it won't group examples of similar length unless you set sort_within_batch to True!\ndef get_data_loaders(dataset_path, language_direction, dataset_name, batch_size, device):\n train_dataset, val_dataset, src_field_processor, trg_field_processor = get_datasets_and_vocabs(dataset_path, language_direction, dataset_name == DatasetType.IWSLT.name)\n\n train_token_ids_loader, val_token_ids_loader = BucketIterator.splits(\n datasets=(train_dataset, val_dataset),\n batch_size=batch_size,\n device=device,\n sort_within_batch=True, # this part is really important otherwise we won't group similar length sentences\n batch_size_fn=batch_size_fn # this helps us max out GPU's VRAM\n )\n\n return train_token_ids_loader, val_token_ids_loader, src_field_processor, trg_field_processor\n\n\ndef get_masks_and_count_tokens_src(src_token_ids_batch, pad_token_id):\n batch_size = src_token_ids_batch.shape[0]\n\n # src_mask shape = (B, 1, 1, S) check out attention function in transformer_model.py where masks are applied\n # src_mask only masks pad tokens as we want to ignore their representations (no information in there...)\n src_mask = (src_token_ids_batch != pad_token_id).view(batch_size, 1, 1, -1)\n num_src_tokens = torch.sum(src_mask.long())\n\n return src_mask, num_src_tokens\n\n\ndef get_masks_and_count_tokens_trg(trg_token_ids_batch, pad_token_id):\n batch_size = trg_token_ids_batch.shape[0]\n device = trg_token_ids_batch.device\n\n # Same as src_mask but we additionally want to mask tokens from looking forward into the future tokens\n # Note: wherever the mask value is true we want to attend to that token, otherwise we mask (ignore) it.\n sequence_length = trg_token_ids_batch.shape[1] # trg_token_ids shape = (B, T) where T max trg token-sequence length\n trg_padding_mask = (trg_token_ids_batch != pad_token_id).view(batch_size, 1, 1, -1) # shape = (B, 1, 1, T)\n trg_no_look_forward_mask = torch.triu(torch.ones((1, 1, sequence_length, sequence_length), device=device) == 1).transpose(2, 3)\n\n # logic AND operation (both padding mask and no-look-forward must be true to attend to a certain target token)\n trg_mask = trg_padding_mask & trg_no_look_forward_mask # final shape = (B, 1, T, T)\n num_trg_tokens = torch.sum(trg_padding_mask.long())\n\n return trg_mask, num_trg_tokens\n\n\ndef get_masks_and_count_tokens(src_token_ids_batch, trg_token_ids_batch, pad_token_id, device):\n src_mask, num_src_tokens = get_masks_and_count_tokens_src(src_token_ids_batch, pad_token_id)\n trg_mask, num_trg_tokens = get_masks_and_count_tokens_trg(trg_token_ids_batch, pad_token_id)\n\n return src_mask, trg_mask, num_src_tokens, num_trg_tokens\n\n\ndef get_src_and_trg_batches(token_ids_batch):\n src_token_ids_batch, 
trg_token_ids_batch = token_ids_batch.src, token_ids_batch.trg\n\n # Target input should be shifted by 1 compared to the target output tokens\n # Example: if we had a sentence like: [<s>,what,is,up,</s>] then to train the NMT model what we do is we pass\n # [<s>,what,is,up] to the input as set [what,is,up,</s>] as the expected output.\n trg_token_ids_batch_input = trg_token_ids_batch[:, :-1]\n\n # We reshape from (B, S) into (BxS, 1) as that's the the shape expected by LabelSmoothing which will produce\n # the shape (BxS, V) where V is the target vocab size which is the same shape as the one that comes out\n # from the transformer so we can directly pass them into the KL divergence loss\n trg_token_ids_batch_gt = trg_token_ids_batch[:, 1:].reshape(-1, 1)\n\n return src_token_ids_batch, trg_token_ids_batch_input, trg_token_ids_batch_gt\n\n\n#\n# Everything below is for testing purposes only - feel free to ignore\n#\n\n\ndef sample_text_from_loader(src_field_processor, trg_field_processor, token_ids_loader, num_samples=2, sample_src=True, sample_trg=True, show_padded=False):\n assert sample_src or sample_trg, f'Either src or trg or both must be enabled.'\n\n for b_idx, token_ids_batch in enumerate(token_ids_loader):\n if b_idx == num_samples: # Number of sentence samples to print\n break\n\n print('*' * 5)\n if sample_src:\n print(\"Source text:\", end=\"\\t\")\n for token_id in token_ids_batch.src[0]: # print only the first example from the batch\n src_token = src_field_processor.vocab.itos[token_id]\n\n if src_token == PAD_TOKEN and not show_padded:\n continue\n\n print(src_token, end=\" \")\n print()\n\n if sample_trg:\n print(\"Target text:\", end=\"\\t\")\n for token_id in token_ids_batch.trg[0]:\n trg_token = trg_field_processor.vocab.itos[token_id]\n\n if trg_token == PAD_TOKEN and not show_padded:\n continue\n\n print(trg_token, end=\" \")\n print()\n\n\nif __name__ == \"__main__\":\n # To run this delete the dot from from .constants import - not the most elegant solution but it works\n # without me having to add sys.path stuff, if you have a more elegant solution please open an issue <3\n batch_size = 8\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n dataset_name = DatasetType.IWSLT.name\n language_direction = LanguageDirection.G2E.name\n train_token_ids_loader, val_token_ids_loader, src_field_processor, trg_field_processor = get_data_loaders(DATA_DIR_PATH, language_direction, dataset_name, batch_size, device)\n\n # Verify that the mask logic is correct\n pad_token_id = src_field_processor.vocab.stoi[PAD_TOKEN]\n for batch in train_token_ids_loader:\n # Visually inspect that masks make sense\n src_padding_mask, trg_mask, num_src_tokens, num_trg_tokens = get_masks_and_count_tokens(batch.src, batch.trg, pad_token_id, device)\n break\n\n # Check vocab size\n print(f'Source vocabulary size={len(src_field_processor.vocab)}')\n print(f'Target vocabulary size={len(trg_field_processor.vocab)}')\n\n # Show text from token loader\n sample_text_from_loader(src_field_processor, trg_field_processor, train_token_ids_loader)\n\n" ]
[ [ "torch.cuda.is_available", "torch.ones" ] ]
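The target mask logic above combines a padding mask with a causal (no-look-forward) mask. Below is a minimal standalone sketch of the same construction on a made-up toy batch; the pad id and token ids are illustrative, not taken from any dataset:

import torch

pad_token_id = 0
trg = torch.tensor([[5, 7, 9, 0],    # last position is padding
                    [4, 6, 0, 0]])   # last two positions are padding
B, T = trg.shape

padding_mask = (trg != pad_token_id).view(B, 1, 1, T)                      # shape (B, 1, 1, T)
no_look_forward = torch.triu(torch.ones(1, 1, T, T) == 1).transpose(2, 3)  # lower-triangular booleans
trg_mask = padding_mask & no_look_forward                                  # broadcasts to (B, 1, T, T)

print(trg_mask[0, 0].int())  # row i marks the positions token i is allowed to attend to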
MacHu-GWU/seedinvest_monitor-project
[ "d66b4ee474a0729a9ff4b3ffeed4bcc331296515" ]
[ "export/export.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport pandas as pd\nfrom superjson import json\nfrom seedinvest_monitor.model import Startup\n\njson_data = list()\ndf_data = list()\ndf_columns = set()\n\ncursor = Startup.scan(attributes_to_get=[\"id\", \"details\"])\nfor startup in cursor:\n details_data = startup.details.as_dict()\n json_data.append(details_data)\n for key in details_data:\n df_columns.add(key)\n df_data.append(details_data)\n\ndf_columns = list(df_columns)\ndf_columns.sort()\n\njson.dump(json_data, \"startups-data.json\", pretty=True, overwrite=True)\ndf = pd.DataFrame(df_data, columns=df_columns)\ndf.to_excel(\"startups-data.xlsx\", sheet_name=\"data\", index=False)\n" ]
[ [ "pandas.DataFrame" ] ]
HotMaps/customized_h_fa_dm
[ "bab2961d766f7885f30c1306b7c67adbf99d88e1" ]
[ "CM/CM_TUW9/shp2csv.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on July 6 2017\n\n@author: [email protected]\n\"\"\"\nimport os\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom osgeo import gdal\nfrom osgeo import ogr\nfrom osgeo import osr\nimport time\npath = os.path.dirname(os.path.dirname(os.path.dirname(os.path.\n abspath(__file__))))\nif path not in sys.path:\n sys.path.append(path)\n'''\n- The user can select a region which is potentially larger than his/her\nuploaded layer. An upper hand module should combine the uploaded layer with the\ndefault building layer and submit a shapefile covering the \"selected region by\nuser\" to this module.\n- This code expects that the default layer provides information regarding\nnumber of floors as well.\n- The demand of each building, if applicable, should be entered in kWh.\n- Column \"Type\" should be either \"0\" for residential or \"1\" for service sector\n- if the user no Type field for his/her own shapefile provides, all buildings\nwithin his/her shapefile will be considered as residential. For the regions\ncoming from OSM, the OSM usage will be attributed to the Type.\n- OSM building usage is also expected to be input as 0 and 1.\n- The output CSV file only includes the data which are relevant for the BUHDM.\n###############################################################################\nExplanation of indexing in this code:\nshape fields: [a b c d e f g h i j]\nfieldList : [j f i d]\nfIndex: [9 -1 5 8 -1 3 -1 -1 -1 -1]\nnewFieldList [0 2 3 5]\n###############################################################################\nUnits of input/output data:\n GFA: [m2]\n demand: [kWh/a]\n spec_demand: [kWh/m2]\n'''\n\n\ndef indexing(UsefulDemandRaster, X, Y):\n UsefulDemandDataSource = gdal.Open(UsefulDemandRaster)\n transform = UsefulDemandDataSource.GetGeoTransform()\n x0 = transform[0]\n y0 = transform[3]\n resolution = transform[1]\n xIndex = np.floor((X-x0)/resolution).astype(int)\n yIndex = np.floor((y0-Y)/resolution).astype(int)\n band1 = UsefulDemandDataSource.GetRasterBand(1)\n arrUsefulDemand = band1.ReadAsArray()\n # find the indices which are out of range of the raster\n h, w = arrUsefulDemand.shape\n l = yIndex.size\n # define specific demand array with the same length as l and fill it with\n # NaN\n spec_demand = np.empty(l)\n outRangeY = np.concatenate((np.argwhere(yIndex < 0),\n np.argwhere(yIndex >= h)), axis=0)\n outRangeX = np.concatenate((np.argwhere(xIndex < 0),\n np.argwhere(xIndex >= w)), axis=0)\n outRange = np.union1d(outRangeY, outRangeX)\n IndexInRange = np.setdiff1d(np.arange(l), outRange)\n # fill elements which are in range\n spec_demand[IndexInRange] = arrUsefulDemand[yIndex[IndexInRange],\n xIndex[IndexInRange]]\n UsefulDemandDataSource = None\n return spec_demand\n\n\ndef shp2csv(inShapefile, UsefulDemandRaster, outCSV, epsg=3035):\n # Get the input layer\n driver = ogr.GetDriverByName('ESRI Shapefile')\n inDataSet = driver.Open(inShapefile)\n inLayer = inDataSet.GetLayer()\n # Get projection from input Layer\n inSpatialRef = inLayer.GetSpatialRef()\n # Desired projection is EPSG3035\n outSpatialRef = osr.SpatialReference()\n outSpatialRef.ImportFromEPSG(epsg)\n # Compare projection of input layer and the desired projection and\n # create the coordinate transformation parameter\n flag = False\n if inSpatialRef != outSpatialRef:\n flag = True\n coordTrans = osr.CoordinateTransformation(inSpatialRef, outSpatialRef)\n inLayerDefn = inLayer.GetLayerDefn()\n feat_count = inLayer.GetFeatureCount()\n # List of the fields which 
are expected to be seen in the input layer.\n # update the comment in \"update_building_layer\" regarding input csv\n fieldList = np.array(['hotmaps_ID', 'inputLyr_ID', 'Type',\n 'Year_Construction', 'Address', 'Footprint',\n 'NrFloor', 'GFA', 'spec_demand', 'demand', 'X_3035',\n 'Y_3035'], dtype=str)\n fieldListSize = fieldList.size\n # Determine the location of parameters in the above field list\n demIndex = int(np.argwhere(fieldList == 'demand'))\n typIndex = int(np.argwhere(fieldList == 'Type'))\n xIndex = int(np.argwhere(fieldList == 'X_3035'))\n yIndex = int(np.argwhere(fieldList == 'Y_3035'))\n FootprintIndex = int(np.argwhere(fieldList == 'Footprint'))\n NrFloorIndex = int(np.argwhere(fieldList == 'NrFloor'))\n GFAIndex = int(np.argwhere(fieldList == 'GFA'))\n spec_demandIndex = int(np.argwhere(fieldList == 'spec_demand'))\n # Initialize the field values with \"NaN\". This is important to distinguish\n # between 0 and unavailable data\n fieldvalues = np.nan * np.empty((feat_count, fieldListSize))\n fieldvalues[:, typIndex] = 0\n usefulDemand = np.zeros((feat_count, len(UsefulDemandRaster)))\n # 1 is assigned to Nr. of floors. If no data for that would be available,\n # only the net floor area will be considered for the calculation.\n fieldvalues[:, NrFloorIndex] = 1\n # Attribute -1 to all field indices and update them if they exist in the\n # input layer.\n fIndex = -1 * np.ones(fieldListSize)\n for i in range(inLayerDefn.GetFieldCount()):\n temp1 = inLayerDefn.GetFieldDefn(i).GetName()\n if temp1 in fieldList:\n fIndex[np.argwhere(fieldList == temp1)] = i\n # Update the field list with those which also exist in the input layer\n newFieldList = np.argwhere(fIndex != -1)\n # loop through the input features\n inFeature = inLayer.GetNextFeature()\n while inFeature:\n fid = inFeature.GetFID()\n fieldvalues[fid, 0] = fid\n # get the input geometry\n geom = inFeature.GetGeometryRef()\n if flag:\n # change projection of the geometry\n geom.Transform(coordTrans)\n fieldvalues[fid, xIndex] = geom.Centroid().GetX()\n fieldvalues[fid, yIndex] = geom.Centroid().GetY()\n for item in newFieldList:\n fieldvalues[fid, item] = inFeature.GetField(int(fIndex[item]))\n '''\n Footprint should be assigned after above for-loop in order to prevent\n overwriting of Footprint corresponding to those attributes that coming\n from OSM and not from user inputs --> case large selected area with\n small input data provided by user.\n Footprint may exist in the input shapefile for some attributes;\n however, recalculation of it does not cause deviation since basically\n it is should be similar to the input\n '''\n if geom.GetGeometryName() == 'POINT':\n fieldvalues[fid, FootprintIndex] = 0\n else:\n fieldvalues[fid, FootprintIndex] = geom.GetArea()\n inFeature = inLayer.GetNextFeature()\n if 'GFA' not in fieldList[newFieldList]:\n fieldvalues[:, GFAIndex] = fieldvalues[:, FootprintIndex] * \\\n fieldvalues[:, NrFloorIndex]\n else:\n # Assign a value to GFA for the attributes that have no entries\n k = np.argwhere(np.isnan(fieldvalues[:, GFAIndex]) +\n fieldvalues[:, GFAIndex] == 0)\n noGFA = k[::2]\n fieldvalues[noGFA, GFAIndex] = fieldvalues[noGFA, FootprintIndex] * \\\n fieldvalues[noGFA, NrFloorIndex]\n for i, raster in enumerate(UsefulDemandRaster):\n usefulDemand[:, i] = indexing(raster, fieldvalues[:, xIndex],\n fieldvalues[:, yIndex])\n if 'demand' not in fieldList[newFieldList]:\n if 'spec_demand' not in fieldList[newFieldList]:\n if 'Type' not in fieldList[newFieldList]:\n fieldvalues[:, 
spec_demandIndex] = usefulDemand[:, 0]\n else:\n '''\n it is possible that some attributes have no value for Type\n (unknown buildings). The assumption is that all unknown\n buildings will be considered as residential building. So, in\n query for residential attributes, \"!= 1\" is used.\n '''\n res = np.argwhere(fieldvalues[:, typIndex] != 1)\n serv = np.argwhere(fieldvalues[:, typIndex] == 1)\n fieldvalues[res, spec_demandIndex] = usefulDemand[res, 0]\n fieldvalues[serv, spec_demandIndex] = usefulDemand[serv, 1]\n spec_demand = fieldvalues[:, spec_demandIndex]\n GFA = fieldvalues[:, GFAIndex]\n fieldvalues[:, demIndex] = spec_demand * GFA\n '''\n this part of the code is implemented to cover the situation in which user\n has selected a region and within a smaller part of that region, he has\n provided a set of data. Therefore, the entries coming from OSM, do not\n have demand data. in this case, the standard country demand value will be\n attributed.\n '''\n k = np.argwhere(np.isnan(fieldvalues[:, demIndex]))\n if k:\n noDemandRows = k[::2]\n res = np.argwhere(fieldvalues[noDemandRows, typIndex] != 1)\n serv = np.argwhere(fieldvalues[noDemandRows, typIndex] == 1)\n fieldvalues[res, spec_demandIndex] = usefulDemand[res, 0]\n fieldvalues[serv, spec_demandIndex] = usefulDemand[serv, 1]\n GFA_noData = fieldvalues[noDemandRows, GFAIndex]\n spec_demand_noData = fieldvalues[noDemandRows, spec_demandIndex]\n fieldvalues[noDemandRows, demIndex] = spec_demand_noData * GFA_noData\n df = pd.DataFrame(fieldvalues, columns=fieldList,\n index=np.arange(feat_count))\n df = df[fieldList]\n df = df.sort_values([\"hotmaps_ID\"])\n df.to_csv(outCSV)\n inDataSet = None\n df = None\n" ]
[ [ "numpy.array", "numpy.isnan", "numpy.empty", "numpy.union1d", "numpy.ones", "numpy.arange", "numpy.argwhere", "numpy.floor" ] ]
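A hypothetical call of the shp2csv() function above. All paths are placeholders and the import path is assumed from the file location; note that the order of the raster list matters: index 0 is sampled for residential buildings (Type != 1) and index 1 for service-sector buildings (Type == 1):

from CM.CM_TUW9.shp2csv import shp2csv  # import path assumed from the file location above

buildings_shp = 'input/buildings_with_osm.shp'    # placeholder shapefile covering the selected region
demand_rasters = ['input/spec_demand_res.tif',    # placeholder residential specific-demand raster
                  'input/spec_demand_ser.tif']    # placeholder service-sector specific-demand raster
shp2csv(buildings_shp, demand_rasters, 'output/buildings.csv', epsg=3035)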
EshwarCVS/Machine_Learning_A-Z
[ "d6d186c2fde990f15c08706244dd6e91a8854038" ]
[ "Part 5 - Association Rule Learning/Section 28 - Apriori/apriori.py" ]
[ "# Apriori\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Data Preprocessing\ndataset = pd.read_csv('Market_Basket_Optimisation.csv', header = None)\ntransactions = []\nfor i in range(0, 7501):\n transactions.append([str(dataset.values[i,j]) for j in range(0, 20)])\n\n# Training Apriori on the dataset\nfrom apyori import apriori\nrules = apriori(transactions, min_support = 0.003, min_confidence = 0.2, min_lift = 3, min_length = 2)\n\n# Visualising the results\nresults = list(rules)\nresults_list = []\nfor i in range(0, len(results)):\n results_list.append('RULE:\\t' + str(results[i][0]) + '\\nSUPPORT:\\t' + str(results[i][1]))" ]
[ [ "pandas.read_csv" ] ]
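The results_list built above only records item sets and support. A short continuation that also reports confidence and lift, assuming apyori's documented RelationRecord/OrderedStatistic fields (items_base, items_add, confidence, lift):

# Continues from the `results` list built above.
for record in results:
    for stat in record.ordered_statistics:
        print('{} -> {} | support={:.4f}, confidence={:.3f}, lift={:.3f}'.format(
            set(stat.items_base), set(stat.items_add),
            record.support, stat.confidence, stat.lift))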
aliwimo/breast_cancer_detect
[ "9f474043f62939b7ab53ca1459c85fa06914e1fe" ]
[ "log_reg.py" ]
[ "import numpy as np\r\n\r\nclass Logistic_Regression:\r\n def __init__(self):\r\n self.w = []\r\n\r\n # sigmoid function\r\n def sigmoid(self, z):\r\n return 1 / (1 + np.exp(-z))\r\n\r\n # logistic function\r\n def hx(self, w, X):\r\n weights_num = X.shape[1] + 1\r\n z_arrays = [1] * weights_num\r\n z_arrays[0] = w[0]\r\n for i in range(weights_num - 1):\r\n z_arrays[i + 1] = w[i + 1] * np.array(X[:, i])\r\n z = np.array(sum(z_arrays[j] for j in range(weights_num)))\r\n return self.sigmoid(z)\r\n\r\n # cost function - binary cross entropy\r\n def cost(self, w, X, Y):\r\n y_pred = self.hx(w, X)\r\n return -1 * sum(Y * np.log(y_pred) + (1 - Y) * np.log(1 - y_pred))\r\n\r\n # gradient descent\r\n def grad_descent(self, w, X, Y):\r\n y_pred = self.hx(w, X)\r\n weights_num = X.shape[1] + 1\r\n grad = [0] * weights_num\r\n grad[0] = -1 * sum(Y * (1 - y_pred) - (1 - Y) * y_pred)\r\n for i in range(weights_num - 1):\r\n grad[i + 1] = -1 * sum(Y * (1 - y_pred) * X[:, i] - (1 - Y) * y_pred * X[:, i])\r\n return grad\r\n\r\n def fit(self, X, Y, w, lr, max_iter):\r\n weights_num = X.shape[1] + 1\r\n iter = 0\r\n while iter < max_iter:\r\n w_prev = w.copy()\r\n grad = self.grad_descent(w_prev, X, Y)\r\n for index in range(weights_num):\r\n w[index] = w[index] - lr * grad[index]\r\n iter += 1\r\n self.w = w.copy()\r\n\r\n def predict(self, X):\r\n predictions = self.hx(self.w, X)\r\n return np.around(predictions).astype(int)\r\n" ]
[ [ "numpy.around", "numpy.array", "numpy.exp", "numpy.log" ] ]
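A minimal usage sketch for the Logistic_Regression class above, on made-up data; the import assumes log_reg.py is on the Python path:

import numpy as np
from log_reg import Logistic_Regression

X = np.array([[0.2, 1.1], [0.4, 0.9], [1.6, 0.2], [1.9, 0.1]])  # (n_samples, n_features)
Y = np.array([0, 0, 1, 1])

model = Logistic_Regression()
w0 = [0.0] * (X.shape[1] + 1)       # intercept weight plus one weight per feature
model.fit(X, Y, w0, lr=0.1, max_iter=1000)
print(model.predict(X))             # predicted class labels, e.g. [0 0 1 1]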
stalhabukhari/pytorch-classification
[ "bef373c62c247633514f0bb93bad6d9260b53589" ]
[ "utils/misc.py" ]
[ "'''Some helper functions for PyTorch, including:\n - get_mean_and_std: calculate the mean and std value of dataset.\n - msr_init: net parameter initialization.\n - progress_bar: progress bar mimic xlua.progress.\n'''\nimport errno\nimport os\nimport sys\nimport time\nimport math, csv\n\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom torch.autograd import Variable\n\n__all__ = ['get_mean_and_std', 'init_params', 'mkdir_p', 'AverageMeter', 'CSVLogger']\n\n\ndef get_mean_and_std(dataset):\n '''Compute the mean and std value of dataset.'''\n dataloader = trainloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)\n\n mean = torch.zeros(3)\n std = torch.zeros(3)\n print('==> Computing mean and std..')\n for inputs, targets in dataloader:\n for i in range(3):\n mean[i] += inputs[:,i,:,:].mean()\n std[i] += inputs[:,i,:,:].std()\n mean.div_(len(dataset))\n std.div_(len(dataset))\n return mean, std\n\ndef init_params(net):\n '''Init layer parameters.'''\n for m in net.modules():\n if isinstance(m, nn.Conv2d):\n init.kaiming_normal(m.weight, mode='fan_out')\n if m.bias:\n init.constant(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant(m.weight, 1)\n init.constant(m.bias, 0)\n elif isinstance(m, nn.Linear):\n init.normal(m.weight, std=1e-3)\n if m.bias:\n init.constant(m.bias, 0)\n\ndef mkdir_p(path):\n '''make dir if not exist'''\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\n Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262\n \"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\nclass CSVLogger(object):\n def __init__(self, path, fields=None):\n if fields is None:\n fields = ['Arch', 'Dataset', 'CheckpointPath', 'LoadModel',\n 'Loss', 'Top1Acc', 'Top5Acc']\n self.fields = fields\n if not os.path.isfile(path):\n self.write_to_csv(None, path, write_header=True)\n self.path = path\n\n def __call__(self, data_row):\n self.check_keys(data_row, self.fields)\n self.write_to_csv(data_row, self.path)\n\n def write_to_csv(self, data_row, file_name, write_header=False):\n write_mode = 'w' if write_header else 'a'\n with open(file_name, mode=write_mode, newline='') as file:\n file_writer = csv.DictWriter(file, fieldnames=self.fields)\n if write_header:\n file_writer.writeheader()\n else:\n file_writer.writerow(data_row)\n\n @staticmethod\n def check_keys(dc_in, l_ref):\n \"\"\"\"\"\"\n lin, lref = list(dc_in.keys()), list(l_ref)\n lin.sort()\n lref.sort()\n assert lin == lref\n" ]
[ [ "torch.nn.init.normal", "torch.nn.init.kaiming_normal", "torch.nn.init.constant" ] ]
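A small usage sketch for the CSVLogger helper above; the file name, field names and values are illustrative, and the import assumes the repo's utils package layout:

from utils.misc import CSVLogger  # module path as in the repo layout shown above

logger = CSVLogger('eval_log.csv', fields=['Arch', 'Dataset', 'Top1Acc'])
logger({'Arch': 'resnet20', 'Dataset': 'cifar10', 'Top1Acc': 91.7})
# Each call appends one row; the header is only written when the file is first created,
# and the keys of the row dict must match the configured fields exactly.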
humeniuka/sGDML_dataset_generation
[ "a99f792b6aac7ff869ebcd1bd7a7226ca81f43ee" ]
[ "sgdml_dataset_generation/readers/qchem.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# # Imports\nimport numpy as np\nfrom collections import OrderedDict\nimport logging\n\n# # Logging\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(format=\"[%(module)-12s] %(message)s\", level=logging.INFO)\n\nfrom sgdml_dataset_generation import units\n\nclass QChemOutputFile(object):\n \"\"\"\n parse output file produced by the quantum chemistry program QChem.\n\n Parameters\n ----------\n f : File\n file handle opened for reading.\n The user has to ensure the file handle is opened and closed at the end.\n\n The data records can be accessed by their names (see example below).\n\n Example\n -------\n\n >>> with open(\"qchem.out\") as f:\n >>> qchem = QChemOutputFile(f)\n >>> # show names of all records\n >>> print(qchem.keys())\n >>> # element names\n >>> print(qchem['symbols'])\n\n \"\"\"\n def __init__(self, f):\n self.filename = f.name\n self.data = OrderedDict()\n self.data['ok'] = False\n self.data['gradients (au)'] = {}\n self.data['derivative couplings (au)'] = {}\n \n for line in f:\n parts = line.split()\n\n if \"Standard Nuclear Orientation (Angstroms)\" in line:\n f.readline()\n f.readline()\n symbols = []\n geometry = []\n while True:\n line = f.readline()\n if \"-----\" in line:\n break\n parts = line.split()\n symbols.append(parts[1])\n pos_xyz = list(map(float, parts[2:5]))\n geometry.append(pos_xyz)\n geometry = np.array(geometry)\n self._getset('symbols', symbols)\n self._getset('geometry (au)', geometry / units.bohr_to_angs)\n\n elif \"Total energy in the final basis set\" in line:\n parts = line.split()\n self._getset('scf energy', float(parts[8]))\n # set ground state energy\n energies = self._getset('energies (au)', {})\n energies[0] = self['scf energy']\n \n elif \"TDDFT Excitation Energies\" in line:\n exc_energies = self._getset('excitation energies (eV)', {})\n osc_strengths = self._getset('oscillator strengths', {})\n f.readline()\n f.readline()\n while True:\n line = f.readline()\n parts = line.split()\n if \"-----\" in line:\n break\n if \"Excited state\" in line:\n state = int(parts[2].replace(\":\", \"\"))\n exc_energies[state] = float(parts[7])\n elif \"Strength\" in line:\n osc_strengths[state] = float(parts[2])\n # total energies of all states\n energies = self._getset('energies (au)', {})\n for state, en_ex in self['excitation energies (eV)'].items():\n energies[state] = energies[0] + en_ex / units.hartree_to_ev\n elif \"between states \" in line:\n derivative_couplings = self._getset('derivative couplings (au)', {})\n parts = line.split()\n I, J = int(parts[2]), int(parts[4])\n while True:\n line = f.readline()\n if \"DC between\" in line and \"with ETF\" in line:\n break\n for i in range(2):\n f.readline()\n nacv = []\n while True:\n line = f.readline()\n parts = line.split()\n if \"-----\" in line:\n break\n nacv_i = list(map(float, parts[1:4]))\n nacv.append(nacv_i)\n nacv = np.array(nacv)\n \n derivative_couplings[(I,J)] = nacv\n\n elif \"Gradient of SCF Energy\" in line:\n grad_scf = []\n while True:\n line = f.readline()\n if \"Max gradient\" in line:\n break\n parts = line.split()\n atom_indices = list(map(lambda p: int(p)-1, parts))\n grad_xyz = []\n for xyz in [0,1,2]:\n line = f.readline()\n parts = line.split()\n dEdx = list(map(lambda p: float(p), parts[1:]))\n grad_xyz.append( dEdx )\n grad_xyz = list(np.array(grad_xyz).transpose())\n grad_scf += grad_xyz\n grad_scf = np.array(grad_scf)\n gradients = self._getset('gradients (au)', {})\n gradients[0] = grad_scf\n\n elif \"RPA\" 
in line and \"State Energy is\" in line:\n state = int(line.split()[1])\n \n elif \"Gradient of the state energy\" in line:\n grad_ex = []\n while True:\n line = f.readline()\n if \"Gradient time\" in line:\n break\n parts = line.split()\n atom_indices = list(map(lambda p: int(p)-1, parts))\n grad_xyz = []\n for xyz in [0,1,2]:\n line = f.readline()\n parts = line.split()\n dEdx = list(map(lambda p: float(p), parts[1:]))\n grad_xyz.append( dEdx )\n grad_xyz = list(np.array(grad_xyz).transpose())\n grad_ex += grad_xyz\n grad_ex = np.array(grad_ex)\n gradients = self._getset('gradients (au)', {})\n gradients[state] = grad_ex\n\n elif \"Thank you very much for using Q-Chem.\" in line:\n self.data['ok'] = True\n \n def _getset(self, key, default):\n item = self.data.get(key, default)\n self.data[key] = item\n return item\n\n def __getitem__(self, key):\n \"\"\"\n access data fields by their names\n\n Parameters\n ----------\n key : str\n name of field that should be retrieved (e.g. 'gradients (au)')\n\n Returns\n -------\n field : float, int or ndarray\n a KeyError is raised if the field is not present\n \"\"\"\n return self.data[key]\n def keys(self):\n \"\"\"\n list names of all fields read\n\n Returns\n -------\n keys : list of str\n field names\n \"\"\"\n return self.data.keys()\n\n" ]
[ [ "numpy.array" ] ]
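A usage sketch for the QChemOutputFile parser above; 'qchem.out' is a placeholder file name and the import path follows the file location shown above:

from sgdml_dataset_generation.readers.qchem import QChemOutputFile

with open('qchem.out') as f:
    out = QChemOutputFile(f)

if out['ok']:                               # True only when Q-Chem terminated normally
    print(out['symbols'])                   # element symbols
    print(out['energies (au)'])             # {state: total energy}, state 0 = ground state
    grad_gs = out['gradients (au)'].get(0)  # ground-state gradient array, if present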
HimangiM/Self-Supervised-Scene-Flow-Estimation
[ "0a3350843de1ed769e69c3be17eb70db32ca6881" ]
[ "src/model_concat_upsa_cycle.py" ]
[ "\nimport tensorflow as tf\nimport numpy as np\nimport math\nimport sys\nimport os\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.join(BASE_DIR, 'utils'))\nimport utils.tf_util\nfrom utils.pointnet_util import *\nfrom tf_grouping import query_ball_point, group_point, knn_point\n\n\ndef placeholder_inputs(batch_size, num_point, num_frames=3):\n # change here, num_point*2 -> numpoint*5\n pointclouds_pl = tf.placeholder(tf.float32,\n shape=(batch_size, num_point * num_frames, 6))\n # labels_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))\n # masks_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point))\n # return pointclouds_pl, labels_pl, masks_pl\n return pointclouds_pl\n\n\ndef get_model(radius, layer, point_cloud, is_training, bn_decay=None, knn=False,\n flow_module='default', num_frames=2, stop_gradient=False,\n rigidity=False, rigidity_radius=0.5, rigidity_nsample=4, rgb=False):\n\n num_point = point_cloud.get_shape()[1].value // num_frames\n\n pred_flow, end_points = get_model_flow(radius, layer, point_cloud, is_training,\n bn_decay=None, knn=False,\n flow_module='default')\n\n pred_f = pred_flow + point_cloud[:, :num_point, :3] # flow + p1 = pred_f => pc2_hat\n\n _, idx = knn_point(1, point_cloud[:, num_point:num_point*2, :3],\n pred_f)\n\n\n grouped_xyz = group_point(point_cloud[:, num_point:num_point*2, :3], idx)\n\n\n grouped_xyz = tf.squeeze(grouped_xyz, axis=2) # grouped_xyz => pc2nn\n end_points_f = {\n 'idx': idx,\n 'pred_flow': pred_flow,\n 'pc2': point_cloud[:, num_point:num_point*2, :3]\n }\n\n if rigidity:\n pc1 = point_cloud[:, :2048, :3]\n rigid_idx, _ = query_ball_point(rigidity_radius, rigidity_nsample, pc1,\n pc1)\n rigid_grouped_flow = group_point(pred_flow, rigid_idx)\n end_points_f['rigid_group_flow'] = rigid_grouped_flow\n end_points_f['rigid_pc1_flow'] = pred_flow\n\n if rgb:\n pred_f_rgb, dist_f, grouped_xyz_rgb_f = get_interpolated_rgb(pred_f, point_cloud[:, num_point:])\n end_points_f['pred_f_rgb'] = pred_f_rgb\n end_points_f['dist_f'] = dist_f\n end_points_f['grouped_xyz_rgb_f'] = grouped_xyz_rgb_f\n\n\n # changes from here\n if stop_gradient:\n pred_f_copy = tf.Variable(0, dtype=pred_f.dtype, trainable=False, collections=[])\n pred_f_copy = tf.assign(pred_f_copy, pred_f, validate_shape=False)\n else:\n pred_f_copy = pred_f\n\n with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n\n pred_fb_xyz = (pred_f_copy + grouped_xyz) / 2\n\n pred_fb = tf.concat([pred_fb_xyz, point_cloud[:, :num_point, 3:]], axis = 2)\n\n # num_point = pred_f (predicted point cloud 2), num_point:num_point*2 = point cloud 1\n point_cloud_back = tf.concat([pred_fb, point_cloud[:, :num_point]], axis = 1)\n\n # import ipdb; ipdb.set_trace()\n\n pred_flow_back, end_points = get_model_flow(radius, layer, point_cloud_back, is_training,\n bn_decay=None, knn=False,\n flow_module='default')\n\n pred_b = pred_flow_back + pred_fb_xyz\n\n end_points_b = {\n 'pred_flow_b': pred_flow_back,\n }\n\n if rgb:\n pred_b_rgb, dist_b, grouped_xyz_rgb_b = get_interpolated_rgb(pred_b, point_cloud[:, :num_point])\n end_points_f['pred_b_rgb'] = pred_b_rgb\n end_points_f['dist_b'] = dist_b\n end_points_f['grouped_xyz_rgb_b'] = grouped_xyz_rgb_b\n\n return pred_f, pred_b, grouped_xyz, end_points_f, end_points_b\n\ndef get_model_flow(radius, layer, point_cloud, is_training, bn_decay=None, knn=False, flow_module='default'):\n \"\"\" Semantic segmentation PointNet, input is BxNx3, output Bxnum_class \"\"\"\n\n end_points = {}\n batch_size = 
point_cloud.get_shape()[0].value # batch_size = 16\n num_point = point_cloud.get_shape()[1].value // 2\n # change here, num_point hard coded to 2048\n # num_point = 2048\n\n l0_xyz_f1 = point_cloud[:, :num_point, 0:3]\n l0_points_f1 = point_cloud[:, :num_point, 3:]\n l0_xyz_f2 = point_cloud[:, num_point:, 0:3]\n l0_points_f2 = point_cloud[:, num_point:, 3:]\n\n RADIUS1 = 0.5\n RADIUS2 = 1.0\n RADIUS3 = 2.0\n RADIUS4 = 4.0\n\n with tf.variable_scope('sa1') as scope:\n # radius, npoints, nlayers, mlp size, sampling technique\n # Set conv layers, POINT FEATURE LEARNING\n # Frame 1, Layer 1 (with radius = 0.5)\n l1_xyz_f1, l1_points_f1, l1_indices_f1 = pointnet_sa_module(l0_xyz_f1,\n l0_points_f1,\n npoint=1024,\n radius=RADIUS1,\n nsample=16,\n mlp=[32, 32,\n 64],\n mlp2=None,\n group_all=False,\n is_training=is_training,\n bn_decay=bn_decay,\n scope='layer1',\n knn=knn)\n end_points['l1_indices_f1'] = l1_indices_f1\n end_points['l1_xyz_f1'] = l1_points_f1\n end_points['l1_input_f1'] = l0_xyz_f1\n\n # Frame 1, Layer 2 (with radius = 1.0), Inputs are the above function's output\n l2_xyz_f1, l2_points_f1, l2_indices_f1 = pointnet_sa_module(l1_xyz_f1,\n l1_points_f1,\n npoint=256,\n radius=RADIUS2,\n nsample=16,\n mlp=[64, 64,\n 128],\n mlp2=None,\n group_all=False,\n is_training=is_training,\n bn_decay=bn_decay,\n scope='layer2',\n knn=knn)\n end_points['l2_indices_f1'] = l2_indices_f1\n end_points['l2_xyz_f1'] = l2_points_f1\n end_points['l2_input_f1'] = l1_xyz_f1\n\n scope.reuse_variables()\n # Frame 2, Layer 1 (with radius = 0.5)\n l1_xyz_f2, l1_points_f2, l1_indices_f2 = pointnet_sa_module(l0_xyz_f2,\n l0_points_f2,\n npoint=1024,\n radius=RADIUS1,\n nsample=16,\n mlp=[32, 32,\n 64],\n mlp2=None,\n group_all=False,\n is_training=is_training,\n bn_decay=bn_decay,\n scope='layer1',\n knn=knn)\n end_points['l1_points_f2'] = l1_points_f2\n end_points['l1_xyz_f2'] = l1_indices_f2\n end_points['l1_input_f2'] = l0_xyz_f2\n # Tensor(\"sa1/layer1_1/GatherPoint:0\", shape=(16, 1024, 3), dtype=float32, device= / device: GPU:0)\n # Tensor(\"sa1/layer1_1/Squeeze:0\", shape=(16, 1024, 64), dtype=float32, device= / device: GPU:0)\n # Tensor(\"sa1/layer1_1/QueryBallPoint:0\", shape=(16, 1024, 16), dtype=int32, device= / device: GPU:0)\n\n\n # Frame 2, Layer 2(with radius = 1.0), input are of the above function's output\n l2_xyz_f2, l2_points_f2, l2_indices_f2 = pointnet_sa_module(l1_xyz_f2,\n l1_points_f2,\n npoint=256,\n radius=RADIUS2,\n nsample=16,\n mlp=[64, 64,\n 128],\n mlp2=None,\n group_all=False,\n is_training=is_training,\n bn_decay=bn_decay,\n scope='layer2',\n knn=knn)\n end_points['l2_points_f2'] = l2_points_f2\n end_points['l2_xyz_f2'] = l2_indices_f2\n end_points['l2_input_f2'] = l1_xyz_f2\n\n\n # Tensor(\"sa1/layer2_1/GatherPoint:0\", shape=(16, 256, 3), dtype=float32, device= / device: GPU:0)\n # Tensor(\"sa1/layer2_1/Squeeze:0\", shape=(16, 256, 128), dtype=float32, device= / device: GPU:0)\n # Tensor(\"sa1/layer2_1/QueryBallPoint:0\", shape=(16, 256, 16), dtype=int32, device= / device: GPU:0)\n\n # POINT MIXTURE\n # embedding layer\n # radius = 1, 10, 50\n if flow_module == 'default':\n _, l2_points_f1_new = flow_embedding_module(l2_xyz_f1, l2_xyz_f2,\n l2_points_f1, l2_points_f2,\n radius=radius, nsample=64,\n mlp=[128, 128, 128],\n is_training=is_training,\n bn_decay=bn_decay,\n scope='flow_embedding', bn=True,\n pooling='max', knn=True,\n corr_func='concat')\n end_points['l2_points_f1_new'] = l2_points_f1_new\n elif flow_module == 'all':\n _, l2_points_f1_new = 
flow_embedding_module_all(l2_xyz_f1, l2_xyz_f2,\n l2_points_f1, l2_points_f2,\n radius=radius, nsample=256,\n mlp=[128, 128, 128],\n is_training=is_training,\n bn_decay=bn_decay,\n scope='flow_embedding', bn=True,\n pooling='max', knn=True,\n corr_func='concat')\n end_points['l2_points_f1_new'] = l2_points_f1_new\n\n # setconv layer\n # Layer 3 with radius = 2.0\n l3_xyz_f1, l3_points_f1, l3_indices_f1 = pointnet_sa_module(l2_xyz_f1,\n l2_points_f1_new,\n npoint=64,\n radius=RADIUS3,\n nsample=8,\n mlp=[128, 128,\n 256],\n mlp2=None,\n group_all=False,\n is_training=is_training,\n bn_decay=bn_decay,\n scope='layer3')\n end_points['l3_indices_f1'] = l3_indices_f1\n end_points['l3_xyz_f1'] = l3_points_f1\n # Tensor(\"layer3/GatherPoint:0\", shape=(16, 64, 3), dtype=float32, device=/device:GPU:0)\n # Tensor(\"layer3/Squeeze:0\", shape=(16, 64, 256), dtype=float32, device=/device:GPU:0)\n # Tensor(\"layer3/QueryBallPoint:0\", shape=(16, 64, 8), dtype=int32, device=/device:GPU:0)\n\n # Layer 4 with radius = 4.0\n l4_xyz_f1, l4_points_f1, l4_indices_f1 = pointnet_sa_module(l3_xyz_f1,\n l3_points_f1,\n npoint=16,\n radius=RADIUS4,\n nsample=8,\n mlp=[256, 256,\n 512],\n mlp2=None,\n group_all=False,\n is_training=is_training,\n bn_decay=bn_decay,\n scope='layer4')\n end_points['l4_indices_f1'] = l4_indices_f1\n end_points['l4_xyz_f1'] = l4_points_f1\n # Tensor(\"layer4/GatherPoint:0\", shape=(16, 16, 3), dtype=float32, device=/device:GPU:0)\n # Tensor(\"layer4/Squeeze:0\", shape=(16, 16, 512), dtype=float32, device=/device:GPU:0)\n # Tensor(\"layer4/QueryBallPoint:0\", shape=(16, 16, 8), dtype=int32, device=/device:GPU:0)\n\n ### FLOW REFINEMENT MODULE\n # Feature Propagation\n # Frame 1, l1->l2; l2->l3; l3->l4\n l3_feat_f1 = set_upconv_module(l3_xyz_f1, l4_xyz_f1, l3_points_f1,\n l4_points_f1, nsample=8, radius=2.4, mlp=[],\n mlp2=[256, 256], scope='up_sa_layer1',\n is_training=is_training, bn_decay=bn_decay,\n knn=True)\n end_points['l3_feat_f1'] = l3_feat_f1\n\n l2_feat_f1 = set_upconv_module(l2_xyz_f1, l3_xyz_f1, tf.concat(axis=-1,\n values=[\n l2_points_f1,\n l2_points_f1_new]),\n l3_feat_f1, nsample=8, radius=1.2,\n mlp=[128, 128, 256], mlp2=[256],\n scope='up_sa_layer2',\n is_training=is_training, bn_decay=bn_decay,\n knn=True)\n end_points['l2_feat_f1'] = l2_feat_f1\n\n l1_feat_f1 = set_upconv_module(l1_xyz_f1, l2_xyz_f1, l1_points_f1,\n l2_feat_f1, nsample=8, radius=0.6,\n mlp=[128, 128, 256], mlp2=[256],\n scope='up_sa_layer3',\n is_training=is_training, bn_decay=bn_decay,\n knn=True)\n end_points['l1_feat_f1'] = l1_feat_f1\n\n if layer == 'pointnet':\n l0_feat_f1 = pointnet_fp_module(l0_xyz_f1, l1_xyz_f1, l0_points_f1,\n l1_feat_f1, [256, 256], is_training,\n bn_decay, scope='fa_layer4')\n else:\n l0_feat_f1 = set_upconv_module(l0_xyz_f1, l1_xyz_f1, l0_points_f1,\n l1_feat_f1, nsample=8, radius=0.3,\n mlp=[128,128,256], mlp2=[256],\n scope='up_sa_layer4',\n is_training=is_training, bn_decay=bn_decay,\n knn=True)\n end_points['l0_feat_f1'] = l0_feat_f1\n\n # FC layers\n net = tf_util.conv1d(l0_feat_f1, 128, 1, padding='VALID', bn=True,\n is_training=is_training, scope='fc1',\n bn_decay=bn_decay)\n\n end_points['net1'] = net\n net = tf_util.conv1d(net, 3, 1, padding='VALID', activation_fn=None,\n scope='fc2')\n\n end_points['net'] = net\n return net, end_points\n\n\ndef huber_loss(error, delta):\n abs_error = tf.abs(error)\n quadratic = tf.minimum(abs_error, delta)\n linear = (abs_error - quadratic)\n losses = 0.5 * quadratic ** 2 + delta * linear\n return tf.reduce_mean(losses)\n\n\ndef 
get_loss(pred, label):\n \"\"\" pred: BxNx3,\n label: BxNx3,\n mask: BxN\n \"\"\"\n batch_size = pred.get_shape()[0].value\n num_point = pred.get_shape()[1].value\n l2_loss = tf.reduce_mean(\n tf.reduce_sum((pred - label) * (pred - label), axis=2) / 2.0)\n tf.summary.scalar('l2 loss', l2_loss)\n tf.add_to_collection('losses', l2_loss)\n return l2_loss\n\n\ndef get_cycle_loss(pred_f, grouped_xyz, pred_b, point_cloud1, end_points=None,\n rigidity=False, rgb=False, point_cloud1_rgb=None, flip_prefix='', cycle_loss_weight=1,\n knn_loss_weight=1):\n\n end_points_loss = {}\n\n knn_l2_loss = knn_loss_weight*tf.reduce_mean(\n tf.reduce_sum((pred_f - grouped_xyz) * (pred_f - grouped_xyz), axis=2) / 2.0)\n tf.summary.scalar('{}KNN L2 loss'.format(flip_prefix), knn_l2_loss)\n tf.add_to_collection('{}KNN losses'.format(flip_prefix), knn_l2_loss)\n\n end_points_loss['knn_l2_loss'] = knn_l2_loss\n\n cycle_l2_loss = cycle_loss_weight*tf.reduce_mean(\n tf.reduce_sum((pred_b - point_cloud1) * (pred_b - point_cloud1), axis=2) / 2.0)\n tf.summary.scalar('{}Cycle l2 loss'.format(flip_prefix), cycle_l2_loss)\n tf.add_to_collection('{}Cycle losses'.format(flip_prefix), cycle_l2_loss)\n\n end_points_loss['cycle_l2_loss'] = cycle_l2_loss\n\n l2_loss = knn_l2_loss + cycle_l2_loss\n\n avg_distance_metric = tf.reduce_mean(\n tf.reduce_sum((pred_f - grouped_xyz) * (pred_f - grouped_xyz), axis=2) ** 0.5)\n tf.summary.scalar('{}Avg Distance Metric loss'.format(flip_prefix), avg_distance_metric)\n tf.add_to_collection('{}Avg Distance Metric losses'.format(flip_prefix), avg_distance_metric)\n\n if rigidity:\n rigid_group_flow = end_points['rigid_group_flow']\n rigid_pc1_flow = tf.expand_dims(end_points['rigid_pc1_flow'], 2)\n\n rigidity_loss = tf.reduce_mean(\n tf.reduce_sum((rigid_group_flow - rigid_pc1_flow) * (rigid_group_flow - rigid_pc1_flow),\n axis=3) / 2.0)\n tf.summary.scalar('{}Rigidity loss'.format(flip_prefix), rigidity_loss)\n tf.add_to_collection('{}Rigidity losses'.format(flip_prefix), rigidity_loss)\n\n end_points_loss['rigidity_loss'] = rigidity_loss\n\n l2_loss = l2_loss + rigidity_loss\n\n if rgb:\n pred_f_rgb = end_points['pred_f_rgb']\n rgb_loss_f = 10*tf.reduce_mean(\n tf.reduce_sum((pred_f_rgb - point_cloud1_rgb) * (pred_f_rgb - point_cloud1_rgb), axis=2) / 2.0)\n\n end_points_loss['rgb_loss_f'] = rgb_loss_f\n\n pred_b_rgb = end_points['pred_b_rgb']\n rgb_loss_b = 10*tf.reduce_mean(\n tf.reduce_sum((pred_b_rgb - point_cloud1_rgb) * (pred_b_rgb - point_cloud1_rgb), axis=2) / 2.0)\n\n end_points_loss['rgb_loss_b'] = rgb_loss_b\n\n rgb_loss = rgb_loss_f + rgb_loss_b\n tf.summary.scalar('{}RGB Loss Forward'.format(flip_prefix), rgb_loss_f)\n tf.add_to_collection('{}RGB Loss Forward'.format(flip_prefix), rgb_loss_f)\n\n tf.summary.scalar('{}RGB Loss Backward'.format(flip_prefix), rgb_loss_b)\n tf.add_to_collection('{}RGB Loss Backward'.format(flip_prefix), rgb_loss_b)\n\n tf.summary.scalar('{}RGB Loss'.format(flip_prefix), rgb_loss)\n tf.add_to_collection('{}RGB Loss'.format(flip_prefix), rgb_loss)\n\n end_points_loss['rgb_loss'] = rgb_loss\n l2_loss = l2_loss + rgb_loss\n\n end_points_loss['l2_loss'] = l2_loss\n tf.summary.scalar('{}Total l2 loss'.format(flip_prefix), l2_loss)\n tf.add_to_collection('{}Total losses'.format(flip_prefix), l2_loss)\n\n return l2_loss, end_points_loss\n\nif __name__ == '__main__':\n with tf.Graph().as_default():\n inputs = tf.zeros((32, 1024 * 2, 6))\n outputs = get_model(inputs, tf.constant(True))\n print(outputs)\n" ]
[ [ "tensorflow.abs", "tensorflow.zeros", "tensorflow.minimum", "tensorflow.assign", "tensorflow.concat", "tensorflow.summary.scalar", "tensorflow.expand_dims", "tensorflow.Graph", "tensorflow.Variable", "tensorflow.constant", "tensorflow.variable_scope", "tensorflow.squeeze", "tensorflow.placeholder", "tensorflow.get_variable_scope", "tensorflow.reduce_sum", "tensorflow.reduce_mean", "tensorflow.add_to_collection" ] ]
JB-MS/pynumpress
[ "50aef00157033cfd1c6e61fcde5d898abfb3c667" ]
[ "setup.py" ]
[ "import os\nimport platform\nfrom setuptools import setup, Extension, find_packages\nimport pip\n\ntry:\n import numpy as np\nexcept ImportError:\n pip.main(['install', 'numpy'])\n import numpy as np\n\nextra_compile_args = []\nif platform.system().lower() == 'windows':\n # This may fail if compiled on Windows with a compiler\n # that doesn't provide a cl.exe compatability frontend\n # like mingw\n extra_compile_args = ['/EHsc']\n\ntry:\n from Cython.Build import cythonize\nexcept:\n pip.main(['install', 'cython'])\n\ntry:\n from Cython.Build import cythonize\n ext_modules = [\n Extension(\n \"pynumpress.pynumpress\", [\n os.path.join('pynumpress/pynumpress.pyx'),\n os.path.join('pynumpress/MSNumpress.cpp'),\n ],\n language='c++',\n extra_compile_args=extra_compile_args,\n include_dirs=[np.get_include()]\n )\n ]\n ext_modules = cythonize(ext_modules)\nexcept ImportError:\n ext_modules = [\n Extension(\n \"pynumpress.pynumpress\", [\n os.path.join('pynumpress/pynumpress.cpp'),\n os.path.join('pynumpress/MSNumpress.cpp'),\n ],\n language='c++',\n extra_compile_args=extra_compile_args,\n include_dirs=[np.get_include()]\n )\n ]\n\n\nsetup(\n name=\"pynumpress\",\n packages=find_packages(),\n version='0.0.2',\n install_requires=['numpy'],\n include_dirs=[np.get_include()],\n ext_modules=ext_modules)\n" ]
[ [ "numpy.get_include" ] ]
awublack/opticspy
[ "3049e89f89cc7d6285c45daed8d436b548b3e38d" ]
[ "zernike_rec.py" ]
[ "from __future__ import division as __division__\nimport numpy as __np__\nfrom numpy import cos as __cos__\nfrom numpy import sin as __sin__\nfrom numpy import sqrt as __sqrt__\nfrom numpy import arctan2 as __arctan2__\nimport matplotlib.pyplot as __plt__\nfrom matplotlib import cm as __cm__\nfrom matplotlib.ticker import LinearLocator as __LinearLocator__\nfrom matplotlib.ticker import FormatStrFormatter as __FormatStrFormatter__\nfrom numpy.fft import fftshift as __fftshift__\nfrom numpy.fft import ifftshift as __ifftshift__\nfrom numpy.fft import fft2 as __fft2__\nfrom numpy.fft import ifft2 as __ifft2__\nfrom . import tools as __tools__\n\n\nclass Coefficient(object):\n\t\"\"\"\n\tReturn a set of Orthonormal Rectangular Polynomials For Rectangle aperture\n\n\tReference: Mahajan, Virendra N., and Guang-ming Dai. \n\t\"Orthonormal polynomials in wavefront analysis: analytical \n\tsolution.\" JOSA A 24.9 (2007): 2994-3016.\n\t\"\"\"\n\t__coefficients__ = []\n\t__a__ = 1/__sqrt__(2)\n\t__zernikelist__ = []\n\n\tdef __init__(self, a = __a__,\\\n\t\t\tR1=0, R2=0, R3=0, R4=0, R5=0, R6=0, R7=0, R8=0, \\\n\t\t\tR9=0, R10=0, R11=0, R12=0, R13=0, R14=0, R15=0):\n\t\tif type(R1) == list:\n\t\t\tself.__coefficients__ = R1 + [0]*(15-len(R1))\n\t\t\tself.__a__ = a\n\t\telse:\n\t\t\tself.__coefficients__ = [R1, R2, R3, R4, R5, R6, R7, \n\t\t\t\t\tR8, R9, R10, R11, R12, R13, R14, R15]\n\t\t\tself.__a__ = a\n\tdef outputcoefficient(self):\n\t\treturn [self.__a__,self.__coefficients__]\n\n\tdef zernikesurface(self):\n\t\t\"\"\"\n\t\t------------------------------------------------\n\t\tzernikesurface(self, label_1 = True):\n\n\t\tReturn a 3D Zernike Polynomials surface figure\n\n\t\tlabel_1: default show label\n\n\t\t------------------------------------------------\n\t\t\"\"\"\n\t\ta = self.__a__\n\t\tb = __sqrt__(1-a**2)\n\t\tx1 = __np__.linspace(-a, a, 50)\n\t\ty1 = __np__.linspace(-b, b, 50)\n\t\t[X,Y] = __np__.meshgrid(x1,y1)\n\t\tZ = __zernikecartesian__(self.__coefficients__,a,X,Y)\n\t\tfig = __plt__.figure(figsize=(12, 8), dpi=80)\n\t\tax = fig.gca(projection='3d')\n\t\tsurf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=__cm__.RdYlGn,\n\t linewidth=0, antialiased=False, alpha = 0.6)\n\n\t\tax.auto_scale_xyz([-1, 1], [-1, 1], [Z.max(), Z.min()])\n\t\t# ax.set_xlim(-a, a)\n\t\t# ax.set_ylim(-b, b)\n\t\t# v = max(abs(Z.max()),abs(Z.min()))\n\t\t# ax.set_zlim(-v*5, v*5)\n\t\t# cset = ax.contourf(X, Y, Z, zdir='z', offset=-v*5, cmap=__cm__.RdYlGn)\n\n\t\t# ax.zaxis.set_major_locator(__LinearLocator__(10))\n\t\t# ax.zaxis.set_major_formatter(__FormatStrFormatter__('%.02f'))\n\t\tfig.colorbar(surf, shrink=1, aspect=30)\n\n\t\t# p2v = round(__tools__.peak2valley(Z),5)\n\t\t# rms1 = round(__tools__.rms(Z),5)\n\t\t__plt__.show()\n\tdef zernikemap(self):\n\t\ta = self.__a__\n\t\tb = __sqrt__(1-a**2)\n\t\tx1 = __np__.linspace(-a, a, 100)\n\t\ty1 = __np__.linspace(-b, b, 100)\n\t\t[X,Y] = __np__.meshgrid(x1,y1)\n\t\tZ = __zernikecartesian__(self.__coefficients__,a,X,Y)\n\t\tfig = __plt__.figure(figsize=(12, 8), dpi=80)\n\t\tax = fig.gca()\n\t\tim = __plt__.pcolormesh(X, Y, Z, cmap=__cm__.RdYlGn)\n\t\t__plt__.colorbar()\n\t\tax.set_aspect('equal', 'datalim')\n\t\t__plt__.show()\n\n\t\treturn 0\n\n\tdef __psfcaculator__(self,lambda_1=632*10**(-9),z=0.1):\n\t\t\"\"\"\n\t\theight: Exit pupil height\n\t\twidth: Exit pupil width\n\t\tz: Distance from exit pupil to image plane\n\t\t\"\"\"\n\t\ta = self.__a__\n\t\tb = __sqrt__(1-a**2)\n\t\tl1 = 100;\n\t\tx1 = __np__.linspace(-a, a, l1)\n\t\ty1 = 
__np__.linspace(-b, b, l1)\n\t\t[X,Y] = __np__.meshgrid(x1,y1)\n\t\tZ = __zernikecartesian__(self.__coefficients__,a,X,Y)\n\t\td = 400 # background\n\t\tA = __np__.zeros([d,d])\n\t\tA[d/2-l1/2+1:d/2+l1/2+1,d/2-l1/2+1:d/2+l1/2+1] = Z\n\t\t# fig = __plt__.figure()\n\t\t# __plt__.imshow(A)\n\t\t# __plt__.colorbar()\n\t\t# __plt__.show()\n\t\tabbe = __np__.exp(-1j*2*__np__.pi*A)\n\t\tfor i in range(len(abbe)):\n\t\t\tfor j in range(len(abbe)):\n\t\t\t\tif abbe[i][j]==1:\n\t\t\t\t\tabbe[i][j]=0\n\t\tPSF = __fftshift__(__fft2__(__fftshift__(abbe)))**2\n\t\tPSF = PSF/PSF.max()\n\t\treturn PSF\n\n\tdef psf(self,lambda_1=632*10**(-9),z=0.1):\n\t\t\"\"\"\n\t\t------------------------------------------------\n\t\tpsf()\n\n\t\tReturn the point spread function of a wavefront described by\n\t\tOrthonormal Rectangular Polynomials\n\t\t------------------------------------------------\n\t\tInput: \n\n\t\tr: exit pupil radius(mm)\n\n\t\tlambda_1: wavelength(m)\n\n\t\tz: exit pupil to image plane distance(m)\n\n\t\t\"\"\"\n\t\tPSF = self.__psfcaculator__(lambda_1=lambda_1,z=z)\n\t\tfig = __plt__.figure(figsize=(9, 6), dpi=80)\n\t\t__plt__.imshow(abs(PSF),cmap=__cm__.RdYlGn)\n\t\t__plt__.colorbar()\n\t\t__plt__.show()\n\t\treturn 0\n\n\tdef mtf(self,lambda_1=632*10**(-9),z=0.1,matrix = False):\n\t\t\"\"\"\n\t\tModulate Transfer function\n\t\t\"\"\"\n\t\tPSF = self.__psfcaculator__(lambda_1=lambda_1,z=z)\n\t\tMTF = __fftshift__(__fft2__(PSF))\n\t\tMTF = MTF/MTF.max()\n\t\tfig = __plt__.figure(figsize=(9, 6), dpi=80)\n\t\t__plt__.imshow(abs(MTF),cmap=__cm__.bwr)\n\t\t__plt__.colorbar()\n\t\t__plt__.show()\n\t\tif matrix == True:\n\t\t\treturn MTF\n\t\telse:\n\t\t\treturn 0\n\n\tdef ptf(self):\n\t\t\"\"\"\n\t\tPhase transfer function\n\t\t\"\"\"\n\t\tPSF = self.__psfcaculator__()\n\t\tPTF = __fftshift__(__fft2__(PSF))\n\t\tPTF = __np__.angle(PTF)\n\t\tl1 = 100\n\t\td = 400\n\t\tA = __np__.zeros([d,d])\n\t\tA[d/2-l1/2+1:d/2+l1/2+1,d/2-l1/2+1:d/2+l1/2+1] = PTF[d/2-l1/2+1:d/2+l1/2+1,d/2-l1/2+1:d/2+l1/2+1]\n\t\t__plt__.imshow(abs(A),cmap=__cm__.rainbow)\n\t\t__plt__.colorbar()\n\t\t__plt__.show()\n\t\treturn 0\n\ndef __zernikepolar__(coefficient,a,r,u):\n\t\"\"\"\n\t------------------------------------------------\n\t__zernikepolar__(coefficient,r,u):\n\n\tReturn combined aberration\n\n\tOrthonormal Rectangle Aperture Polynomials Caculation in polar coordinates\n\n\tcoefficient: Orthonormal Rectangle Aperture Polynomials Coefficient from input\n\tr: rho in polar coordinates\n\tu: theta in polar coordinates\n\t------------------------------------------------\n\t\"\"\"\n\tmu = __sqrt__(9-36*a**2+103*a**4-134*a**6+67*a**6+67*a**8)\n\tv = __sqrt__(49-196*a**2+330*a**4-268*a**6+134*a**8)\n\ttau = 1/(128*v*a**4*(1-a**2)**2)\n\teta = 9-45*a**2+139*a**4-237*a**6+210*a**8-67*a**10\n\n\tR = [0]+coefficient\n\tR1 = R[1] * 1 \n\tR2 = R[2] * __sqrt__(3)/a*r*__cos__(u)\n\tR3 = R[3] * __sqrt__(3/(1-a**2))*r*__sin__(u)\n\tR4 = R[4] * __sqrt__(5)/2/__sqrt__(1-2*a**2+2*a**4)*(3*r**2-1)\n\tR5 = R[5] * 3/2/a/__sqrt__(1-a**2)*r**2*__sin__(2*u)\n\tR6 = R[6] * __sqrt__(5)/2/a**2/(1-a**2)/__sqrt__(1-2*a**2+2*a**4)*\\\n\t\t\t\t\t(3*(1-2*a**2+2*a**4)*r**2*__cos__(2*u)+3*(1-2*a**2)*r**2-\\\n\t\t\t\t\t2*a**2*(1-a**2)*(1-2*a**2))\n\tR7 = R[7] * __sqrt__(21)/2/__sqrt__(27-81*a**2+116*a**4-62*a**6)*\\\n\t\t\t\t\t(15*r**2-9+4*a**2)*r*__sin__(u)\n\tR8 = R[8] * __sqrt__(21)/2/a/__sqrt__(35-70*a**2+62*a**4)*\\\n\t\t\t\t\t(15*r**2-5-4*a**2)*r*__cos__(u)\n\tR9 = R[9] * 
(__sqrt__(5)*__sqrt__((27-54*a**2+62*a**4)/(1-a**2))/\\\n\t\t\t\t\t(8*a**2*(27-81*a**2+116*a**4-62*a**6)))*((27-54*a**2+62*a**4)*\\\n\t\t\t\t\tr*__sin__(3*u)-3*(4*a**2*(3-13*a**2+10*a**4)-(9-18*a**2-26*a**4))\\\n\t\t\t\t\t*r*__sin__(u))\n\tr1 = 35-70*a**2+62*a**4\n\tR10 = R[10] * (__sqrt__(5)/(8*a**3*(1-a**2)*__sqrt__(r1)))*((r1)*r**3*__cos__(3*u)-\\\n\t\t\t\t\t3*(4*a**2*(7-17*a**2+10*a**4)-(r1)*r**2)*r*__cos__(u))\n\tR11 = R[11] * 1/8/mu*(315*r**4+30*(1-2*a**2)*r**2*__cos__(2*u)-240*r**2+27+16*a*2-16*a**4)\n\tR12 = R[12] * (3*mu/(8*a**2*v*eta))*(315*(1-2*a**2)*(1-2*a**2+2*a**4)*r**4+\\\n\t\t\t\t\t5*(7*mu**2*r**2-21+72*a**2-225*a**4+306*a**6-152*a**8)*r**2*__cos__(2*u)-\\\n\t\t\t\t\t15*(1-2*a**2)*(7+4*a**2-71*a**4+134*a**6-67*a**8)*r**2+\\\n\t\t\t\t\t\ta**2*(1-a**2)*(1-2*a**2)*(70-233*a**2+233*a**4))\n\tR13 = R[13] * __sqrt__(21)/(4*a*__sqrt__(1-3*a**2+4*a**4-2*a**6))*(5*r**2-3)*r**2*__sin__(2*u)\n\tR14 = R[14] * 6*tau*(5*v**2*r**4*__cos__(4*u)-20*(1-2*a**2)*(6*a**2*(7-16*a**2+18*a**4-9*a**6)-\\\n\t\t\t\t\t49*(1-2*a**2+2*a**4)*r**2)*r**2*__cos__(u)+8*a**4*(1-a**2)**2*(21-62*a**2+62*a**4)-\\\n\t\t\t\t\t120*a**2*(7-30*a**2+46*a**4-23*a**6)*r**2+\\\n\t\t\t\t\t15*(49-196*a**2+282*a**4-172*a**6+86*a**8)*r**4)\n\tR15 = R[15] * (__sqrt__(21)/(8*a**3*__sqrt__((1-a**2)**3))/__sqrt__(1-2*a**2+2*a**4))*\\\n\t\t\t\t\t(-(1-2*a**2)*(6*a**2-6*a**4-5*r**2)*r**2*__sin__(2*u)+\\\n\t\t\t\t\t(5/2)*(1-2*a**2+2**a**4)*r**4*__sin__(4*u))\n\tRW = \tR1 + R2 + R3+ R4+ R5+ R6+ R7+ R8+ R9+ \\\n\t\t\tR10+ R11+ R12+ R13+ R14+ R15\n\treturn RW\n\ndef __zernikecartesian__(coefficient,a,x,y):\n\t\"\"\"\n\t------------------------------------------------\n\t__zernikecartesian__(coefficient,a,x,y):\n\n\tReturn combined aberration\n\n\tOrthonormal Rectangle Aperture Polynomials Caculation for \n\tRectangle aperture in Cartesian coordinates\n\n\tcoefficient: Zernike Polynomials Coefficient from input\n\ta: 1/2 aperture width in a circle(See reference)\n\tx: x in Cartesian coordinates\n\ty: y in Cartesian coordinates\n\t------------------------------------------------\n\t\"\"\"\n\tmu = __sqrt__(9-36*a**2+103*a**4-134*a**6+67*a**6+67*a**8)\n\tv = __sqrt__(49-196*a**2+330*a**4-268*a**6+134*a**8)\n\ttau = 1/(128*v*a**4*(1-a**2)**2)\n\teta = 9-45*a**2+139*a**4-237*a**6+210*a**8-67*a**10\n\tr = x**2+y**2\n\n\tR = [0]+coefficient\n\tR1 = R[1] * 1 \n\tR2 = R[2] * __sqrt__(3)/a*x\n\tR3 = R[3] * __sqrt__(3/(1-a**2))*y\n\tR4 = R[4] * __sqrt__(5)/2/__sqrt__(1-2*a**2+2*a**4)*(3*r**2-1)\n\tR5 = R[5] * 3/a/__sqrt__(1-a**2)*x*y\n\tR6 = R[6] * __sqrt__(5)/4/a**2/(1-a**2)/__sqrt__(1-2*a**2+2*a**4)*\\\n\t\t\t\t\t(3*(1-a**2)**2*x**2-3*a**4*y**2-a*82*(1-3*a**2+2*a**4))\n\tR7 = R[7] * __sqrt__(21)/2/__sqrt__(27-81*a**2+116*a**4-62*a**6)*\\\n\t\t\t\t\t(15*r**2-9+4*a**2)*y\n\tR8 = R[8] * __sqrt__(21)/2/a/__sqrt__(35-70*a**2+62*a**4)*\\\n\t\t\t\t\t(15*r**2-5-4*a**2)*x\n\tR9 = R[9] * (__sqrt__(5)*__sqrt__((27-54*a**2+62*a**4)/(1-a**2))/\\\n\t\t\t\t\t(2*a**2*(27-81*a**2+116*a**4-62*a**6)))*(27*(1-a**2)**2*x**2-\\\n\t\t\t\t\t35*a**4*y**2-a**2*(9-39*a**2+30*a**4))*y\n\tr1 = 35-70*a**2+62*a**4\n\tR10 = R[10] * (__sqrt__(5)/(2*a**3*(1-a**2)*__sqrt__(r1)))*(35*(1-a**2)**2*x**2-\\\n\t\t\t\t\t27*a**4*y**2-a**2*(21-51*a**2+30*a**4))*x\n\tR11 = R[11] * 1/8/mu*(315*r**4+30*(7+2*a**2)*x**2-30*(9-2*a**2)*y**2+27+16*a**2-16*a**4)\n\n\tR12 = R[12] * 
(3*mu/(8*a**2*v*eta))*(35*(1-a**2)**2*(18-36*a**2+67*a**4)*x**4+\\\n\t\t\t\t\t630*(1-2*a**2)*(1-2*a**2+2*a**4)*x**2*y**2-35*a**4*(49-98*a**2+67*a**4)*y**4-\\\n\t\t\t\t\t30*(1-a**2)*(7-10*a**2-12*a**4+75*a**6-67*a**8)*x**2-\\\n\t\t\t\t\t30*a**2*(7-77*a**2+189*a**4-193*a**6+67*a**8)*y**2+\\\n\t\t\t\t\ta**2*(1-a**2)*(1-2*a**2)*(70-233*a**2+233*a**4))\n\tR13 = R[13] * __sqrt__(21)/(2*a*__sqrt__(1-3*a**2+4*a**4-2*a**6))*(5*r**2-3)*x*y\n\tR14 = R[14] * 16*tau*(735*(1-a**2)**4*x**4-540*a**4*(1-a**2)**2*x**2*y**2+735*a**8*y**4-\\\n\t\t\t\t\t90*a**2*(1-a**2)**3*(7-9*a**2)*x**2+90*a**6*(1-a**2)*(2-9*a**2)*y**2+\\\n\t\t\t\t\t+3*a**4*(1-a**2)**2*(21-62*a**2+62*a**4))\n\tR15 = R[15] * __sqrt__(21)/(2*a**3*(1-a**2)*__sqrt__(1-3*a**2+4*a**4-2*a**6))*\\\n\t\t\t\t\t(5*(1-a**2)**2*x**2-5*a**4*y**2-a**2*(3-9*a**2+6*a**4))*x*y\n\n\tRW = \tR1 + R2 + R3+ R4+ R5+ R6+ R7+ R8+ R9+ \\\n\t\t\tR10+ R11+ R12+ R13+ R14+ R15\n\treturn RW\n\n\n\n" ]
[ [ "matplotlib.pyplot.colorbar", "matplotlib.pyplot.pcolormesh", "numpy.angle", "numpy.sin", "numpy.zeros", "numpy.fft.fft2", "numpy.exp", "matplotlib.pyplot.figure", "numpy.sqrt", "numpy.cos", "numpy.fft.fftshift", "matplotlib.pyplot.show", "numpy.linspace", "numpy.meshgrid" ] ]
ElliotMunro200/reinforcement_learning_an_introduction
[ "a0ac9e5da6eaeae14d297a560c499d1a6e579c2a" ]
[ "code/exercises/ex_5_10/run.py" ]
[ "#!/usr/bin/env python\n\"\"\"\n--------------------------------\nproject: code\ncreated: 11/06/2018 18:00\n---------------------------------\n\nI added a crash penalty of -100 to deter the agent from\nrunning into walls to end up at favourable starting positions.\n\nAlso, the algorithm doesn't seem to converge well with the noise on.\n\n\"\"\"\nimport pickle\nimport os\n\nimport numpy as np\n\nimport constants as c\n\nfrom exercises.ex_5_10.model import Brain, Car, RaceTrack, run_episode, train\nfrom exercises.ex_5_10.utils import load_track\n\n\nif __name__ == \"__main__\":\n\n TRACK_NAME = \"track_2\"\n eps_start = 0.1\n\n folder = os.path.join(\n c.Paths.input,\n 'ex_5_10'\n )\n\n track_indices = load_track(\n path=os.path.join(folder, f\"{TRACK_NAME}.csv\"),\n track_flag=0,\n start_flag=2,\n finish_flag=3\n )\n\n racetrack = RaceTrack(\n noise_level=None, #0.1,\n crash_penalty=-1000,\n **track_indices\n )\n car = Car(None, 5, 1)\n\n brain = Brain(\n car,\n racetrack,\n epsilon=eps_start,\n random_state=np.random.RandomState(seed=123)\n )\n\n print(racetrack)\n\n # initialise the policy with random runs\n brain.epsilon = 1.\n for i in range(3):\n car.set_policy(\n brain.epsilon_greedy_policy()\n )\n g = train(brain, car, racetrack)\n print(\"------------------------------------------------------\")\n print(f\"Finished random policy episode set {i}\")\n print(f\"Epsilon = {brain.epsilon}\")\n print(f\"Average Return: {g}\")\n print(\"------------------------------------------------------\")\n print(\"\\n\")\n\n brain.epsilon = eps_start\n returns = list()\n training_epsilons = list()\n n_runs = 20\n for i in range(n_runs):\n car.set_policy(\n brain.epsilon_greedy_policy()\n )\n g = train(brain, car, racetrack)\n returns.append(g)\n training_epsilons.append(brain.epsilon)\n print(\"------------------------------------------------------\")\n print(f\"Finished episode set {i}\")\n print(f\"Epsilon = {brain.epsilon}\")\n print(f\"Average Return: {g}\")\n print(\"------------------------------------------------------\")\n print(\"\\n\")\n # brain.epsilon -= eps_start / n_runs\n\n greedy_episodes = dict()\n print(\"\\n\")\n racetrack.set_noise_level(None)\n car.set_policy(brain.greedy_policy())\n for pos in racetrack.start_positions:\n greedy_episode = run_episode(\n car,\n racetrack,\n start_position=pos\n )\n print(f\"Greedy Episode: starting at {pos}\")\n print(f\"Return: {sum(greedy_episode.rewards)}\")\n racetrack.print_episode(greedy_episode)\n greedy_episodes[pos] = greedy_episode\n\n info = dict(\n track_name=TRACK_NAME,\n returns=returns,\n training_epsilons=training_epsilons,\n greedy_episodes=greedy_episodes\n )\n\n with open(os.path.join(c.Paths.output, 'ex_5_10', f'{TRACK_NAME}.pkl'), 'wb') as f:\n pickle.dump(info, f)\n\n" ]
[ [ "numpy.random.RandomState" ] ]
sunchang0124/cgans
[ "99ae3d572897546a100860f9674b2c79201ec59a" ]
[ "dp_cgans/base.py" ]
[ "\"\"\"Base Class for tabular models.\"\"\"\n\nimport logging\nimport pickle\nimport uuid\nfrom warnings import warn\n\nimport numpy as np\nimport pandas as pd\n\nfrom dp_cgans.errors import ConstraintsNotMetError\nfrom dp_cgans.metadata import Table\n\nLOGGER = logging.getLogger(__name__)\nCOND_IDX = str(uuid.uuid4())\n\n\nclass NonParametricError(Exception):\n \"\"\"Exception to indicate that a model is not parametric.\"\"\"\n\n\nclass BaseTabularModel:\n \"\"\"Base class for all the tabular models.\n\n The ``BaseTabularModel`` class defines the common API that all the\n TabularModels need to implement, as well as common functionality.\n\n Args:\n field_names (list[str]):\n List of names of the fields that need to be modeled\n and included in the generated output data. Any additional\n fields found in the data will be ignored and will not be\n included in the generated output.\n If ``None``, all the fields found in the data are used.\n field_types (dict[str, dict]):\n Dictinary specifying the data types and subtypes\n of the fields that will be modeled. Field types and subtypes\n combinations must be compatible with the SDV Metadata Schema.\n field_transformers (dict[str, str]):\n Dictinary specifying which transformers to use for each field.\n Available transformers are:\n\n * ``integer``: Uses a ``NumericalTransformer`` of dtype ``int``.\n * ``float``: Uses a ``NumericalTransformer`` of dtype ``float``.\n * ``categorical``: Uses a ``CategoricalTransformer`` without gaussian noise.\n * ``categorical_fuzzy``: Uses a ``CategoricalTransformer`` adding gaussian noise.\n * ``one_hot_encoding``: Uses a ``OneHotEncodingTransformer``.\n * ``label_encoding``: Uses a ``LabelEncodingTransformer``.\n * ``boolean``: Uses a ``BooleanTransformer``.\n * ``datetime``: Uses a ``DatetimeTransformer``.\n\n anonymize_fields (dict[str, str]):\n Dict specifying which fields to anonymize and what faker\n category they belong to.\n primary_key (str):\n Name of the field which is the primary key of the table.\n constraints (list[Constraint, dict]):\n List of Constraint objects or dicts.\n table_metadata (dict or metadata.Table):\n Table metadata instance or dict representation.\n If given alongside any other metadata-related arguments, an\n exception will be raised.\n If not given at all, it will be built using the other\n arguments or learned from the data.\n rounding (int, str or None):\n Define rounding scheme for ``NumericalTransformer``. If set to an int, values\n will be rounded to that number of decimal places. If ``None``, values will not\n be rounded. If set to ``'auto'``, the transformer will round to the maximum number\n of decimal places detected in the fitted data. Defaults to ``'auto'``.\n min_value (int, str or None):\n Specify the minimum value the ``NumericalTransformer`` should use. If an integer\n is given, sampled data will be greater than or equal to it. If the string ``'auto'``\n is given, the minimum will be the minimum value seen in the fitted data. If ``None``\n is given, there won't be a minimum. Defaults to ``'auto'``.\n max_value (int, str or None):\n Specify the maximum value the ``NumericalTransformer`` should use. If an integer\n is given, sampled data will be less than or equal to it. If the string ``'auto'``\n is given, the maximum will be the maximum value seen in the fitted data. If ``None``\n is given, there won't be a maximum. 
Defaults to ``'auto'``.\n \"\"\"\n\n _DTYPE_TRANSFORMERS = None\n\n _metadata = None\n\n def __init__(self, field_names=None, field_types=None, field_transformers=None,\n anonymize_fields=None, primary_key=None, constraints=None, table_metadata=None,\n rounding='auto', min_value='auto', max_value='auto'):\n if table_metadata is None:\n self._metadata = Table(\n field_names=field_names,\n primary_key=primary_key,\n field_types=field_types,\n field_transformers=field_transformers,\n anonymize_fields=anonymize_fields,\n constraints=constraints,\n dtype_transformers=self._DTYPE_TRANSFORMERS,\n rounding=rounding,\n min_value=min_value,\n max_value=max_value\n )\n self._metadata_fitted = False\n else:\n for arg in (field_names, primary_key, field_types, anonymize_fields, constraints):\n if arg:\n raise ValueError(\n 'If table_metadata is given {} must be None'.format(arg.__name__))\n\n if isinstance(table_metadata, dict):\n table_metadata = Table.from_dict(table_metadata)\n\n table_metadata._dtype_transformers.update(self._DTYPE_TRANSFORMERS)\n\n self._metadata = table_metadata\n self._metadata_fitted = table_metadata.fitted\n\n def fit(self, data):\n \"\"\"Fit this model to the data.\n\n If the table metadata has not been given, learn it from the data.\n\n Args:\n data (pandas.DataFrame or str):\n Data to fit the model to. It can be passed as a\n ``pandas.DataFrame`` or as an ``str``.\n If an ``str`` is passed, it is assumed to be\n the path to a CSV file which can be loaded using\n ``pandas.read_csv``.\n \"\"\"\n LOGGER.debug('Fitting %s to table %s; shape: %s', self.__class__.__name__,\n self._metadata.name, data.shape)\n if not self._metadata_fitted:\n self._metadata.fit(data)\n\n self._num_rows = len(data)\n\n LOGGER.debug('Transforming table %s; shape: %s', self._metadata.name, data.shape)\n transformed = self._metadata.transform(data)\n\n if self._metadata.get_dtypes(ids=False):\n LOGGER.debug(\n 'Fitting %s model to table %s', self.__class__.__name__, self._metadata.name)\n self._fit(transformed)\n\n def get_metadata(self):\n \"\"\"Get metadata about the table.\n\n This will return an ``sdv.metadata.Table`` object containing\n the information about the data that this model has learned.\n\n This Table metadata will contain some common information,\n such as field names and data types, as well as additional\n information that each Sub-class might add, such as the\n observed data field distributions and their parameters.\n\n Returns:\n sdv.metadata.Table:\n Table metadata.\n \"\"\"\n return self._metadata\n\n @staticmethod\n def _filter_conditions(sampled, conditions, float_rtol):\n \"\"\"Filter the sampled rows that match the conditions.\n\n If condition columns are float values, consider a match anything that\n is closer than the given ``float_rtol`` and then make the value exact.\n\n Args:\n sampled (pandas.DataFrame):\n The sampled rows, reverse transformed.\n conditions (dict):\n The dictionary of conditioning values.\n float_rtol (float):\n Maximum tolerance when considering a float match.\n\n Returns:\n pandas.DataFrame:\n Rows from the sampled data that match the conditions.\n \"\"\"\n for column, value in conditions.items():\n column_values = sampled[column]\n if column_values.dtype.kind == 'f':\n distance = value * float_rtol\n sampled = sampled[np.abs(column_values - value) < distance]\n sampled[column] = value\n else:\n sampled = sampled[column_values == value]\n\n return sampled\n\n def _sample_rows(self, num_rows, conditions=None, transformed_conditions=None,\n float_rtol=0.1, 
previous_rows=None):\n \"\"\"Sample rows with the given conditions.\n\n Input conditions is taken both in the raw input format, which will be used\n for filtering during the reject-sampling loop, and already transformed\n to the model format, which will be passed down to the model if it supports\n conditional sampling natively.\n\n If condition columns are float values, consider a match anything that\n is closer than the given ``float_rtol`` and then make the value exact.\n\n If the model does not have any data columns, the result of this call\n is a dataframe of the requested length with no columns in it.\n\n Args:\n num_rows (int):\n Number of rows to sample.\n conditions (dict):\n The dictionary of conditioning values in the original format.\n transformed_conditions (dict):\n The dictionary of conditioning values transformed to the model format.\n float_rtol (float):\n Maximum tolerance when considering a float match.\n previous_rows (pandas.DataFrame):\n Valid rows sampled in the previous iterations.\n\n Returns:\n tuple:\n * pandas.DataFrame:\n Rows from the sampled data that match the conditions.\n * int:\n Number of rows that are considered valid.\n \"\"\"\n if self._metadata.get_dtypes(ids=False):\n if conditions is None:\n sampled = self._sample(num_rows)\n else:\n try:\n sampled = self._sample(num_rows, transformed_conditions)\n except NotImplementedError:\n sampled = self._sample(num_rows)\n\n sampled = self._metadata.reverse_transform(sampled)\n\n if previous_rows is not None:\n sampled = previous_rows.append(sampled, ignore_index=True)\n\n sampled = self._metadata.filter_valid(sampled)\n\n if conditions is not None:\n sampled = self._filter_conditions(sampled, conditions, float_rtol)\n\n num_valid = len(sampled)\n\n return sampled, num_valid\n\n else:\n sampled = pd.DataFrame(index=range(num_rows))\n sampled = self._metadata.reverse_transform(sampled)\n return sampled, num_rows\n\n def _sample_batch(self, num_rows=None, max_retries=100, max_rows_multiplier=10,\n conditions=None, transformed_conditions=None, float_rtol=0.01):\n \"\"\"Sample a batch of rows with the given conditions.\n\n This will enter a reject-sampling loop in which rows will be sampled until\n all of them are valid and match the requested conditions. If `max_retries`\n is exceeded, it will return as many rows as it has sampled, which may be less\n than the target number of rows.\n\n Input conditions is taken both in the raw input format, which will be used\n for filtering during the reject-sampling loop, and already transformed\n to the model format, which will be passed down to the model if it supports\n conditional sampling natively.\n\n If condition columns are float values, consider a match anything that is\n relatively closer than the given ``float_rtol`` and then make the value exact.\n\n If the model does not have any data columns, the result of this call\n is a dataframe of the requested length with no columns in it.\n\n Args:\n num_rows (int):\n Number of rows to sample. 
If not given the model\n will generate as many rows as there were in the\n data passed to the ``fit`` method.\n max_retries (int):\n Number of times to retry sampling discarded rows.\n Defaults to 100.\n max_rows_multiplier (int):\n Multiplier to use when computing the maximum number of rows\n that can be sampled during the reject-sampling loop.\n The maximum number of rows that are sampled at each iteration\n will be equal to this number multiplied by the requested num_rows.\n Defaults to 10.\n conditions (dict):\n The dictionary of conditioning values in the original input format.\n transformed_conditions (dict):\n The dictionary of conditioning values transformed to the model format.\n float_rtol (float):\n Maximum tolerance when considering a float match.\n\n Returns:\n pandas.DataFrame:\n Sampled data.\n \"\"\"\n sampled, num_valid = self._sample_rows(\n num_rows, conditions, transformed_conditions, float_rtol)\n\n counter = 0\n total_sampled = num_rows\n while num_valid < num_rows:\n if counter >= max_retries:\n break\n\n remaining = num_rows - num_valid\n valid_probability = (num_valid + 1) / (total_sampled + 1)\n max_rows = num_rows * max_rows_multiplier\n num_to_sample = min(int(remaining / valid_probability), max_rows)\n total_sampled += num_to_sample\n\n LOGGER.info('%s valid rows remaining. Resampling %s rows', remaining, num_to_sample)\n sampled, num_valid = self._sample_rows(\n num_to_sample, conditions, transformed_conditions, float_rtol, sampled\n )\n\n counter += 1\n\n return sampled.head(min(len(sampled), num_rows))\n\n def _make_conditions_df(self, conditions, num_rows):\n \"\"\"Transform `conditions` into a dataframe.\n\n Args:\n conditions (pd.DataFrame, dict or pd.Series):\n If this is a dictionary/Series which maps column names to the column\n value, then this method generates `num_rows` samples, all of\n which are conditioned on the given variables. If this is a DataFrame,\n then it generates an output DataFrame such that each row in the output\n is sampled conditional on the corresponding row in the input.\n num_rows (int):\n Number of rows to sample. 
If a conditions dataframe is given, this must\n either be ``None`` or match the length of the ``conditions`` dataframe.\n\n Returns:\n pandas.DataFrame:\n `conditions` as a dataframe.\n \"\"\"\n if isinstance(conditions, pd.Series):\n conditions = pd.DataFrame([conditions] * num_rows)\n\n elif isinstance(conditions, dict):\n try:\n conditions = pd.DataFrame(conditions)\n except ValueError:\n conditions = pd.DataFrame([conditions] * num_rows)\n\n elif not isinstance(conditions, pd.DataFrame):\n raise TypeError('`conditions` must be a dataframe, a dictionary or a pandas series.')\n\n elif num_rows is not None and len(conditions) != num_rows:\n raise ValueError(\n 'If `conditions` is a `DataFrame`, `num_rows` must be `None` or match its lenght.')\n\n return conditions.copy()\n\n def _conditionally_sample_rows(self, dataframe, max_retries, max_rows_multiplier,\n condition, transformed_condition, float_rtol,\n graceful_reject_sampling):\n num_rows = len(dataframe)\n sampled_rows = self._sample_batch(\n num_rows,\n max_retries,\n max_rows_multiplier,\n condition,\n transformed_condition,\n float_rtol\n )\n num_sampled_rows = len(sampled_rows)\n\n if num_sampled_rows < num_rows:\n # Didn't get enough rows.\n if len(sampled_rows) == 0:\n error = 'No valid rows could be generated with the given conditions.'\n raise ValueError(error)\n\n elif not graceful_reject_sampling:\n error = f'Could not get enough valid rows within {max_retries} trials.'\n raise ValueError(error)\n\n else:\n warn(f'Only {len(sampled_rows)} rows could '\n f'be sampled within {max_retries} trials.')\n\n if len(sampled_rows) > 0:\n sampled_rows[COND_IDX] = dataframe[COND_IDX].values[:len(sampled_rows)]\n\n return sampled_rows\n\n def sample(self, num_rows=None, max_retries=100, max_rows_multiplier=10,\n conditions=None, float_rtol=0.01, graceful_reject_sampling=False):\n \"\"\"Sample rows from this table.\n\n Args:\n num_rows (int):\n Number of rows to sample. If not given the model\n will generate as many rows as there were in the\n data passed to the ``fit`` method.\n max_retries (int):\n Number of times to retry sampling discarded rows.\n Defaults to 100.\n max_rows_multiplier (int):\n Multiplier to use when computing the maximum number of rows\n that can be sampled during the reject-sampling loop.\n The maximum number of rows that are sampled at each iteration\n will be equal to this number multiplied by the requested num_rows.\n Defaults to 10.\n conditions (pd.DataFrame, dict or pd.Series):\n If this is a dictionary/Series which maps column names to the column\n value, then this method generates `num_rows` samples, all of\n which are conditioned on the given variables. If this is a DataFrame,\n then it generates an output DataFrame such that each row in the output\n is sampled conditional on the corresponding row in the input.\n float_rtol (float):\n Maximum tolerance when considering a float match. This is the maximum\n relative distance at which a float value will be considered a match\n when performing reject-sampling based conditioning. Defaults to 0.01.\n graceful_reject_sampling (bool):\n If `False` raises a `ValueError` if not enough valid rows could be sampled\n within `max_retries` trials. 
If `True` prints a warning and returns\n as many rows as it was able to sample within `max_retries`.\n Defaults to False.\n\n Returns:\n pandas.DataFrame:\n Sampled data.\n\n Raises:\n ConstraintsNotMetError:\n If the conditions are not valid for the given constraints.\n ValueError:\n If any of the following happens:\n * any of the conditions' columns are not valid.\n * `graceful_reject_sampling` is `False` and not enough valid rows could be\n sampled within `max_retries` trials.\n * no rows could be generated.\n \"\"\"\n if conditions is None:\n num_rows = num_rows or self._num_rows\n return self._sample_batch(num_rows, max_retries, max_rows_multiplier)\n\n # convert conditions to dataframe\n conditions = self._make_conditions_df(conditions, num_rows)\n\n # validate columns\n for column in conditions.columns:\n if column not in self._metadata.get_fields():\n raise ValueError(f'Invalid column name `{column}`')\n\n try:\n transformed_conditions = self._metadata.transform(conditions, on_missing_column='drop')\n except ConstraintsNotMetError as cnme:\n cnme.message = 'Passed conditions are not valid for the given constraints'\n raise\n\n condition_columns = list(conditions.columns)\n transformed_columns = list(transformed_conditions.columns)\n conditions.index.name = COND_IDX\n conditions.reset_index(inplace=True)\n transformed_conditions.index.name = COND_IDX\n transformed_conditions.reset_index(inplace=True)\n grouped_conditions = conditions.groupby(condition_columns)\n\n # sample\n all_sampled_rows = list()\n\n for group, dataframe in grouped_conditions:\n if not isinstance(group, tuple):\n group = [group]\n\n condition_indices = dataframe[COND_IDX]\n condition = dict(zip(condition_columns, group))\n if len(transformed_columns) == 0:\n sampled_rows = self._conditionally_sample_rows(\n dataframe,\n max_retries,\n max_rows_multiplier,\n condition,\n None,\n float_rtol,\n graceful_reject_sampling\n )\n all_sampled_rows.append(sampled_rows)\n else:\n transformed_conditions_in_group = transformed_conditions.loc[condition_indices]\n transformed_groups = transformed_conditions_in_group.groupby(transformed_columns)\n for transformed_group, transformed_dataframe in transformed_groups:\n if not isinstance(transformed_group, tuple):\n transformed_group = [transformed_group]\n\n transformed_condition = dict(zip(transformed_columns, transformed_group))\n sampled_rows = self._conditionally_sample_rows(\n transformed_dataframe,\n max_retries,\n max_rows_multiplier,\n condition,\n transformed_condition,\n float_rtol,\n graceful_reject_sampling\n )\n all_sampled_rows.append(sampled_rows)\n\n all_sampled_rows = pd.concat(all_sampled_rows)\n all_sampled_rows = all_sampled_rows.set_index(COND_IDX)\n all_sampled_rows.index.name = conditions.index.name\n all_sampled_rows = all_sampled_rows.sort_index()\n all_sampled_rows = self._metadata.make_ids_unique(all_sampled_rows)\n\n return all_sampled_rows\n\n def _get_parameters(self):\n raise NonParametricError()\n\n def get_parameters(self):\n \"\"\"Get the parameters learned from the data.\n\n The result is a flat dict (single level) which contains\n all the necessary parameters to be able to reproduce\n this model.\n\n Subclasses which are not parametric, such as DeepLearning\n based models, raise a NonParametricError indicating that\n this method is not supported for their implementation.\n\n Returns:\n parameters (dict):\n flat dict (single level) which contains all the\n necessary parameters to be able to reproduce\n this model.\n\n Raises:\n 
NonParametricError:\n If the model is not parametric or cannot be described\n using a simple dictionary.\n \"\"\"\n if self._metadata.get_dtypes(ids=False):\n parameters = self._get_parameters()\n else:\n parameters = {}\n\n parameters['num_rows'] = self._num_rows\n return parameters\n\n def _set_parameters(self, parameters):\n raise NonParametricError()\n\n def set_parameters(self, parameters):\n \"\"\"Regenerate a previously learned model from its parameters.\n\n Subclasses which are not parametric, such as DeepLearning\n based models, raise a NonParametricError indicating that\n this method is not supported for their implementation.\n\n Args:\n dict:\n Model parameters.\n\n Raises:\n NonParametricError:\n If the model is not parametric or cannot be described\n using a simple dictionary.\n \"\"\"\n num_rows = parameters.pop('num_rows')\n self._num_rows = 0 if pd.isnull(num_rows) else max(0, int(round(num_rows)))\n\n if self._metadata.get_dtypes(ids=False):\n self._set_parameters(parameters)\n\n def save(self, path):\n \"\"\"Save this model instance to the given path using pickle.\n\n Args:\n path (str):\n Path where the SDV instance will be serialized.\n \"\"\"\n with open(path, 'wb') as output:\n pickle.dump(self, output)\n\n @classmethod\n def load(cls, path):\n \"\"\"Load a TabularModel instance from a given path.\n\n Args:\n path (str):\n Path from which to load the instance.\n\n Returns:\n TabularModel:\n The loaded tabular model.\n \"\"\"\n with open(path, 'rb') as f:\n return pickle.load(f)\n" ]
[ [ "pandas.isnull", "pandas.DataFrame", "numpy.abs", "pandas.concat" ] ]
graziano-giuliani/pythoncode
[ "4e505af5be3e32519cf4e62b85c101a63c885f77" ]
[ "pyuwphysret/common/pyfiles/atmos/Stability/svpwat.py" ]
[ "#!/usr/bin/env python\n# svpwat.py\nimport numpy as num\n\ndef svpwat(t):\n \"\"\"e = svpwat(t)\nCalculates the water vapor mixing ratio\n\nInputs: (all vectors of same length)\n t = dry bulb temperature(s) (K)\n\nOutputs:\n e = saturation vapor pressure with respect to a plane surface of ice (mb)\n\nRLT, 010710\n \"\"\"\n A0 = 0.999996876e0\n A1 = -0.9082695004e-2\n A2 = 0.7873616869e-4\n A3 = -0.6111795727e-6\n A4 = 0.4388418740e-8\n A5 = -0.2988388486e-10\n A6 = 0.2187442495e-12\n A7 = -0.1789232111e-14\n A8 = 0.1111201803e-16\n A9 = -0.3099457145e-19\n B = 0.61078e+1\n T = t - 273.16\n E = A0 + T*(A1 + T*(A2 + T*(A3 + T*(A4 + T*(A5 + T*(A6 + T*(A7 +\n T*(A8 + T*A9))))))))\n E = B/pow(E,8)\n return E\n\nif __name__ == '__main__':\n print(svpwat.__doc__)\n t = num.array(\n ( 24.54, 23.16, 21.67, 20.23, 18.86, 17.49, 16.10, 14.69, 13.22, 11.52,\n 9.53, 7.24, 4.80, 2.34, 0.04, -2.29, -4.84, -7.64,-10.66,-13.95,\n -17.54,-21.45,-25.58,-29.90,-34.33,-38.94,-43.78,-48.80,-53.94,-58.79,\n -63.27,-67.32,-70.74,-73.62,-75.74,-77.07,-77.43,-76.63,-75.06,-73.14,\n -71.43 ))\n t = t + 273.15\n e = svpwat(t)\n print(t)\n print(e)\n" ]
[ [ "numpy.array" ] ]
aOlmo/SRN-Deblur-test
[ "8c2ad2f54a988811987c0ad5157db2a3b0de4b3d" ]
[ "models/model.py" ]
[ "from __future__ import print_function\nimport os\nimport time\nimport random\nimport datetime\nimport scipy.misc\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom datetime import datetime\nfrom util.util import *\nfrom util.BasicConvLSTMCell import *\n\n\nclass DEBLUR(object):\n def __init__(self, args):\n self.args = args\n self.n_levels = 3\n self.scale = 0.5\n self.chns = 3 if self.args.model == 'color' else 1 # input / output channels\n\n # if args.phase == 'train':\n self.crop_size = 256\n self.data_list = open(args.datalist, 'rt').read().splitlines()\n self.data_list = list(map(lambda x: x.split(' '), self.data_list))\n random.shuffle(self.data_list)\n self.train_dir = os.path.join('./checkpoints', args.model)\n if not os.path.exists(self.train_dir):\n os.makedirs(self.train_dir)\n\n self.batch_size = args.batch_size\n self.epoch = args.epoch\n self.data_size = (len(self.data_list)) // self.batch_size\n self.max_steps = int(self.epoch * self.data_size)\n self.learning_rate = args.learning_rate\n\n def input_producer(self, batch_size=10):\n def read_data():\n img_a = tf.image.decode_image(tf.read_file(tf.string_join(['./training_set/', self.data_queue[0]])),\n channels=3)\n img_b = tf.image.decode_image(tf.read_file(tf.string_join(['./training_set/', self.data_queue[1]])),\n channels=3)\n img_a, img_b = preprocessing([img_a, img_b])\n return img_a, img_b\n\n def preprocessing(imgs):\n imgs = [tf.cast(img, tf.float32) / 255.0 for img in imgs]\n if self.args.model != 'color':\n imgs = [tf.image.rgb_to_grayscale(img) for img in imgs]\n img_crop = tf.unstack(tf.random_crop(tf.stack(imgs, axis=0), [2, self.crop_size, self.crop_size, self.chns]),\n axis=0)\n return img_crop\n\n with tf.variable_scope('input'):\n List_all = tf.convert_to_tensor(self.data_list, dtype=tf.string)\n gt_list = List_all[:, 0]\n in_list = List_all[:, 1]\n\n self.data_queue = tf.train.slice_input_producer([in_list, gt_list], capacity=20)\n image_in, image_gt = read_data()\n batch_in, batch_gt = tf.train.batch([image_in, image_gt], batch_size=batch_size, num_threads=8, capacity=20)\n\n return batch_in, batch_gt\n\n def generator(self, inputs, reuse=False, scope='g_net'):\n n, h, w, c = inputs.get_shape().as_list()\n\n if self.args.model == 'lstm':\n with tf.variable_scope('LSTM'):\n cell = BasicConvLSTMCell([h / 4, w / 4], [3, 3], 128)\n rnn_state = cell.zero_state(batch_size=self.batch_size, dtype=tf.float32)\n\n x_unwrap = []\n with tf.variable_scope(scope, reuse=reuse):\n with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],\n activation_fn=tf.nn.relu, padding='SAME', normalizer_fn=None,\n weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True),\n biases_initializer=tf.constant_initializer(0.0)):\n\n inp_pred = inputs\n for i in xrange(self.n_levels):\n scale = self.scale ** (self.n_levels - i - 1)\n hi = int(round(h * scale))\n wi = int(round(w * scale))\n inp_blur = tf.image.resize_images(inputs, [hi, wi], method=0)\n inp_pred = tf.stop_gradient(tf.image.resize_images(inp_pred, [hi, wi], method=0))\n inp_all = tf.concat([inp_blur, inp_pred], axis=3, name='inp')\n if self.args.model == 'lstm':\n rnn_state = tf.image.resize_images(rnn_state, [hi // 4, wi // 4], method=0)\n\n # encoder\n conv1_1 = slim.conv2d(inp_all, 32, [5, 5], scope='enc1_1')\n conv1_2 = ResnetBlock(conv1_1, 32, 5, scope='enc1_2')\n conv1_3 = ResnetBlock(conv1_2, 32, 5, scope='enc1_3')\n conv1_4 = ResnetBlock(conv1_3, 32, 5, scope='enc1_4')\n conv2_1 = slim.conv2d(conv1_4, 64, [5, 
5], stride=2, scope='enc2_1')\n conv2_2 = ResnetBlock(conv2_1, 64, 5, scope='enc2_2')\n conv2_3 = ResnetBlock(conv2_2, 64, 5, scope='enc2_3')\n conv2_4 = ResnetBlock(conv2_3, 64, 5, scope='enc2_4')\n conv3_1 = slim.conv2d(conv2_4, 128, [5, 5], stride=2, scope='enc3_1')\n conv3_2 = ResnetBlock(conv3_1, 128, 5, scope='enc3_2')\n conv3_3 = ResnetBlock(conv3_2, 128, 5, scope='enc3_3')\n conv3_4 = ResnetBlock(conv3_3, 128, 5, scope='enc3_4')\n\n if self.args.model == 'lstm':\n deconv3_4, rnn_state = cell(conv3_4, rnn_state)\n else:\n deconv3_4 = conv3_4\n\n # decoder\n deconv3_3 = ResnetBlock(deconv3_4, 128, 5, scope='dec3_3')\n deconv3_2 = ResnetBlock(deconv3_3, 128, 5, scope='dec3_2')\n deconv3_1 = ResnetBlock(deconv3_2, 128, 5, scope='dec3_1')\n deconv2_4 = slim.conv2d_transpose(deconv3_1, 64, [4, 4], stride=2, scope='dec2_4')\n cat2 = deconv2_4 + conv2_4\n deconv2_3 = ResnetBlock(cat2, 64, 5, scope='dec2_3')\n deconv2_2 = ResnetBlock(deconv2_3, 64, 5, scope='dec2_2')\n deconv2_1 = ResnetBlock(deconv2_2, 64, 5, scope='dec2_1')\n deconv1_4 = slim.conv2d_transpose(deconv2_1, 32, [4, 4], stride=2, scope='dec1_4')\n cat1 = deconv1_4 + conv1_4\n deconv1_3 = ResnetBlock(cat1, 32, 5, scope='dec1_3')\n deconv1_2 = ResnetBlock(deconv1_3, 32, 5, scope='dec1_2')\n deconv1_1 = ResnetBlock(deconv1_2, 32, 5, scope='dec1_1')\n inp_pred = slim.conv2d(deconv1_1, self.chns, [5, 5], activation_fn=None, scope='dec1_0')\n\n if i >= 0:\n x_unwrap.append(inp_pred)\n if i == 0:\n tf.get_variable_scope().reuse_variables()\n\n return x_unwrap\n\n def build_model(self):\n img_in, img_gt = self.input_producer(self.batch_size)\n\n tf.summary.image('img_in', im2uint8(img_in))\n tf.summary.image('img_gt', im2uint8(img_gt))\n print('img_in, img_gt', img_in.get_shape(), img_gt.get_shape())\n\n # generator\n x_unwrap = self.generator(img_in, reuse=False, scope='g_net')\n # calculate multi-scale loss\n self.loss_total = 0\n for i in xrange(self.n_levels):\n _, hi, wi, _ = x_unwrap[i].get_shape().as_list()\n gt_i = tf.image.resize_images(img_gt, [hi, wi], method=0)\n loss = tf.reduce_mean((gt_i - x_unwrap[i]) ** 2)\n self.loss_total += loss\n\n tf.summary.image('out_' + str(i), im2uint8(x_unwrap[i]))\n tf.summary.scalar('loss_' + str(i), loss)\n\n # losses\n tf.summary.scalar('loss_total', self.loss_total)\n\n # training vars\n all_vars = tf.trainable_variables()\n self.all_vars = all_vars\n self.g_vars = [var for var in all_vars if 'g_net' in var.name]\n self.lstm_vars = [var for var in all_vars if 'LSTM' in var.name]\n for var in all_vars:\n print(var.name)\n\n def train(self):\n def get_optimizer(loss, global_step=None, var_list=None, is_gradient_clip=False):\n train_op = tf.train.AdamOptimizer(self.lr)\n if is_gradient_clip:\n grads_and_vars = train_op.compute_gradients(loss, var_list=var_list)\n unchanged_gvs = [(grad, var) for grad, var in grads_and_vars if not 'LSTM' in var.name]\n rnn_grad = [grad for grad, var in grads_and_vars if 'LSTM' in var.name]\n rnn_var = [var for grad, var in grads_and_vars if 'LSTM' in var.name]\n capped_grad, _ = tf.clip_by_global_norm(rnn_grad, clip_norm=3)\n capped_gvs = list(zip(capped_grad, rnn_var))\n train_op = train_op.apply_gradients(grads_and_vars=capped_gvs + unchanged_gvs, global_step=global_step)\n else:\n train_op = train_op.minimize(loss, global_step, var_list)\n return train_op\n\n global_step = tf.Variable(initial_value=0, dtype=tf.int32, trainable=False)\n self.global_step = global_step\n\n # build model\n self.build_model()\n\n # learning rate decay\n self.lr = 
tf.train.polynomial_decay(self.learning_rate, global_step, self.max_steps, end_learning_rate=0.0,\n power=0.3)\n tf.summary.scalar('learning_rate', self.lr)\n\n # training operators\n train_gnet = get_optimizer(self.loss_total, global_step, self.all_vars)\n\n # session and thread\n gpu_options = tf.GPUOptions(allow_growth=True)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n self.sess = sess\n sess.run(tf.global_variables_initializer())\n self.saver = tf.train.Saver(max_to_keep=50, keep_checkpoint_every_n_hours=1)\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # training summary\n summary_op = tf.summary.merge_all()\n summary_writer = tf.summary.FileWriter(self.train_dir, sess.graph, flush_secs=30)\n\n for step in xrange(sess.run(global_step), self.max_steps + 1):\n\n start_time = time.time()\n\n # update G network\n _, loss_total_val = sess.run([train_gnet, self.loss_total])\n\n duration = time.time() - start_time\n # print loss_value\n assert not np.isnan(loss_total_val), 'Model diverged with loss = NaN'\n\n if step % 5 == 0:\n num_examples_per_step = self.batch_size\n examples_per_sec = num_examples_per_step / duration\n sec_per_batch = float(duration)\n\n format_str = ('%s: step %d, loss = (%.5f; %.5f, %.5f)(%.1f data/s; %.3f s/bch)')\n print(format_str % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), step, loss_total_val, 0.0,\n 0.0, examples_per_sec, sec_per_batch))\n\n if step % 20 == 0:\n # summary_str = sess.run(summary_op, feed_dict={inputs:batch_input, gt:batch_gt})\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, global_step=step)\n\n # Save the model checkpoint periodically.\n if step % 1000 == 0 or step == self.max_steps:\n checkpoint_path = os.path.join(self.train_dir, 'checkpoints')\n self.save(sess, checkpoint_path, step)\n\n def save(self, sess, checkpoint_dir, step):\n model_name = \"deblur.model\"\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n self.saver.save(sess, os.path.join(checkpoint_dir, model_name), global_step=step)\n\n def load(self, sess, checkpoint_dir, step=None):\n print(\" [*] Reading checkpoints...\")\n model_name = \"deblur.model\"\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n\n if step is not None:\n ckpt_name = model_name + '-' + str(step)\n self.saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))\n print(\" [*] Reading intermediate checkpoints... Success\")\n return str(step)\n elif ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n ckpt_iter = ckpt_name.split('-')[1]\n self.saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))\n print(\" [*] Reading updated checkpoints... Success\")\n return ckpt_iter\n else:\n print(\" [*] Reading checkpoints... 
ERROR\")\n return False\n\n def test(self, height, width, input_path, output_path):\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n imgsName = sorted(os.listdir(input_path))\n\n H, W = height, width\n inp_chns = 3 if self.args.model == 'color' else 1\n self.batch_size = 1 if self.args.model == 'color' else 3\n inputs = tf.placeholder(shape=[self.batch_size, H, W, inp_chns], dtype=tf.float32)\n outputs = self.generator(inputs, reuse=False)\n\n sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))\n \n self.saver = tf.train.Saver()\n self.load(sess, self.train_dir)\n\n for imgName in imgsName:\n blur = scipy.misc.imread(os.path.join(input_path, imgName))\n h, w, c = blur.shape\n # make sure the width is larger than the height\n rot = False\n if h > w:\n blur = np.transpose(blur, [1, 0, 2])\n rot = True\n h = int(blur.shape[0])\n w = int(blur.shape[1])\n resize = False\n if h > H or w > W:\n scale = min(1.0 * H / h, 1.0 * W / w)\n new_h = int(h * scale)\n new_w = int(w * scale)\n blur = scipy.misc.imresize(blur, [new_h, new_w], 'bicubic')\n resize = True\n blurPad = np.pad(blur, ((0, H - new_h), (0, W - new_w), (0, 0)), 'edge')\n else:\n blurPad = np.pad(blur, ((0, H - h), (0, W - w), (0, 0)), 'edge')\n blurPad = np.expand_dims(blurPad, 0)\n if self.args.model != 'color':\n blurPad = np.transpose(blurPad, (3, 1, 2, 0))\n\n start = time.time()\n deblur = sess.run(outputs, feed_dict={inputs: blurPad / 255.0})\n duration = time.time() - start\n print('Saving results: %s ... %4.3fs' % (os.path.join(output_path, imgName), duration))\n res = deblur[-1]\n if self.args.model != 'color':\n res = np.transpose(res, (3, 1, 2, 0))\n res = im2uint8(res[0, :, :, :])\n # crop the image into original size\n if resize:\n res = res[:new_h, :new_w, :]\n res = scipy.misc.imresize(res, [h, w], 'bicubic')\n else:\n res = res[:h, :w, :]\n\n if rot:\n res = np.transpose(res, [1, 0, 2])\n scipy.misc.imsave(os.path.join(output_path, imgName), res)\n" ]
[ [ "tensorflow.train.start_queue_runners", "tensorflow.constant_initializer", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.train.slice_input_producer", "tensorflow.train.get_checkpoint_state", "tensorflow.string_join", "tensorflow.stack", "tensorflow.global_variables_initializer", "tensorflow.image.rgb_to_grayscale", "tensorflow.cast", "tensorflow.trainable_variables", "tensorflow.concat", "tensorflow.Variable", "tensorflow.train.Saver", "tensorflow.ConfigProto", "tensorflow.variable_scope", "tensorflow.contrib.slim.conv2d_transpose", "numpy.transpose", "tensorflow.get_variable_scope", "numpy.expand_dims", "numpy.pad", "tensorflow.train.AdamOptimizer", "tensorflow.train.Coordinator", "tensorflow.summary.scalar", "tensorflow.train.batch", "tensorflow.train.polynomial_decay", "tensorflow.placeholder", "tensorflow.summary.merge_all", "tensorflow.contrib.slim.conv2d", "tensorflow.image.resize_images", "tensorflow.clip_by_global_norm", "tensorflow.convert_to_tensor", "numpy.isnan", "tensorflow.summary.FileWriter", "tensorflow.reduce_mean", "tensorflow.GPUOptions" ] ]
hsungyang/poseidonos
[ "0f523b36ccf0d70726364395ea96ac6ae3b845c3" ]
[ "test/system/benchmark/graph/draw.py" ]
[ "import lib\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport numpy as np\n\n\ndef FormatLatency(y, idx=0):\n if y >= 1e9:\n return f\"{round(y/1e9)}s\"\n elif y >= 1e6:\n return f\"{round(y/1e6)}ms\"\n elif y >= 1e3:\n return f\"{round(y/1e3)}us\"\n else:\n return f\"{round(y)}ns\"\n\n\ndef FormatIOPS(y, idx=0):\n if y >= 1e9:\n return f\"{round(y/1e9)}Giops\"\n elif y >= 1e6:\n return f\"{round(y/1e6)}Miops\"\n elif y >= 1e3:\n return f\"{round(y/1e3)}Kiops\"\n else:\n return f\"{round(y)}iops\"\n\n\ndef FormatBW(y, idx=0):\n if y >= 1e9:\n return f\"{round(y/1e9)}GiB/s\"\n elif y >= 1e6:\n return f\"{round(y/1e6)}MiB/s\"\n elif y >= 1e3:\n return f\"{round(y/1e3)}KiB/s\"\n else:\n return f\"{round(y)}B/s\"\n\n\ndef FormatKBW(y, idx=0):\n if y >= 1e9:\n return f\"{round(y/1e9)}TiB/s\"\n elif y >= 1e6:\n return f\"{round(y/1e6)}GiB/s\"\n elif y >= 1e3:\n return f\"{round(y/1e3)}MiB/s\"\n else:\n return f\"{round(y)}KiB/s\"\n\n\ndef FormatSimpleFloat(y, pos=1):\n if y >= 1e9:\n return f\"{round(y/1e9, pos)}\"\n elif y >= 1e6:\n return f\"{round(y/1e6, pos)}\"\n elif y >= 1e3:\n return f\"{round(y/1e3, pos)}\"\n else:\n return f\"{round(y, pos)}\"\n\n\ndef DrawEta(data, pic_name, graph_list):\n try:\n plt.clf() # plot 초기화\n num_graph = len(graph_list)\n fig = plt.figure(figsize=(8, 3 * num_graph)) # plot size 설정(unit: inch)\n\n for i in range(num_graph):\n type = graph_list[i]\n ax = plt.subplot(num_graph, 1, i + 1) # subplot 생성(행, 렬, 순서)\n ax.set_title(type, fontsize=12)\n ax.grid(True, axis=\"y\", color=\"lightgrey\", zorder=0)\n plt.xlabel(\"percentage\", fontsize=9)\n if \"iops\" in type:\n ax.yaxis.set_major_formatter(ticker.FuncFormatter(FormatIOPS))\n elif \"bw\" in type:\n ax.yaxis.set_major_formatter(ticker.FuncFormatter(FormatBW))\n else:\n plt.ticklabel_format(axis=\"y\", style=\"plain\")\n ax.yaxis.set_major_formatter(ticker.EngFormatter())\n ax.tick_params(axis='y', labelrotation=30, labelsize=8)\n for v in data.values():\n plt.scatter(v[\"x\"], v[type], s=10, label=v[\"title\"]) # 점 그래프 그리기\n plt.plot(v[\"x\"], v[type]) # 선 그래프 그리기\n plt.legend(fontsize=8, loc=\"upper left\", ncol=2) # 범례 그리기\n\n plt.tight_layout()\n plt.savefig(f\"{pic_name}_eta.png\", dpi=200)\n plt.close(fig)\n except Exception as e:\n lib.printer.red(f\"{__name__} [Error] {e}\")\n plt.close(fig)\n\n\ndef DrawResult(data, pic_name):\n try:\n plt.clf() # plot 초기화\n fig = plt.figure(figsize=(12, 12)) # plot size 설정(unit: inch)\n prop_cycle = plt.rcParams[\"axes.prop_cycle\"]\n color_list = prop_cycle.by_key()[\"color\"]\n\n for i in range(12):\n ax = plt.subplot(4, 3, i + 1) # subplot 생성(행, 렬, 순서)\n ax.set_title(data[i][\"title\"], fontsize=12)\n ax.grid(True, axis=\"x\", color=\"lightgrey\", zorder=0)\n hbars = ax.barh( # 가로 막대 그래프 그리기\n range(len(data[i][\"value\"])),\n data[i][\"value\"],\n align=\"center\",\n color=color_list,\n zorder=3\n )\n ax.set_yticks(range(len(data[i][\"value\"])))\n ax.set_yticklabels(data[i][\"index\"], fontsize=8)\n ax.invert_yaxis()\n if \"lat\" in data[i][\"title\"]:\n ax.xaxis.set_major_formatter(ticker.FuncFormatter(FormatLatency))\n elif \"iops\" in data[i][\"title\"]:\n ax.xaxis.set_major_formatter(ticker.FuncFormatter(FormatIOPS))\n elif \"bw\" in data[i][\"title\"]:\n ax.xaxis.set_major_formatter(ticker.FuncFormatter(FormatKBW))\n else:\n ax.xaxis.set_major_formatter(ticker.EngFormatter())\n ax.tick_params(axis=\"x\", labelrotation=30, labelsize=8)\n\n rects = ax.patches\n x_min, x_max = plt.gca().get_xlim()\n for rect in rects: # 막대에 label 
붙여서 값 표시\n x_val = rect.get_width()\n y_val = rect.get_y() + rect.get_height() / 2\n label = FormatSimpleFloat(x_val)\n x_offset = 5\n align = \"left\"\n # 막대의 크기가 subplot의 3/4보다 크면 label이 subplot을 넘어가는 것 방지\n if 0.75 < (x_val / x_max):\n x_offset = -10\n align = \"right\"\n plt.annotate(\n label,\n (x_val, y_val),\n xytext=(x_offset, 0),\n textcoords=\"offset points\",\n va=\"center\",\n ha=align,\n fontsize=9\n )\n\n plt.tight_layout()\n plt.savefig(f\"{pic_name}_result.png\", dpi=200)\n plt.close(fig)\n except Exception as e:\n lib.printer.red(f\"{__name__} [Error] {e}\")\n plt.close(fig)\n" ]
[ [ "matplotlib.pyplot.annotate", "matplotlib.ticker.EngFormatter", "matplotlib.pyplot.gca", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.scatter", "matplotlib.pyplot.clf", "matplotlib.ticker.FuncFormatter", "matplotlib.pyplot.ticklabel_format", "matplotlib.pyplot.subplot" ] ]
openclosebrackets/scikit-learn-tests
[ "92ff5fda2e59ba40e515b89085a5357aab08d41a" ]
[ "tutorials/regression/linear/python/main_scipy.py" ]
[ "from classes import *\nfrom scipy.optimize import least_squares\n\ndef fun_lsm(w, height, weight):\n return weight - height * w[0] - w[1]\n\n \nif __name__ == '__main__':\n data_path = \"../../../../data/weight-height.csv\"\n obj = mice(data_path)\n\n x0 = [0.1, 0.1]\n res = least_squares(fun_lsm, x0, args=(obj.height, obj.weight))\n plots(obj.x, obj.y, obj.names, res.x)\n" ]
[ [ "scipy.optimize.least_squares" ] ]
mullovc/NMTGMinor
[ "b1b7b1e018eaa0d99a43449655937cc050a29987" ]
[ "train_language_model.py" ]
[ "from __future__ import division\n\nimport onmt\nimport onmt.markdown\nimport onmt.modules\nimport argparse\nimport torch\nimport torch.nn as nn\nfrom torch import cuda\nfrom torch.autograd import Variable\nimport math\nimport time, datetime\nfrom onmt.train_utils.trainer import XETrainer\nfrom onmt.modules.loss import NMTLossFunc, NMTAndCTCLossFunc\nfrom onmt.model_factory import build_language_model, optimize_model\nfrom onmt.data.lm_dataset import LanguageModelDataset\nfrom collections import defaultdict\n\n\nparser = argparse.ArgumentParser(description='train.py')\nonmt.markdown.add_md_help_argument(parser)\n\nfrom onmt.options import make_parser\n# Please look at the options file to see the options regarding models and data\nparser = make_parser(parser)\n\nopt = parser.parse_args()\n\nprint(opt)\n\n# An ugly hack to have weight norm on / off\nonmt.constants.weight_norm = opt.weight_norm\nonmt.constants.checkpointing = opt.checkpointing\nonmt.constants.max_position_length = opt.max_position_length\n\n# Use static dropout if checkpointing > 0\nif opt.checkpointing > 0:\n onmt.constants.static = True\n\nif torch.cuda.is_available() and not opt.gpus:\n print(\"WARNING: You have a CUDA device, should run with -gpus 0\")\n\n\ntorch.manual_seed(opt.seed)\n\n\ndef main():\n\n start = time.time()\n print(\"Loading data from '%s'\" % opt.data)\n\n if opt.data_format == 'raw':\n dataset = torch.load(opt.data)\n elapse = str(datetime.timedelta(seconds=int(time.time() - start)))\n print(\"Done after %s\" % elapse)\n\n dicts = dataset['dicts']\n\n # For backward compatibility\n train_dict = defaultdict(lambda: None, dataset['train'])\n valid_dict = defaultdict(lambda: None, dataset['valid'])\n\n if train_dict['src_lang'] is not None:\n assert 'langs' in dicts\n train_src_langs = train_dict['src_lang']\n train_tgt_langs = train_dict['tgt_lang']\n else:\n # allocate new languages\n dicts['langs'] = {'src': 0, 'tgt': 1}\n train_src_langs = list()\n train_tgt_langs = list()\n # Allocation one for the bilingual case\n train_src_langs.append(torch.Tensor([dicts['langs']['src']]))\n train_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))\n\n train_data = LanguageModelDataset(\n dataset['train']['tgt'], train_tgt_langs,\n batch_size_sents=opt.batch_size_sents,\n seq_length=opt.lm_seq_length)\n\n if valid_dict['src_lang'] is not None:\n assert 'langs' in dicts\n valid_src_langs = valid_dict['src_lang']\n valid_tgt_langs = valid_dict['tgt_lang']\n else:\n # allocate new languages\n valid_src_langs = list()\n valid_tgt_langs = list()\n\n # Allocation one for the bilingual case\n valid_src_langs.append(torch.Tensor([dicts['langs']['src']]))\n valid_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))\n\n valid_data = LanguageModelDataset(\n dataset['valid']['tgt'], valid_tgt_langs,\n batch_size_sents=opt.batch_size_sents,\n seq_length=opt.lm_seq_length)\n\n\n\n if opt.load_from:\n checkpoint = torch.load(opt.load_from, map_location=lambda storage, loc: storage)\n print(\"* Loading dictionaries from the checkpoint\")\n dicts = checkpoint['dicts']\n else:\n dicts['tgt'].patch(opt.patch_vocab_multiplier)\n checkpoint = None\n\n if \"src\" in dicts:\n print(' * vocabulary size. source = %d; target = %d' %\n (dicts['src'].size(), dicts['tgt'].size()))\n else:\n print(' * vocabulary size. target = %d' %\n (dicts['tgt'].size()))\n\n print(' * number of training sentences. %d' %\n train_data.size())\n print(' * maximum batch size (words per batch). 
%d' % (opt.batch_size_sents * opt.lm_seq_length))\n\n else:\n raise NotImplementedError\n\n print('Building model...')\n model = build_language_model(opt, dicts)\n optimize_model(model)\n\n \"\"\" Building the loss function \"\"\"\n loss_function = NMTLossFunc(opt.model_size, dicts['tgt'].size(), label_smoothing=opt.label_smoothing)\n\n n_params = sum([p.nelement() for p in model.parameters()])\n print('* number of parameters: %d' % n_params)\n \n if len(opt.gpus) > 1 or opt.virtual_gpu > 1:\n raise NotImplementedError(\"Multi-GPU training is not supported ATM.\")\n else:\n # if opt.fp16:\n # trainer = FP16XETrainer(model, loss_function, train_data, valid_data, dicts, opt)\n # else:\n trainer = XETrainer(model, loss_function, train_data, valid_data, dicts, opt)\n\n trainer.run(checkpoint=checkpoint)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.manual_seed", "torch.cuda.is_available", "torch.Tensor", "torch.load" ] ]
neurips2020submission/invalid-action-masking
[ "f27065b187b8eed6316dcc4bb94322771bc89a6a" ]
[ "invalid_action_masking/ppo_no_adj_10x10.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.distributions.categorical import Categorical\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom cleanrl.common import preprocess_obs_space, preprocess_ac_space\nimport argparse\nimport numpy as np\nimport gym\nimport gym_microrts\nfrom gym.wrappers import TimeLimit, Monitor\nimport pybullet_envs\nfrom gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space\nimport time\nimport random\nimport os\nimport pandas as pd\n\n# taken from https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_normalize.py\nclass RunningMeanStd(object):\n def __init__(self, epsilon=1e-4, shape=()):\n self.mean = np.zeros(shape, 'float64')\n self.var = np.ones(shape, 'float64')\n self.count = epsilon\n\n def update(self, x):\n batch_mean = np.mean([x], axis=0)\n batch_var = np.var([x], axis=0)\n batch_count = 1\n self.update_from_moments(batch_mean, batch_var, batch_count)\n\n def update_from_moments(self, batch_mean, batch_var, batch_count):\n self.mean, self.var, self.count = update_mean_var_count_from_moments(\n self.mean, self.var, self.count, batch_mean, batch_var, batch_count)\n\ndef update_mean_var_count_from_moments(mean, var, count, batch_mean, batch_var, batch_count):\n delta = batch_mean - mean\n tot_count = count + batch_count\n\n new_mean = mean + delta * batch_count / tot_count\n m_a = var * count\n m_b = batch_var * batch_count\n M2 = m_a + m_b + np.square(delta) * count * batch_count / tot_count\n new_var = M2 / tot_count\n new_count = tot_count\n\n return new_mean, new_var, new_count\nclass NormalizedEnv(gym.core.Wrapper):\n def __init__(self, env, ob=True, ret=True, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8):\n super(NormalizedEnv, self).__init__(env)\n self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if ob else None\n self.ret_rms = RunningMeanStd(shape=(1,)) if ret else None\n self.clipob = clipob\n self.cliprew = cliprew\n self.ret = np.zeros(())\n self.gamma = gamma\n self.epsilon = epsilon\n\n def step(self, action):\n obs, rews, news, infos = self.env.step(action)\n infos['real_reward'] = rews\n # print(\"before\", self.ret)\n self.ret = self.ret * self.gamma + rews\n # print(\"after\", self.ret)\n obs = self._obfilt(obs)\n if self.ret_rms:\n self.ret_rms.update(np.array([self.ret].copy()))\n rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)\n self.ret = self.ret * (1-float(news))\n return obs, rews, news, infos\n\n def _obfilt(self, obs):\n if self.ob_rms:\n self.ob_rms.update(obs)\n obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)\n return obs\n else:\n return obs\n\n def reset(self):\n self.ret = np.zeros(())\n obs = self.env.reset()\n return self._obfilt(obs)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='PPO agent')\n # Common arguments\n parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(\".py\"),\n help='the name of this experiment')\n parser.add_argument('--gym-id', type=str, default=\"MicrortsMining10x10F9-v0\",\n help='the id of the gym environment')\n parser.add_argument('--seed', type=int, default=1,\n help='seed of the experiment')\n parser.add_argument('--episode-length', type=int, default=0,\n help='the maximum length of each episode')\n parser.add_argument('--total-timesteps', type=int, default=100000,\n help='total timesteps of the 
experiments')\n parser.add_argument('--no-torch-deterministic', action='store_false', dest=\"torch_deterministic\", default=True,\n help='if toggled, `torch.backends.cudnn.deterministic=False`')\n parser.add_argument('--no-cuda', action='store_false', dest=\"cuda\", default=True,\n help='if toggled, cuda will not be enabled by default')\n parser.add_argument('--prod-mode', action='store_true', default=False,\n help='run the script in production mode and use wandb to log outputs')\n parser.add_argument('--capture-video', action='store_true', default=False,\n help='weather to capture videos of the agent performances (check out `videos` folder)')\n parser.add_argument('--wandb-project-name', type=str, default=\"cleanRL\",\n help=\"the wandb's project name\")\n parser.add_argument('--wandb-entity', type=str, default=None,\n help=\"the entity (team) of wandb's project\")\n\n # Algorithm specific arguments\n parser.add_argument('--batch-size', type=int, default=2048,\n help='the batch size of ppo')\n parser.add_argument('--minibatch-size', type=int, default=256,\n help='the mini batch size of ppo')\n parser.add_argument('--gamma', type=float, default=0.99,\n help='the discount factor gamma')\n parser.add_argument('--gae-lambda', type=float, default=0.97,\n help='the lambda for the general advantage estimation')\n parser.add_argument('--ent-coef', type=float, default=0.01,\n help=\"coefficient of the entropy\")\n parser.add_argument('--max-grad-norm', type=float, default=0.5,\n help='the maximum norm for the gradient clipping')\n parser.add_argument('--clip-coef', type=float, default=0.2,\n help=\"the surrogate clipping coefficient\")\n parser.add_argument('--update-epochs', type=int, default=10,\n help=\"the K epochs to update the policy\")\n parser.add_argument('--kle-stop', action='store_true', default=False,\n help='If toggled, the policy updates will be early stopped w.r.t target-kl')\n parser.add_argument('--kle-rollback', action='store_true', default=False,\n help='If toggled, the policy updates will roll back to previous policy if KL exceeds target-kl')\n parser.add_argument('--target-kl', type=float, default=0.015,\n help='the target-kl variable that is referred by --kl')\n parser.add_argument('--gae', action='store_true', default=True,\n help='Use GAE for advantage computation')\n parser.add_argument('--policy-lr', type=float, default=3e-4,\n help=\"the learning rate of the policy optimizer\")\n parser.add_argument('--value-lr', type=float, default=3e-4,\n help=\"the learning rate of the critic optimizer\")\n parser.add_argument('--norm-obs', action='store_true', default=True,\n help=\"Toggles observation normalization\")\n parser.add_argument('--norm-returns', action='store_true', default=False,\n help=\"Toggles returns normalization\")\n parser.add_argument('--norm-adv', action='store_true', default=True,\n help=\"Toggles advantages normalization\")\n parser.add_argument('--obs-clip', type=float, default=10.0,\n help=\"Value for reward clipping, as per the paper\")\n parser.add_argument('--rew-clip', type=float, default=10.0,\n help=\"Value for observation clipping, as per the paper\")\n parser.add_argument('--anneal-lr', action='store_true', default=True,\n help=\"Toggle learning rate annealing for policy and value networks\")\n parser.add_argument('--weights-init', default=\"orthogonal\", choices=[\"xavier\", 'orthogonal'],\n help='Selects the scheme to be used for weights initialization'),\n parser.add_argument('--clip-vloss', action=\"store_true\", default=True,\n help='Toggles 
wheter or not to use a clipped loss for the value function, as per the paper.')\n parser.add_argument('--pol-layer-norm', action='store_true', default=False,\n help='Enables layer normalization in the policy network')\n args = parser.parse_args()\n if not args.seed:\n args.seed = int(time.time())\n\nargs.features_turned_on = sum([args.kle_stop, args.kle_rollback, args.gae, args.norm_obs, args.norm_returns, args.norm_adv, args.anneal_lr, args.clip_vloss, args.pol_layer_norm])\n# TRY NOT TO MODIFY: setup the environment\nexperiment_name = f\"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}\"\nwriter = SummaryWriter(f\"runs/{experiment_name}\")\nwriter.add_text('hyperparameters', \"|param|value|\\n|-|-|\\n%s\" % (\n '\\n'.join([f\"|{key}|{value}|\" for key, value in vars(args).items()])))\n\nif args.prod_mode:\n import wandb\n wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True)\n writer = SummaryWriter(f\"/tmp/{experiment_name}\")\n wandb.save(os.path.abspath(__file__))\n\n# TRY NOT TO MODIFY: seeding\ndevice = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')\nenv = gym.make(args.gym_id)\n# respect the default timelimit\nassert isinstance(env.action_space, MultiDiscrete), \"only MultiDiscrete action space is supported\"\nassert isinstance(env, TimeLimit) or int(args.episode_length), \"the gym env does not have a built in TimeLimit, please specify by using --episode-length\"\nif isinstance(env, TimeLimit):\n if int(args.episode_length):\n env._max_episode_steps = int(args.episode_length)\n args.episode_length = env._max_episode_steps\nelse:\n env = TimeLimit(env, int(args.episode_length))\nenv = NormalizedEnv(env.env, ob=args.norm_obs, ret=args.norm_returns, clipob=args.obs_clip, cliprew=args.rew_clip, gamma=args.gamma)\nenv = TimeLimit(env, int(args.episode_length))\nrandom.seed(args.seed)\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\ntorch.backends.cudnn.deterministic = args.torch_deterministic\nenv.seed(args.seed)\nenv.action_space.seed(args.seed)\nenv.observation_space.seed(args.seed)\nif args.capture_video:\n env = Monitor(env, f'videos/{experiment_name}')\n\n# ALGO LOGIC: initialize agent here:\nclass CategoricalMasked(Categorical):\n def __init__(self, probs=None, logits=None, validate_args=None, masks=[]):\n self.masks = masks\n if len(self.masks) == 0:\n super(CategoricalMasked, self).__init__(probs, logits, validate_args)\n else:\n self.masks = masks.type(torch.BoolTensor).to(device)\n logits = torch.where(self.masks, logits, torch.tensor(-1e+8).to(device))\n super(CategoricalMasked, self).__init__(probs, logits, validate_args)\n \n def entropy(self):\n if len(self.masks) == 0:\n return super(CategoricalMasked, self).entropy()\n p_log_p = self.logits * self.probs\n p_log_p = torch.where(self.masks, p_log_p, torch.tensor(0.).to(device))\n return -p_log_p.sum(-1)\n\nclass Policy(nn.Module):\n def __init__(self):\n super(Policy, self).__init__()\n self.features = nn.Sequential(\n nn.Conv2d(27, 16, kernel_size=3,),\n nn.MaxPool2d(1),\n nn.ReLU(),\n nn.Conv2d(16, 32, kernel_size=3),\n nn.MaxPool2d(1),\n nn.ReLU())\n self.fc = nn.Sequential(\n nn.Linear(32*6*6, 128),\n nn.ReLU(),\n nn.Linear(128, env.action_space.nvec.sum())\n )\n\n def forward(self, x):\n x = torch.Tensor(np.moveaxis(x, -1, 1)).to(device)\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x\n\n def get_action(self, x, action=None, 
invalid_action_masks=None):\n logits = self.forward(x)\n split_logits = torch.split(logits, env.action_space.nvec.tolist(), dim=1)\n \n if invalid_action_masks is not None:\n split_invalid_action_masks = torch.split(invalid_action_masks, env.action_space.nvec.tolist(), dim=1)\n multi_categoricals = [CategoricalMasked(logits=logits, masks=iam) for (logits, iam) in zip(split_logits, split_invalid_action_masks)]\n else:\n multi_categoricals = [Categorical(logits=logits) for logits in split_logits]\n \n if action is None:\n action = torch.stack([categorical.sample() for categorical in multi_categoricals])\n logprob = torch.stack([categorical.log_prob(a) for a, categorical in zip(action, multi_categoricals)])\n return action, logprob, [], multi_categoricals\n\nclass Value(nn.Module):\n def __init__(self):\n super(Value, self).__init__()\n self.features = nn.Sequential(\n nn.Conv2d(27, 16, kernel_size=3,),\n nn.MaxPool2d(1),\n nn.ReLU(),\n nn.Conv2d(16, 32, kernel_size=3),\n nn.MaxPool2d(1),\n nn.ReLU())\n self.fc = nn.Sequential(\n nn.Linear(32*6*6, 128),\n nn.ReLU(),\n nn.Linear(128, 1)\n )\n\n def forward(self, x):\n x = torch.Tensor(np.moveaxis(x, -1, 1)).to(device)\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x\n\ndef discount_cumsum(x, dones, gamma):\n \"\"\"\n computing discounted cumulative sums of vectors that resets with dones\n input:\n vector x, vector dones,\n [x0, [0,\n x1, 0,\n x2 1,\n x3 0, \n x4] 0]\n output:\n [x0 + discount * x1 + discount^2 * x2,\n x1 + discount * x2,\n x2,\n x3 + discount * x4,\n x4]\n \"\"\"\n discount_cumsum = np.zeros_like(x)\n discount_cumsum[-1] = x[-1]\n for t in reversed(range(x.shape[0]-1)):\n discount_cumsum[t] = x[t] + gamma * discount_cumsum[t+1] * (1-dones[t])\n return discount_cumsum\n\npg = Policy().to(device)\nvf = Value().to(device)\n\n# MODIFIED: Separate optimizer and learning rates\npg_optimizer = optim.Adam(list(pg.parameters()), lr=args.policy_lr)\nv_optimizer = optim.Adam(list(vf.parameters()), lr=args.value_lr)\n\n# MODIFIED: Initializing learning rate anneal scheduler when need\nif args.anneal_lr:\n anneal_fn = lambda f: max(0, 1-f / args.total_timesteps)\n pg_lr_scheduler = optim.lr_scheduler.LambdaLR(pg_optimizer, lr_lambda=anneal_fn)\n vf_lr_scheduler = optim.lr_scheduler.LambdaLR(v_optimizer, lr_lambda=anneal_fn)\n\nloss_fn = nn.MSELoss()\n\n# TRY NOT TO MODIFY: start the game\nglobal_step = 0\nwhile global_step < args.total_timesteps:\n if args.capture_video:\n env.stats_recorder.done=True\n next_obs = np.array(env.reset())\n\n # ALGO Logic: Storage for epoch data\n obs = np.empty((args.batch_size,) + env.observation_space.shape)\n\n actions = np.empty((args.batch_size,) + env.action_space.shape)\n logprobs = torch.zeros((env.action_space.nvec.shape[0], args.batch_size,)).to(device)\n\n rewards = np.zeros((args.batch_size,))\n raw_rewards = np.zeros((len(env.rfs),args.batch_size,))\n \n real_rewards = []\n invalid_action_stats = []\n\n dones = np.zeros((args.batch_size,))\n values = torch.zeros((args.batch_size,)).to(device)\n\n invalid_action_masks = torch.zeros((args.batch_size, env.action_space.nvec.sum()))\n # TRY NOT TO MODIFY: prepare the execution of the game.\n for step in range(args.batch_size):\n env.render()\n global_step += 1\n obs[step] = next_obs.copy()\n\n # ALGO LOGIC: put action logic here\n invalid_action_mask = torch.ones(env.action_space.nvec.sum())\n invalid_action_mask[0:env.action_space.nvec[0]] = torch.tensor(env.unit_location_mask)\n 
invalid_action_mask[-env.action_space.nvec[-1]:] = torch.tensor(env.target_unit_location_mask)\n invalid_action_masks[step] = invalid_action_mask\n with torch.no_grad():\n values[step] = vf.forward(obs[step:step+1])\n action, logproba, _, probs = pg.get_action(obs[step:step+1], invalid_action_masks=invalid_action_masks[step:step+1])\n \n # CORE LOGIC:\n # use the action generated by CategoricalMasked, but \n # don't adjust the logprobability accordingly. Instead, calculate the log\n # probability using Categorical\n action, logproba, _, probs = pg.get_action(obs[step:step+1], action=action)\n \n actions[step] = action[:,0].data.cpu().numpy()\n logprobs[:,[step]] = logproba\n\n # TRY NOT TO MODIFY: execute the game and log data.\n next_obs, rewards[step], dones[step], info = env.step(action[:,0].data.cpu().numpy())\n raw_rewards[:,step] = info[\"rewards\"]\n real_rewards += [info['real_reward']]\n invalid_action_stats += [info['invalid_action_stats']]\n next_obs = np.array(next_obs)\n\n # Annealing the rate if instructed to do so.\n if args.anneal_lr:\n pg_lr_scheduler.step()\n vf_lr_scheduler.step()\n\n if dones[step]:\n # Computing the discounted returns:\n writer.add_scalar(\"charts/episode_reward\", np.sum(real_rewards), global_step)\n print(f\"global_step={global_step}, episode_reward={np.sum(real_rewards)}\")\n for i in range(len(env.rfs)):\n writer.add_scalar(f\"charts/episode_reward/{str(env.rfs[i])}\", raw_rewards.sum(1)[i], global_step)\n real_rewards = []\n for key, idx in zip(info['invalid_action_stats'], range(len(info['invalid_action_stats']))):\n writer.add_scalar(f\"stats/{key}\", pd.DataFrame(invalid_action_stats).sum(0)[idx], global_step)\n invalid_action_stats = []\n next_obs = np.array(env.reset())\n\n # bootstrap reward if not done. 
reached the batch limit\n last_value = 0\n if not dones[step]:\n last_value = vf.forward(next_obs.reshape((1,)+next_obs.shape))[0].detach().cpu().numpy()[0]\n bootstrapped_rewards = np.append(rewards, last_value)\n\n # calculate the returns and advantages\n if args.gae:\n bootstrapped_values = np.append(values.detach().cpu().numpy(), last_value)\n deltas = bootstrapped_rewards[:-1] + args.gamma * bootstrapped_values[1:] * (1-dones) - bootstrapped_values[:-1]\n advantages = discount_cumsum(deltas, dones, args.gamma * args.gae_lambda)\n advantages = torch.Tensor(advantages).to(device)\n returns = advantages + values\n else:\n returns = discount_cumsum(bootstrapped_rewards, dones, args.gamma)[:-1]\n advantages = returns - values.detach().cpu().numpy()\n advantages = torch.Tensor(advantages).to(device)\n returns = torch.Tensor(returns).to(device)\n\n # Advantage normalization\n if args.norm_adv:\n EPS = 1e-10\n advantages = (advantages - advantages.mean()) / (advantages.std() + EPS)\n\n # Optimizaing policy network\n entropys = []\n target_pg = Policy().to(device)\n inds = np.arange(args.batch_size,)\n for i_epoch_pi in range(args.update_epochs):\n np.random.shuffle(inds)\n for start in range(0, args.batch_size, args.minibatch_size):\n end = start + args.minibatch_size\n minibatch_ind = inds[start:end]\n target_pg.load_state_dict(pg.state_dict())\n \n _, newlogproba, _, _ = pg.get_action(\n obs[minibatch_ind],\n torch.LongTensor(actions[minibatch_ind].astype(np.int)).to(device).T,)\n ratio = (newlogproba - logprobs[:,minibatch_ind]).exp()\n\n # Policy loss as in OpenAI SpinUp\n clip_adv = torch.where(advantages[minibatch_ind] > 0,\n (1.+args.clip_coef) * advantages[minibatch_ind],\n (1.-args.clip_coef) * advantages[minibatch_ind]).to(device)\n\n # Entropy computation with resampled actions\n entropy = -(newlogproba.exp() * newlogproba).mean()\n entropys.append(entropy.item())\n\n policy_loss = -torch.min(ratio * advantages[minibatch_ind], clip_adv) + args.ent_coef * entropy\n policy_loss = policy_loss.mean()\n \n pg_optimizer.zero_grad()\n policy_loss.backward()\n nn.utils.clip_grad_norm_(pg.parameters(), args.max_grad_norm)\n pg_optimizer.step()\n\n approx_kl = (logprobs[:,minibatch_ind] - newlogproba).mean()\n # Optimizing value network\n new_values = vf.forward(obs[minibatch_ind]).view(-1)\n\n # Value loss clipping\n if args.clip_vloss:\n v_loss_unclipped = ((new_values - returns[minibatch_ind]) ** 2)\n v_clipped = values[minibatch_ind] + torch.clamp(new_values - values[minibatch_ind], -args.clip_coef, args.clip_coef)\n v_loss_clipped = (v_clipped - returns[minibatch_ind])**2\n v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)\n v_loss = 0.5 * v_loss_max.mean()\n else:\n v_loss = torch.mean((returns[minibatch_ind]- new_values).pow(2))\n\n v_optimizer.zero_grad()\n v_loss.backward()\n nn.utils.clip_grad_norm_(vf.parameters(), args.max_grad_norm)\n v_optimizer.step()\n\n if args.kle_stop:\n if approx_kl > args.target_kl:\n break\n if args.kle_rollback:\n if (logprobs[:,minibatch_ind] - \n pg.get_action(\n obs[minibatch_ind],\n torch.LongTensor(actions[minibatch_ind].astype(np.int)).to(device).T,\n invalid_action_masks[minibatch_ind])[1]).mean() > args.target_kl:\n pg.load_state_dict(target_pg.state_dict())\n break\n\n # TRY NOT TO MODIFY: record rewards for plotting purposes\n writer.add_scalar(\"losses/value_loss\", v_loss.item(), global_step)\n writer.add_scalar(\"charts/policy_learning_rate\", pg_optimizer.param_groups[0]['lr'], global_step)\n 
writer.add_scalar(\"charts/value_learning_rate\", v_optimizer.param_groups[0]['lr'], global_step)\n writer.add_scalar(\"losses/policy_loss\", policy_loss.item(), global_step)\n writer.add_scalar(\"losses/entropy\", np.mean(entropys), global_step)\n writer.add_scalar(\"losses/approx_kl\", approx_kl.item(), global_step)\n if args.kle_stop or args.kle_rollback:\n writer.add_scalar(\"debug/pg_stop_iter\", i_epoch_pi, global_step)\n\nenv.close()\nwriter.close()\n" ]
[ [ "torch.nn.Linear", "numpy.mean", "torch.distributions.categorical.Categorical", "torch.cuda.is_available", "torch.where", "numpy.zeros_like", "numpy.empty", "torch.nn.MaxPool2d", "pandas.DataFrame", "torch.manual_seed", "numpy.arange", "torch.tensor", "numpy.sqrt", "numpy.append", "torch.Tensor", "torch.utils.tensorboard.SummaryWriter", "torch.zeros", "numpy.square", "numpy.array", "torch.min", "numpy.zeros", "torch.max", "numpy.random.shuffle", "torch.clamp", "torch.nn.ReLU", "numpy.moveaxis", "torch.nn.Conv2d", "torch.nn.MSELoss", "numpy.random.seed", "numpy.sum", "torch.no_grad", "numpy.ones", "torch.optim.lr_scheduler.LambdaLR", "numpy.var" ] ]
nathanael-fijalkow/ec
[ "c20fd84ca3944f904f5c999ed5df46eb6b6f8ef5" ]
[ "dreamcoder/grammar.py" ]
[ "from collections import defaultdict\n\nfrom dreamcoder.frontier import *\nfrom dreamcoder.program import *\nfrom dreamcoder.type import *\nfrom dreamcoder.utilities import *\n\nimport time\n\n\n\nclass GrammarFailure(Exception):\n pass\n\nclass SketchEnumerationFailure(Exception):\n pass\n\nclass NoCandidates(Exception):\n pass\n\n\nclass Grammar(object):\n def __init__(self, logVariable, productions, continuationType=None):\n self.logVariable = logVariable\n self.productions = productions\n\n self.continuationType = continuationType\n\n self.expression2likelihood = dict((p, l) for l, _, p in productions)\n self.expression2likelihood[Index(0)] = self.logVariable\n\n def randomWeights(self, r):\n \"\"\"returns a new grammar with random weights drawn from r. calls `r` w/ old weight\"\"\"\n return Grammar(logVariable=r(self.logVariable),\n productions=[(r(l),t,p)\n for l,t,p in self.productions ],\n continuationType=self.continuationType)\n\n def strip_primitive_values(self):\n return Grammar(logVariable=self.logVariable,\n productions=[(l,t,strip_primitive_values(p))\n for l,t,p in self.productions ],\n continuationType=self.continuationType)\n\n def unstrip_primitive_values(self):\n return Grammar(logVariable=self.logVariable,\n productions=[(l,t,unstrip_primitive_values(p))\n for l,t,p in self.productions ],\n continuationType=self.continuationType)\n\n def __setstate__(self, state):\n \"\"\"\n Legacy support for loading grammar objects without the imperative type filled in\n \"\"\"\n assert 'logVariable' in state\n assert 'productions' in state\n if 'continuationType' in state:\n continuationType = state['continuationType']\n else:\n if any( 'turtle' in str(t) for l,t,p in state['productions'] ):\n continuationType = baseType(\"turtle\")\n elif any( 'tower' in str(t) for l,t,p in state['productions'] ):\n continuationType = baseType(\"tower\")\n else:\n continuationType = None\n \n self.__init__(state['logVariable'], state['productions'], continuationType=continuationType)\n\n @staticmethod\n def fromProductions(productions, logVariable=0.0, continuationType=None):\n \"\"\"Make a grammar from primitives and their relative logpriors.\"\"\"\n return Grammar(logVariable, [(l, p.infer(), p)\n for l, p in productions],\n continuationType=continuationType)\n\n @staticmethod\n def uniform(primitives, continuationType=None):\n return Grammar(0.0, [(0.0, p.infer(), p) for p in primitives], continuationType=continuationType)\n\n def __len__(self): return len(self.productions)\n\n def __str__(self):\n def productionKey(xxx_todo_changeme):\n (l, t, p) = xxx_todo_changeme\n return not isinstance(p, Primitive), l is not None and -l\n if self.continuationType is not None:\n lines = [\"continuation : %s\"%self.continuationType]\n else:\n lines = []\n lines += [\"%f\\tt0\\t$_\" % self.logVariable]\n for l, t, p in sorted(self.productions, key=productionKey):\n if l is not None:\n l = \"%f\\t%s\\t%s\" % (l, t, p)\n else:\n l = \"-Inf\\t%s\\t%s\" % (t, p)\n if not t.isArrow() and isinstance(p, Invented):\n try:\n l += \"\\teval = %s\" % (p.evaluate([]))\n except BaseException:\n pass\n\n lines.append(l)\n return \"\\n\".join(lines)\n\n def extract(self):\n \tpass\n\n # productions = [(log-probability, type, primitive)]\n def grammar_to_tree_grammar(self, return_type):\n \tbase_types = self.base_types()\n \tstart = return_type\n \trules = {basic_type: [] for basic_type in base_types}\n \tfor (l,t,p) in self.productions:\n \t\tif t.isArrow:\n \t\t\trules[t.arguments[-1]].append(t.arguments[:-1], exp(l))\n 
\t\telse:\n \t\t\trules[t].append([], exp(l))\n \treturn PCFG(start = start, rules = rules)\n\n def json(self):\n j = {\"logVariable\": self.logVariable,\n \"productions\": [{\"expression\": str(p), \"logProbability\": l}\n for l, _, p in self.productions]}\n if self.continuationType is not None:\n j[\"continuationType\"] = self.continuationType.json()\n return j\n\n def _immutable_code(self): return self.logVariable, tuple(self.productions)\n\n def __eq__(self, o): return self._immutable_code() == o._immutable_code()\n\n def __ne__(self, o): return not (self == o)\n\n def __hash__(self): return hash(self._immutable_code())\n\n @property\n def primitives(self):\n return [p for _, _, p in self.productions]\n\n def removeProductions(self, ps):\n return Grammar(\n self.logVariable, [\n (l, t, p) for (\n l, t, p) in self.productions if p not in ps],\n continuationType=self.continuationType)\n\n def buildCandidates(self, request, context, environment,\n # Should the log probabilities be normalized?\n normalize=True,\n # Should be returned a table mapping primitives to\n # their candidate entry?\n returnTable=False,\n # Should we return probabilities vs log probabilities?\n returnProbabilities=False,\n # Must be a leaf (have no arguments)?\n mustBeLeaf=False):\n \"\"\"Primitives that are candidates for being used given a requested type\n If returnTable is false (default): returns [((log)likelihood, tp, primitive, context)]\n if returntable is true: returns {primitive: ((log)likelihood, tp, context)}\"\"\"\n if returnProbabilities:\n assert normalize\n\n candidates = []\n variableCandidates = []\n for l, t, p in self.productions:\n try:\n newContext, t = t.instantiate(context)\n newContext = newContext.unify(t.returns(), request)\n t = t.apply(newContext)\n if mustBeLeaf and t.isArrow():\n continue\n candidates.append((l, t, p, newContext))\n except UnificationFailure:\n continue\n for j, t in enumerate(environment):\n try:\n newContext = context.unify(t.returns(), request)\n t = t.apply(newContext)\n if mustBeLeaf and t.isArrow():\n continue\n variableCandidates.append((t, Index(j), newContext))\n except UnificationFailure:\n continue\n\n if self.continuationType == request:\n terminalIndices = [v.i for t,v,k in variableCandidates if not t.isArrow()]\n if terminalIndices:\n smallestIndex = Index(min(terminalIndices))\n variableCandidates = [(t,v,k) for t,v,k in variableCandidates\n if t.isArrow() or v == smallestIndex]\n \n candidates += [(self.logVariable - log(len(variableCandidates)), t, p, k)\n for t, p, k in variableCandidates]\n if candidates == []:\n raise NoCandidates()\n #eprint(\"candidates inside buildCandidates before norm:\")\n #eprint(candidates)\n\n if normalize:\n z = lse([l for l, t, p, k in candidates])\n if returnProbabilities:\n candidates = [(exp(l - z), t, p, k)\n for l, t, p, k in candidates]\n else:\n candidates = [(l - z, t, p, k) for l, t, p, k in candidates]\n\n #eprint(\"candidates inside buildCandidates after norm:\")\n #eprint(candidates)\n\n if returnTable:\n return {p: (l, t, k) for l, t, p, k in candidates}\n else:\n return candidates\n\n\n def sample(self, request, maximumDepth=6, maxAttempts=None):\n attempts = 0\n\n while True:\n try:\n _, e = self._sample(\n request, Context.EMPTY, [], maximumDepth=maximumDepth)\n return e\n except NoCandidates:\n if maxAttempts is not None:\n attempts += 1\n if attempts > maxAttempts:\n return None\n continue\n\n def _sample(self, request, context, environment, maximumDepth):\n if request.isArrow():\n context, expression = 
self._sample(\n request.arguments[1], context, [\n request.arguments[0]] + environment, maximumDepth)\n return context, Abstraction(expression)\n\n candidates = self.buildCandidates(request, context, environment,\n normalize=True,\n returnProbabilities=True,\n # Force it to terminate in a\n # leaf; a primitive with no\n # function arguments\n mustBeLeaf=maximumDepth <= 1)\n #eprint(\"candidates:\")\n #eprint(candidates)\n newType, chosenPrimitive, context = sampleDistribution(candidates)\n\n # Sample the arguments\n xs = newType.functionArguments()\n returnValue = chosenPrimitive\n\n for x in xs:\n x = x.apply(context)\n context, x = self._sample(x, context, environment, maximumDepth - 1)\n returnValue = Application(returnValue, x)\n\n return context, returnValue\n\n def likelihoodSummary(self, context, environment, request, expression, silent=False):\n if request.isArrow():\n if not isinstance(expression, Abstraction):\n if not silent:\n eprint(\"Request is an arrow but I got\", expression)\n return context, None\n return self.likelihoodSummary(context,\n [request.arguments[0]] + environment,\n request.arguments[1],\n expression.body,\n silent=silent)\n # Build the candidates\n candidates = self.buildCandidates(request, context, environment,\n normalize=False,\n returnTable=True)\n\n # A list of everything that would have been possible to use here\n possibles = [p for p in candidates.keys() if not p.isIndex]\n numberOfVariables = sum(p.isIndex for p in candidates.keys())\n if numberOfVariables > 0:\n possibles += [Index(0)]\n\n f, xs = expression.applicationParse()\n\n if f not in candidates:\n if self.continuationType is not None and f.isIndex:\n ls = LikelihoodSummary()\n ls.constant = NEGATIVEINFINITY\n return ls\n \n if not silent:\n eprint(f, \"Not in candidates\")\n eprint(\"Candidates is\", candidates)\n #eprint(\"grammar:\", grammar.productions)\n eprint(\"request is\", request)\n eprint(\"xs\", xs)\n eprint(\"environment\", environment)\n assert False\n return context, None\n\n thisSummary = LikelihoodSummary()\n thisSummary.record(f, possibles,\n constant= -math.log(numberOfVariables) if f.isIndex else 0)\n\n _, tp, context = candidates[f]\n argumentTypes = tp.functionArguments()\n if len(xs) != len(argumentTypes):\n eprint(\"PANIC: not enough arguments for the type\")\n eprint(\"request\", request)\n eprint(\"tp\", tp)\n eprint(\"expression\", expression)\n eprint(\"xs\", xs)\n eprint(\"argumentTypes\", argumentTypes)\n # This should absolutely never occur\n raise GrammarFailure((context, environment, request, expression))\n\n for argumentType, argument in zip(argumentTypes, xs):\n argumentType = argumentType.apply(context)\n context, newSummary = self.likelihoodSummary(\n context, environment, argumentType, argument, silent=silent)\n if newSummary is None:\n return context, None\n thisSummary.join(newSummary)\n\n return context, thisSummary\n\n def bestFirstEnumeration(self, request):\n from heapq import heappush, heappop\n\n pq = []\n\n def choices(parentCost, xs):\n for c, x in xs:\n heappush(pq, (parentCost + c, x))\n\n def g(parentCost, request, _=None,\n context=None, environment=[],\n k=None):\n \"\"\"\n k is a continuation.\n k: Expects to be called with MDL, context, expression.\n \"\"\"\n\n assert k is not None\n if context is None:\n context = Context.EMPTY\n\n if request.isArrow():\n g(parentCost,\n request.arguments[1],\n context=context,\n environment=[request.arguments[0]] + environment,\n k=lambda MDL,\n newContext,\n p: k(MDL,\n newContext,\n Abstraction(p)))\n 
else:\n candidates = self.buildCandidates(request,\n context,\n environment,\n normalize=True,\n returnProbabilities=False,\n returnTable=True)\n choices(parentCost,\n [(-f_ll_tp_newContext[1][0],\n lambda: ga(parentCost - f_ll_tp_newContext[1][0],\n f_ll_tp_newContext[0],\n f_ll_tp_newContext[1][1].functionArguments(),\n context=f_ll_tp_newContext[1][2],\n environment=environment,\n k=k)) for f_ll_tp_newContext in iter(candidates.items())])\n\n def ga(costSoFar, f, argumentTypes, _=None,\n context=None, environment=None,\n k=None):\n if argumentTypes == []:\n k(costSoFar, context, f)\n else:\n t1 = argumentTypes[0].apply(context)\n g(costSoFar, t1, context=context, environment=environment,\n k=lambda newCost, newContext, argument:\n ga(newCost, Application(f, argument), argumentTypes[1:],\n context=newContext, environment=environment,\n k=k))\n\n def receiveResult(MDL, _, expression):\n heappush(pq, (MDL, expression))\n\n g(0., request, context=Context.EMPTY, environment=[], k=receiveResult)\n frontier = []\n while len(frontier) < 10**3:\n MDL, action = heappop(pq)\n if isinstance(action, Program):\n expression = action\n frontier.append(expression)\n #eprint(\"Enumerated program\",expression,-MDL,self.closedLogLikelihood(request, expression))\n else:\n action()\n\n def closedLikelihoodSummary(self, request, expression, silent=False):\n try:\n context, summary = self.likelihoodSummary(Context.EMPTY, [], request, expression, silent=silent)\n except GrammarFailure as e:\n failureExport = 'failures/grammarFailure%s.pickle' % (\n time.time() + getPID())\n eprint(\"PANIC: Grammar failure, exporting to \", failureExport)\n with open(failureExport, 'wb') as handle:\n pickle.dump((e, self, request, expression), handle)\n assert False\n\n return summary\n\n def logLikelihood(self, request, expression):\n summary = self.closedLikelihoodSummary(request, expression)\n if summary is None:\n eprint(\n \"FATAL: program [ %s ] does not have a likelihood summary.\" %\n expression, \"r = \", request, \"\\n\", self)\n assert False\n return summary.logLikelihood(self)\n\n def rescoreFrontier(self, frontier):\n return Frontier([FrontierEntry(e.program,\n logPrior=self.logLikelihood(frontier.task.request, e.program),\n logLikelihood=e.logLikelihood)\n for e in frontier],\n frontier.task)\n\n def productionUses(self, frontiers):\n \"\"\"Returns the expected number of times that each production was used. {production: expectedUses}\"\"\"\n frontiers = [self.rescoreFrontier(f).normalize()\n for f in frontiers if not f.empty]\n uses = {p: 0. for p in self.primitives}\n for f in frontiers:\n for e in f:\n summary = self.closedLikelihoodSummary(f.task.request,\n e.program)\n for p, u in summary.uses:\n uses[p] += u * math.exp(e.logPosterior)\n return uses\n\n def insideOutside(self, frontiers, pseudoCounts, iterations=1):\n # Replace programs with (likelihood summary, uses)\n frontiers = [ Frontier([ FrontierEntry((summary, summary.toUses()),\n logPrior=summary.logLikelihood(self),\n logLikelihood=e.logLikelihood)\n for e in f\n for summary in [self.closedLikelihoodSummary(f.task.request, e.program)] ],\n task=f.task)\n for f in frontiers ]\n\n g = self\n for i in range(iterations):\n u = Uses()\n for f in frontiers:\n f = f.normalize()\n for e in f:\n _, eu = e.program\n u += math.exp(e.logPosterior) * eu\n\n lv = math.log(u.actualVariables + pseudoCounts) - \\\n math.log(u.possibleVariables + pseudoCounts)\n g = Grammar(lv,\n [ (math.log(u.actualUses.get(p,0.) 
+ pseudoCounts) - \\\n math.log(u.possibleUses.get(p,0.) + pseudoCounts),\n t,p)\n for _,t,p in g.productions ],\n continuationType=self.continuationType)\n if i < iterations - 1:\n frontiers = [Frontier([ FrontierEntry((summary, uses),\n logPrior=summary.logLikelihood(g),\n logLikelihood=e.logLikelihood)\n for e in f\n for (summary, uses) in [e.program] ],\n task=f.task)\n for f in frontiers ]\n return g\n\n def frontierMDL(self, frontier):\n return max( e.logLikelihood + self.logLikelihood(frontier.task.request, e.program)\n for e in frontier ) \n\n\n def enumeration(self,context,environment,request,upperBound,\n maximumDepth=20,\n lowerBound=0.):\n '''Enumerates all programs whose MDL satisfies: lowerBound <= MDL < upperBound'''\n if upperBound < 0 or maximumDepth == 1:\n return\n\n if request.isArrow():\n v = request.arguments[0]\n for l, newContext, b in self.enumeration(context, [v] + environment,\n request.arguments[1],\n upperBound=upperBound,\n lowerBound=lowerBound,\n maximumDepth=maximumDepth):\n yield l, newContext, Abstraction(b)\n\n else:\n candidates = self.buildCandidates(request, context, environment,\n normalize=True)\n\n for l, t, p, newContext in candidates:\n mdl = -l\n if not (mdl < upperBound):\n continue\n\n xs = t.functionArguments()\n for aL, aK, application in\\\n self.enumerateApplication(newContext, environment, p, xs,\n upperBound=upperBound + l,\n lowerBound=lowerBound + l,\n maximumDepth=maximumDepth - 1):\n yield aL + l, aK, application\n\n def enumerateApplication(self, context, environment,\n function, argumentRequests,\n # Upper bound on the description length of all of\n # the arguments\n upperBound,\n # Lower bound on the description length of all of\n # the arguments\n lowerBound=0.,\n maximumDepth=20,\n originalFunction=None,\n argumentIndex=0):\n if upperBound < 0. or maximumDepth == 1:\n return\n if originalFunction is None:\n originalFunction = function\n\n if argumentRequests == []:\n if lowerBound <= 0. and 0. < upperBound:\n yield 0., context, function\n else:\n return\n else:\n argRequest = argumentRequests[0].apply(context)\n laterRequests = argumentRequests[1:]\n for argL, newContext, arg in self.enumeration(context, environment, argRequest,\n upperBound=upperBound,\n lowerBound=0.,\n maximumDepth=maximumDepth):\n if violatesSymmetry(originalFunction, arg, argumentIndex):\n continue\n\n newFunction = Application(function, arg)\n for resultL, resultK, result in self.enumerateApplication(newContext, environment, newFunction,\n laterRequests,\n upperBound=upperBound + argL,\n lowerBound=lowerBound + argL,\n maximumDepth=maximumDepth,\n originalFunction=originalFunction,\n argumentIndex=argumentIndex + 1):\n yield resultL + argL, resultK, result\n\n def sketchEnumeration(self,context,environment,request,sk,upperBound,\n maximumDepth=20,\n lowerBound=0.):\n '''Enumerates all sketch instantiations whose MDL satisfies: lowerBound <= MDL < upperBound'''\n if upperBound < 0. 
or maximumDepth == 1:\n return\n\n if sk.isHole:\n yield from self.enumeration(context, environment, request, upperBound,\n maximumDepth=maximumDepth,\n lowerBound=lowerBound)\n elif request.isArrow():\n assert sk.isAbstraction\n v = request.arguments[0]\n for l, newContext, b in self.sketchEnumeration(context, [v] + environment,\n request.arguments[1],\n sk.body,\n upperBound=upperBound,\n lowerBound=lowerBound,\n maximumDepth=maximumDepth):\n yield l, newContext, Abstraction(b)\n\n else:\n f, xs = sk.applicationParse()\n if f.isIndex:\n ft = environment[f.i].apply(context)\n elif f.isInvented or f.isPrimitive:\n context, ft = f.tp.instantiate(context)\n elif f.isAbstraction:\n assert False, \"sketch is not in beta longform\"\n elif f.isHole:\n assert False, \"hole as function not yet supported\"\n elif f.isApplication:\n assert False, \"should never happen - bug in applicationParse\"\n else: assert False\n\n try: context = context.unify(ft.returns(), request) \n except UnificationFailure:\n print(\"Exception: sketch is ill-typed\")\n return #so that we can continue evaluating\n # raise SketchEnumerationFailure() #\"sketch is ill-typed\"\n ft = ft.apply(context)\n argumentRequests = ft.functionArguments()\n\n assert len(argumentRequests) == len(xs)\n\n yield from self.sketchApplication(context, environment,\n f, xs, argumentRequests,\n upperBound=upperBound,\n lowerBound=lowerBound,\n maximumDepth=maximumDepth - 1)\n\n\n def sketchApplication(self, context, environment,\n function, arguments, argumentRequests,\n # Upper bound on the description length of all of\n # the arguments\n upperBound,\n # Lower bound on the description length of all of\n # the arguments\n lowerBound=0.,\n maximumDepth=20):\n if upperBound < 0. or maximumDepth == 1:\n return\n\n if argumentRequests == []:\n if lowerBound <= 0. and 0. < upperBound:\n yield 0., context, function\n else:\n return\n else:\n argRequest = argumentRequests[0].apply(context)\n laterRequests = argumentRequests[1:]\n firstSketch = arguments[0]\n laterSketches = arguments[1:]\n for argL, newContext, arg in self.sketchEnumeration(context, environment, argRequest,\n firstSketch,\n upperBound=upperBound,\n lowerBound=0.,\n maximumDepth=maximumDepth):\n\n newFunction = Application(function, arg)\n for resultL, resultK, result in self.sketchApplication(newContext, environment, newFunction,\n laterSketches, laterRequests,\n upperBound=upperBound + argL,\n lowerBound=lowerBound + argL,\n maximumDepth=maximumDepth):\n\n yield resultL + argL, resultK, result\n\n def sketchLogLikelihood(self, request, full, sk, context=Context.EMPTY, environment=[]):\n \"\"\"\n calculates mdl of full program 'full' from sketch 'sk'\n \"\"\"\n if sk.isHole:\n _, summary = self.likelihoodSummary(context, environment, request, full)\n if summary is None:\n eprint(\n \"FATAL: program [ %s ] does not have a likelihood summary.\" %\n full, \"r = \", request, \"\\n\", self)\n assert False\n return summary.logLikelihood(self), context\n\n elif request.isArrow():\n assert sk.isAbstraction and full.isAbstraction\n #assert sk.f == full.f #is this right? 
or do i need to recurse?\n v = request.arguments[0]\n return self.sketchLogLikelihood(request.arguments[1], full.body, sk.body, context=context, environment=[v] + environment)\n\n else:\n sk_f, sk_xs = sk.applicationParse()\n full_f, full_xs = full.applicationParse()\n if sk_f.isIndex:\n assert sk_f == full_f, \"sketch and full program don't match on an index\"\n ft = environment[sk_f.i].apply(context)\n elif sk_f.isInvented or sk_f.isPrimitive:\n assert sk_f == full_f, \"sketch and full program don't match on a primitive\"\n context, ft = sk_f.tp.instantiate(context)\n elif sk_f.isAbstraction:\n assert False, \"sketch is not in beta longform\"\n elif sk_f.isHole:\n assert False, \"hole as function not yet supported\"\n elif sk_f.isApplication:\n assert False, \"should never happen - bug in applicationParse\"\n else: assert False\n\n try: context = context.unify(ft.returns(), request) \n except UnificationFailure: assert False, \"sketch is ill-typed\"\n ft = ft.apply(context)\n argumentRequests = ft.functionArguments()\n\n assert len(argumentRequests) == len(sk_xs) == len(full_xs) #this might not be true if holes??\n\n return self.sketchllApplication(context, environment,\n sk_f, sk_xs, full_f, full_xs, argumentRequests)\n\n def sketchllApplication(self, context, environment,\n sk_function, sk_arguments, full_function, full_arguments, argumentRequests):\n if argumentRequests == []:\n return torch.tensor([0.]).cuda(), context #does this make sense?\n else:\n argRequest = argumentRequests[0].apply(context)\n laterRequests = argumentRequests[1:]\n\n sk_firstSketch = sk_arguments[0]\n full_firstSketch = full_arguments[0]\n sk_laterSketches = sk_arguments[1:]\n full_laterSketches = full_arguments[1:]\n\n argL, newContext = self.sketchLogLikelihood(argRequest, full_firstSketch, sk_firstSketch, context=context, environment=environment)\n\n #finish this...\n sk_newFunction = Application(sk_function, sk_firstSketch) # is this redundant? 
maybe \n full_newFunction = Application(full_function, full_firstSketch)\n\n resultL, context = self.sketchllApplication(newContext, environment, sk_newFunction, sk_laterSketches,\n full_newFunction, full_laterSketches, laterRequests)\n\n return resultL + argL, context\n\n \n def enumerateNearby(self, request, expr, distance=3.0):\n \"\"\"Enumerate programs with local mutations in subtrees with small description length\"\"\"\n if distance <= 0:\n yield expr\n else:\n def mutations(tp, loss):\n for l, _, expr in self.enumeration(\n Context.EMPTY, [], tp, distance - loss):\n yield expr, l\n yield from Mutator(self, mutations).execute(expr, request)\n\n\n def enumerateHoles(self, request, expr, k=3, return_obj=Hole):\n \"\"\"Enumerate programs with a single hole within mdl distance\"\"\"\n #TODO: make it possible to enumerate sketches with multiple holes\n def mutations(tp, loss, is_left_application=False):\n \"\"\"\n to allow applications lhs to become a hole, \n remove the condition below and ignore all the is_left_application kwds \n \"\"\"\n if not is_left_application: \n yield return_obj(), 0\n top_k = []\n for expr, l in Mutator(self, mutations).execute(expr, request):\n if len(top_k) > 0:\n i, v = min(enumerate(top_k), key=lambda x:x[1][1])\n if l > v[1]:\n if len(top_k) >= k:\n top_k[i] = (expr, l)\n else:\n top_k.append((expr, l))\n elif len(top_k) < k:\n top_k.append((expr, l))\n else:\n top_k.append((expr, l))\n return sorted(top_k, key=lambda x:-x[1])\n\n def untorch(self):\n return Grammar(self.logVariable.data.tolist()[0], \n [ (l.data.tolist()[0], t, p)\n for l, t, p in self.productions],\n continuationType=self.continuationType)\n\nclass LikelihoodSummary(object):\n '''Summarizes the terms that will be used in a likelihood calculation'''\n\n def __init__(self):\n self.uses = {}\n self.normalizers = {}\n self.constant = 0.\n\n def __str__(self):\n return \"\"\"LikelihoodSummary(constant = %f,\nuses = {%s},\nnormalizers = {%s})\"\"\" % (self.constant,\n \", \".join(\n \"%s: %d\" % (k,\n v) for k,\n v in self.uses.items()),\n \", \".join(\n \"%s: %d\" % (k,\n v) for k,\n v in self.normalizers.items()))\n\n def record(self, actual, possibles, constant=0.):\n # Variables are all normalized to be $0\n if isinstance(actual, Index):\n actual = Index(0)\n\n # Make it something that we can hash\n possibles = frozenset(sorted(possibles, key=hash))\n\n self.constant += constant\n self.uses[actual] = self.uses.get(actual, 0) + 1\n self.normalizers[possibles] = self.normalizers.get(possibles, 0) + 1\n\n def join(self, other):\n self.constant += other.constant\n for k, v in other.uses.items():\n self.uses[k] = self.uses.get(k, 0) + v\n for k, v in other.normalizers.items():\n self.normalizers[k] = self.normalizers.get(k, 0) + v\n\n def logLikelihood(self, grammar):\n return self.constant + \\\n sum(count * grammar.expression2likelihood[p] for p, count in self.uses.items()) - \\\n sum(count * lse([grammar.expression2likelihood[p] for p in ps])\n for ps, count in self.normalizers.items())\n def logLikelihood_overlyGeneral(self, grammar):\n \"\"\"Calculates log likelihood of this summary, given that the summary might refer to productions that don't occur in the grammar\"\"\"\n return self.constant + \\\n sum(count * grammar.expression2likelihood[p] for p, count in self.uses.items()) - \\\n sum(count * lse([grammar.expression2likelihood.get(p,NEGATIVEINFINITY) for p in ps])\n for ps, count in self.normalizers.items()) \n def numerator(self, grammar):\n return self.constant + \\\n sum(count * 
grammar.expression2likelihood[p] for p, count in self.uses.items())\n def denominator(self, grammar):\n return \\\n sum(count * lse([grammar.expression2likelihood[p] for p in ps])\n for ps, count in self.normalizers.items())\n def toUses(self):\n from collections import Counter\n \n possibleVariables = sum( count if Index(0) in ps else 0\n for ps, count in self.normalizers.items() )\n actualVariables = self.uses.get(Index(0), 0.)\n actualUses = {k: v\n for k, v in self.uses.items()\n if not k.isIndex }\n possibleUses = dict(Counter(p\n for ps, count in self.normalizers.items()\n for p_ in ps\n if not p_.isIndex\n for p in [p_]*count ))\n return Uses(possibleVariables, actualVariables,\n possibleUses, actualUses)\n\n\nclass Uses(object):\n '''Tracks uses of different grammar productions'''\n\n def __init__(self, possibleVariables=0., actualVariables=0.,\n possibleUses={}, actualUses={}):\n self.actualVariables = actualVariables\n self.possibleVariables = possibleVariables\n self.possibleUses = possibleUses\n self.actualUses = actualUses\n\n def __str__(self):\n return \"Uses(actualVariables = %f, possibleVariables = %f, actualUses = %s, possibleUses = %s)\" %\\\n (self.actualVariables, self.possibleVariables, self.actualUses, self.possibleUses)\n\n def __repr__(self): return str(self)\n\n def __mul__(self, a):\n return Uses(a * self.possibleVariables,\n a * self.actualVariables,\n {p: a * u for p, u in self.possibleUses.items()},\n {p: a * u for p, u in self.actualUses.items()})\n\n def __imul__(self, a):\n self.possibleVariables *= a\n self.actualVariables *= a\n for p in self.possibleUses:\n self.possibleUses[p] *= a\n for p in self.actualUses:\n self.actualUses[p] *= a\n return self\n\n def __rmul__(self, a):\n return self * a\n\n def __radd__(self, o):\n if o == 0:\n return self\n return self + o\n\n def __add__(self, o):\n if o == 0:\n return self\n\n def merge(x, y):\n z = x.copy()\n for k, v in y.items():\n z[k] = v + x.get(k, 0.)\n return z\n return Uses(self.possibleVariables + o.possibleVariables,\n self.actualVariables + o.actualVariables,\n merge(self.possibleUses, o.possibleUses),\n merge(self.actualUses, o.actualUses))\n\n def __iadd__(self, o):\n self.possibleVariables += o.possibleVariables\n self.actualVariables += o.actualVariables\n for k, v in o.possibleUses.items():\n self.possibleUses[k] = self.possibleUses.get(k, 0.) + v\n for k, v in o.actualUses.items():\n self.actualUses[k] = self.actualUses.get(k, 0.) 
+ v\n return self\n\n @staticmethod\n def join(z, *weightedUses):\n \"\"\"Consumes weightedUses\"\"\"\n if not weightedUses:\n Uses.empty\n if len(weightedUses) == 1:\n return weightedUses[0][1]\n for w, u in weightedUses:\n u *= exp(w - z)\n total = Uses()\n total.possibleVariables = sum(\n u.possibleVariables for _, u in weightedUses)\n total.actualVariables = sum(u.actualVariables for _, u in weightedUses)\n total.possibleUses = defaultdict(float)\n total.actualUses = defaultdict(float)\n for _, u in weightedUses:\n for k, v in u.possibleUses.items():\n total.possibleUses[k] += v\n for k, v in u.actualUses.items():\n total.actualUses[k] += v\n return total\n\n\nUses.empty = Uses()\n\nclass ContextualGrammar:\n def __init__(self, noParent, variableParent, library):\n self.noParent, self.variableParent, self.library = noParent, variableParent, library\n\n self.productions = [(None,t,p) for _,t,p in self.noParent.productions ]\n self.primitives = [p for _,_2,p in self.productions ]\n\n self.continuationType = noParent.continuationType\n assert variableParent.continuationType == self.continuationType\n\n assert set(noParent.primitives) == set(variableParent.primitives)\n assert set(variableParent.primitives) == set(library.keys())\n for e,gs in library.items():\n assert len(gs) == len(e.infer().functionArguments())\n for g in gs:\n assert set(g.primitives) == set(library.keys())\n assert g.continuationType == self.continuationType\n\n def untorch(self):\n return ContextualGrammar(self.noParent.untorch(), self.variableParent.untorch(),\n {e: [g.untorch() for g in gs ]\n for e,gs in self.library.items() })\n\n def randomWeights(self, r):\n \"\"\"returns a new grammar with random weights drawn from r. calls `r` w/ old weight\"\"\"\n return ContextualGrammar(self.noParent.randomWeights(r),\n self.variableParent.randomWeights(r),\n {e: [g.randomWeights(r) for g in gs]\n for e,gs in self.library.items() })\n def __str__(self):\n lines = [\"No parent:\",str(self.noParent),\"\",\n \"Variable parent:\",str(self.variableParent),\"\",\n \"\"]\n for e,gs in self.library.items():\n for j,g in enumerate(gs):\n lines.extend([\"Parent %s, argument index %s\"%(e,j),\n str(g),\n \"\"])\n return \"\\n\".join(lines)\n\n def json(self):\n return {\"noParent\": self.noParent.json(),\n \"variableParent\": self.variableParent.json(),\n \"productions\": [{\"program\": str(e),\n \"arguments\": [gp.json() for gp in gs ]}\n for e,gs in self.library.items() ]}\n\n @staticmethod\n def fromGrammar(g):\n return ContextualGrammar(g, g,\n {e: [g]*len(e.infer().functionArguments())\n for e in g.primitives })\n \n\n class LS: # likelihood summary\n def __init__(self, owner):\n self.noParent = LikelihoodSummary()\n self.variableParent = LikelihoodSummary()\n self.library = {e: [LikelihoodSummary() for _ in gs] for e,gs in owner.library.items() }\n\n def record(self, parent, parentIndex, actual, possibles, constant):\n if parent is None: ls = self.noParent\n elif parent.isIndex: ls = self.variableParent\n else: ls = self.library[parent][parentIndex]\n ls.record(actual, possibles, constant=constant)\n\n def join(self, other):\n self.noParent.join(other.noParent)\n self.variableParent.join(other.variableParent)\n for e,gs in self.library.items():\n for g1,g2 in zip(gs, other.library[e]):\n g1.join(g2)\n\n def logLikelihood(self, owner):\n return self.noParent.logLikelihood(owner.noParent) + \\\n self.variableParent.logLikelihood(owner.variableParent) + \\\n sum(r.logLikelihood(g)\n for e, rs in self.library.items()\n for r,g in 
zip(rs, owner.library[e]) ) \n def numerator(self, owner):\n return self.noParent.numerator(owner.noParent) + \\\n self.variableParent.numerator(owner.variableParent) + \\\n sum(r.numerator(g)\n for e, rs in self.library.items()\n for r,g in zip(rs, owner.library[e]) ) \n def denominator(self, owner):\n return self.noParent.denominator(owner.noParent) + \\\n self.variableParent.denominator(owner.variableParent) + \\\n sum(r.denominator(g)\n for e, rs in self.library.items()\n for r,g in zip(rs, owner.library[e]) ) \n\n def likelihoodSummary(self, parent, parentIndex, context, environment, request, expression):\n if request.isArrow():\n assert expression.isAbstraction\n return self.likelihoodSummary(parent, parentIndex,\n context,\n [request.arguments[0]] + environment,\n request.arguments[1],\n expression.body)\n if parent is None: g = self.noParent\n elif parent.isIndex: g = self.variableParent\n else: g = self.library[parent][parentIndex] \n candidates = g.buildCandidates(request, context, environment,\n normalize=False, returnTable=True)\n\n # A list of everything that would have been possible to use here\n possibles = [p for p in candidates.keys() if not p.isIndex]\n numberOfVariables = sum(p.isIndex for p in candidates.keys())\n if numberOfVariables > 0:\n possibles += [Index(0)]\n\n f, xs = expression.applicationParse()\n\n assert f in candidates\n\n thisSummary = self.LS(self)\n thisSummary.record(parent, parentIndex,\n f, possibles,\n constant= -math.log(numberOfVariables) if f.isIndex else 0)\n\n _, tp, context = candidates[f]\n argumentTypes = tp.functionArguments()\n assert len(xs) == len(argumentTypes)\n\n for i, (argumentType, argument) in enumerate(zip(argumentTypes, xs)):\n argumentType = argumentType.apply(context)\n context, newSummary = self.likelihoodSummary(f, i,\n context, environment, argumentType, argument)\n thisSummary.join(newSummary)\n\n return context, thisSummary\n\n def closedLikelihoodSummary(self, request, expression):\n return self.likelihoodSummary(None,None,\n Context.EMPTY,[],\n request, expression)[1]\n\n def logLikelihood(self, request, expression):\n return self.closedLikelihoodSummary(request, expression).logLikelihood(self)\n\n def sample(self, request, maximumDepth=8, maxAttempts=None):\n attempts = 0\n while True:\n try:\n _, e = self._sample(None, None, Context.EMPTY, [], request, maximumDepth)\n return e\n except NoCandidates:\n if maxAttempts is not None:\n attempts += 1\n if attempts > maxAttempts: return None\n continue\n \n def _sample(self, parent, parentIndex, context, environment, request, maximumDepth):\n if request.isArrow():\n context, body = self._sample(parent, parentIndex, context,\n [request.arguments[0]] + environment,\n request.arguments[1],\n maximumDepth)\n return context, Abstraction(body)\n if parent is None: g = self.noParent\n elif parent.isIndex: g = self.variableParent\n else: g = self.library[parent][parentIndex]\n candidates = g.buildCandidates(request, context, environment,\n normalize=True, returnProbabilities=True,\n mustBeLeaf=(maximumDepth <= 1))\n newType, chosenPrimitive, context = sampleDistribution(candidates)\n\n xs = newType.functionArguments()\n returnValue = chosenPrimitive\n\n for j,x in enumerate(xs):\n x = x.apply(context)\n context, x = self._sample(chosenPrimitive, j, context, environment, x, maximumDepth - 1)\n returnValue = Application(returnValue, x)\n \n return context, returnValue\n\n def expectedUsesMonteCarlo(self, request, debug=None):\n import numpy as np\n n = 0\n u = 
[0.]*len(self.primitives)\n primitives = list(sorted(self.primitives, key=str))\n noInventions = all( not p.isInvented for p in primitives )\n primitive2index = {primitive: i\n for i, primitive in enumerate(primitives)\n if primitive.isInvented or noInventions }\n eprint(primitive2index)\n ns = 10000\n with timing(f\"calculated expected uses using Monte Carlo simulation w/ {ns} samples\"):\n for _ in range(ns):\n p = self.sample(request, maxAttempts=0)\n if p is None: continue\n n += 1\n if debug and n < 10:\n eprint(debug, p)\n for _, child in p.walk():\n if child not in primitive2index: continue\n u[primitive2index[child]] += 1.0\n u = np.array(u)/n\n if debug:\n eprint(f\"Got {n} samples. Feature vector:\\n{u}\")\n eprint(f\"Likely used primitives: {[p for p,i in primitive2index.items() if u[i] > 0.5]}\")\n eprint(f\"Likely used primitive indices: {[i for p,i in primitive2index.items() if u[i] > 0.5]}\")\n return u\n\n def featureVector(self, _=None, requests=None, onlyInventions=True, normalize=True):\n \"\"\"\n Returns the probabilities licensed by the type system.\n This is like the grammar productions, but with irrelevant junk removed.\n Its intended use case is for clustering; it should be strictly better than the raw transition matrix.\n \"\"\"\n if requests is None:\n if self.continuationType: requests = {self.continuationType}\n elif any( 'REAL' == str(p) for p in self.primitives ): requests = set()\n elif any( 'STRING' == str(p) for p in self.primitives ): requests = {tlist(tcharacter)}\n else: requests = set()\n requests = {r.returns() for r in requests}\n features = []\n logWeights = []\n for l,t,p in sorted(self.noParent.productions,\n key=lambda z: str(z[2])):\n if onlyInventions and not p.isInvented: continue\n if any( canUnify(r, t.returns()) for r in requests ) or len(requests) == 0:\n logWeights.append(l)\n features.append(logWeights)\n for parent in sorted(self.primitives, key=str):\n if onlyInventions and not parent.isInvented: continue\n if parent not in self.library: continue\n argumentTypes = parent.infer().functionArguments()\n for j,g in enumerate(self.library[parent]):\n argumentType = argumentTypes[j]\n logWeights = []\n for l,t,p in sorted(g.productions,\n key=lambda z: str(z[2])):\n if onlyInventions and not p.isInvented: continue\n if canUnify(argumentType.returns(), t.returns()):\n logWeights.append(l)\n features.append(logWeights)\n\n if normalize:\n features = [ [math.exp(w - z) for w in lw ]\n for lw in features\n if lw\n for z in [lse(lw)] ]\n import numpy as np\n return np.array([f\n for lw in features\n for f in lw])\n\n def enumeration(self,context,environment,request,upperBound,\n parent=None, parentIndex=None,\n maximumDepth=20,\n lowerBound=0.):\n '''Enumerates all programs whose MDL satisfies: lowerBound <= MDL < upperBound'''\n if upperBound < 0 or maximumDepth == 1:\n return\n\n if request.isArrow():\n v = request.arguments[0]\n for l, newContext, b in self.enumeration(context, [v] + environment,\n request.arguments[1],\n parent=parent, parentIndex=parentIndex,\n upperBound=upperBound,\n lowerBound=lowerBound,\n maximumDepth=maximumDepth):\n yield l, newContext, Abstraction(b)\n else:\n if parent is None: g = self.noParent\n elif parent.isIndex: g = self.variableParent\n else: g = self.library[parent][parentIndex]\n\n candidates = g.buildCandidates(request, context, environment,\n normalize=True)\n\n for l, t, p, newContext in candidates:\n mdl = -l\n if not (mdl < upperBound):\n continue\n\n xs = t.functionArguments()\n for aL, aK, application 
in\\\n self.enumerateApplication(newContext, environment, p, xs,\n parent=p,\n upperBound=upperBound + l,\n lowerBound=lowerBound + l,\n maximumDepth=maximumDepth - 1):\n yield aL + l, aK, application\n\n def enumerateApplication(self, context, environment,\n function, argumentRequests,\n # Upper bound on the description length of all of\n # the arguments\n upperBound,\n # Lower bound on the description length of all of\n # the arguments\n lowerBound=0.,\n maximumDepth=20,\n parent=None, \n originalFunction=None,\n argumentIndex=0):\n assert parent is not None\n if upperBound < 0. or maximumDepth == 1:\n return\n if originalFunction is None:\n originalFunction = function\n\n if argumentRequests == []:\n if lowerBound <= 0. and 0. < upperBound:\n yield 0., context, function\n else:\n return\n else:\n argRequest = argumentRequests[0].apply(context)\n laterRequests = argumentRequests[1:]\n for argL, newContext, arg in self.enumeration(context, environment, argRequest,\n parent=parent, parentIndex=argumentIndex,\n upperBound=upperBound,\n lowerBound=0.,\n maximumDepth=maximumDepth):\n if violatesSymmetry(originalFunction, arg, argumentIndex):\n continue\n\n newFunction = Application(function, arg)\n for resultL, resultK, result in self.enumerateApplication(newContext, environment, newFunction,\n laterRequests,\n parent=parent,\n upperBound=upperBound + argL,\n lowerBound=lowerBound + argL,\n maximumDepth=maximumDepth,\n originalFunction=originalFunction,\n argumentIndex=argumentIndex + 1):\n yield resultL + argL, resultK, result\n \n \n\n\ndef violatesSymmetry(f, x, argumentIndex):\n if not f.isPrimitive:\n return False\n while x.isApplication:\n x = x.f\n if not x.isPrimitive:\n return False\n f = f.name\n x = x.name\n if f == \"car\":\n return x == \"cons\" or x == \"empty\"\n if f == \"cdr\":\n return x == \"cons\" or x == \"empty\"\n if f == \"+\":\n return x == \"0\" or (argumentIndex == 1 and x == \"+\")\n if f == \"-\":\n return argumentIndex == 1 and x == \"0\"\n if f == \"empty?\":\n return x == \"cons\" or x == \"empty\"\n if f == \"zero?\":\n return x == \"0\" or x == \"1\"\n if f == \"index\" or f == \"map\" or f == \"zip\":\n return x == \"empty\"\n if f == \"range\":\n return x == \"0\"\n if f == \"fold\":\n return argumentIndex == 1 and x == \"empty\"\n return False\n\ndef batchLikelihood(jobs):\n \"\"\"Takes as input a set of (program, request, grammar) and returns a dictionary mapping each of these to its likelihood under the grammar\"\"\"\n superGrammar = Grammar.uniform(list({p for _1,_2,g in jobs for p in g.primitives}),\n continuationType=list(jobs)[0][-1].continuationType)\n programsAndRequests = {(program, request)\n for program, request, grammar in jobs}\n with timing(f\"Calculated {len(programsAndRequests)} likelihood summaries\"):\n summary = {(program, request): superGrammar.closedLikelihoodSummary(request, program)\n for program, request in programsAndRequests}\n with timing(f\"Calculated log likelihoods from summaries\"):\n response = {}\n for program, request, grammar in jobs:\n fast = summary[(program, request)].logLikelihood_overlyGeneral(grammar)\n if False: # debugging\n slow = grammar.logLikelihood(request, program)\n print(program)\n eprint(grammar.closedLikelihoodSummary(request, program))\n eprint(superGrammar.closedLikelihoodSummary(request, program))\n print()\n assert abs(fast - slow) < 0.0001\n response[(program, request, grammar)] = fast\n return response\n\nif __name__ == \"__main__\":\n from dreamcoder.domains.arithmetic.arithmeticPrimitives import 
*\n g = ContextualGrammar.fromGrammar(Grammar.uniform([k0,k1,addition, subtraction]))\n g = g.randomWeights(lambda *a: random.random())\n #p = Program.parse(\"(lambda (+ 1 $0))\")\n request = arrow(tint,tint)\n for ll,_,p in g.enumeration(Context.EMPTY,[],request,\n 12.):\n ll_ = g.logLikelihood(request,p)\n print(ll,p,ll_)\n d = abs(ll - ll_)\n assert d < 0.0001\n" ]
[ [ "numpy.array" ] ]
ahmetustun/adapter-transformers
[ "0c914b1592a6e4553957499c118abcd1e3aa9c00" ]
[ "tests/test_adapter_training.py" ]
[ "import copy\nimport unittest\n\nimport torch\n\nfrom transformers import (\n AutoModelForSequenceClassification,\n AutoModelWithHeads,\n AutoTokenizer,\n GlueDataset,\n GlueDataTrainingArguments,\n Trainer,\n TrainingArguments,\n)\nfrom transformers.testing_utils import require_torch\n\n\ndef filter_parameters(model, filter_string):\n return {k: v for (k, v) in model.named_parameters() if filter_string in k}\n\n\n@require_torch\nclass AdapterTrainingTest(unittest.TestCase):\n\n model_names = [\"bert-base-uncased\"]\n\n def test_train_single_adapter(self):\n for model_name in self.model_names:\n with self.subTest(model_name=model_name):\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n model = AutoModelWithHeads.from_pretrained(model_name)\n\n # add two adapters: one will be trained and the other should be frozen\n model.add_adapter(\"mrpc\", \"text_task\")\n model.add_adapter(\"dummy\", \"text_task\")\n model.add_classification_head(\"mrpc\")\n\n self.assertIn(\"mrpc\", model.config.adapters.adapters)\n self.assertIn(\"dummy\", model.config.adapters.adapters)\n\n # train the mrpc adapter -> should be activated & unfreezed\n model.train_adapter(\"mrpc\")\n self.assertEqual([[\"mrpc\"]], model.active_adapters)\n\n # all weights of the adapter should be activated\n for k, v in filter_parameters(model, \"text_task_adapters.mrpc\").items():\n self.assertTrue(v.requires_grad, k)\n # all weights of the adapter not used for training should be freezed\n for k, v in filter_parameters(model, \"text_task_adapters.dummy\").items():\n self.assertFalse(v.requires_grad, k)\n # weights of the model should be freezed (check on some examples)\n for k, v in filter_parameters(model, \"encoder.layer.0.attention\").items():\n self.assertFalse(v.requires_grad, k)\n\n state_dict_pre = copy.deepcopy(model.state_dict())\n\n # setup dataset\n data_args = GlueDataTrainingArguments(\n task_name=\"mrpc\", data_dir=\"./tests/fixtures/tests_samples/MRPC\", overwrite_cache=True\n )\n train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=\"train\")\n training_args = TrainingArguments(\n output_dir=\"./examples\", do_train=True, learning_rate=0.1, max_steps=5, no_cuda=True\n )\n\n # evaluate\n trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset,)\n trainer.train()\n\n for ((k1, v1), (k2, v2)) in zip(state_dict_pre.items(), model.state_dict().items()):\n if \"mrpc\" in k1:\n self.assertFalse(torch.equal(v1, v2))\n else:\n self.assertTrue(torch.equal(v1, v2))\n\n def test_train_adapter_fusion(self):\n for model_name in self.model_names:\n with self.subTest(model_name=model_name):\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n model = AutoModelForSequenceClassification.from_pretrained(model_name)\n\n # load the adapters to be fused\n model.load_adapter(\"sts/mrpc@ukp\", with_head=False)\n model.load_adapter(\"sts/qqp@ukp\", with_head=False)\n model.load_adapter(\"sts/sts-b@ukp\", with_head=False)\n\n self.assertIn(\"mrpc\", model.config.adapters.adapters)\n self.assertIn(\"qqp\", model.config.adapters.adapters)\n self.assertIn(\"sts-b\", model.config.adapters.adapters)\n\n # setup fusion\n adapter_setup = [[\"mrpc\", \"qqp\", \"sts-b\"]]\n model.add_fusion(adapter_setup[0])\n model.train_fusion(adapter_setup[0])\n model.set_active_adapters(adapter_setup)\n self.assertEqual(adapter_setup, model.active_adapters)\n\n # all weights of the adapters should be frozen (test for one)\n for k, v in filter_parameters(model, \"text_task_adapters.mrpc\").items():\n 
self.assertFalse(v.requires_grad, k)\n # all weights of the fusion layer should be activated\n for k, v in filter_parameters(model, \"adapter_fusion_layer\").items():\n self.assertTrue(v.requires_grad, k)\n # weights of the model should be freezed (check on some examples)\n for k, v in filter_parameters(model, \"encoder.layer.0.attention\").items():\n self.assertFalse(v.requires_grad, k)\n\n state_dict_pre = copy.deepcopy(model.state_dict())\n\n # setup dataset\n data_args = GlueDataTrainingArguments(\n task_name=\"mrpc\", data_dir=\"./tests/fixtures/tests_samples/MRPC\", overwrite_cache=True\n )\n train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=\"train\")\n training_args = TrainingArguments(\n output_dir=\"./examples\", do_train=True, learning_rate=0.1, max_steps=5, no_cuda=True\n )\n\n # evaluate\n trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset,)\n trainer.train()\n\n for ((k1, v1), (k2, v2)) in zip(state_dict_pre.items(), model.state_dict().items()):\n if \"adapter_fusion_layer\" in k1 or \"classifier\" in k1:\n self.assertFalse(torch.equal(v1, v2), k1)\n else:\n self.assertTrue(torch.equal(v1, v2), k1)\n" ]
[ [ "torch.equal" ] ]
techalchemy/airflow-postgres-plugin
[ "57b8d4c8a2d5463a226e8a720e6c84108572a056" ]
[ "src/airflow_postgres_plugin/hooks/postgres_hook.py" ]
[ "# -*- coding=utf-8 -*-\n\nimport atexit\nimport contextlib\nimport csv\nimport datetime\nimport decimal\nimport hashlib\nimport io\nimport os\nfrom functools import reduce\nfrom typing import IO, Any, Dict, Generator, Iterator, List, Optional, Type, Union\n\nimport dateutil.utils\nimport pandas\nimport postgres_copy\nimport psycopg2\nimport psycopg2.sql\nimport sqlalchemy\nfrom airflow.exceptions import AirflowException\nfrom airflow.hooks.dbapi_hook import DbApiHook\nfrom sqlalchemy.dialects.postgresql import insert\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom sqlalchemy.orm.session import Session\nfrom sqlalchemy.schema import MetaData\n\nTBasic = Union[str, int, bool]\n\n\nclass PostgresHook(DbApiHook):\n\n conn_name_attr: str = \"postgres_conn_id\"\n default_conn_name: str = \"postgres_default\"\n supports_autocommit: bool = True\n\n def __init__(\n self, postgres_conn_id: str, database: str = None, schema: str = None\n ) -> None:\n super(PostgresHook, self).__init__()\n self.postgres_conn_id = postgres_conn_id\n self._engine: sqlalchemy.engine.Engine = None\n self.database = database\n self.schema = schema\n\n @staticmethod\n def _serialize_cell(cell, conn):\n return cell\n\n @property\n def engine(self):\n if not self._engine:\n self._engine = self.get_sqlalchemy_engine()\n atexit.register(self._engine.dispose)\n return self._engine\n\n def get_conn(self) -> psycopg2._psycopg.connection:\n connection = self.get_connection(self.postgres_conn_id)\n if connection.schema and not self.database:\n self.database = connection.schema\n connection_args: Dict[str, TBasic]\n connection_args = {\n \"host\": connection.host,\n \"user\": connection.login,\n \"password\": connection.password,\n \"dbname\": self.database or connection.schema,\n \"port\": connection.port,\n }\n schema = next(\n iter([schema for schema in [self.database, connection.schema]]), None\n )\n if isinstance(schema, str):\n connection_args[\"options\"] = f\"-c search_path={schema}\"\n\n for (key, value) in connection.extra_dejson.items():\n if key in (\n \"sslmode\",\n \"sslcert\",\n \"sslkey\",\n \"sslrootkey\",\n \"sslcrl\",\n \"application_name\",\n \"keepalives_idle\",\n ):\n connection_args[key] = value\n\n self.log.info(f\"establishing connection to postgres at {connection.host!r}\")\n return psycopg2.connect(**connection_args)\n\n def get_sqlalchemy_sessionmaker(self) -> Type[Session]:\n self.log.info(\n f\"buliding sqlalchemy sessionmaker instance with engine {self.engine!r}\"\n )\n return scoped_session(sessionmaker(bind=self.engine, expire_on_commit=False))\n\n def get_sqlalchemy_session(self) -> Session:\n self.log.info(f\"building sqlalchemy session\")\n return self.get_sqlalchemy_sessionmaker()()\n\n def get_sqlalchemy_metadata(self, schema: str = None) -> MetaData:\n if not schema:\n schema = self.schema\n self.log.info(\n f\"building sqlalchemy metadata with engine {self.engine!r} \"\n f\"with schema {schema!r}\"\n )\n return MetaData(bind=self.engine, schema=schema)\n\n def get_sqlalchemy_table(self, name: str, schema: str = None) -> sqlalchemy.Table:\n self.log.info(f\"getting introspected sqlalchemy table {name!r}\")\n return sqlalchemy.Table(\n name,\n self.get_sqlalchemy_metadata(schema=schema),\n autoload=True,\n autoload_with=self.engine,\n )\n\n @contextlib.contextmanager\n def sqlalchemy_session(self) -> Generator[Session, None, None]:\n with contextlib.closing(self.get_sqlalchemy_session()) as session:\n self.log.info(f\"entering sqlalchemy session context with session 
{session!r}\")\n try:\n yield session\n if not self.get_autocommit(session):\n session.commit()\n except Exception as exc:\n self.log.exception(\n f\"exception occured during session {session!r}: {exc!r}\"\n )\n session.rollback()\n\n def duplicate_table_to_temp_table(\n self,\n from_table: str,\n temp_name: str = None,\n schema: str = None,\n include_constraints: bool = False,\n ):\n if not temp_name:\n random_hash = hashlib.sha256(os.urandom(128)).hexdigest()[:4]\n date = dateutil.utils.today().strftime(\"%Y%m%d\")\n temp_name = f\"_temp_{from_table}_{random_hash}_{date}\"\n if not schema:\n schema = self.schema\n self.log.debug(\n f\"temporary table requested, creating temporary table at {temp_name!r}\"\n )\n table = self.duplicate_table(\n from_table, temp_name, include_constraints=include_constraints, schema=schema\n )\n return table\n\n def duplicate_table(\n self,\n name: str,\n duplicate_name: str,\n include_constraints: bool = False,\n schema: str = None,\n ) -> sqlalchemy.Table:\n self.log.info(f\"duplicating table {name!r} to {duplicate_name!r}\")\n metadata = self.get_sqlalchemy_metadata(schema=schema)\n original = self.get_sqlalchemy_table(name, schema=schema)\n columns = [_.copy() for _ in original.columns] + (\n [_.copy() for _ in original.constraints] if include_constraints else []\n )\n duplicate = sqlalchemy.Table(duplicate_name, metadata, *columns, schema=schema)\n try:\n metadata.create_all(tables=[duplicate])\n except Exception as exc:\n raise AirflowException(f\"failed to duplicate table: {duplicate} => {exc!r}\")\n self.log.info(f\"Successfully duplicated table {duplicate}\")\n return duplicate\n\n def upsert(\n self,\n from_table: sqlalchemy.Table,\n to_table: sqlalchemy.Table,\n extra_constraints: List[str] = None,\n **upsert_params,\n ):\n insert_statement: sqlalchemy.sql.dml.Insert = insert(to_table).from_select(\n from_table.columns.keys(), from_table.select()\n )\n inspected: sqlalchemy.Table = sqlalchemy.inspect(to_table)\n primary_keys: List[str] = [_.name for _ in inspected.primary_key]\n if isinstance(extra_constraints, list) and len(extra_constraints) > 0:\n primary_keys = extra_constraints\n upsert_statement: sqlalchemy.sql.dml.Insert = insert_statement.on_conflict_do_update(\n index_elements=primary_keys,\n set_={\n column.name: getattr(insert_statement.excluded, column.name)\n for column in inspected.columns\n },\n )\n with self.sqlalchemy_session() as session:\n upsert_text = \"\"\n if \"start_datetime\" in upsert_params:\n upsert_text += \"SET polling_interval.start_datetime TO :start_datetime;\"\n if \"end_datetime\" in upsert_params:\n upsert_text += \"\\nSET polling_interval.end_datetime TO :end_datetime;\"\n if upsert_text:\n session.execute(sqlalchemy.text(upsert_text), upsert_params)\n session.execute(sqlalchemy.text(str(upsert_statement)))\n\n def load(self, table: str, filepath: str) -> None:\n self.log.info(f\"loading table {table!r} with content at {filepath!r}\")\n with open(filepath, \"r\") as fp:\n reader = csv.reader(fp)\n row = next(iter(reader), None)\n if not row:\n self.log.warning(\n f\"failed to load data from file {filepath!r}: File is empty.\"\n )\n return None\n columns = tuple(row[:])\n fp.seek(0)\n self.log.info(f\"found columns: {columns}\")\n target_table = self.get_sqlalchemy_table(table)\n self.log.info(f\"writing to columns: {target_table.columns.keys()}\")\n\n postgres_copy.copy_from(\n fp,\n self.get_sqlalchemy_table(table),\n self.engine,\n format=\"csv\",\n header=True,\n columns=columns,\n )\n\n def 
get_table_if_exists(\n self, table: str, schema: str = \"public\", engine: sqlalchemy.engine.Engine = None\n ) -> Optional[sqlalchemy.Table]:\n self.log.info(\"checking database for destination table...\")\n if not self.engine.has_table(table, schema=schema):\n self.log.warn(f\"no such table for loading data: {schema}.{table}\")\n return None\n self.log.info(f\"loading table {table!r}\")\n sqlalchemy_table = self.get_sqlalchemy_table(table, schema=schema)\n return sqlalchemy_table\n\n def get_sqlalchemy_col_types(\n self, table: sqlalchemy.Table, exclude: List[str] = None\n ) -> Dict[str, Type]:\n if exclude is None:\n exclude = []\n elif isinstance(exclude, str):\n exclude = [exclude]\n return {c.name: c.type for c in table.columns if c.name not in exclude}\n\n def get_sqlalchemy_table_python_types(\n self, table: sqlalchemy.Table, exclude: List[str] = None\n ) -> Dict[str, Type]:\n if exclude is None:\n exclude = []\n elif isinstance(exclude, str):\n exclude = [exclude]\n column_types = self.get_sqlalchemy_col_types(table, exclude=exclude)\n conversions = {\n name: c.python_type if c.python_type is not decimal.Decimal else \"float64\"\n for name, c in column_types.items()\n if c.python_type not in (datetime.date, datetime.datetime)\n }\n return conversions\n\n def load_df(\n self,\n df: pandas.DataFrame,\n table: str,\n schema: str = \"public\",\n chunksize: int = 10000,\n include_index: bool = False,\n engine: Optional[sqlalchemy.engine.Engine] = None,\n conn: Optional[psycopg2.extensions.connection] = None,\n col_type_map: Dict[str, sqlalchemy.sql.type_api.TypeEngine] = None,\n create_tables: bool = False,\n column_map: Dict[str, str] = None,\n ) -> Optional[str]:\n if not schema:\n schema = self.schema\n if not engine and not conn:\n engine = self.engine\n if engine and not create_tables and not engine.has_table(table, schema=schema):\n return None\n if not col_type_map:\n col_type_map = self.get_sqlalchemy_col_types(\n table=self.get_table_if_exists(table, schema=schema, engine=self.engine),\n exclude=[\"id\"],\n )\n # sql_args = {\n # \"name\": table,\n # \"con\": engine or conn,\n # \"schema\": schema,\n # \"if_exists\": \"append\",\n # \"chunksize\": chunksize,\n # \"method\": \"multi\",\n # \"index\": include_index,\n # \"dtype\": col_type_map,\n # }\n # df.to_sql(**sql_args)\n output = io.StringIO()\n df.to_csv(\n output,\n header=False,\n index=False,\n quoting=csv.QUOTE_MINIMAL,\n chunksize=chunksize,\n )\n if column_map:\n cols = [column_map.get(col, col) for col in df.columns if col != \"id\"]\n else:\n cols = [col for col in df.columns if col != \"id\"]\n output.seek(0)\n if not conn:\n with contextlib.closing(self.get_conn()) as conn:\n self.set_autocommit(conn, True)\n self.bulk_load(table, output, schema=schema, conn=conn, columns=cols)\n else:\n self.bulk_load(table, output, schema=schema, conn=conn, columns=cols)\n return table\n\n def stream_csv_to_df(\n self,\n csv_file: Union[IO, str],\n schema: str = \"public\",\n sep: str = \",\",\n compression: str = \"infer\",\n chunksize: int = None,\n table: Optional[str] = None,\n quoting: int = csv.QUOTE_MINIMAL,\n include_index: bool = False,\n col_type_map: Dict[str, Type] = None,\n columns: List[str] = None,\n ) -> Generator[None, None, pandas.DataFrame]:\n read_kwargs = {\n \"sep\": sep,\n \"parse_dates\": True,\n \"infer_datetime_format\": True,\n \"compression\": compression,\n \"chunksize\": chunksize,\n \"quoting\": quoting,\n \"error_bad_lines\": False,\n \"warn_bad_lines\": True,\n \"na_filter\": False,\n }\n if 
columns:\n read_kwargs[\"usecols\"] = columns\n if table is not None and not col_type_map:\n table_instance = self.get_table_if_exists(table, schema=schema)\n col_type_map = self.get_sqlalchemy_table_python_types(\n table_instance, exclude=[\"id\"]\n )\n if col_type_map:\n read_kwargs[\"dtype\"] = {k.lower(): v for k, v in col_type_map.items()}\n df_stream = pandas.read_csv(csv_file, **read_kwargs)\n return df_stream\n\n def load_pandas(\n self,\n table: str,\n schema: str = \"public\",\n sep: str = \",\",\n compression: str = \"infer\",\n chunksize: int = 10000,\n filepath: Union[IO, str] = None,\n quoting: int = csv.QUOTE_MINIMAL,\n include_index: bool = False,\n templates_dict: Dict[str, str] = None,\n create_tables: bool = False,\n ) -> Optional[str]:\n target_table = self.get_table_if_exists(table, schema=schema, engine=self.engine)\n if target_table is None and not create_tables:\n return None\n assert target_table is not None\n # temp_table = self.duplicate_table_to_temp_table(table, schema=schema)\n col_type_map = self.get_sqlalchemy_col_types(target_table, exclude=[\"id\"])\n csv_python_types = self.get_sqlalchemy_table_python_types(\n target_table, exclude=[\"id\"]\n )\n columns = list(col_type_map.keys())\n col_map = {k.lower(): k for k in col_type_map.keys()}\n df_stream_kwargs = {\n \"schema\": schema,\n \"sep\": sep,\n \"compression\": compression,\n \"chunksize\": chunksize,\n \"quoting\": quoting,\n \"include_index\": include_index,\n \"col_type_map\": csv_python_types,\n \"columns\": [c.lower() for c in columns],\n }\n with contextlib.closing(self.get_conn()) as conn:\n self.set_autocommit(conn, True)\n try:\n for df in self.stream_csv_to_df(filepath, **df_stream_kwargs):\n self.load_df( # type: ignore\n df,\n table=target_table.name,\n conn=conn,\n chunksize=chunksize,\n include_index=include_index,\n schema=schema,\n col_type_map=col_type_map,\n create_tables=create_tables,\n column_map=col_map,\n )\n except Exception as exc:\n raise AirflowException(\n f\"Failed loading dataframes for table {target_table}:\\n\" f\"{exc!r}\"\n )\n return target_table.name\n\n def dump(self, table: str, filepath: str) -> None:\n self.log.info(f\"dumping content of table {table!r} to {filepath!r}\")\n with self.sqlalchemy_session() as session:\n with open(filepath, \"w\") as fp:\n postgres_copy.copy_to(\n session.query(self.get_sqlalchemy_table(table)),\n fp,\n self.engine,\n format=\"csv\",\n header=True,\n )\n\n def query(\n self,\n sql: str,\n parameters: List[str],\n include_headers: bool = True,\n returns_rows: bool = True,\n ) -> Generator[Optional[List[Any]], None, None]:\n if parameters is None:\n parameters = []\n\n if len(parameters) > 0:\n sql = sql % tuple(parameters)\n\n self.log.info(f\"executing query {sql!r}\")\n sql_text = sqlalchemy.text(sql)\n with self.sqlalchemy_session() as session:\n try:\n results = session.execute(sql_text)\n except Exception as exc:\n raise AirflowException(\n f\"failed to execute query: {sql_text} with exception: {exc!r}\"\n )\n if include_headers:\n yield results.keys()\n if not returns_rows:\n return None\n for row in results:\n yield row\n\n def export(\n self,\n sql: str,\n filepath: str,\n parameters: List[str],\n include_headers: bool = True,\n **kwargs,\n ) -> str:\n self.log.info(f\"writing results of sql {sql!r} to {filepath!r}\")\n with open(filepath, \"w\") as fp:\n writer = csv.writer(fp)\n for result in self.query(\n sql, parameters=parameters, include_headers=include_headers\n ):\n if result is not None:\n writer.writerow(result)\n 
return filepath\n\n @contextlib.contextmanager\n def closed_conn_if_created(self, conn=None):\n created = False\n if not conn:\n created = True\n conn = self.get_conn()\n try:\n yield conn\n finally:\n if created:\n conn.close()\n\n @contextlib.contextmanager\n def open_buffer_from_file(self, filename, open=open):\n is_buffer = False\n if isinstance(filename, io.StringIO):\n is_buffer = True\n f = filename\n else:\n f = open(filename, \"r+\")\n try:\n yield f\n finally:\n if not is_buffer and not f.closed:\n f.close()\n\n def copy_expert(self, sql, filename, open=open, conn=None):\n \"\"\"\n Executes SQL using psycopg2 copy_expert method.\n Necessary to execute COPY command without access to a superuser.\n\n Note: if this method is called with a \"COPY FROM\" statement and\n the specified input file does not exist, it creates an empty\n file and no data is loaded, but the operation succeeds.\n So if users want to be aware when the input file does not exist,\n they have to check its existence by themselves.\n \"\"\"\n if not (\n isinstance(filename, (io.TextIOBase, StringIteratorIO))\n or os.path.isfile(filename)\n ):\n with open(filename, \"w\"):\n pass\n\n with self.open_buffer_from_file(filename, open=open) as f:\n with self.closed_conn_if_created(conn=conn) as conn:\n with contextlib.closing(conn.cursor()) as cur:\n cur.copy_expert(sql, f)\n f.truncate(f.tell())\n conn.commit()\n\n def bulk_load(self, table, tmp_file, schema=None, conn=None, columns=None):\n \"\"\"\n Loads a tab-delimited file into a database table\n \"\"\"\n if schema:\n table = f\"{schema}.{table}\"\n if not columns:\n sql = \"COPY {table} FROM STDIN\".format(table=table)\n else:\n columns = [f'\"{column}\"' for column in columns]\n sql = \"COPY {table} ({columns}) FROM STDIN\".format(\n table=table, columns=\", \".join(columns)\n )\n sql = f\"{sql} CSV\"\n\n self.copy_expert(sql, tmp_file, conn=conn)\n\n def bulk_dump(self, table, tmp_file):\n \"\"\"\n Dumps a database table into a tab-delimited file\n \"\"\"\n self.copy_expert(\"COPY {table} TO STDOUT\".format(table=table), tmp_file)\n\n\nclass StringIteratorIO(io.TextIOBase):\n def __init__(self, iter: Iterator[str]):\n self._iter = iter\n self._buff = \"\"\n\n def readable(self) -> bool:\n return True\n\n def _read1(self, n: Optional[int] = None) -> str:\n while not self._buff:\n try:\n self._buff = next(self._iter)\n except StopIteration:\n break\n ret = self._buff[:n]\n self._buff = self._buff[len(ret) :]\n return ret\n\n def read(self, n: Optional[int] = None) -> str:\n line = []\n if n is None or n < 0:\n while True:\n m = self._read1()\n if not m:\n break\n line.append(m)\n else:\n while n > 0:\n m = self._read1(n)\n if not m:\n break\n n -= len(m)\n line.append(m)\n return \"\".join(line)\n" ]
[ [ "pandas.read_csv" ] ]
tchewik/isanlp_rst
[ "459864b3daeeb702acf5e65543181068439ce12c" ]
[ "src/isanlp_rst/greedy_rst_parser.py" ]
[ "import numpy as np\nimport pandas as pd\nimport sys\n\nfrom isanlp.annotation_rst import DiscourseUnit\n\n\nclass GreedyRSTParser:\n def __init__(self, tree_predictor, confidence_threshold=0.05, _same_sentence_bonus=0.):\n \"\"\"\n :param RSTTreePredictor tree_predictor:\n :param float confidence_threshold: minimum relation probability to append the pair into the tree\n \"\"\"\n self.tree_predictor = tree_predictor\n self.confidence_threshold = confidence_threshold\n self._same_sentence_bonus = _same_sentence_bonus\n\n def __call__(self, edus, annot_text, annot_tokens, annot_sentences, annot_lemma, annot_morph, annot_postag,\n annot_syntax_dep_tree, genre=None):\n \"\"\"\n :param list edus: DiscourseUnit\n :param str annot_text: original text\n :param list annot_tokens: isanlp.annotation.Token\n :param list annot_sentences: isanlp.annotation.Sentence\n :param list annot_postag: lists of str for each sentence\n :param annot_lemma: lists of str for each sentence\n :param annot_syntax_dep_tree: list of isanlp.annotation.WordSynt for each sentence\n :return: list of DiscourseUnit containing each extracted tree\n \"\"\"\n\n def to_merge(_scores):\n return np.argmax(np.array(_scores))\n\n self.tree_predictor.genre = genre\n\n nodes = edus\n max_id = self._get_max_id(nodes)\n\n # initialize scores\n features = self.tree_predictor.initialize_features(nodes,\n annot_text, annot_tokens,\n annot_sentences,\n annot_lemma, annot_morph, annot_postag,\n annot_syntax_dep_tree)\n\n scores = self._get_proba(features)\n\n while len(scores) > 1 and any([score > self.confidence_threshold for score in scores]):\n # select two nodes to merge\n j = to_merge(scores) # position of the pair in list\n if j + 1 >= len(nodes):\n return nodes\n\n # make the new node by merging node[j] + node[j+1]\n relation = self._get_relation(features.iloc[j])\n relation, nuclearity = relation.split('_')\n temp = DiscourseUnit(\n id=max_id + 1,\n left=nodes[j],\n right=nodes[j + 1],\n relation=relation,\n nuclearity=nuclearity,\n proba=min(1., scores[j]),\n text=annot_text[nodes[j].start:nodes[j + 1].end].strip()\n )\n\n max_id += 1\n\n # modify the node list\n nodes = nodes[:j] + [temp] + nodes[j + 2:]\n\n # modify the scores list\n if j == 0:\n _features = self.tree_predictor.extract_features(nodes[j], nodes[j + 1],\n annot_text, annot_tokens,\n annot_sentences,\n annot_lemma, annot_morph, annot_postag,\n annot_syntax_dep_tree)\n\n _scores = self._get_proba(_features)\n scores = _scores + scores[j + 2:]\n features = pd.concat([_features, features.iloc[j + 2:]])\n\n elif j + 1 < len(nodes):\n _features = self.tree_predictor.initialize_features([nodes[j - 1], nodes[j], nodes[j + 1]],\n annot_text, annot_tokens,\n annot_sentences,\n annot_lemma, annot_morph, annot_postag,\n annot_syntax_dep_tree)\n\n _scores = self._get_proba(_features)\n features = pd.concat([features.iloc[:j - 1], _features, features.iloc[j + 2:]])\n scores = scores[:j - 1] + _scores + scores[j + 2:]\n\n else:\n _features = self.tree_predictor.extract_features(nodes[j - 1], nodes[j],\n annot_text, annot_tokens,\n annot_sentences,\n annot_lemma, annot_morph, annot_postag,\n annot_syntax_dep_tree)\n _scores = self._get_proba(_features)\n\n scores = scores[:j - 1] + _scores\n features = pd.concat([features.iloc[:j - 1], _features])\n\n if len(scores) == 1 and scores[0] > self.confidence_threshold:\n relation = self._get_relation(features.iloc[0])\n relation, nuclearity = relation.split('_')\n root = DiscourseUnit(\n id=max_id + 1,\n left=nodes[0],\n right=nodes[1],\n 
relation=relation,\n nuclearity=nuclearity,\n proba=min(1., scores[0]),\n text=annot_text[nodes[0].start:nodes[1].end].strip()\n )\n nodes = [root]\n\n return nodes\n\n def _get_max_id(self, dus):\n max_id = dus[-1].id\n for du in dus[:-1]:\n if du.id > max_id:\n max_id = du.id\n\n return max_id\n\n def _get_relation(self, pair_feature):\n relation = 'joint_NN'\n\n try:\n relation = self.tree_predictor.predict_label(pair_feature)\n except RuntimeError as e:\n # Some vector sizes do not fit in the model\n print(e)\n\n return relation\n\n def _get_proba(self, pair_feature):\n proba = 0.0\n\n try:\n proba = self.tree_predictor.predict_pair_proba(pair_feature, _same_sentence_bonus=self._same_sentence_bonus)\n except RuntimeError as e:\n # Some vectors sizes do not fit in the model\n print(e)\n\n return proba\n" ]
[ [ "numpy.array", "pandas.concat" ] ]
manashpratim/Classification-of-Song-Attributes-using-Lyrics
[ "801b7d45bd98f3edbb4219d7e947a1bd20ec206c" ]
[ "svm/svm.py" ]
[ "import numpy as np\nfrom sklearn.svm import SVC\nimport pickle,sys\nimport warnings\nfrom sklearn.ensemble import GradientBoostingClassifier\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning) \n\n\ndid = sys.argv[1]\ndid = str(did)\n\ngenre_list = ['R&B','Country','Rap','Folk','Blues','Reggae','Religious','Metal']\nsample_song_dataset = pickle.load(open('song_vectors_for_genre_dataset_'+did+'.pickle','rb'))\ntrain=pickle.load(open(\"../../dataset/fell_80_20_train_\"+did+\".pickle\",\"rb\"))\ntest= pickle.load(open(\"../../dataset/fell_80_20_test_\"+did+\".pickle\",\"rb\"))\nid_detail= pickle.load(open(\"id_detail_\"+did+\".pickle\",\"rb\"))\ncount =0\nab=np.random.rand(len(sample_song_dataset['train']),300)\nlabel = np.chararray(len(sample_song_dataset['train']),itemsize=9)\nlabel[:]=''\n#print(label)\n#print (ab)\nfor songId in sample_song_dataset['train']:\n\tj=0\n\tfor feature in sample_song_dataset['train'][songId]['vector']:\n\t\tab[count][j]=feature\t\t\n\t\tj += 1\n\tlabel[count]=train[id_detail[count]]['genre']\n\tcount +=1\n#print (label)\nclf = SVC(random_state=10)\nclf.fit(ab,label)\n'''\t\nfor songId in sample_song_dataset['test']:\n\tj=0\n\tfor feature in sample_song_dataset['test'][songId]['vector']:\n\t\tab[count][j]=feature\n\t\tj +=1\n\tcount +=1\n\t\n#print(ab)\n'''\n\n\nprint(\"Total Count:\"+str(count)+\" Original Count:\"+str(len(sample_song_dataset['train'])))\n#logger.info(\"Done\")\n'''\ncount = 0\ncluster = {}\n\nid1=0\ntest_data ={}\nj=0\ngenre_list = ['Metal','Country','Religious','Rap','R&B','Reggae','Folk','Blues']\nfor i in range(len(train)) :\n\t#line = line.split(\"/\")\n\t#print(line[1])\n\tcount += 1\n\tif labels[i] not in cluster:\n\t\tcluster[labels[i]]= {}\n\t\tcluster[labels[i]]['count']=0\n\t\tcluster[labels[i]]['max']=0\n\t\tfor genre in genre_list:\n\t\t\tcluster[labels[i]][genre]=0\n\tcluster[labels[i]]['count'] += 1\n\tcluster[labels[i]][train[id_detail[id1]]['genre']] += 1\n\tif cluster[labels[i]][train[id_detail[id1]]['genre']] > cluster[labels[i]]['max']:\n\t\tcluster[labels[i]]['max'] = cluster[labels[i]][train[id_detail[id1]]['genre']]\n\t\tcluster[labels[i]]['genre'] = train[id_detail[id1]]['genre']\n\tid1 += 1\nprint(cluster)\nfor i in cluster:\n\tprint(cluster[i]['genre'])\nprint(count)\n\n'''\n''' test part '''\n\nconf_mat = {}\nconf_mat['R&B'] = {}\nconf_mat['Country'] = {}\nconf_mat['Rap'] = {}\nconf_mat['Folk'] = {}\nconf_mat['Blues'] = {}\nconf_mat['Reggae'] = {}\nconf_mat['Religious'] = {}\nconf_mat['Metal'] = {}\n\nfor genre in conf_mat:\n\tconf_mat[genre]['R&B'] = 0\n\tconf_mat[genre]['Country'] = 0\n\tconf_mat[genre]['Rap'] = 0\n\tconf_mat[genre]['Folk'] = 0\n\tconf_mat[genre]['Blues'] = 0\n\tconf_mat[genre]['Reggae'] = 0\n\tconf_mat[genre]['Religious'] = 0\n\tconf_mat[genre]['Metal'] = 0\n\ntotal = 0\ncorrect = 0\nn = len(test)\ni = 0\n\ntemp= np.random.rand(1,300)\nfor songId in sample_song_dataset['test']:\n\tj=0\n\tfor feature in sample_song_dataset['test'][songId]['vector']:\n\t\ttemp[0][j]=feature\n\t\tj +=1\n\t#print(temp[0],\" \",kmeans.predict(temp[0]))\n\tgenre = test[id_detail[count]]['genre']\n\tpredicted = (clf.predict(temp[0])[0]).decode('utf-8')\n\tif genre==predicted:\n\t\tcorrect += 1\n\tconf_mat[genre][predicted] +=1\n\tcount +=1\n\ttotal += 1\n\tif i%100 == 0:\n\t\tprint(i,\"/\",n,\":\",genre,\"=>\",predicted,\"\\t\",correct,\"/\",total)\n\ti += 1\n\nprint(\"\")\t\nprint(\"Accuracy :\",(correct/total))\n\npickle.dump( conf_mat, open('conf_mat_'+did+'.pickle' , 'wb') )\n\nfor genre in 
['R&B','Country','Rap','Folk','Blues','Reggae','Religious','Metal']:\n for opgenre in ['R&B','Country','Rap','Folk','Blues','Reggae','Religious','Metal']:\n print(conf_mat[genre][opgenre],end=',')\n print(\"\")\n\n" ]
[ [ "numpy.random.rand", "sklearn.svm.SVC" ] ]
edwardhuahan/little-planet
[ "260f03cac3c1a27530ddfb41e932c91478961e16" ]
[ "main.py" ]
[ "import cv2 as cv\nimport numpy as np\nfrom scipy import interpolate\nimport math\n\ndef calcOffset (i,j):\n return i - j/2\n\ndef calcR (x,y):\n return np.sqrt(calcOffset(x,width)**2 + calcOffset(y,height)**2)\n\ndef calcRho (x,y):\n return np.divide(calcR(x,y),length)\n\ndef calcTheta (x,y):\n return 2 * np.arctan(calcRho(x,y))\n\ndef calcLongitude (x,y):\n return np.arctan2(calcOffset(y,height),calcOffset(x,width))\n\nimg = cv.imread(\"sample.jpg\")\n\nif img is None: \n print(\"Could not load image\")\n\nimg = cv.normalize(img.astype('float'), None, 0.0, 1.0, cv.NORM_MINMAX)\n\n#img = cv.resize(img, (int(round((img.shape[1]*0.05))),int(round((img.shape[0]*0.05)))))\nwidth = img.shape[1]\nheight = img.shape[0]\nradians = 2*math.pi/width;\nlength = width/10\n\nx = np.arange(1,width+1)\ny = np.arange(1,height+1)\n\n[a, b] = np.meshgrid(x,y)\nlatitude = calcTheta(a,b)\nlongitude = calcLongitude(a,b) - math.pi/4\nlatitude = np.mod(latitude + math.pi, math.pi) - math.pi/2\nlongitude = np.mod(longitude + math.pi*2, math.pi*2) - math.pi\n\nXq = width/2.0-(-longitude/radians)\nYq = height/2.0-(latitude/radians)\n\noutput = np.zeros([height,width,3]);\n\n# apply transformation to red green and blue channels separately\nfor i in range(0,3):\n f = interpolate.RectBivariateSpline(x,y,img[:,:,i].T)\n output[:,:,i]= f(Xq, Yq,grid=False)\n\ncv.imshow(\"Display\",output)\n\nk = cv.waitKey(0)\n\ncv.waitKey(0) # waits until key is pressed\ncv.destroyAllWindows() # destroys window\n" ]
[ [ "numpy.zeros", "scipy.interpolate.RectBivariateSpline", "numpy.arange", "numpy.meshgrid", "numpy.mod" ] ]
LalitaSharkey/multigazeinterations
[ "059e3397850d98f2505848682c010ed267a2df38" ]
[ "image_test.py" ]
[ "import cv2\nfrom heuristic_faces import HeuristicFaceClassifier\nimport pickle\nimport pandas as pd\nimport sys\n\nclf = HeuristicFaceClassifier()\n\nhorizontal_model = pickle.load(open(\"horizontal_gaze.pkcls\", \"rb\"))\nvertical_model = pickle.load(open(\"vertical_gaze.pkcls\", \"rb\"))\n\nif len(sys.argv) >= 2:\n image_file = sys.argv[1]\nelse:\n print(\"Using Test Image\")\n image_file = \"test.jpg\"\n\nframe = cv2.imread(image_file)\nfaces = clf.detect_faces(frame)\n\nfor face in faces:\n (x, y, w, h) = face[\"face\"]\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)\n for eye in face[\"eyes\"]:\n (ex, ey, ew, eh) = eye[\"eye\"]\n ex, ey = x + ex, y + ey\n cv2.rectangle(frame, (ex, ey), (ex + ew, ey + eh), (255, 255, 0), 2)\n\n face_size = face['face'][2]\n dataframe = pd.DataFrame({\n 'r_eye_px': face['eyes'][1]['pupil'][0] / face_size,\n 'l_eye_px': face['eyes'][0]['pupil'][0] / face_size,\n 'r_eye_s': face['eyes'][1]['eye'][2] / face_size,\n 'l_eye_s': face['eyes'][0]['eye'][2] / face_size,\n 'r_eye_x': face['eyes'][1]['eye'][0] / face_size,\n 'l_eye_x': face['eyes'][0]['eye'][0] / face_size,\n 'r_eye_y': face['eyes'][1]['eye'][1]/face_size,\n 'l_eye_y': face['eyes'][0]['eye'][1]/face_size,\n 'r_eye_py': face['eyes'][1]['pupil'][1]/face_size,\n 'l_eye_py': face['eyes'][0]['pupil'][1]/face_size}, index=[0])\n\n horizontal_prediction = round(horizontal_model.predict(dataframe)[0], 1)\n vertical_prediction = round(vertical_model.predict(dataframe)[0], 1)\n label = \"H: \" + str(horizontal_prediction) \\\n + \" V: \" + str(vertical_prediction)\n cv2.putText(frame, label, (x, y), thickness=2, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(255, 255, 255))\n\ncv2.imshow('frame',frame)\ncv2.waitKey()\ncv2.destroyAllWindows()\n" ]
[ [ "pandas.DataFrame" ] ]
yuanzheng625/pylmnn
[ "ca7f340b410aab20699848182107370672eb608c" ]
[ "pylmnn/embeddings/confusion_matrix_pretty_print.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nplot a pretty confusion matrix with seaborn\nCreated on Mon Jun 25 14:17:37 2018\n@author: Wagner Cipriano - wagnerbhbr - gmail - CEFETMG / MMC\nREFerences:\n https://www.mathworks.com/help/nnet/ref/plotconfusion.html\n https://stackoverflow.com/questions/28200786/how-to-plot-scikit-learn-classification-report\n https://stackoverflow.com/questions/5821125/how-to-plot-confusion-matrix-with-string-axis-rather-than-integer-in-python\n https://www.programcreek.com/python/example/96197/seaborn.heatmap\n https://stackoverflow.com/questions/19233771/sklearn-plot-confusion-matrix-with-labels/31720054\n http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py\n\"\"\"\n\n#imports\nfrom pandas import DataFrame\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nfrom matplotlib.collections import QuadMesh\nimport seaborn as sn\n\n\ndef get_new_fig(fn, figsize=[9,9]):\n \"\"\" Init graphics \"\"\"\n fig1 = plt.figure(fn, figsize)\n ax1 = fig1.gca() #Get Current Axis\n ax1.cla() # clear existing plot\n return fig1, ax1\n#\n\ndef configcell_text_and_colors(array_df, lin, col, oText, facecolors, posi, fz, fmt, show_null_values=0):\n \"\"\"\n config cell text and colors\n and return text elements to add and to dell\n @TODO: use fmt\n \"\"\"\n text_add = []; text_del = [];\n cell_val = array_df[lin][col]\n tot_all = array_df[-1][-1]\n per = (float(cell_val) / tot_all) * 100\n curr_column = array_df[:,col]\n ccl = len(curr_column)\n\n #last line and/or last column\n if(col == (ccl - 1)) or (lin == (ccl - 1)):\n #tots and percents\n if(cell_val != 0):\n if(col == ccl - 1) and (lin == ccl - 1):\n tot_rig = 0\n for i in range(array_df.shape[0] - 1):\n tot_rig += array_df[i][i]\n per_ok = (float(tot_rig) / cell_val) * 100\n elif(col == ccl - 1):\n tot_rig = array_df[lin][lin]\n per_ok = (float(tot_rig) / cell_val) * 100\n elif(lin == ccl - 1):\n tot_rig = array_df[col][col]\n per_ok = (float(tot_rig) / cell_val) * 100\n per_err = 100 - per_ok\n else:\n per_ok = per_err = 0\n\n per_ok_s = ['%.2f%%'%(per_ok), '100%'] [per_ok == 100]\n\n #text to DEL\n text_del.append(oText)\n\n #text to ADD\n font_prop = fm.FontProperties(weight='bold', size=fz)\n text_kwargs = dict(color='w', ha=\"center\", va=\"center\", gid='sum', fontproperties=font_prop)\n lis_txt = ['%d'%(cell_val), per_ok_s, '%.2f%%'%(per_err)]\n lis_kwa = [text_kwargs]\n dic = text_kwargs.copy(); dic['color'] = 'g'; lis_kwa.append(dic);\n dic = text_kwargs.copy(); dic['color'] = 'r'; lis_kwa.append(dic);\n lis_pos = [(oText._x, oText._y-0.3), (oText._x, oText._y), (oText._x, oText._y+0.3)]\n for i in range(len(lis_txt)):\n newText = dict(x=lis_pos[i][0], y=lis_pos[i][1], text=lis_txt[i], kw=lis_kwa[i])\n #print 'lin: %s, col: %s, newText: %s' %(lin, col, newText)\n text_add.append(newText)\n #print '\\n'\n\n #set background color for sum cells (last line and last column)\n carr = [0.27, 0.30, 0.27, 1.0]\n if(col == ccl - 1) and (lin == ccl - 1):\n carr = [0.17, 0.20, 0.17, 1.0]\n facecolors[posi] = carr\n\n else:\n if(per > 0):\n txt = '%s\\n%.2f%%' %(cell_val, per)\n else:\n if(show_null_values == 0):\n txt = ''\n elif(show_null_values == 1):\n txt = '0'\n else:\n txt = '0\\n0.0%'\n oText.set_text(txt)\n\n #main diagonal\n if(col == lin):\n #set color of the textin the diagonal to white\n oText.set_color('w')\n # set background color in the diagonal to blue\n facecolors[posi] = [0.35, 
0.8, 0.55, 1.0]\n else:\n oText.set_color('r')\n\n return text_add, text_del\n#\n\ndef insert_totals(df_cm):\n \"\"\" insert total column and line (the last ones) \"\"\"\n sum_col = []\n for c in df_cm.columns:\n sum_col.append( df_cm[c].sum() )\n sum_lin = []\n for item_line in df_cm.iterrows():\n sum_lin.append( item_line[1].sum() )\n df_cm['sum_lin'] = sum_lin\n sum_col.append(np.sum(sum_lin))\n df_cm.loc['sum_col'] = sum_col\n #print ('\\ndf_cm:\\n', df_cm, '\\n\\b\\n')\n#\n\ndef pretty_plot_confusion_matrix(df_cm, annot=True, cmap=\"Oranges\", fmt='.2f', fz=11,\n lw=0.5, cbar=False, figsize=[8,8], show_null_values=0, pred_val_axis='y'):\n \"\"\"\n print conf matrix with default layout (like matlab)\n params:\n df_cm dataframe (pandas) without totals\n annot print text in each cell\n cmap Oranges,Oranges_r,YlGnBu,Blues,RdBu, ... see:\n fz fontsize\n lw linewidth\n pred_val_axis where to show the prediction values (x or y axis)\n 'col' or 'x': show predicted values in columns (x axis) instead lines\n 'lin' or 'y': show predicted values in lines (y axis)\n \"\"\"\n if(pred_val_axis in ('col', 'x')):\n xlbl = 'Predicted'\n ylbl = 'Actual'\n else:\n xlbl = 'Actual'\n ylbl = 'Predicted'\n df_cm = df_cm.T\n\n # create \"Total\" column\n insert_totals(df_cm)\n\n #this is for print allways in the same window\n fig, ax1 = get_new_fig('Conf matrix default', figsize)\n\n #thanks for seaborn\n ax = sn.heatmap(df_cm, annot=annot, annot_kws={\"size\": fz}, linewidths=lw, ax=ax1,\n cbar=cbar, cmap=cmap, linecolor='w', fmt=fmt)\n\n #set ticklabels rotation\n ax.set_xticklabels(ax.get_xticklabels(), rotation = 45, fontsize = 10)\n ax.set_yticklabels(ax.get_yticklabels(), rotation = 25, fontsize = 10)\n\n # Turn off all the ticks\n for t in ax.xaxis.get_major_ticks():\n t.tick1On = False\n t.tick2On = False\n for t in ax.yaxis.get_major_ticks():\n t.tick1On = False\n t.tick2On = False\n\n #face colors list\n quadmesh = ax.findobj(QuadMesh)[0]\n facecolors = quadmesh.get_facecolors()\n\n #iter in text elements\n array_df = np.array( df_cm.to_records(index=False).tolist() )\n text_add = []; text_del = [];\n posi = -1 #from left to right, bottom to top.\n for t in ax.collections[0].axes.texts: #ax.texts:\n pos = np.array( t.get_position()) - [0.5,0.5]\n lin = int(pos[1]); col = int(pos[0]);\n posi += 1\n #print ('>>> pos: %s, posi: %s, val: %s, txt: %s' %(pos, posi, array_df[lin][col], t.get_text()))\n\n #set text\n txt_res = configcell_text_and_colors(array_df, lin, col, t, facecolors, posi, fz, fmt, show_null_values)\n\n text_add.extend(txt_res[0])\n text_del.extend(txt_res[1])\n\n #remove the old ones\n for item in text_del:\n item.remove()\n #append the new ones\n for item in text_add:\n ax.text(item['x'], item['y'], item['text'], **item['kw'])\n\n #titles and legends\n ax.set_title('Confusion matrix')\n ax.set_xlabel(xlbl)\n ax.set_ylabel(ylbl)\n plt.tight_layout() #set layout slim\n plt.show()\n#\n\ndef plot_confusion_matrix_from_data(y_test, predictions, columns=None, annot=True, cmap=\"Oranges\",\n fmt='.2f', fz=11, lw=0.5, cbar=False, figsize=[8,8], show_null_values=0, pred_val_axis='lin'):\n \"\"\"\n plot confusion matrix function with y_test (actual values) and predictions (predic),\n whitout a confusion matrix yet\n \"\"\"\n from sklearn.metrics import confusion_matrix\n from pandas import DataFrame\n\n #data\n if(not columns):\n #labels axis integer:\n ##columns = range(1, len(np.unique(y_test))+1)\n #labels axis string:\n from string import ascii_uppercase\n columns = ['class %s' %(i) for i 
in list(ascii_uppercase)[0:len(np.unique(y_test))]]\n\n confm = confusion_matrix(y_test, predictions)\n cmap = 'Oranges';\n #fz = 11;\n #figsize=[9,9];\n show_null_values = 2\n df_cm = DataFrame(confm, index=columns, columns=columns)\n pretty_plot_confusion_matrix(df_cm, fz=fz, cmap=cmap, figsize=figsize, show_null_values=show_null_values, pred_val_axis=pred_val_axis)\n#\n\n\n\n#\n#TEST functions\n#\ndef _test_cm():\n #test function with confusion matrix done\n array = np.array( [[13, 0, 1, 0, 2, 0],\n [ 0, 50, 2, 0, 10, 0],\n [ 0, 13, 16, 0, 0, 3],\n [ 0, 0, 0, 13, 1, 0],\n [ 0, 40, 0, 1, 15, 0],\n [ 0, 0, 0, 0, 0, 20]])\n #get pandas dataframe\n df_cm = DataFrame(array, index=range(1,7), columns=range(1,7))\n #colormap: see this and choose your more dear\n cmap = 'PuRd'\n pretty_plot_confusion_matrix(df_cm, cmap=cmap)\n#\n\ndef _test_data_class():\n \"\"\" test function with y_test (actual values) and predictions (predic) \"\"\"\n #data\n y_test = np.array([1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5])\n predic = np.array([1,2,4,3,5, 1,2,4,3,5, 1,2,3,4,4, 1,4,3,4,5, 1,2,4,4,5, 1,2,4,4,5, 1,2,4,4,5, 1,2,4,4,5, 1,2,3,3,5, 1,2,3,3,5, 1,2,3,4,4, 1,2,3,4,1, 1,2,3,4,1, 1,2,3,4,1, 1,2,4,4,5, 1,2,4,4,5, 1,2,4,4,5, 1,2,4,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5])\n \"\"\"\n Examples to validate output (confusion matrix plot)\n actual: 5 and prediction 1 >> 3\n actual: 2 and prediction 4 >> 1\n actual: 3 and prediction 4 >> 10\n \"\"\"\n columns = []\n annot = True;\n cmap = 'Oranges';\n fmt = '.2f'\n lw = 0.5\n cbar = False\n show_null_values = 2\n pred_val_axis = 'y'\n #size::\n fz = 12;\n figsize = [9,9];\n if(len(y_test) > 10):\n fz=9; figsize=[14,14];\n plot_confusion_matrix_from_data(y_test, predic, columns,\n annot, cmap, fmt, fz, lw, cbar, figsize, show_null_values, pred_val_axis)\n#\n\n\n#\n#MAIN function\n#\nif(__name__ == '__main__'):\n print('__main__')\n print('_test_cm: test function with confusion matrix done\\nand pause')\n _test_cm()\n plt.pause(5)\n print('_test_data_class: test function with y_test (actual values) and predictions (predic)')\n _test_data_class()\n\n" ]
[ [ "sklearn.metrics.confusion_matrix", "numpy.array", "matplotlib.font_manager.FontProperties", "pandas.DataFrame", "numpy.sum", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.pause", "matplotlib.pyplot.show", "numpy.unique" ] ]
aminkhod/finance
[ "d9bbd7eb5cde9a754d32f014d1c72eb4fe7d44b9" ]
[ "session3.py" ]
[ "import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import datasets\n# from sklearn import svm\n# from sklearn.model_selection import cross_val_score\nfrom sklearn.linear_model import LinearRegression\nfrom matplotlib import pyplot as plt\n\n\n\n################################################\n############ Cross Validation ###########\n################################################\n\n########### train_test_split\n\nfrom sklearn.model_selection import train_test_split\nboston = datasets.load_boston()\nprint(boston.data.shape, boston.target.shape)\n\nX_train, X_test, y_train, y_test = train_test_split(\n boston.data, boston.target, test_size=0.1, random_state=0)\n\nprint(X_train.shape, y_train.shape)\nprint(X_test.shape, y_test.shape)\n\nregr = LinearRegression()\nregr.fit(X_train,y_train)\nprint(regr.score(X_train,y_train))\npredictions= regr.predict(X_test)\n\n\n##plot\nplt.scatter(y_test, predictions)\nplt.xlabel(\"True Values\")\nplt.ylabel(\"Predictions\")\n\n# model = svm.SVC(kernel='linear', C=1)\n# scores = cross_val_score(model , boston.data, boston.target, cv=10)\n# print(scores)\n\n\n############ KFold\nfrom sklearn.model_selection import KFold\n# X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])\n# y = np.array([1, 2, 3, 4])\nkf = KFold(n_splits=5)\nkfsplit = kf.get_n_splits(boston.data)\n\nprint(kf)\n\nkfoldregr = LinearRegression()\n# kfoldregr.fit(X_train,y_train)\n# print(kfoldregr.score(X_train,y_train))\n# print(kfoldregr.predict(X_test))\nKFoldScore=[]\n\nfor train_index, test_index in kf.split(boston.data):\n print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n X_train, X_test = boston.data[train_index], boston.data[test_index]\n y_train, y_test = boston.target[train_index], boston.target[test_index]\n kfoldregr.fit(X_train, y_train)\n KFoldScore.append(kfoldregr.score(X_train, y_train))\n\nprint(KFoldScore)\n\n######## Leave One Out Cross Validation\nfrom sklearn.model_selection import LeaveOneOut\nX = np.array([[1, 2], [3, 4]])\ny = np.array([1, 2])\nloo = LeaveOneOut()\nloo.get_n_splits(X)\n\n\nfor train_index, test_index in loo.split(X):\n print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n print(X_train, X_test, y_train, y_test)\n\n\n########## Little sample of cross validation\nfrom sklearn.model_selection import cross_val_score, cross_val_predict\nfrom sklearn import metrics\nlm = LinearRegression()\n\nscores = cross_val_score(lm, boston.data, boston.target, cv=6)\nprint(\"Cross-validated scores:\", scores)\n\npredictions = cross_val_predict(lm,boston.data, boston.target, cv=6)\nplt.scatter(boston.target, predictions)\n\nr2_score = metrics.r2_score(boston.target, predictions)\nprint(\"Cross-Predicted R^2\",r2_score)\n\n\n##########################################\n############## preprocessing\n##########################################\nfrom sklearn import preprocessing\nX_train, X_test, y_train, y_test = train_test_split(\n boston.data, boston.target, test_size=0.4, random_state=0)\n\nscaler = preprocessing.StandardScaler().fit(X_train)\nX_train_transformed = scaler.transform(X_train)\nclf = regr.fit(X_train_transformed, y_train)\nX_test_transformed = scaler.transform(X_test)\nprint(clf.score(X_train_transformed,y_train))\nprint(regr.predict(X_test_transformed))\n\n5+2" ]
[ [ "numpy.array", "sklearn.model_selection.LeaveOneOut", "sklearn.linear_model.LinearRegression", "sklearn.preprocessing.StandardScaler", "matplotlib.pyplot.xlabel", "sklearn.metrics.r2_score", "sklearn.model_selection.KFold", "sklearn.model_selection.cross_val_score", "matplotlib.pyplot.ylabel", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.scatter", "sklearn.model_selection.cross_val_predict", "sklearn.datasets.load_boston" ] ]
kencan7749/vq-vae-2-pytorch
[ "af9fbf5fab6446123274dafaaae03406ed7d5955" ]
[ "vqvae_3d_v2.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nimport distributed as dist_fn\n\n\n# Copyright 2018 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\n# Borrowed from https://github.com/deepmind/sonnet and ported it to PyTorch\n\n\nclass Quantize(nn.Module):\n def __init__(self, dim, n_embed, decay=0.99, eps=1e-5):\n super().__init__()\n\n self.dim = dim\n self.n_embed = n_embed\n self.decay = decay\n self.eps = eps\n\n embed = torch.randn(dim, n_embed)\n self.register_buffer(\"embed\", embed)\n self.register_buffer(\"cluster_size\", torch.zeros(n_embed))\n self.register_buffer(\"embed_avg\", embed.clone())\n\n def forward(self, input):\n flatten = input.reshape(-1, self.dim)\n dist = (\n flatten.pow(2).sum(1, keepdim=True)\n - 2 * flatten @ self.embed\n + self.embed.pow(2).sum(0, keepdim=True)\n )\n _, embed_ind = (-dist).max(1)\n embed_onehot = F.one_hot(embed_ind, self.n_embed).type(flatten.dtype)\n embed_ind = embed_ind.view(*input.shape[:-1])\n quantize = self.embed_code(embed_ind)\n\n if self.training:\n embed_onehot_sum = embed_onehot.sum(0)\n embed_sum = flatten.transpose(0, 1) @ embed_onehot\n\n dist_fn.all_reduce(embed_onehot_sum)\n dist_fn.all_reduce(embed_sum)\n\n self.cluster_size.data.mul_(self.decay).add_(\n embed_onehot_sum, alpha=1 - self.decay\n )\n self.embed_avg.data.mul_(self.decay).add_(embed_sum, alpha=1 - self.decay)\n n = self.cluster_size.sum()\n cluster_size = (\n (self.cluster_size + self.eps) / (n + self.n_embed * self.eps) * n\n )\n embed_normalized = self.embed_avg / cluster_size.unsqueeze(0)\n self.embed.data.copy_(embed_normalized)\n\n diff = (quantize.detach() - input).pow(2).mean()\n quantize = input + (quantize - input).detach()\n\n return quantize, diff, embed_ind\n\n def embed_code(self, embed_id):\n return F.embedding(embed_id, self.embed.transpose(0, 1))\n\n\nclass ResBlock(nn.Module):\n def __init__(self, in_channel, channel):\n super().__init__()\n\n self.conv = nn.Sequential(\n nn.ReLU(),\n nn.Conv3d(in_channel, channel, 3, padding=(1,1,1)),\n nn.ReLU(inplace=True),\n nn.Conv3d(channel, in_channel, 1),\n )\n\n def forward(self, input):\n out = self.conv(input)\n out += input\n\n return out\nclass ResBlock_2D(nn.Module):\n def __init__(self, in_channel, channel):\n super().__init__()\n\n self.conv = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(in_channel, channel, 3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel, in_channel, 1),\n )\n\n def forward(self, input):\n out = self.conv(input)\n out += input\n\n return out\n\nclass Squeeze(nn.Module):\n def __init__(self, *args):\n super(Squeeze, self).__init__()\n def forward(self, x):\n x = torch.squeeze(x, 2)\n return x\n\nclass Unsqueeze(nn.Module):\n def __init__(self, *args):\n super(Unsqueeze, self).__init__()\n def forward(self, x):\n x = torch.unsqueeze(x, 2)\n return x\n\nclass Encoder(nn.Module):\n def __init__(self, in_channel, channel, 
n_res_block, n_res_channel, stride):\n super().__init__()\n\n if stride == 4:\n blocks = [\n nn.Conv3d(in_channel, channel // 2, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv3d(channel // 2, channel, 4, stride=(2,2,2), padding=1),\n nn.ReLU(inplace=True),\n nn.Conv3d(channel, channel, (3,3,3),stride=(2,1,1), padding=1),\n nn.ReLU(inplace=True),\n nn.Conv3d(channel, channel, (3,3,3),stride=(2,1,1), padding=1),\n nn.ReLU(inplace=True),\n nn.Conv3d(channel, channel, (3,3,3), padding=1),\n \n ]\n for i in range(n_res_block):\n blocks.append(ResBlock(channel, n_res_channel))\n\n blocks.append(nn.ReLU(inplace=True))\n\n elif stride == 2:\n #blocks = [\n # nn.Conv3d(in_channel, channel // 2, 4, stride=2, padding=1),\n # nn.ReLU(inplace=True),\n # nn.Conv3d(channel // 2, channel, 3, padding=1),\n #]\n blocks = [\n Squeeze(),\n nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel // 2, channel, 3, padding=1),\n ]\n for i in range(n_res_block):\n blocks.append(ResBlock_2D(channel, n_res_channel))\n\n blocks.append(nn.ReLU(inplace=True))\n blocks.append(Unsqueeze())\n\n self.blocks = nn.Sequential(*blocks)\n\n def forward(self, input):\n return self.blocks(input)\n\n\nclass Decoder(nn.Module):\n def __init__(\n self, in_channel, out_channel, channel, n_res_block, n_res_channel, stride\n ):\n super().__init__()\n\n blocks = [nn.Conv3d(in_channel, channel, 3, padding=(1,1,1))]\n\n for i in range(n_res_block):\n blocks.append(ResBlock(channel, n_res_channel))\n\n blocks.append(nn.ReLU(inplace=True))\n\n if stride == 4:\n blocks.extend(\n [\n nn.ConvTranspose3d(channel, channel // 2, 4, stride=2, padding=(1,1,1)),\n nn.ReLU(inplace=True),\n nn.ConvTranspose3d(channel //2, channel // 2, (4,1,1), stride=(2,1,1), padding=(1,0,0)),\n nn.ReLU(inplace=True),\n nn.ConvTranspose3d(channel //2, channel // 2, (4,1,1), stride=(2,1,1), padding=(1,0,0)),\n nn.ReLU(inplace=True),\n nn.ConvTranspose3d(channel //2, channel // 2, (4,1,1), stride=(2,1,1), padding=(1,0,0)),\n nn.ReLU(inplace=True),\n nn.ConvTranspose3d(\n channel // 2, out_channel, (3,4,4), stride=(1,2,2), padding=(1,1,1)\n ),\n ]\n )\n\n elif stride == 2:\n blocks.append(\n nn.ConvTranspose3d(channel, out_channel, (3,4,4), stride=(1,2,2), padding=(1,1,1))\n )\n\n self.blocks = nn.Sequential(*blocks)\n\n def forward(self, input):\n return self.blocks(input)\n\n\nclass VQVAE(nn.Module):\n def __init__(\n self,\n in_channel=3,\n channel=128,\n n_res_block=2,\n n_res_channel=32,\n embed_dim=64,\n n_embed=512,\n decay=0.99,\n ):\n super().__init__()\n\n self.enc_b = Encoder(in_channel, channel, n_res_block, n_res_channel, stride=4)\n self.enc_t = Encoder(channel, channel, n_res_block, n_res_channel, stride=2)\n self.quantize_conv_t = nn.Conv3d(channel, embed_dim, 1)\n self.quantize_t = Quantize(embed_dim, n_embed)\n self.dec_t = Decoder(\n embed_dim, embed_dim, channel, n_res_block, n_res_channel, stride=2\n )\n self.quantize_conv_b = nn.Conv3d(embed_dim + channel, embed_dim, 1)\n self.quantize_b = Quantize(embed_dim, n_embed)\n self.upsample_t = nn.ConvTranspose3d(\n embed_dim, embed_dim, (3,4,4), stride=(1,2,2), padding=1\n )\n self.dec = Decoder(\n embed_dim + embed_dim,\n in_channel,\n channel,\n n_res_block,\n n_res_channel,\n stride=4,\n )\n\n def forward(self, input):\n quant_t, quant_b, diff, _, _ = self.encode(input)\n dec = self.decode(quant_t, quant_b)\n\n return dec, diff\n\n def encode(self, input):\n enc_b = self.enc_b(input)\n print(enc_b.shape)\n enc_t = self.enc_t(enc_b)\n\n 
quant_t = self.quantize_conv_t(enc_t).permute(0, 2, 3, 4, 1)\n quant_t, diff_t, id_t = self.quantize_t(quant_t)\n quant_t = quant_t.permute(0, 4,1, 2, 3)\n diff_t = diff_t.unsqueeze(0)\n\n dec_t = self.dec_t(quant_t)\n enc_b = torch.cat([dec_t, enc_b], 1)\n\n quant_b = self.quantize_conv_b(enc_b).permute(0, 2, 3, 4,1)\n quant_b, diff_b, id_b = self.quantize_b(quant_b)\n quant_b = quant_b.permute(0, 4, 1, 2, 3)\n diff_b = diff_b.unsqueeze(0)\n\n return quant_t, quant_b, diff_t + diff_b, id_t, id_b\n\n def decode(self, quant_t, quant_b):\n upsample_t = self.upsample_t(quant_t)\n quant = torch.cat([upsample_t, quant_b], 1)\n dec = self.dec(quant)\n\n return dec\n\n def decode_code(self, code_t, code_b):\n quant_t = self.quantize_t.embed_code(code_t)\n quant_t = quant_t.permute(0, 4, 1, 2, 3)\n quant_b = self.quantize_b.embed_code(code_b)\n quant_b = quant_b.permute(0, 4, 1, 2, 3)\n\n dec = self.decode(quant_t, quant_b)\n\n return dec\n\n\nif __name__ == '__main__':\n print('Check')\n\n model = VQVAE()\n print(model)\n\n #rand\n input = torch.randn(1, 3, 16, 256, 256)\n\n quant_t, quant_b, diff, _, _ = model.encode(input)\n print(quant_t.shape)\n print(quant_b.shape)\n\n out = model(input)\n print('output')\n print(out[0].shape)\n\n \n\n #rand\n input_quant = torch.randn(1, 128, 2, 64, 64)\n\n out = model.dec(input_quant)\n #print(out)\n print(out[0].detach().numpy().shape)" ]
[ [ "torch.zeros", "torch.cat", "torch.nn.functional.one_hot", "torch.nn.ConvTranspose3d", "torch.nn.Sequential", "torch.unsqueeze", "torch.nn.ReLU", "torch.squeeze", "torch.nn.Conv2d", "torch.nn.Conv3d", "torch.randn" ] ]
ChenyangTang1/bark
[ "c4215be6464c249639b8c7b390684bd13100b41e" ]
[ "modules/models/tests/py_behavior_model_test.py" ]
[ "# Copyright (c) 2019 fortiss GmbH\n#\n# This software is released under the MIT License.\n# https://opensource.org/licenses/MIT\n\n\nimport unittest\nimport os\nimport numpy as np\nfrom modules.runtime.scenario.scenario_generation.deterministic \\\n import DeterministicScenarioGeneration\nfrom modules.runtime.scenario.scenario_generation.scenario_generation \\\n import ScenarioGeneration\nfrom bark.world.goal_definition import GoalDefinition, GoalDefinitionPolygon\nfrom bark.geometry import *\nfrom bark.world import World\nfrom modules.runtime.commons.parameters import ParameterServer\nfrom modules.runtime.runtime import Runtime\nfrom modules.runtime.viewer.matplotlib_viewer import MPViewer\nfrom bark.models.behavior import BehaviorModel, DynamicBehaviorModel\nfrom bark.models.dynamic import SingleTrackModel\n\n\nclass PythonBehaviorModelWrapper(BehaviorModel):\n \"\"\"Dummy Python behavior model\n \"\"\"\n def __init__(self,\n dynamic_model = None,\n params = None):\n # DynamicBehaviorModel.__init__(self, dynamic_model, params)\n BehaviorModel.__init__(self, params)\n self._dynamic_model = dynamic_model\n self._params = params\n\n def Plan(self, delta_time, world):\n super(PythonBehaviorModelWrapper, self).SetLastAction(\n np.array([2., 1.], dtype=np.float32))\n # print(super(PythonBehaviorModelWrapper, self).GetLastAction())\n trajectory = np.array([[0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0.]], dtype=np.float32)\n super(PythonBehaviorModelWrapper, self).SetLastTrajectory(trajectory)\n return trajectory\n\n def Clone(self):\n return self\n\n\nclass PythonBehaviorModelWrapperInheritance(BehaviorModel):\n \"\"\"Dummy Python behavior model\n \"\"\"\n def __init__(self,\n dynamic_model = None,\n params = None):\n BehaviorModel.__init__(\n self, params)\n self._dynamic_behavior_model = DynamicBehaviorModel(dynamic_model, params)\n \n def Plan(self, delta_time, world):\n self._dynamic_behavior_model.SetLastAction(\n np.array([2., 1.], dtype=np.float32))\n trajectory = self._dynamic_behavior_model.Plan(delta_time, world)\n super(PythonBehaviorModelWrapperInheritance, self).SetLastTrajectory(trajectory)\n return trajectory\n\n def Clone(self):\n return self\n\n\nclass PyBehaviorModelTests(unittest.TestCase):\n def test_python_model(self):\n param_server = ParameterServer(\n filename=\"modules/runtime/tests/data/deterministic_scenario.json\")\n scenario_generation = DeterministicScenarioGeneration(num_scenarios=3,\n random_seed=0,\n params=param_server)\n viewer = MPViewer(params=param_server,\n follow_agent_id=False,\n use_world_bounds=True)\n scenario, idx = scenario_generation.get_next_scenario()\n world = scenario.get_world_state()\n single_track_model = SingleTrackModel(param_server)\n behavior_model = PythonBehaviorModelWrapper(\n single_track_model, param_server)\n world.GetAgent(0).behavior_model = behavior_model\n world.GetAgent(0).behavior_model.SetLastAction(\n np.array([1., 1.], dtype=np.float32))\n world.Step(0.2)\n\n def test_python_model_inheritance(self):\n param_server = ParameterServer(\n filename=\"modules/runtime/tests/data/deterministic_scenario.json\")\n scenario_generation = DeterministicScenarioGeneration(num_scenarios=3,\n random_seed=0,\n params=param_server)\n viewer = MPViewer(params=param_server,\n follow_agent_id=False,\n use_world_bounds=True)\n scenario, idx = scenario_generation.get_next_scenario()\n world = scenario.get_world_state()\n single_track_model = SingleTrackModel(param_server)\n\n behavior_model = PythonBehaviorModelWrapperInheritance(\n 
single_track_model, param_server)\n \n world.GetAgent(0).behavior_model = behavior_model\n world.GetAgent(0).behavior_model.SetLastAction(\n np.array([1., 1.], dtype=np.float32))\n world.Step(0.2)\n\n\nif __name__ == '__main__':\n unittest.main()" ]
[ [ "numpy.array" ] ]
Yottaxx/T-LSTM
[ "92618d8c3ee2418b194a2e1592512548da955b77" ]
[ "data_utils/load_uds.py" ]
[ "from data_utils import DataStruct\nfrom tqdm import trange\nimport torch\nfrom data_utils.save_uds_utils import data_load\nimport numpy as np\n\n\ndef S_get_g_data_loader_split():\n text_list, edge_index_list, data_confidence, test_mask, dev_mask, train_mask, data_trigger_index = data_load()\n train_list = []\n dev_list = []\n test_list = []\n for i in trange(len(data_confidence)):\n x = text_list[i]\n # print(\"----------------\")\n # print(\"edge\")\n # print(edge_index_list[i][0])\n # print(edge_index_list[i][1])\n # print(x)\n\n edge = np.stack([edge_index_list[i][0], edge_index_list[i][1]], 0)\n #\n # print(len(x))\n edge_index = torch.sparse_coo_tensor(torch.tensor(edge), torch.ones(len(edge[0])),\n (len(x), len(x))).to_dense()\n eep = torch.tensor(data_confidence[i]).unsqueeze(0)\n # print(eep)\n trigger = [\"uds\"]\n trigger_index = torch.tensor(np.array(data_trigger_index[i], dtype=np.int)).unsqueeze(0)\n # print(x[data_trigger_index[i]])\n if test_mask[i] :\n data = DataStruct(tuple(text_list[i]), edge_index.numpy().tolist(),\n tuple(trigger), tuple(trigger_index.numpy().tolist()),\n tuple(eep.numpy().tolist()), tuple([len(test_list)]))\n test_list.append(data)\n if train_mask[i]:\n data = DataStruct(tuple(text_list[i]), edge_index.numpy().tolist(),\n tuple(trigger), tuple(trigger_index.numpy().tolist()),\n tuple(eep.numpy().tolist()), tuple([len(train_list)]))\n train_list.append(data)\n if dev_mask[i] :\n data = DataStruct(tuple(text_list[i]), edge_index.numpy().tolist(),\n tuple(trigger), tuple(trigger_index.numpy().tolist()),\n tuple(eep.numpy().tolist()), tuple([len(dev_list)]))\n dev_list.append(data)\n\n return train_list, dev_list, test_list\n\ndef S_get_g_data_loader_split_xlnet():\n text_list, text_list_emb,edge_index_list, data_confidence, test_mask, dev_mask, train_mask, data_trigger_index = data_load()\n train_list = []\n dev_list = []\n test_list = []\n for i in trange(len(data_confidence)):\n x = text_list[i]\n x_emb = torch.tensor(text_list_emb[i])\n # print(\"----------------\")\n # print(\"edge\")\n # print(edge_index_list[i][0])\n # print(edge_index_list[i][1])\n # print(x)\n\n edge = np.stack([edge_index_list[i][0], edge_index_list[i][1]], 0)\n #\n # print(len(x))\n edge_index = torch.sparse_coo_tensor(torch.tensor(edge), torch.ones(len(edge[0])),\n (len(x), len(x))).to_dense()\n eep = torch.tensor(data_confidence[i]).unsqueeze(0)\n # print(eep)\n trigger = [\"uds\"]\n trigger_index = torch.tensor(np.array(data_trigger_index[i], dtype=np.int)).unsqueeze(0)\n # print(x[data_trigger_index[i]])\n if test_mask[i] :\n data = DataStruct(tuple(text_list[i]), x_emb,edge_index.numpy().tolist(),\n tuple(trigger), tuple(trigger_index.numpy().tolist()),\n tuple([eep.numpy().tolist()]), tuple([len(test_list)]))\n test_list.append(data)\n if train_mask[i]:\n data = DataStruct(tuple(text_list[i]),x_emb ,edge_index.numpy().tolist(),\n tuple(trigger), tuple(trigger_index.numpy().tolist()),\n tuple([eep.numpy().tolist()]), tuple([len(train_list)]))\n train_list.append(data)\n if dev_mask[i] :\n data = DataStruct(tuple(text_list[i]),x_emb ,edge_index.numpy().tolist(),\n tuple(trigger), tuple(trigger_index.numpy().tolist()),\n tuple([eep.numpy().tolist()]), tuple([len(dev_list)]))\n dev_list.append(data)\n\n return train_list, dev_list, test_list" ]
[ [ "numpy.array", "numpy.stack", "torch.tensor" ] ]
lei940324/Quantile
[ "5aab668ebc022b8fa12fc90fa4a79972e975e96f" ]
[ "func.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 30 20:15:06 2020\n\n@author: Administrator\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom queue import Queue\nfrom scipy import interpolate\nimport statsmodels.formula.api as smf\nimport math\nimport logging\nimport threading\nimport re\n\nEXIT = False # 线程退出信号\n\n\nclass Quantile_Granger():\n \"\"\"\n 创建分位数Granger因果检验类\n \"\"\"\n def set_range(self, start, end, num):\n '''\n 生成估计区间\n\n Parameters\n ----------\n start : 区间起点\n end : 区间终点\n num : 个数\n\n Returns\n -------\n qrange : 估计区间列表\n qr_name : 构建区间名\n\n '''\n qrange = np.linspace(start, end, num)\n T = len(qrange)\n qr_name = [\n f'[{qrange[i]:.2f}-{qrange[i+1]:.2f}]' for i in range(T - 1)\n ]\n return qrange, qr_name\n\n def pattern(self, df, sign):\n '''\n 得到待估计数据字典,受pattern模式影响\n\n Parameters\n ----------\n df : 原始数据\n sign : 模式选择,包括1,2,3\n\n Returns\n -------\n DataList : 待估计数据字典\n '''\n DataList = {}\n if sign == '单因素对各市场':\n X = df.iloc[:, 0]\n Xname = df.columns[0]\n for i in range(1, df.shape[1]):\n Y = df.iloc[:, i]\n Yname = df.columns[i]\n DataList[f'{Xname}——>{Yname}'] = pd.DataFrame([X, Y]).T\n elif sign == '相互影响':\n for i in range(df.shape[1]):\n for j in range(df.shape[1]):\n if i != j:\n X = df.iloc[:, i]\n Xname = df.columns.values[i]\n Y = df.iloc[:, j]\n Yname = df.columns.values[j]\n DataList[f'{Xname}——>{Yname}'] = pd.DataFrame([X, Y]).T\n elif sign == '多因素对单市场':\n Y = df.iloc[:, df.shape[1] - 1]\n Yname = df.columns.values[df.shape[1] - 1]\n for i in range(df.shape[1] - 1):\n X = df.iloc[:, i]\n Xname = df.columns.values[i]\n DataList[f'{Xname}——>{Yname}'] = pd.DataFrame([X, Y]).T\n return DataList\n\n def lag_list(self, Y, X, p=1, q=1):\n '''\n 构造待估计滞后序列函数\n\n Parameters\n ----------\n Y : 被估计变量\n X : 估计变量\n p : X滞后阶数,默认为1\n q : Y滞后阶数,默认为1\n\n Returns\n -------\n data : 滞后序列\n\n '''\n data = pd.DataFrame()\n T = len(Y)\n data['y'] = list(Y[max(p, q):T])\n for i in range(1, p + 1):\n name = f'y_{i}'\n data[name] = list(Y[max(p, q) - i:T - i])\n for i in range(1, q + 1):\n name = f'x_{i}'\n data[name] = list(X[max(p, q) - i:T - i])\n return data\n\n def qreg(self, data, Q):\n '''\n 构造待估计模型函数\n\n Parameters\n ----------\n data : 滞后序列\n Q : 分位点\n\n Returns\n -------\n res : 模型估计结果\n\n '''\n for i, value in enumerate(data):\n if i == 0:\n model = f'{value} ~'\n else:\n model += f' + {value}'\n model = model.replace('~ +', '~')\n mod = smf.quantreg(model, data)\n res = mod.fit(q=Q)\n # print(res.summary())\n return res\n\n def calculate(self,\n DataList,\n qrange,\n qr_name,\n max_lag,\n info_type,\n WaldNum,\n sign_num,\n AicNum,\n objects=[logging.info]):\n \"\"\"\n 循环计算得到sup_wald值\n\n Parameters\n ----------\n DataList : 待估计数据字典\n qrange : 估计区间列表\n qr_name : 构建区间名\n max_lag : 最大估计阶数\n info_type : 信息准则类型:AIC或者BIC\n WaldNum : 估计wald值个数,默认1000\n sign_num : 有效数字\n AicNum : 滞后阶数估计数\n objects :输出信息载体,列表形式\n\n Returns\n -------\n results : 各区间sup_wald值\n\n \"\"\"\n global EXIT\n # 日志设定\n if logging.info in objects:\n logging.basicConfig(\n filename=r'.\\运行结果\\运行细节.txt',\n level=logging.INFO,\n format='%(asctime)s - %(levelname)s - %(message)s')\n for object in objects:\n object('程序开始运行')\n\n results = pd.DataFrame()\n for key in DataList:\n relation = key\n X = DataList[key].iloc[:, 0]\n Y = DataList[key].iloc[:, 1]\n\n # 构建滞后阶数队列\n LagQueue = Queue()\n Qspaces = []\n for i in range(AicNum):\n Qspaces.append(i * 0.9 / (AicNum - 1) + 0.05)\n for Q in Qspaces:\n for p in range(1, max_lag + 1):\n for q in range(1, max_lag + 1):\n LagQueue.put([Q, p, q])\n\n # 
多线程计算信息准则,选取最优滞后阶数\n self.AicDict = {}\n\n def info_cri():\n while not EXIT:\n try:\n Q, p, q = LagQueue.get(block=False)\n except:\n break\n data = self.lag_list(Y, X, p, q)\n res = self.qreg(data, Q)\n n = len(Y) - max(p, q)\n ssr = []\n for i in res.resid:\n if i >= 0:\n ssr.append(Q * i)\n else:\n ssr.append((Q - 1) * i)\n SSE = sum(ssr) / n\n L = math.log(SSE)\n k = p + q + 1\n if info_type == 'AIC':\n AIC = L + k / n\n NAME = 'AIC'\n else:\n AIC = L + (math.log(n) * k) / (2 * n)\n NAME = 'BIC'\n if Q in self.AicDict:\n self.AicDict[Q].append([p, q, AIC])\n else:\n self.AicDict[Q] = [[p, q, AIC]]\n for object in objects:\n object(\n f'正在进行{relation},分位点:{Q:.2f}的{NAME}计算,滞后阶数为[{p},{q}],{NAME}值为{AIC:.2f}'\n )\n\n threadCrawl = []\n for i in range(10):\n threadObj = threading.Thread(target=info_cri)\n threadObj.start()\n threadCrawl.append(threadObj)\n for single in threadCrawl:\n single.join()\n for object in objects:\n object(f\"计算{relation}最优滞后阶数线程退出循环\")\n\n # 选取最优阶数\n if EXIT:\n for object in objects:\n object(f\"程序已终止运行\")\n return 0\n LagDict = {}\n for i in range(len(qr_name)):\n QAICS = []\n for Q in self.AicDict:\n if qrange[i] <= Q <= qrange[i + 1]:\n for QAIC in self.AicDict[Q]:\n QAICS.append(QAIC)\n QAICS = sorted(QAICS, key=lambda x: x[2])\n LagDict[qr_name[i]] = QAICS[0]\n\n # 生成待估计分位点及滞后阶数组合队列\n QregQueue = Queue()\n Qs = []\n for i in range(WaldNum):\n Qs.append(i * 0.9 / (WaldNum - 1) + 0.05)\n for i in range(len(qrange) - 1):\n for Q in Qs:\n if qrange[i] <= Q <= qrange[i + 1]:\n QregQueue.put([Q] + LagDict[qr_name[i]] + [i])\n\n # 11.多线程计算wald值\n self.WaldDict = {}\n\n def wald_text():\n while not EXIT:\n try:\n Q, p, q, aic, index = QregQueue.get(block=False)\n except:\n break\n data = self.lag_list(Y, X, p, q)\n res = self.qreg(data, Q)\n wald = ''\n for i, value in enumerate(data):\n if i > p:\n wald += f'{value}='\n wald = wald + '0'\n wald = str(res.f_test(wald))\n walds = float(re.findall('array\\(\\[\\[(.*?)\\]\\]', wald)[0])\n self.WaldDict[(Q, index)] = [p, q, walds]\n for object in objects:\n object(\n f'正在进行{relation},分位区间:{qr_name[index]},分位点:{Q:.2f}的wald值,滞后阶数为[{p},{q}],wald值为{walds:.2f}'\n )\n\n threadCrawl = []\n for i in range(10):\n threadObj = threading.Thread(target=wald_text)\n threadObj.start()\n threadCrawl.append(threadObj)\n for single in threadCrawl:\n single.join()\n for object in objects:\n object(f\"计算{relation}的wald线程退出循环\")\n\n # 12.计算Sup-Wald值\n if EXIT:\n for object in objects:\n object(f\"程序已终止运行\")\n return 0\n SupDict = {}\n for i in range(len(qrange) - 1):\n SUP = []\n for Q in self.WaldDict:\n if Q[1] == i:\n SUP.append(self.WaldDict[Q])\n SUP = sorted(SUP, key=lambda x: x[2], reverse=True)\n SupDict[qr_name[i]] = SUP[0]\n\n # 13.判断Sup-Wald显著性\n Swl = pd.read_excel(r'.\\data\\Sup_wald_lag.xlsx')\n tao = []\n for i in range(len(qr_name)):\n fenzi = qrange[i + 1] * (1 - qrange[i])\n fenmu = qrange[i] * (1 - qrange[i + 1])\n tao.append(fenzi / fenmu)\n x = Swl[0]\n y = Swl.drop(0, axis=1)\n # 插值拟合\n qr_list = []\n wald_list = []\n for i, qr in enumerate(SupDict):\n q = SupDict[qr][1]\n wald = SupDict[qr][2]\n walds = round(wald, sign_num)\n index = [f'{q}.2', f'{q}.1', q]\n f3 = interpolate.interp1d(x, y[index[0]], kind=\"quadratic\")\n f2 = interpolate.interp1d(x, y[index[1]], kind=\"quadratic\")\n f1 = interpolate.interp1d(x, y[index[2]], kind=\"quadratic\")\n # walds = str(wald)[:str(wald).find('.')+sign_num+1]\n if wald >= f3(tao[i]):\n wald = f'{walds}***\\n[{q}]'\n elif wald >= f2(tao[i]):\n wald = f'{walds}**\\n[{q}]'\n elif 
wald >= f1(tao[i]):\n wald = f'{walds}*\\n[{q}]'\n else:\n wald = f'{walds}\\n[{q}]'\n qr_list.append(qr)\n wald_list.append(wald)\n results[relation] = pd.Series(wald_list, index=qr_list)\n for object in objects:\n object('分位数Granger因果检验计算结束')\n results.to_excel('./运行结果/Granger.xlsx')\n for object in objects:\n object('估计结果已保存在“运行结果/Granger.xlsx”文件内!')\n object('程序运行结束')\n print('\\n*******************************************')\n print('最终结果展示:')\n print(results)\n\n\nif __name__ == \"__main__\": # 用于当前窗体测试\n\n ex = Quantile_Granger()\n df = pd.read_excel(r'.\\data\\测试数据.xlsx')\n df = df.drop(df.columns[0], axis=1)\n\n # 设定参数\n start = 0.1 # 区间起点\n end = 0.9 # 区间终点\n num = 17 # 区间个数\n sign = '单因素对各市场' # 模式选择\n max_lag = 1 # 最大滞后阶数选择,默认为5\n info_type = 'BIC' # 信息准则选择\n WaldNum = 35 # 估计wald个数,默认1000个\n sign_num = 2 # 有效数字,默认为3\n AicNum = 20 # 估计各区间最优滞后阶数,默认50个\n\n # 开始计算\n qrange, qr_name = ex.set_range(start, end, num)\n DataList = ex.pattern(df, sign)\n results = ex.calculate(DataList, qrange, qr_name, max_lag, info_type,\n WaldNum, sign_num, AicNum)\n results.to_excel('./运行结果/Granger.xlsx')\n" ]
[ [ "scipy.interpolate.interp1d", "pandas.DataFrame", "pandas.read_excel", "pandas.Series", "numpy.linspace" ] ]
asascience-open/QARTOD
[ "2f940b99f64974e6c6ad450f341382b302fe790c" ]
[ "ioos_qartod/qc_tests/auxillary_checks.py" ]
[ "import numpy as np\n\n\ndef check_timestamps(times, max_time_interval=None):\n \"\"\"\n Checks that the times supplied are in monotonically increasing\n chronological order, and optionally that time intervals between\n measurements do not exceed a value `max_time_interval`. Note that this is\n not a QARTOD test, but rather a utility test to make sure times are in the\n proper order and optionally do not have large gaps prior to processing\n the data.\n\n \"\"\"\n time_diff = np.diff(times)\n sort_diff = np.diff(sorted(times))\n # Check if there are differences between sorted and unsorted, and then\n # see if if there are any duplicate times. Then check that none of the\n # diffs exceeds the sorted time.\n zero = np.array(0, dtype=time_diff.dtype)\n if not np.array_equal(time_diff, sort_diff) or np.any(sort_diff == zero):\n return False\n elif (max_time_interval is not None and\n np.any(sort_diff > max_time_interval)):\n return False\n else:\n return True\n" ]
[ [ "numpy.any", "numpy.array", "numpy.array_equal", "numpy.diff" ] ]
matwey/ad_examples
[ "78b01e9c9502523c5341243e1a8dca6befcefbc3" ]
[ "ad_examples/aad/test_hard_data.py" ]
[ "import os\nimport logging\nimport numpy as np\nimport numpy.random as rnd\nimport matplotlib.pyplot as plt\n\nfrom ..common.utils import get_command_args, configure_logger, dir_create\nfrom ..common.gen_samples import get_hard_samples\nfrom ..common.data_plotter import DataPlotter\n\n\n\"\"\"\npythonw -m ad_examples.aad.test_hard_data\n\"\"\"\n\n\ndef plot_dataset(x, cls_cols, orig_labels, pl):\n plt.xlim([np.min(x[:, 0]), np.max(x[:, 0])])\n plt.ylim([np.min(x[:, 1]), np.max(x[:, 1])])\n for cls in np.unique(orig_labels):\n X = x[np.where(orig_labels == cls)[0], :]\n pl.scatter(X[:, 0], X[:, 1], c=cls_cols.get(cls, \"grey\"), marker='x',\n linewidths=2.0, s=24, label=\"class %d (%s)\" % (cls, \"nominal\" if cls == 0 else \"anomaly\"))\n pl.legend(loc='lower right', prop={'size': 4})\n\n\ndef np_mat_to_str(x):\n n = x.shape[0]\n s = \"\"\n for i in range(n):\n s += \",\".join(x[i, :])\n s += os.linesep\n return s\n\n\nif __name__ == \"__main__\":\n\n logger = logging.getLogger(__name__)\n\n args = get_command_args(debug=True, debug_args=[\"--debug\",\n \"--plot\",\n \"--log_file=temp/test_hard_data.log\"])\n # print \"log file: %s\" % args.log_file\n configure_logger(args)\n\n rnd.seed(42)\n\n x, y, ns = get_hard_samples()\n orig_labels = np.repeat(np.arange(len(ns))+1, ns)\n orig_labels[np.where(orig_labels <= 2)] = 0\n\n n = x.shape[0]\n idxs = np.arange(n, dtype=int)\n np.random.shuffle(idxs)\n\n x = x[idxs, :]\n y = y[idxs]\n orig_labels = orig_labels[idxs]\n\n dp = DataPlotter(pdfpath=\"./temp/test_hard_data.pdf\", rows=2, cols=2)\n for i in range(3, len(ns)+1):\n pl = dp.get_next_plot()\n cls_cols = {0: \"grey\", 1: \"blue\", 2: \"green\", i: \"red\"}\n plot_dataset(x, cls_cols, orig_labels, pl)\n dp.close()\n\n dp = DataPlotter(pdfpath=\"./temp/test_hard_data_all.pdf\", rows=1, cols=1)\n pl = dp.get_next_plot()\n cls_cols = {3: \"blue\", 4: \"green\", 5: \"red\", 6: \"cyan\", 7: \"brown\", 8: \"orange\", 9: \"pink\"}\n plot_dataset(x, cls_cols, orig_labels, pl)\n dp.close()\n\n dataset = \"toy_hard\"\n out_dir = os.path.join(\".\", \"temp\", dataset, \"fullsamples\")\n dir_create(out_dir)\n out_file_dat = os.path.join(out_dir, \"%s_1.csv\" % dataset)\n out_file_cls = os.path.join(out_dir, \"%s_1_orig_labels.csv\" % dataset)\n y = [\"anomaly\" if v == 1 else \"nominal\" for v in y]\n with open(out_file_dat, 'w') as f:\n f.write(\"label,x,y\" + os.linesep)\n for i in range(n):\n f.write(\"%s,%f,%f%s\" % (y[i], x[i, 0], x[i, 1], os.linesep))\n with open(out_file_cls, 'w') as f:\n f.write(\"ground.truth,label\" + os.linesep)\n for cls in zip(y, orig_labels):\n f.write(\"%s,%d%s\" % (cls[0], cls[1], os.linesep))\n" ]
[ [ "numpy.max", "numpy.random.seed", "numpy.min", "numpy.random.shuffle", "numpy.where", "numpy.arange", "numpy.unique" ] ]
pirakd/DeepProp
[ "e43f6e12220da38a3bda51918bd75bb7c48dec31" ]
[ "scripts/d2d_main.py" ]
[ "from os import path\nimport sys\nsys.path.append(path.dirname(path.dirname(path.realpath(__file__))))\nfrom os import path, makedirs\nimport torch\nfrom utils import read_data, get_root_path, train_test_split, get_time, \\\n gen_propagation_scores, redirect_output\nfrom D2D import eval_D2D, eval_D2D_2, generate_D2D_features_from_propagation_scores\nimport numpy as np\nfrom presets import experiments_0\nimport json\nimport argparse\nfrom scripts.scripts_utils import sources_filenmae_dict, terminals_filenmae_dict\nimport pickle\n\n\ndef run(sys_args):\n output_folder = 'output'\n output_file_path = path.join(get_root_path(), output_folder, path.basename(__file__).split('.')[0], get_time())\n makedirs(output_file_path, exist_ok=True)\n redirect_output(path.join(output_file_path, 'log'))\n\n n_experiments = sys_args.n_experiments\n args = experiments_0\n\n if sys_args.device:\n device = torch.device(\"cuda:{}\".format(sys_args.device))\n else:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n args['data']['load_prop_scores'] = sys_args.load_prop_scores\n args['data']['save_prop_scores'] = sys_args.save_prop_scores\n args['data']['prop_scores_filename'] = sys_args.prop_scores_filename\n args['train']['train_val_test_split'] = sys_args.train_val_test_split\n args['data']['directed_interactions_filename'] = sys_args.directed_interactions_filename\n args['data']['sources_filename'] = sources_filenmae_dict[sys_args.experiments_type]\n args['data']['terminals_filename'] = terminals_filenmae_dict[sys_args.experiments_type]\n args['data']['n_experiments'] = n_experiments\n print(json.dumps(args, indent=4))\n\n # data read and filtering\n rng = np.random.RandomState(args['data']['random_seed'])\n\n network, directed_interactions, sources, terminals, id_to_degree = \\\n read_data(args['data']['network_filename'], args['data']['directed_interactions_filename'],\n args['data']['sources_filename'], args['data']['terminals_filename'],\n args['data']['n_experiments'], args['data']['max_set_size'], rng)\n n_experiments = len(sources)\n\n # merged_network = pandas.concat([directed_interactions, network.drop(directed_interactions.index & network.index)])\n directed_interactions_pairs_list = np.array(directed_interactions.index)\n directed_interactions_source_type = np.array(directed_interactions.source)\n genes_ids_to_keep = sorted(list(set([x for pair in directed_interactions_pairs_list for x in pair])))\n\n propagation_scores, row_id_to_idx, col_id_to_idx, normalization_constants_dict = \\\n gen_propagation_scores(args, network, sources, terminals, genes_ids_to_keep, directed_interactions_pairs_list)\n\n train_indexes, val_indexes, test_indexes = train_test_split(args['data']['split_type'],\n len(directed_interactions_pairs_list),\n args['train']['train_val_test_split'],\n random_state=rng) # feature generation\n d2d_train_indexes = np.concatenate([train_indexes, val_indexes])\n\n # d2d evaluation\n sources_indexes = [[row_id_to_idx[gene_id] for gene_id in gene_set] for gene_set in sources.values()]\n terminals_indexes = [[row_id_to_idx[gene_id] for gene_id in gene_set] for gene_set in terminals.values()]\n pairs_indexes = [(col_id_to_idx[pair[0]], col_id_to_idx[pair[1]]) for pair in directed_interactions_pairs_list]\n features, deconstructed_features = generate_D2D_features_from_propagation_scores(propagation_scores,\n pairs_indexes,\n sources_indexes,\n terminals_indexes)\n d2d_results_dict, d2d_model = eval_D2D(features[d2d_train_indexes], features[test_indexes],\n 
directed_interactions_source_type[test_indexes])\n d2d_2_results_dict, d2d_2_model = eval_D2D_2(deconstructed_features[d2d_train_indexes],\n deconstructed_features[test_indexes],\n directed_interactions_source_type[test_indexes])\n d2d_stats = ({type: {x: xx for x, xx in values.items() if x in ['acc', 'auc']} for type, values in\n d2d_results_dict.items()})\n d2d_2_stats = {type: {x: xx for x, xx in values.items() if x in ['acc', 'auc']} for type, values in\n d2d_2_results_dict.items()}\n\n results_dict = {'d2d':d2d_stats,\n 'd2d_2': d2d_2_stats,\n 'n_experiments':n_experiments}\n\n models = {'d2d':d2d_model,\n 'd2d_2': d2d_2_model}\n\n with open(path.join(output_file_path, 'args'), 'w') as f:\n json.dump(args, f, indent=4, separators=(',', ': '))\n with open(path.join(output_file_path, 'results'), 'w') as f:\n json.dump(results_dict, f, indent=4, separators=(',', ': '))\n with open(path.join(output_file_path, 'd2d_models'), 'wb') as f:\n pickle.dump(models, f)\n\n\nif __name__ == '__main__':\n n_folds = 5\n input_type = 'drug'\n load_prop = False\n save_prop = False\n n_exp = 2\n split = [0.66, 0.14, 0.2]\n interaction_type = ['KPI', 'STKE']\n prop_scores_filename = 'drug_KPI'\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-ex', '--ex_type', dest='experiments_type', type=str,\n help='name of experiment type(drug, colon, etc.)', default=input_type)\n parser.add_argument('-n,', '--n_exp', dest='n_experiments', type=int,\n help='num of experiments used (0 for all)', default=n_exp)\n parser.add_argument('-s', '--save_prop', dest='save_prop_scores', action='store_true', default=False,\n help='Whether to save propagation scores')\n parser.add_argument('-l', '--load_prop', dest='load_prop_scores', action='store_true', default=False,\n help='Whether to load prop scores')\n parser.add_argument('-sp', '--split', dest='train_val_test_split', nargs=3, help='[train, val, test] sums to 1',\n default=split, type=float)\n parser.add_argument('-in', '--inter_file', dest='directed_interactions_filename', nargs='*', type=str,\n help='KPI/STKE', default=interaction_type)\n parser.add_argument('-p', '--prop_file', dest='prop_scores_filename', type=str,\n help='Name of prop score file(save/load)', default=prop_scores_filename)\n parser.add_argument('-f', '--n_folds', dest='n_folds', type=str,\n help='Name of prop score file(save/load)', default=n_folds)\n parser.add_argument('-w', dest='n_workers', type=int,\n help='number of dataloader workers', default=0)\n parser.add_argument('-d', dest='device', type=int, help='gpu number', default=None)\n args = parser.parse_args()\n args.directed_interactions_filename = sorted(args.directed_interactions_filename)\n args.prop_scores_filename = args.experiments_type + '_' + '_'.join(args.directed_interactions_filename) + '_{}'.format(args.n_experiments)\n\n # args.load_prop_scores = True\n # args.save_prop_scores = True\n run(args)\n" ]
[ [ "numpy.concatenate", "numpy.array", "torch.cuda.is_available", "numpy.random.RandomState" ] ]
vishalbelsare/AffineFlowCausalInf
[ "3c7e2be0ad194e4dcbb0eea35c8fe1d7e88b1b8f" ]
[ "runners/intervention_trials.py" ]
[ "# trial interventional predictions of flow models\n#\n#\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pickle\nimport seaborn as sns\n\nfrom data.generate_synth_data import intervention_sem\nfrom models import ANM, CAREFL\n\n\ndef res_save_name(config, algo):\n if 'carefl' not in algo.lower():\n return 'int_{}{}{}.p'.format(config.data.n_points, 'r' * config.data.random, 'm' * config.data.multiplicative)\n return 'int_{}{}{}_{}_{}_{}_{}.p'.format(config.data.n_points,\n 'r' * config.data.random,\n 'm' * config.data.multiplicative,\n config.flow.architecture.lower(),\n config.flow.net_class.lower(),\n config.flow.nl,\n config.flow.nh)\n\n\ndef fig_save_name(config):\n return 'int_mse_{}{}{}_{}_{}_{}_{}.pdf'.format('r' * config.data.random,\n 'e' * config.data.expected,\n 'm' * config.data.multiplicative,\n config.flow.architecture.lower(),\n config.flow.net_class.lower(),\n config.flow.nl,\n config.flow.nh)\n\n\ndef run_interventions(args, config):\n n_obs = config.data.n_points\n model = config.algorithm.lower()\n print(\"** {} observations **\".format(n_obs))\n # generate coeffcients for equation (12), and data from that SEM\n data, coeffs, dag = intervention_sem(n_obs, dim=4, seed=config.data.seed, random=config.data.random,\n multiplicative=config.data.multiplicative)\n print(\"fitting a {} model\".format(model))\n # fit to an affine autoregressive flow or ANM with gp/linear functions\n mod = CAREFL(config) if model == 'carefl' else ANM(method=model)\n mod.fit_to_sem(data, dag)\n # intervene on X_1 and get a sample of {x | do(X_1=a)} for a in [-3, 3]\n avals = np.arange(-3, 3, .1)\n x_int_sample = []\n x_int_exp = []\n for a in avals:\n res = mod.predict_intervention(a, n_samples=20, iidx=0)\n x_int_sample.append(res[0].mean(axis=0))\n x_int_exp.append(res[1].mean(axis=0))\n x_int_sample = np.array(x_int_sample)\n x_int_exp = np.array(x_int_exp)\n # compute the MSE between the true E[x_3|x_1=a] to the empirical expectation from the sample\n # we know that the true E[x_3|x_1=a] = a\n mse_x3 = np.mean((x_int_sample[:, 2] - avals) ** 2)\n mse_x3e = np.mean((x_int_exp[:, 2] - avals) ** 2)\n # do the same for x_4; true E[x_4|x_1=a] = c_1*a^2\n mse_x4 = np.mean((x_int_sample[:, 3] - coeffs[1] * avals * avals) ** 2)\n mse_x4e = np.mean((x_int_exp[:, 3] - coeffs[1] * avals * avals) ** 2)\n # store results\n results = {}\n results[\"x3\"] = mse_x3\n results[\"x4\"] = mse_x4\n results[\"x3e\"] = mse_x3e\n results[\"x4e\"] = mse_x4e\n pickle.dump(results, open(os.path.join(args.output, res_save_name(config, model)), 'wb'))\n\n\ndef plot_interventions(args, config):\n from configs.plotting import color_dict, label_dict, font_dict\n # plot the MSEs\n n_obs_list = [250, 500, 750, 1000, 1250, 1500, 2000, 2500]\n models = ['carefl', 'careflns', 'gp', 'linear']\n to_models = lambda s: s.split('/')[0]\n # load results from disk\n variables = ['x3', 'x3e', 'x4', 'x4e']\n results = {mod: {x: [] for x in variables} for mod in models}\n\n _flow = os.path.join('carefl', config.flow.architecture.lower())\n _flow_ns = os.path.join('careflns', config.flow.architecture.lower())\n int_list = [_flow, _flow_ns, 'gp', 'linear']\n\n for a in int_list:\n for n in n_obs_list:\n config.data.n_points = n\n res = pickle.load(\n open(os.path.join(args.run, 'interventions', a, res_save_name(config, to_models(a))), 'rb'))\n for x in variables:\n results[to_models(a)][x].append(res[x])\n # produce plot\n sns.set_style(\"whitegrid\")\n # sns.set_palette(sns.color_palette(\"muted\", 8))\n fig, axs = 
plt.subplots(1, 2, figsize=(8, 4), sharey=True)\n for a in models:\n # plot E[X_3|do(X_1=a)]\n if config.data.expected:\n axs[0].plot(n_obs_list, results[a][\"x3e\"], color=color_dict[a], linestyle='-.',\n marker='o', linewidth=2, alpha=.8)\n else:\n axs[0].plot(n_obs_list, results[a][\"x3\"], color=color_dict[a], linestyle='-',\n marker='o', linewidth=2, alpha=.8)\n # plot E[X_4|do(X_1=a)]\n if config.data.expected:\n axs[1].plot(n_obs_list, results[a][\"x4e\"], color=color_dict[a], label=label_dict[a], linestyle='-.',\n marker='o', linewidth=2, alpha=.8)\n else:\n axs[1].plot(n_obs_list, results[a][\"x4\"], color=color_dict[a], label=label_dict[a], linestyle='-',\n marker='o', linewidth=2, alpha=.8)\n axs[0].set_title(r'$\\mathbb{E}[X_3|do(X_1=a)]$', fontsize=font_dict['title'])\n axs[1].set_title(r'$\\mathbb{E}[X_4|do(X_1=a)]$', fontsize=font_dict['title'])\n for ax in axs:\n ax.set_xlabel(r'Sample size', fontsize=font_dict['xlabel'])\n ax.set_ylabel(r'MSE', fontsize=font_dict['ylabel'])\n ax.set_yscale('log')\n fig.legend( # The labels for each line\n # loc=\"center right\", # Position of legend\n # borderaxespad=0.2, # Small spacing around legend box\n title=\"Algorithm\", # Title for the legend\n fontsize=11,\n bbox_to_anchor=(0.75, 0.7),\n framealpha=.7,\n )\n plt.tight_layout()\n # plt.subplots_adjust(right=0.85)\n plt.savefig(os.path.join(args.run, fig_save_name(config)), dpi=300)\n" ]
[ [ "numpy.array", "matplotlib.pyplot.subplots", "numpy.mean", "numpy.arange", "matplotlib.pyplot.tight_layout" ] ]
wesm/zipline
[ "279e125ab8e61b8d04cc487e52f17a6e1cadeb6e" ]
[ "zipline/algorithm.py" ]
[ "#\n# Copyright 2013 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom copy import copy\n\nimport pytz\nimport pandas as pd\nimport numpy as np\n\nfrom datetime import datetime\n\nfrom itertools import groupby\nfrom operator import attrgetter\n\nfrom zipline.errors import (\n UnsupportedSlippageModel,\n OverrideSlippagePostInit,\n UnsupportedCommissionModel,\n OverrideCommissionPostInit\n)\nfrom zipline.sources import DataFrameSource, DataPanelSource\nfrom zipline.utils.factory import create_simulation_parameters\nfrom zipline.transforms.utils import StatefulTransform\nfrom zipline.finance.slippage import (\n VolumeShareSlippage,\n FixedSlippage,\n transact_partial\n)\nfrom zipline.finance.commission import PerShare, PerTrade\nfrom zipline.finance.constants import ANNUALIZER\n\nfrom zipline.gens.composites import (\n date_sorted_sources,\n sequential_transforms,\n alias_dt\n)\nfrom zipline.gens.tradesimulation import TradeSimulationClient as tsc\n\nDEFAULT_CAPITAL_BASE = float(\"1.0e5\")\n\n\nclass TradingAlgorithm(object):\n \"\"\"Base class for trading algorithms. Inherit and overload\n initialize() and handle_data(data).\n\n A new algorithm could look like this:\n ```\n class MyAlgo(TradingAlgorithm):\n def initialize(amount):\n self.amount = amount\n\n def handle_data(data):\n sid = self.sids[0]\n self.order(sid, amount)\n ```\n To then to run this algorithm:\n\n >>> my_algo = MyAlgo([0], 100) # first argument has to be list of sids\n >>> stats = my_algo.run(data)\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize sids and other state variables.\n\n :Arguments:\n data_frequency : str (daily, hourly or minutely)\n The duration of the bars.\n annualizer : int <optional>\n Which constant to use for annualizing risk metrics.\n If not provided, will extract from data_frequency.\n capital_base : float <default: 1.0e5>\n How much capital to start with.\n \"\"\"\n self.order = None\n self._portfolio = None\n self.datetime = None\n\n self.registered_transforms = {}\n self.transforms = []\n self.sources = []\n\n self._recorded_vars = {}\n\n self.logger = None\n\n # default components for transact\n self.slippage = VolumeShareSlippage()\n self.commission = PerShare()\n\n if 'data_frequency' in kwargs:\n self.set_data_frequency(kwargs.pop('data_frequency'))\n else:\n self.data_frequency = None\n\n # Override annualizer if set\n if 'annualizer' in kwargs:\n self.annualizer = kwargs['annualizer']\n\n # set the capital base\n self.capital_base = kwargs.get('capital_base', DEFAULT_CAPITAL_BASE)\n\n self.sim_params = kwargs.pop('sim_params', None)\n\n # an algorithm subclass needs to set initialized to True when\n # it is fully initialized.\n self.initialized = False\n\n # call to user-defined constructor method\n self.initialize(*args, **kwargs)\n\n def _create_generator(self, sim_params):\n \"\"\"\n Create a basic generator setup using the sources and\n transforms attached to this algorithm.\n \"\"\"\n\n self.date_sorted = date_sorted_sources(*self.sources)\n 
self.with_tnfms = sequential_transforms(self.date_sorted,\n *self.transforms)\n self.with_alias_dt = alias_dt(self.with_tnfms)\n # Group together events with the same dt field. This depends on the\n # events already being sorted.\n self.grouped_by_dt = groupby(self.with_alias_dt, attrgetter('dt'))\n self.trading_client = tsc(self, sim_params)\n\n transact_method = transact_partial(self.slippage, self.commission)\n self.set_transact(transact_method)\n\n return self.trading_client.simulate(self.grouped_by_dt)\n\n def get_generator(self):\n \"\"\"\n Override this method to add new logic to the construction\n of the generator. Overrides can use the _create_generator\n method to get a standard construction generator.\n \"\"\"\n return self._create_generator(self.sim_params)\n\n def initialize(self, *args, **kwargs):\n pass\n\n # TODO: make a new subclass, e.g. BatchAlgorithm, and move\n # the run method to the subclass, and refactor to put the\n # generator creation logic into get_generator.\n def run(self, source, sim_params=None):\n \"\"\"Run the algorithm.\n\n :Arguments:\n source : can be either:\n - pandas.DataFrame\n - zipline source\n - list of zipline sources\n\n If pandas.DataFrame is provided, it must have the\n following structure:\n * column names must consist of ints representing the\n different sids\n * index must be DatetimeIndex\n * array contents should be price info.\n\n :Returns:\n daily_stats : pandas.DataFrame\n Daily performance metrics such as returns, alpha etc.\n\n \"\"\"\n if isinstance(source, (list, tuple)):\n assert self.sim_params is not None or sim_params is not None, \\\n \"\"\"When providing a list of sources, \\\n sim_params have to be specified as a parameter\n or in the constructor.\"\"\"\n elif isinstance(source, pd.DataFrame):\n # if DataFrame provided, wrap in DataFrameSource\n source = DataFrameSource(source)\n elif isinstance(source, pd.Panel):\n source = DataPanelSource(source)\n\n if not isinstance(source, (list, tuple)):\n self.sources = [source]\n else:\n self.sources = source\n\n # Check for override of sim_params.\n # If it isn't passed to this function,\n # use the default params set with the algorithm.\n # Else, we create simulation parameters using the start and end of the\n # source provided.\n if not sim_params:\n if not self.sim_params:\n start = source.start\n end = source.end\n\n sim_params = create_simulation_parameters(\n start=start,\n end=end,\n capital_base=self.capital_base\n )\n else:\n sim_params = self.sim_params\n\n # Create transforms by wrapping them into StatefulTransforms\n self.transforms = []\n for namestring, trans_descr in self.registered_transforms.iteritems():\n sf = StatefulTransform(\n trans_descr['class'],\n *trans_descr['args'],\n **trans_descr['kwargs']\n )\n sf.namestring = namestring\n\n self.transforms.append(sf)\n\n # create transforms and zipline\n self.gen = self._create_generator(sim_params)\n\n # loop through simulated_trading, each iteration returns a\n # perf dictionary\n perfs = list(self.gen)\n\n # convert perf dict to pandas dataframe\n daily_stats = self._create_daily_stats(perfs)\n\n return daily_stats\n\n def _create_daily_stats(self, perfs):\n # create daily and cumulative stats dataframe\n daily_perfs = []\n cum_perfs = []\n # TODO: the loop here could overwrite expected properties\n # of daily_perf. 
Could potentially raise or log a\n # warning.\n for perf in perfs:\n if 'daily_perf' in perf:\n\n perf['daily_perf'].update(\n perf['daily_perf'].pop('recorded_vars')\n )\n daily_perfs.append(perf['daily_perf'])\n else:\n cum_perfs.append(perf)\n\n daily_dts = [np.datetime64(perf['period_close'], utc=True)\n for perf in daily_perfs]\n daily_stats = pd.DataFrame(daily_perfs, index=daily_dts)\n\n return daily_stats\n\n def add_transform(self, transform_class, tag, *args, **kwargs):\n \"\"\"Add a single-sid, sequential transform to the model.\n\n :Arguments:\n transform_class : class\n Which transform to use. E.g. mavg.\n tag : str\n How to name the transform. Can later be access via:\n data[sid].tag()\n\n Extra args and kwargs will be forwarded to the transform\n instantiation.\n\n \"\"\"\n self.registered_transforms[tag] = {'class': transform_class,\n 'args': args,\n 'kwargs': kwargs}\n\n def record(self, **kwargs):\n \"\"\"\n Track and record local variable (i.e. attributes) each day.\n \"\"\"\n for name, value in kwargs.items():\n self._recorded_vars[name] = value\n\n @property\n def recorded_vars(self):\n return copy(self._recorded_vars)\n\n @property\n def portfolio(self):\n return self._portfolio\n\n def set_portfolio(self, portfolio):\n self._portfolio = portfolio\n\n def set_order(self, order_callable):\n self.order = order_callable\n\n def set_logger(self, logger):\n self.logger = logger\n\n def set_datetime(self, dt):\n assert isinstance(dt, datetime), \\\n \"Attempt to set algorithm's current time with non-datetime\"\n assert dt.tzinfo == pytz.utc, \\\n \"Algorithm expects a utc datetime\"\n self.datetime = dt\n\n def get_datetime(self):\n \"\"\"\n Returns a copy of the datetime.\n \"\"\"\n date_copy = copy(self.datetime)\n assert date_copy.tzinfo == pytz.utc, \\\n \"Algorithm should have a utc datetime\"\n return date_copy\n\n def set_transact(self, transact):\n \"\"\"\n Set the method that will be called to create a\n transaction from open orders and trade events.\n \"\"\"\n self.trading_client.ordering_client.transact = transact\n\n def set_slippage(self, slippage):\n if not isinstance(slippage, (VolumeShareSlippage, FixedSlippage)):\n raise UnsupportedSlippageModel()\n if self.initialized:\n raise OverrideSlippagePostInit()\n self.slippage = slippage\n\n def set_commission(self, commission):\n if not isinstance(commission, (PerShare, PerTrade)):\n raise UnsupportedCommissionModel()\n\n if self.initialized:\n raise OverrideCommissionPostInit()\n self.commission = commission\n\n def set_sources(self, sources):\n assert isinstance(sources, list)\n self.sources = sources\n\n def set_transforms(self, transforms):\n assert isinstance(transforms, list)\n self.transforms = transforms\n\n def set_data_frequency(self, data_frequency):\n assert data_frequency in ('daily', 'minute')\n self.data_frequency = data_frequency\n self.annualizer = ANNUALIZER[self.data_frequency]\n" ]
[ [ "pandas.DataFrame", "numpy.datetime64" ] ]
stbalduin/memobuilder
[ "c99eb8e711d5109c1322f443441b5a07c079e2f0" ]
[ "memobuilder/mutil/schedules.py" ]
[ "import numpy\r\nimport pyDOE as doe\r\nimport random\r\n\r\n\r\nclass UniformScheduleGenerator():\r\n\r\n @staticmethod\r\n def generate_schedules(num_schedules, resolution, duration, num_slots,\r\n min, max):\r\n if num_schedules == 0:\r\n return []\r\n # compute slot duration and number of datapoints per slot:\r\n slot_duration = duration / num_slots\r\n datapoints_per_slot = slot_duration / resolution\r\n\r\n schedules = []\r\n for i in range(num_schedules):\r\n # draw random values between min and max\r\n raw = [random.uniform(min, max) for i in range(num_slots)]\r\n # reapeat each value if slots of the target schedule have\r\n # several values\r\n schedule = numpy.repeat(raw, datapoints_per_slot)\r\n # convert numpy array to simple list\r\n schedule = list(schedule)\r\n schedules.append(schedule)\r\n return schedules\r\n\r\n\r\nclass LHSScheduleGenerator():\r\n\r\n @staticmethod\r\n def generate_schedules(num_schedules, resolution, duration, num_slots,\r\n min, max):\r\n \"\"\"\r\n This function may be used to generate test schedules. The\r\n duration of each schedule and the number of equally sized\r\n slots of the schedule may be specified by the user. Within\r\n a slot the value of a schedule is constant and has a randomly\r\n chosen value between *min* and *max*. For the construction of\r\n schedules a latin-hypercube sampling approach is used, which\r\n comprises following steps:\r\n\r\n * Latin hypercube sampling is used to create a plan for\r\n *num_schedules* experiments and *num_slots* factors.\r\n * Each row of this plan is then translated into a device schedule\r\n by\r\n * denormalizing its values\r\n * by repeating each value *datapoints_per_slot* times\r\n\r\n :param num_schedules: int, number of schedules to generate.\r\n\r\n :param resolution: int, step size of the controlled device\r\n in seconds (e.g. simulator step_size).\r\n\r\n :param duration: int, total duration of each target schedule in\r\n seconds.\r\n\r\n :param num_slots: int, number of equally sized time slots for\r\n each target schedule.\r\n\r\n :param min: float, minimal allowed schedule value\r\n\r\n :param max: float, maximal allowed schedule value\r\n\r\n :return: schedule_description[*num_schedules*],\r\n a list of json encoded schedules to be interpreted by DataSeriesSim\r\n (https://ecode.offis.de/simulators/DataSeriesSim)\r\n\r\n \"\"\"\r\n if num_schedules == 0:\r\n return []\r\n\r\n # compute slot duration and number of datapoints per slot:\r\n slot_duration = duration / num_slots\r\n datapoints_per_slot = slot_duration / resolution\r\n\r\n # create a latin hypercube design:\r\n plan = doe.lhs(num_slots, samples=num_schedules)\r\n\r\n # translate each row of the sampled plan into a schedule by\r\n # denormalizing it and by repeating each value\r\n # *datapoints_per_slot* times:\r\n test_schedules = []\r\n for i in range(num_schedules):\r\n schedule = plan[i]\r\n schedule = min + (max - min) * schedule\r\n schedule = numpy.repeat(schedule, datapoints_per_slot)\r\n schedule = list(schedule)\r\n test_schedules.append(schedule)\r\n return test_schedules\r\n" ]
[ [ "numpy.repeat" ] ]
lascivaroma/PaPie
[ "0ca4311a57b2439994e5fcdc02f4d008ee268a9c" ]
[ "pie/models/highway.py" ]
[ "\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom pie import initialization\n\n\nclass Highway(nn.Module):\n \"\"\"\n Highway network\n \"\"\"\n def __init__(self, in_features, num_layers, act='relu'):\n self.in_features = in_features\n\n self.act = act\n super().__init__()\n\n self.layers = nn.ModuleList(\n [nn.Linear(in_features, in_features*2) for _ in range(num_layers)])\n\n self.init()\n\n def init(self):\n for layer in self.layers:\n initialization.init_linear(layer)\n # bias gate to let information go untouched\n nn.init.constant_(layer.bias[self.in_features:], 1.)\n\n def forward(self, inp):\n current = inp\n for layer in self.layers:\n inp, gate = layer(current).chunk(2, dim=-1)\n inp, gate = getattr(F, self.act)(inp), torch.sigmoid(gate)\n current = gate * current + (1 - gate) * inp\n\n return current\n\n\n" ]
[ [ "torch.nn.Linear", "torch.sigmoid", "torch.nn.init.constant_" ] ]
Saiprasad16/federated
[ "9c08381a172a26957d7c50f74214c74fe9a9fb1c" ]
[ "tensorflow_federated/python/core/impl/types/type_conversions_test.py" ]
[ "# Copyright 2018, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\n\nfrom absl.testing import parameterized\nimport attr\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.common_libs import structure\nfrom tensorflow_federated.python.core.api import test_case\nfrom tensorflow_federated.python.core.impl.types import computation_types\nfrom tensorflow_federated.python.core.impl.types import placements\nfrom tensorflow_federated.python.core.impl.types import type_conversions\nfrom tensorflow_federated.python.core.impl.types import typed_object\n\n\nclass InferTypeTest(parameterized.TestCase, test_case.TestCase):\n\n def test_with_none(self):\n self.assertIsNone(type_conversions.infer_type(None))\n\n def test_with_typed_object(self):\n\n class DummyTypedObject(typed_object.TypedObject):\n\n @property\n def type_signature(self):\n return computation_types.TensorType(tf.bool)\n\n whimsy_type = type_conversions.infer_type(DummyTypedObject())\n self.assertEqual(whimsy_type.compact_representation(), 'bool')\n\n def test_with_scalar_int_tensor(self):\n self.assertEqual(str(type_conversions.infer_type(tf.constant(1))), 'int32')\n self.assertEqual(\n str(type_conversions.infer_type(tf.constant(2**40))), 'int64')\n self.assertEqual(\n str(type_conversions.infer_type(tf.constant(-2**40))), 'int64')\n with self.assertRaises(ValueError):\n type_conversions.infer_type(tf.constant(-2**64 + 1))\n with self.assertRaises(ValueError):\n type_conversions.infer_type(tf.constant(2**64))\n\n def test_with_scalar_bool_tensor(self):\n self.assertEqual(\n str(type_conversions.infer_type(tf.constant(False))), 'bool')\n\n def test_with_int_array_tensor(self):\n self.assertEqual(\n str(type_conversions.infer_type(tf.constant([10, 20]))), 'int32[2]')\n self.assertEqual(\n str(type_conversions.infer_type(tf.constant([0, 2**40, -2**60, 0]))),\n 'int64[4]')\n with self.assertRaises(ValueError):\n type_conversions.infer_type(tf.constant([2**64, 0]))\n\n def test_with_scalar_int_variable_tensor(self):\n self.assertEqual(str(type_conversions.infer_type(tf.Variable(10))), 'int32')\n\n def test_with_scalar_bool_variable_tensor(self):\n self.assertEqual(\n str(type_conversions.infer_type(tf.Variable(True))), 'bool')\n\n def test_with_scalar_float_variable_tensor(self):\n self.assertEqual(\n str(type_conversions.infer_type(tf.Variable(0.5))), 'float32')\n\n def test_with_scalar_int_array_variable_tensor(self):\n self.assertEqual(\n str(type_conversions.infer_type(tf.Variable([10]))), 'int32[1]')\n\n def test_with_int_dataset(self):\n self.assertEqual(\n str(type_conversions.infer_type(tf.data.Dataset.from_tensors(10))),\n 'int32*')\n\n def test_with_ordered_dict_dataset(self):\n self.assertEqual(\n str(\n type_conversions.infer_type(\n tf.data.Dataset.from_tensors(\n collections.OrderedDict([\n ('b', 20),\n ('a', 10),\n ])))), '<b=int32,a=int32>*')\n\n def test_with_int(self):\n self.assertEqual(str(type_conversions.infer_type(10)), 
'int32')\n\n def test_with_float(self):\n self.assertEqual(str(type_conversions.infer_type(0.5)), 'float32')\n\n def test_with_bool(self):\n self.assertEqual(str(type_conversions.infer_type(True)), 'bool')\n\n def test_with_string(self):\n self.assertEqual(str(type_conversions.infer_type('abc')), 'string')\n\n def test_with_np_int32(self):\n self.assertEqual(str(type_conversions.infer_type(np.int32(10))), 'int32')\n\n def test_with_np_int64(self):\n self.assertEqual(str(type_conversions.infer_type(np.int64(10))), 'int64')\n\n def test_with_np_float32(self):\n self.assertEqual(\n str(type_conversions.infer_type(np.float32(10))), 'float32')\n\n def test_with_np_float64(self):\n self.assertEqual(\n str(type_conversions.infer_type(np.float64(10))), 'float64')\n\n def test_with_np_bool(self):\n self.assertEqual(str(type_conversions.infer_type(np.bool(True))), 'bool')\n\n def test_with_unicode_string(self):\n self.assertEqual(str(type_conversions.infer_type(u'abc')), 'string')\n\n def test_with_numpy_int_array(self):\n self.assertEqual(\n str(type_conversions.infer_type(np.array([10, 20]))), 'int64[2]')\n\n def test_with_numpy_nested_int_array(self):\n self.assertEqual(\n str(type_conversions.infer_type(np.array([[10], [20]]))), 'int64[2,1]')\n\n def test_with_numpy_float64_scalar(self):\n self.assertEqual(str(type_conversions.infer_type(np.float64(1))), 'float64')\n\n def test_with_int_list(self):\n t = type_conversions.infer_type([1, 2, 3])\n self.assertEqual(str(t), '<int32,int32,int32>')\n self.assertIsInstance(t, computation_types.StructWithPythonType)\n self.assertIs(t.python_container, list)\n\n def test_with_nested_float_list(self):\n t = type_conversions.infer_type([[0.1], [0.2], [0.3]])\n self.assertEqual(str(t), '<<float32>,<float32>,<float32>>')\n self.assertIsInstance(t, computation_types.StructWithPythonType)\n self.assertIs(t.python_container, list)\n\n def test_with_structure(self):\n t = type_conversions.infer_type(\n structure.Struct([\n ('a', 10),\n (None, False),\n ]))\n self.assertEqual(str(t), '<a=int32,bool>')\n self.assertIsInstance(t, computation_types.StructType)\n self.assertNotIsInstance(t, computation_types.StructWithPythonType)\n\n def test_with_nested_structure(self):\n t = type_conversions.infer_type(\n structure.Struct([\n ('a', 10),\n (None, structure.Struct([\n (None, True),\n (None, 0.5),\n ])),\n ]))\n self.assertEqual(str(t), '<a=int32,<bool,float32>>')\n self.assertIsInstance(t, computation_types.StructType)\n self.assertNotIsInstance(t, computation_types.StructWithPythonType)\n\n def test_with_namedtuple(self):\n test_named_tuple = collections.namedtuple('TestNamedTuple', 'y x')\n t = type_conversions.infer_type(test_named_tuple(1, True))\n self.assertEqual(str(t), '<y=int32,x=bool>')\n self.assertIsInstance(t, computation_types.StructWithPythonType)\n self.assertIs(t.python_container, test_named_tuple)\n\n def test_with_dict(self):\n v1 = {\n 'a': 1,\n 'b': 2.0,\n }\n inferred_type = type_conversions.infer_type(v1)\n self.assertEqual(str(inferred_type), '<a=int32,b=float32>')\n self.assertIsInstance(inferred_type, computation_types.StructWithPythonType)\n self.assertIs(inferred_type.python_container, dict)\n\n v2 = {\n 'b': 2.0,\n 'a': 1,\n }\n inferred_type = type_conversions.infer_type(v2)\n self.assertEqual(str(inferred_type), '<a=int32,b=float32>')\n self.assertIsInstance(inferred_type, computation_types.StructWithPythonType)\n self.assertIs(inferred_type.python_container, dict)\n\n def test_with_ordered_dict(self):\n t = 
type_conversions.infer_type(\n collections.OrderedDict([('b', 2.0), ('a', 1)]))\n self.assertEqual(str(t), '<b=float32,a=int32>')\n self.assertIsInstance(t, computation_types.StructWithPythonType)\n self.assertIs(t.python_container, collections.OrderedDict)\n\n def test_with_nested_attrs_class(self):\n\n @attr.s\n class TestAttrClass(object):\n a = attr.ib()\n b = attr.ib()\n\n t = type_conversions.infer_type(TestAttrClass(a=0, b={'x': True, 'y': 0.0}))\n self.assertEqual(str(t), '<a=int32,b=<x=bool,y=float32>>')\n self.assertIsInstance(t, computation_types.StructWithPythonType)\n self.assertIs(t.python_container, TestAttrClass)\n self.assertIs(t.b.python_container, dict)\n\n def test_with_dataset_list(self):\n t = type_conversions.infer_type(\n [tf.data.Dataset.from_tensors(x) for x in [1, True, [0.5]]])\n self.assertEqual(str(t), '<int32*,bool*,float32[1]*>')\n self.assertIsInstance(t, computation_types.StructWithPythonType)\n self.assertIs(t.python_container, list)\n\n def test_with_nested_dataset_list_tuple(self):\n t = type_conversions.infer_type(\n tuple([(tf.data.Dataset.from_tensors(x),) for x in [1, True, [0.5]]]))\n self.assertEqual(str(t), '<<int32*>,<bool*>,<float32[1]*>>')\n self.assertIsInstance(t, computation_types.StructWithPythonType)\n self.assertIs(t.python_container, tuple)\n\n def test_with_dataset_of_named_tuple(self):\n test_named_tuple = collections.namedtuple('_', 'A B')\n t = type_conversions.infer_type(\n tf.data.Dataset.from_tensor_slices({\n 'x': [0.0],\n 'y': [1],\n }).map(lambda v: test_named_tuple(v['x'], v['y'])))\n self.assertEqual(str(t), '<A=float32,B=int32>*')\n self.assertIsInstance(t.element, computation_types.StructWithPythonType)\n self.assertIs(t.element.python_container, test_named_tuple)\n\n def test_with_empty_tuple(self):\n t = type_conversions.infer_type(())\n self.assertEqual(t, computation_types.StructWithPythonType([], tuple))\n\n def test_with_ragged_tensor(self):\n t = type_conversions.infer_type(\n tf.RaggedTensor.from_row_splits([0, 0, 0, 0], [0, 1, 4]))\n self.assert_types_identical(\n t,\n computation_types.StructWithPythonType([\n ('flat_values', computation_types.TensorType(tf.int32, [4])),\n ('nested_row_splits',\n computation_types.StructWithPythonType(\n [(None, computation_types.TensorType(tf.int64, [3]))], tuple)),\n ], tf.RaggedTensor))\n\n def test_with_sparse_tensor(self):\n # sparse_tensor = [0, 2, 0, 0, 0]\n sparse_tensor = tf.SparseTensor(indices=[[1]], values=[2], dense_shape=[5])\n t = type_conversions.infer_type(sparse_tensor)\n self.assert_types_identical(\n t,\n computation_types.StructWithPythonType([\n ('indices', computation_types.TensorType(tf.int64, [1, 1])),\n ('values', computation_types.TensorType(tf.int32, [1])),\n ('dense_shape', computation_types.TensorType(tf.int64, [1])),\n ], tf.SparseTensor))\n\n\nclass TypeToTfDtypesAndShapesTest(test_case.TestCase):\n\n def test_with_int_scalar(self):\n type_signature = computation_types.TensorType(tf.int32)\n dtypes, shapes = type_conversions.type_to_tf_dtypes_and_shapes(\n type_signature)\n self.assert_nested_struct_eq(dtypes, tf.int32)\n self.assert_nested_struct_eq(shapes, tf.TensorShape([]))\n\n def test_with_int_vector(self):\n type_signature = computation_types.TensorType(tf.int32, [10])\n dtypes, shapes = type_conversions.type_to_tf_dtypes_and_shapes(\n type_signature)\n self.assert_nested_struct_eq(dtypes, tf.int32)\n self.assert_nested_struct_eq(shapes, tf.TensorShape([10]))\n\n def test_with_tensor_triple(self):\n type_signature = 
computation_types.StructWithPythonType([\n ('a', computation_types.TensorType(tf.int32, [5])),\n ('b', computation_types.TensorType(tf.bool)),\n ('c', computation_types.TensorType(tf.float32, [3])),\n ], collections.OrderedDict)\n dtypes, shapes = type_conversions.type_to_tf_dtypes_and_shapes(\n type_signature)\n self.assert_nested_struct_eq(dtypes, {\n 'a': tf.int32,\n 'b': tf.bool,\n 'c': tf.float32\n })\n self.assert_nested_struct_eq(shapes, {\n 'a': tf.TensorShape([5]),\n 'b': tf.TensorShape([]),\n 'c': tf.TensorShape([3])\n })\n\n def test_with_two_level_tuple(self):\n type_signature = computation_types.StructWithPythonType([\n ('a', tf.bool),\n ('b',\n computation_types.StructWithPythonType([\n ('c', computation_types.TensorType(tf.float32)),\n ('d', computation_types.TensorType(tf.int32, [20])),\n ], collections.OrderedDict)),\n ('e', computation_types.StructType([])),\n ], collections.OrderedDict)\n dtypes, shapes = type_conversions.type_to_tf_dtypes_and_shapes(\n type_signature)\n self.assert_nested_struct_eq(dtypes, {\n 'a': tf.bool,\n 'b': {\n 'c': tf.float32,\n 'd': tf.int32\n },\n 'e': (),\n })\n self.assert_nested_struct_eq(\n shapes, {\n 'a': tf.TensorShape([]),\n 'b': {\n 'c': tf.TensorShape([]),\n 'd': tf.TensorShape([20])\n },\n 'e': (),\n })\n\n\nclass TypeToTfTensorSpecsTest(test_case.TestCase):\n\n def test_with_int_scalar(self):\n type_signature = computation_types.TensorType(tf.int32)\n tensor_specs = type_conversions.type_to_tf_tensor_specs(type_signature)\n self.assert_nested_struct_eq(tensor_specs, tf.TensorSpec([], tf.int32))\n\n def test_with_int_vector(self):\n type_signature = computation_types.TensorType(tf.int32, [10])\n tensor_specs = type_conversions.type_to_tf_tensor_specs(type_signature)\n self.assert_nested_struct_eq(tensor_specs, tf.TensorSpec([10], tf.int32))\n\n def test_with_tensor_triple(self):\n type_signature = computation_types.StructWithPythonType([\n ('a', computation_types.TensorType(tf.int32, [5])),\n ('b', computation_types.TensorType(tf.bool)),\n ('c', computation_types.TensorType(tf.float32, [3])),\n ], collections.OrderedDict)\n tensor_specs = type_conversions.type_to_tf_tensor_specs(type_signature)\n self.assert_nested_struct_eq(\n tensor_specs, {\n 'a': tf.TensorSpec([5], tf.int32),\n 'b': tf.TensorSpec([], tf.bool),\n 'c': tf.TensorSpec([3], tf.float32)\n })\n\n def test_with_two_level_tuple(self):\n type_signature = computation_types.StructWithPythonType([\n ('a', tf.bool),\n ('b',\n computation_types.StructWithPythonType([\n ('c', computation_types.TensorType(tf.float32)),\n ('d', computation_types.TensorType(tf.int32, [20])),\n ], collections.OrderedDict)),\n ('e', computation_types.StructType([])),\n ], collections.OrderedDict)\n tensor_specs = type_conversions.type_to_tf_tensor_specs(type_signature)\n self.assert_nested_struct_eq(\n tensor_specs, {\n 'a': tf.TensorSpec([], tf.bool),\n 'b': {\n 'c': tf.TensorSpec([], tf.float32),\n 'd': tf.TensorSpec([20], tf.int32)\n },\n 'e': (),\n })\n\n def test_with_invalid_type(self):\n with self.assertRaises(TypeError):\n type_conversions.type_to_tf_tensor_specs(tf.constant([0.0]))\n\n def test_with_unnamed_element(self):\n type_signature = computation_types.StructType([tf.int32])\n tensor_specs = type_conversions.type_to_tf_tensor_specs(type_signature)\n self.assert_nested_struct_eq(tensor_specs, (tf.TensorSpec([], tf.int32),))\n\n\nclass TypeToTfStructureTest(test_case.TestCase):\n\n def test_with_names(self):\n expected_structure = collections.OrderedDict([\n ('a', 
tf.TensorSpec(shape=(), dtype=tf.bool)),\n ('b',\n collections.OrderedDict([\n ('c', tf.TensorSpec(shape=(), dtype=tf.float32)),\n ('d', tf.TensorSpec(shape=(20,), dtype=tf.int32)),\n ])),\n ])\n type_spec = computation_types.StructWithPythonType(expected_structure,\n collections.OrderedDict)\n tf_structure = type_conversions.type_to_tf_structure(type_spec)\n with tf.Graph().as_default():\n ds = tf.data.experimental.from_variant(\n tf.compat.v1.placeholder(tf.variant, shape=[]),\n structure=tf_structure)\n actual_structure = ds.element_spec\n self.assertEqual(expected_structure, actual_structure)\n\n def test_without_names(self):\n expected_structure = (\n tf.TensorSpec(shape=(), dtype=tf.bool),\n tf.TensorSpec(shape=(), dtype=tf.int32),\n )\n type_spec = computation_types.StructWithPythonType(expected_structure,\n tuple)\n tf_structure = type_conversions.type_to_tf_structure(type_spec)\n with tf.Graph().as_default():\n ds = tf.data.experimental.from_variant(\n tf.compat.v1.placeholder(tf.variant, shape=[]),\n structure=tf_structure)\n actual_structure = ds.element_spec\n self.assertEqual(expected_structure, actual_structure)\n\n def test_with_none(self):\n with self.assertRaises(TypeError):\n type_conversions.type_to_tf_structure(None)\n\n def test_with_sequence_type(self):\n with self.assertRaises(ValueError):\n type_conversions.type_to_tf_structure(\n computation_types.SequenceType(tf.int32))\n\n def test_with_inconsistently_named_elements(self):\n with self.assertRaises(ValueError):\n type_conversions.type_to_tf_structure(\n computation_types.StructType([('a', tf.int32), tf.bool]))\n\n def test_with_no_elements(self):\n with self.assertRaises(ValueError):\n type_conversions.type_to_tf_structure(computation_types.StructType([]))\n\n\nclass TypeFromTensorsTest(test_case.TestCase):\n\n def test_with_single(self):\n v = tf.Variable(0.0, name='a', dtype=tf.float32, shape=[])\n result = type_conversions.type_from_tensors(v)\n self.assertEqual(str(result), 'float32')\n\n def test_with_non_convert_tensors(self):\n v1 = tf.Variable(0, name='foo', dtype=tf.int32, shape=[])\n v2 = {'bar'}\n d = collections.OrderedDict([('v1', v1), ('v2', v2)])\n # TODO(b/122081673): Change Exception back to ValueError once TFF moves to\n # be TF 2.0 only\n with self.assertRaisesRegex(Exception, 'supported type'):\n type_conversions.type_from_tensors(d)\n\n def test_with_nested_tensors(self):\n v1 = tf.Variable(0, name='foo', dtype=tf.int32, shape=[])\n v2 = tf.Variable(0, name='bar', dtype=tf.int32, shape=[])\n d = collections.OrderedDict([('v1', v1), ('v2', v2)])\n result = type_conversions.type_from_tensors(d)\n self.assertEqual(str(result), '<v1=int32,v2=int32>')\n\n def test_with_list_tensors(self):\n v1 = tf.Variable(0.0, name='a', dtype=tf.float32, shape=[])\n v2 = tf.Variable(0, name='b', dtype=tf.int32, shape=[])\n l = [v1, v2]\n result = type_conversions.type_from_tensors(l)\n self.assertEqual(str(result), '<float32,int32>')\n\n def test_with_named_tuple(self):\n test_type = collections.namedtuple('NestedTensors', ['x', 'y'])\n v1 = tf.Variable(0.0, name='a', dtype=tf.float32, shape=[])\n v2 = tf.Variable(0, name='b', dtype=tf.int32, shape=[])\n result = type_conversions.type_from_tensors(test_type(v1, v2))\n self.assertEqual(str(result), '<x=float32,y=int32>')\n\n\nclass TypeToPyContainerTest(test_case.TestCase):\n\n def test_not_anon_tuple_passthrough(self):\n value = (1, 2.0)\n result = type_conversions.type_to_py_container(\n (1, 2.0),\n computation_types.StructWithPythonType([tf.int32, 
tf.float32],\n container_type=list))\n self.assertEqual(result, value)\n\n def test_anon_tuple_return(self):\n anon_tuple = structure.Struct([(None, 1), (None, 2.0)])\n self.assertEqual(\n type_conversions.type_to_py_container(\n anon_tuple, computation_types.StructType([tf.int32, tf.float32])),\n anon_tuple)\n\n def test_anon_tuple_without_names_to_container_without_names(self):\n anon_tuple = structure.Struct([(None, 1), (None, 2.0)])\n types = [tf.int32, tf.float32]\n self.assertSequenceEqual(\n type_conversions.type_to_py_container(\n anon_tuple, computation_types.StructWithPythonType(types, list)),\n [1, 2.0])\n self.assertSequenceEqual(\n type_conversions.type_to_py_container(\n anon_tuple, computation_types.StructWithPythonType(types, tuple)),\n (1, 2.0))\n\n def test_succeeds_with_federated_namedtupletype(self):\n anon_tuple = structure.Struct([(None, 1), (None, 2.0)])\n types = [tf.int32, tf.float32]\n self.assertSequenceEqual(\n type_conversions.type_to_py_container(\n anon_tuple,\n computation_types.FederatedType(\n computation_types.StructWithPythonType(types, list),\n placements.SERVER)), [1, 2.0])\n self.assertSequenceEqual(\n type_conversions.type_to_py_container(\n anon_tuple,\n computation_types.FederatedType(\n computation_types.StructWithPythonType(types, tuple),\n placements.SERVER)), (1, 2.0))\n\n def test_client_placed_tuple(self):\n value = [\n structure.Struct([(None, 1), (None, 2)]),\n structure.Struct([(None, 3), (None, 4)])\n ]\n type_spec = computation_types.FederatedType(\n computation_types.StructWithPythonType([(None, tf.int32),\n (None, tf.int32)], tuple),\n placements.CLIENTS)\n self.assertEqual([(1, 2), (3, 4)],\n type_conversions.type_to_py_container(value, type_spec))\n\n def test_anon_tuple_with_names_to_container_without_names_fails(self):\n anon_tuple = structure.Struct([(None, 1), ('a', 2.0)])\n types = [tf.int32, tf.float32]\n with self.assertRaisesRegex(ValueError,\n 'contains a mix of named and unnamed elements'):\n type_conversions.type_to_py_container(\n anon_tuple, computation_types.StructWithPythonType(types, tuple))\n anon_tuple = structure.Struct([('a', 1), ('b', 2.0)])\n with self.assertRaisesRegex(ValueError, 'which does not support names'):\n type_conversions.type_to_py_container(\n anon_tuple, computation_types.StructWithPythonType(types, list))\n\n def test_anon_tuple_with_names_to_container_with_names(self):\n anon_tuple = structure.Struct([('a', 1), ('b', 2.0)])\n types = [('a', tf.int32), ('b', tf.float32)]\n self.assertDictEqual(\n type_conversions.type_to_py_container(\n anon_tuple, computation_types.StructWithPythonType(types, dict)), {\n 'a': 1,\n 'b': 2.0\n })\n self.assertSequenceEqual(\n type_conversions.type_to_py_container(\n anon_tuple,\n computation_types.StructWithPythonType(types,\n collections.OrderedDict)),\n collections.OrderedDict([('a', 1), ('b', 2.0)]))\n test_named_tuple = collections.namedtuple('TestNamedTuple', ['a', 'b'])\n self.assertSequenceEqual(\n type_conversions.type_to_py_container(\n anon_tuple,\n computation_types.StructWithPythonType(types, test_named_tuple)),\n test_named_tuple(a=1, b=2.0))\n\n @attr.s\n class TestFoo(object):\n a = attr.ib()\n b = attr.ib()\n\n self.assertEqual(\n type_conversions.type_to_py_container(\n anon_tuple, computation_types.StructWithPythonType(types, TestFoo)),\n TestFoo(a=1, b=2.0))\n\n def test_anon_tuple_without_names_promoted_to_container_with_names(self):\n anon_tuple = structure.Struct([(None, 1), (None, 2.0)])\n types = [('a', tf.int32), ('b', tf.float32)]\n 
dict_converted_value = type_conversions.type_to_py_container(\n anon_tuple, computation_types.StructWithPythonType(types, dict))\n odict_converted_value = type_conversions.type_to_py_container(\n anon_tuple,\n computation_types.StructWithPythonType(types, collections.OrderedDict))\n\n test_named_tuple = collections.namedtuple('TestNamedTuple', ['a', 'b'])\n named_tuple_converted_value = type_conversions.type_to_py_container(\n anon_tuple,\n computation_types.StructWithPythonType(types, test_named_tuple))\n\n @attr.s\n class TestFoo(object):\n a = attr.ib()\n b = attr.ib()\n\n attr_converted_value = type_conversions.type_to_py_container(\n anon_tuple, computation_types.StructWithPythonType(types, TestFoo))\n\n self.assertIsInstance(dict_converted_value, dict)\n self.assertIsInstance(odict_converted_value, collections.OrderedDict)\n self.assertIsInstance(named_tuple_converted_value, test_named_tuple)\n self.assertIsInstance(attr_converted_value, TestFoo)\n\n def test_nested_py_containers(self):\n anon_tuple = structure.Struct([\n (None, 1), (None, 2.0),\n ('dict_key',\n structure.Struct([('a', 3),\n ('b', structure.Struct([(None, 4), (None, 5)]))]))\n ])\n\n dict_subtype = computation_types.StructWithPythonType(\n [('a', tf.int32),\n ('b',\n computation_types.StructWithPythonType([tf.int32, tf.int32], tuple))],\n dict)\n type_spec = computation_types.StructType([(None, tf.int32),\n (None, tf.float32),\n ('dict_key', dict_subtype)])\n\n expected_nested_structure = structure.Struct([\n (None, 1),\n (None, 2.0),\n ('dict_key', {\n 'a': 3,\n 'b': (4, 5)\n }),\n ])\n\n self.assertEqual(\n type_conversions.type_to_py_container(anon_tuple, type_spec),\n expected_nested_structure)\n\n def test_sequence_type_with_collections_sequence_elements(self):\n dataset_yielding_sequences = tf.data.Dataset.range(5).map(lambda t: (t, t))\n converted_dataset = type_conversions.type_to_py_container(\n dataset_yielding_sequences,\n computation_types.SequenceType((tf.int64, tf.int64)))\n actual_elements = list(converted_dataset)\n expected_elements = list(dataset_yielding_sequences)\n self.assertAllEqual(actual_elements, expected_elements)\n\n def test_sequence_type_with_collections_mapping_elements(self):\n dataset_yielding_mappings = tf.data.Dataset.range(5).map(\n lambda t: collections.OrderedDict(a=t, b=t))\n converted_dataset = type_conversions.type_to_py_container(\n dataset_yielding_mappings,\n computation_types.SequenceType(\n collections.OrderedDict(a=tf.int64, b=tf.int64)))\n actual_elements = list(converted_dataset)\n expected_elements = list(dataset_yielding_mappings)\n self.assertAllEqual(actual_elements, expected_elements)\n\n def test_ragged_tensor(self):\n value = structure.Struct([\n ('flat_values', [0, 0, 0, 0]),\n ('nested_row_splits', [[0, 1, 4]]),\n ])\n value_type = computation_types.StructWithPythonType([\n ('flat_values', computation_types.TensorType(tf.int32, [4])),\n ('nested_row_splits',\n computation_types.StructWithPythonType(\n [(None, computation_types.TensorType(tf.int64, [3]))], tuple)),\n ], tf.RaggedTensor)\n result = type_conversions.type_to_py_container(value, value_type)\n self.assertIsInstance(result, tf.RaggedTensor)\n self.assertAllEqual(result.flat_values, [0, 0, 0, 0])\n self.assertEqual(len(result.nested_row_splits), 1)\n self.assertAllEqual(result.nested_row_splits[0], [0, 1, 4])\n\n def test_sparse_tensor(self):\n value = structure.Struct([\n ('indices', [[1]]),\n ('values', [2]),\n ('dense_shape', [5]),\n ])\n value_type = computation_types.StructWithPythonType([\n 
('indices', computation_types.TensorType(tf.int64, [1, 1])),\n ('values', computation_types.TensorType(tf.int32, [1])),\n ('dense_shape', computation_types.TensorType(tf.int64, [1])),\n ], tf.SparseTensor)\n result = type_conversions.type_to_py_container(value, value_type)\n self.assertIsInstance(result, tf.SparseTensor)\n self.assertEqual(len(result.indices), 1)\n self.assertAllEqual(result.indices[0], [1])\n self.assertAllEqual(result.values, [2])\n self.assertAllEqual(result.dense_shape, [5])\n\n\nclass StructureFromTensorTypeTreeTest(test_case.TestCase):\n\n def get_incrementing_function(self):\n i = -1\n\n def fn(ignored):\n del ignored\n nonlocal i\n i += 1\n return i\n\n return fn\n\n def test_single_tensor(self):\n\n def expect_tfint32_return_5(tensor_type):\n self.assert_types_identical(tensor_type,\n computation_types.TensorType(tf.int32))\n return 5\n\n result = type_conversions.structure_from_tensor_type_tree(\n expect_tfint32_return_5, tf.int32)\n self.assertEqual(result, 5)\n\n def test_structure(self):\n struct_type = computation_types.StructType([('a', tf.int32),\n (None, tf.int32)])\n return_incr = self.get_incrementing_function()\n result = type_conversions.structure_from_tensor_type_tree(\n return_incr, struct_type)\n self.assertEqual(result, structure.Struct([('a', 0), (None, 1)]))\n\n def test_nested_python_type(self):\n return_incr = self.get_incrementing_function()\n result = type_conversions.structure_from_tensor_type_tree(\n return_incr, [tf.int32, (tf.string, tf.int32)])\n self.assertEqual(result, [0, (1, 2)])\n\n def test_weird_result_elements(self):\n result = type_conversions.structure_from_tensor_type_tree(\n lambda _: set(), [tf.int32, (tf.string, tf.int32)])\n self.assertEqual(result, [set(), (set(), set())])\n\n\nclass TypeToNonAllEqualTest(test_case.TestCase):\n\n def test_with_bool(self):\n for x in [True, False]:\n self.assertEqual(\n str(\n type_conversions.type_to_non_all_equal(\n computation_types.FederatedType(\n tf.int32, placements.CLIENTS, all_equal=x))),\n '{int32}@CLIENTS')\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v1.placeholder", "tensorflow.SparseTensor", "tensorflow.TensorSpec", "numpy.array", "tensorflow.data.Dataset.range", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.Graph", "tensorflow.RaggedTensor.from_row_splits", "tensorflow.Variable", "tensorflow.TensorShape", "numpy.int64", "numpy.float64", "tensorflow.constant", "numpy.float32", "tensorflow.test.main", "numpy.bool", "numpy.int32", "tensorflow.data.Dataset.from_tensors" ] ]
eternal-forces/profielwerkstuk
[ "efcd5a7b796dec66b95b99a40f4c43ea5958fb8f" ]
[ "dqn_tutorial.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nimport base64\nimport imageio\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport PIL.Image\n\nimport tensorflow as tf\nfrom tensorflow.python.ops.variables import Variable\n\nfrom tf_agents.agents.dqn import dqn_agent\nfrom tf_agents.drivers import dynamic_step_driver\nfrom tf_agents.environments import suite_gym\nfrom tf_agents.environments import tf_py_environment\nfrom tf_agents.eval import metric_utils\nfrom tf_agents.metrics import tf_metrics\nfrom tf_agents.networks import q_network\nfrom tf_agents.policies import random_tf_policy\nfrom tf_agents.replay_buffers import tf_uniform_replay_buffer\nfrom tf_agents.trajectories import trajectory\nfrom tf_agents.utils import common\n\ntf.compat.v1.enable_v2_behavior()\n\nnum_iterations = 20000 # @param {type:\"integer\"}\n\ninitial_collect_steps = 100 # @param {type:\"integer\"} \ncollect_steps_per_iteration = 1 # @param {type:\"integer\"}\nreplay_buffer_max_length = 100000 # @param {type:\"integer\"}\n\nbatch_size = 64 # @param {type:\"integer\"}\nlearning_rate = 1e-3 # @param {type:\"number\"}\nlog_interval = 200 # @param {type:\"integer\"}\n\nnum_eval_episodes = 10 # @param {type:\"integer\"}\neval_interval = 1000 # @param {type:\"integer\"}\n\n\"\"\"## Environment\n\nIn Reinforcement Learning (RL), an environment represents the task or problem to be solved. Standard environments can be created in TF-Agents using `tf_agents.environments` suites. TF-Agents has suites for loading environments from sources such as the OpenAI Gym, Atari, and DM Control.\n\nLoad the CartPole environment from the OpenAI Gym suite. \n\"\"\"\n\nenv_name = 'CartPole-v1'\n\n\"\"\"Usually two environments are instantiated: one for training and one for evaluation. \"\"\"\n\ntrain_py_env = suite_gym.load(env_name)\neval_py_env = suite_gym.load(env_name)\n\n\"\"\"The Cartpole environment, like most environments, is written in pure Python. This is converted to TensorFlow using the `TFPyEnvironment` wrapper.\n\nThe original environment's API uses Numpy arrays. The `TFPyEnvironment` converts these to `Tensors` to make it compatible with Tensorflow agents and policies.\n\n\"\"\"\n\ntrain_env = tf_py_environment.TFPyEnvironment(train_py_env)\neval_env = tf_py_environment.TFPyEnvironment(eval_py_env)\n\n\"\"\"## Agent\n\nThe algorithm used to solve an RL problem is represented by an `Agent`. 
TF-Agents provides standard implementations of a variety of `Agents`, including:\n\n- [DQN](https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf) (used in this tutorial)\n- [REINFORCE](https://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf)\n- [DDPG](https://arxiv.org/pdf/1509.02971.pdf)\n- [TD3](https://arxiv.org/pdf/1802.09477.pdf)\n- [PPO](https://arxiv.org/abs/1707.06347)\n- [SAC](https://arxiv.org/abs/1801.01290).\n\nThe DQN agent can be used in any environment which has a discrete action space.\n\nAt the heart of a DQN Agent is a `QNetwork`, a neural network model that can learn to predict `QValues` (expected returns) for all actions, given an observation from the environment.\n\nUse `tf_agents.networks.q_network` to create a `QNetwork`, passing in the `observation_spec`, `action_spec`, and a tuple describing the number and size of the model's hidden layers.\n\n\"\"\"\n\nfc_layer_params = (100,)\n\nq_net = q_network.QNetwork(\n train_env.observation_spec(),\n train_env.action_spec(),\n fc_layer_params=fc_layer_params)\n\n\"\"\"Now use `tf_agents.agents.dqn.dqn_agent` to instantiate a `DqnAgent`. In addition to the `time_step_spec`, `action_spec` and the QNetwork, the agent constructor also requires an optimizer (in this case, `AdamOptimizer`), a loss function, and an integer step counter.\"\"\"\n\noptimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n\ntrain_step_counter = tf.Variable(0)\n\nagent = dqn_agent.DqnAgent(\n train_env.time_step_spec(),\n train_env.action_spec(),\n q_network=q_net,\n optimizer=optimizer,\n td_errors_loss_fn=common.element_wise_squared_loss,\n train_step_counter=train_step_counter)\n\nagent.initialize()\n\n\"\"\"## Policies\n\nA policy defines the way an agent acts in an environment. Typically, the goal of reinforcement learning is to train the underlying model until the policy produces the desired outcome.\n\nIn this tutorial:\n\n- The desired outcome is keeping the pole balanced upright over the cart.\n- The policy returns an action (left or right) for each `time_step` observation.\n\nAgents contain two policies: \n\n- `agent.policy` — The main policy that is used for evaluation and deployment.\n- `agent.collect_policy` — A second policy that is used for data collection.\n\n\"\"\"\n\neval_policy = agent.policy\ncollect_policy = agent.collect_policy\n\n\"\"\"Policies can be created independently of agents. For example, use `tf_agents.policies.random_tf_policy` to create a policy which will randomly select an action for each `time_step`.\"\"\"\n\nrandom_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(),\n train_env.action_spec())\n\n\"\"\"## Metrics and Evaluation\n\nThe most common metric used to evaluate a policy is the average return. The return is the sum of rewards obtained while running a policy in an environment for an episode. 
Several episodes are run, creating an average return.\n\nThe following function computes the average return of a policy, given the policy, environment, and a number of episodes.\n\n\"\"\"\n\n#@test {\"skip\": true}\ndef compute_avg_return(environment, policy, num_episodes=10):\n\n total_return = 0.0\n for _ in range(num_episodes):\n\n time_step = environment.reset()\n episode_return = 0.0\n\n while not time_step.is_last():\n action_step = policy.action(time_step)\n time_step = environment.step(action_step.action)\n episode_return += time_step.reward\n total_return += episode_return\n\n avg_return = total_return / num_episodes\n return avg_return.numpy()[0]\n\n\n# See also the metrics module for standard implementations of different metrics.\n# https://github.com/tensorflow/agents/tree/master/tf_agents/metrics\n\n\"\"\"Running this computation on the `random_policy` shows a baseline performance in the environment.\"\"\"\n\ncompute_avg_return(eval_env, random_policy, num_eval_episodes)\n\n\"\"\"## Replay Buffer\n\nThe replay buffer keeps track of data collected from the environment. This tutorial uses `tf_agents.replay_buffers.tf_uniform_replay_buffer.TFUniformReplayBuffer`, as it is the most common. \n\nThe constructor requires the specs for the data it will be collecting. This is available from the agent using the `collect_data_spec` method. The batch size and maximum buffer length are also required.\n\n\"\"\"\n\nreplay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(\n data_spec=agent.collect_data_spec,\n batch_size=train_env.batch_size,\n max_length=replay_buffer_max_length)\n\n\"\"\"For most agents, `collect_data_spec` is a named tuple called `Trajectory`, containing the specs for observations, actions, rewards, and other items.\"\"\"\n\n\"\"\"## Data Collection\n\nNow execute the random policy in the environment for a few steps, recording the data in the replay buffer.\n\"\"\"\n\nepisode_counter = tf.Variable(0)\n\n#@test {\"skip\": true}\ndef collect_step(environment, policy, buffer, epcounter: Variable):\n time_step = environment.current_time_step()\n action_step = policy.action(time_step)\n next_time_step = environment.step(action_step.action)\n traj = trajectory.from_transition(time_step, action_step, next_time_step)\n\n # Add trajectory to the replay buffer\n buffer.add_batch(traj)\n\ndef collect_data(env, policy, buffer, steps, episode_counter):\n for _ in range(steps):\n collect_step(env, policy, buffer, episode_counter)\n\ncollect_data(train_env, random_policy, replay_buffer, initial_collect_steps, episode_counter)\n\n# Dataset generates trajectories with shape [Bx2x...]\ndataset = replay_buffer.as_dataset(\n num_parallel_calls=3, \n sample_batch_size=batch_size, \n num_steps=2).prefetch(3)\n\n\n\niterator = iter(dataset)\n\nprint(iterator)\n\n# (Optional) Optimize by wrapping some of the code in a graph using TF function.\nagent.train = common.function(agent.train)\n\n# Reset the train step\nagent.train_step_counter.assign(0)\n\n# Evaluate the agent's policy once before training.\navg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes)\nreturns = [avg_return]\n\nfor _ in range(num_iterations):\n\n # Collect a few steps using collect_policy and save to the replay buffer.\n collect_data(train_env, agent.collect_policy, replay_buffer, collect_steps_per_iteration, episode_counter)\n\n # Sample a batch of data from the buffer and update the agent's network.\n experience, unused_info = next(iterator)\n train_loss = agent.train(experience).loss\n\n step = 
agent.train_step_counter.numpy()\n\n if step % log_interval == 0:\n print('episode = {2}, step = {0}: loss = {1}'.format(step, train_loss, episode_counter.numpy()))\n\n if step % eval_interval == 0:\n avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes)\n print('step = {0}: Average Return = {1}'.format(step, avg_return))\n returns.append(avg_return)\n\n#@test {\"skip\": true}\n\niterations = range(0, num_iterations + 1, eval_interval)\nplt.plot(iterations, returns)\nplt.ylabel('Average Return')\nplt.xlabel('Iterations')\nplt.savefig('DQN_Pole.png', bbox_inches='tight')" ]
[ [ "tensorflow.compat.v1.enable_v2_behavior", "tensorflow.compat.v1.train.AdamOptimizer", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "tensorflow.Variable", "matplotlib.pyplot.ylabel" ] ]
xinyu-intel/incubator-mxnet
[ "f32b58ecec6b1c4e44f77d8ede073484c89e05e9" ]
[ "tests/nightly/test_large_array.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport numpy as np\nimport mxnet as mx\nfrom mxnet.test_utils import rand_ndarray, assert_almost_equal, rand_coord_2d\nfrom mxnet import gluon, nd\nfrom tests.python.unittest.common import with_seed\n\n# dimension constants\nMEDIUM_X = 10000\nLARGE_X = 100000000\nSMALL_Y = 50\nLARGE_SIZE = LARGE_X * SMALL_Y\n\n\ndef test_gluon_embedding():\n m = gluon.nn.Embedding(SMALL_Y, MEDIUM_X)\n m.initialize()\n a = nd.zeros((MEDIUM_X, SMALL_Y))\n b = m(a)\n assert b.shape == (MEDIUM_X, SMALL_Y, MEDIUM_X)\n assert b.asnumpy().size == LARGE_SIZE\n\n\ndef test_ndarray_zeros():\n a = nd.zeros(shape=(LARGE_X, SMALL_Y))\n assert a[-1][0] == 0\n assert a.shape == (LARGE_X, SMALL_Y)\n assert a.size == LARGE_SIZE\n\n\ndef test_ndarray_ones():\n a = nd.ones(shape=(LARGE_X, SMALL_Y))\n assert a[-1][0] == 1\n assert nd.sum(a).asnumpy() == LARGE_SIZE\n\n\ndef test_ndarray_convert():\n a = nd.zeros(shape=(LARGE_X, SMALL_Y))\n b = a.astype(np.int32)\n b.wait_to_read()\n assert b.dtype == np.int32\n b = a.tostype('row_sparse')\n b.wait_to_read()\n assert isinstance(b, mx.nd.sparse.RowSparseNDArray)\n\n\n@with_seed()\ndef test_ndarray_random_uniform():\n a = nd.random.uniform(shape=(LARGE_X, SMALL_Y))\n assert a[-1][0] != 0\n\n\n@with_seed()\ndef test_ndarray_random_randint():\n a = nd.random.randint(100, 10000, shape=(LARGE_X, SMALL_Y))\n assert a.shape == (LARGE_X, SMALL_Y)\n # check if randint can generate value greater than 2**32 (large)\n low_large_value = 2**32\n high_large_value = 2**34\n a = nd.random.randint(low_large_value, high_large_value, dtype=np.int64)\n low = mx.nd.array([low_large_value], dtype='int64')\n high = mx.nd.array([high_large_value], dtype='int64')\n assert a.__gt__(low) and a.__lt__(high)\n\n\ndef test_ndarray_empty():\n a = nd.empty((LARGE_X, SMALL_Y))\n assert a.shape == (LARGE_X, SMALL_Y)\n\n\ndef test_elementwise():\n a = nd.ones(shape=(LARGE_X, SMALL_Y))\n b = nd.ones(shape=(LARGE_X, SMALL_Y))\n res = a + b\n assert np.sum(res[-1].asnumpy() == 2) == a.shape[1]\n res = a + 1\n assert np.sum(res[-1].asnumpy() == 2) == a.shape[1]\n res = nd.sqrt(a + 3)\n assert np.sum(res[-1].asnumpy() == 2) == a.shape[1]\n\n\ndef test_reduce():\n a = nd.ones(shape=(LARGE_X, SMALL_Y))\n assert nd.sum(a).asnumpy() == a.shape[0] * a.shape[1]\n\n\ndef test_dot():\n a = nd.ones(shape=(LARGE_X, SMALL_Y))\n b = nd.ones(shape=(SMALL_Y, SMALL_Y))\n res = nd.dot(a, b)\n assert np.sum(res[-1].asnumpy() == SMALL_Y) == b.shape[1]\n\n\ndef test_FullyConnected():\n a = nd.ones(shape=(LARGE_X, SMALL_Y))\n b = nd.ones(shape=(SMALL_Y, SMALL_Y))\n res = nd.FullyConnected(a, b, num_hidden=b.shape[1], no_bias=True)\n assert np.sum(res[-1].asnumpy() == SMALL_Y) == b.shape[1]\n\n\ndef test_broadcast():\n a = nd.ones(shape=(LARGE_X, 
SMALL_Y))\n b = nd.arange(0, LARGE_X).reshape(LARGE_X, 1)\n res = nd.broadcast_to(b, shape=(b.shape[0], SMALL_Y))\n assert np.sum(res[-1].asnumpy() == LARGE_X) == res.shape[1]\n res = mx.nd.broadcast_like(b, a)\n assert np.sum(res[-1].asnumpy() == LARGE_X) == a.shape[1]\n\n\ndef test_clip():\n a = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y)\n res = nd.clip(a, a_min=100, a_max=1000)\n assert np.sum(res[-1].asnumpy() == 1000) == a.shape[1]\n\n\ndef test_split():\n a = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y)\n outs = nd.split(a, num_outputs=SMALL_Y, axis=1)\n result = sum(1 for i, v in enumerate(outs) if i == v[0].asnumpy())\n assert result == a.shape[1]\n\n\ndef test_argmin():\n a = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y)\n idx = mx.nd.argmin(a, axis=0)\n assert idx.shape[0] == SMALL_Y\n\n\ndef test_tile():\n a = nd.arange(0, LARGE_X).reshape(LARGE_X, 1)\n b = nd.tile(a, reps=(1, SMALL_Y))\n assert np.sum(b[-1].asnumpy() == LARGE_X) == b.shape[1]\n\n\ndef test_take():\n a = nd.ones(shape=(LARGE_X, SMALL_Y))\n idx = nd.arange(LARGE_X - 1000, LARGE_X)\n res = nd.take(a, idx)\n assert np.sum(res[-1].asnumpy() == 1) == res.shape[1]\n\n\ndef test_slice():\n a = nd.ones(shape=(LARGE_X, SMALL_Y))\n res = nd.slice(a, begin=(LARGE_X-1000, 1), end=(LARGE_X, SMALL_Y))\n assert np.sum(res[-1].asnumpy() == 1) == res.shape[1]\n\n\ndef test_slice_assign():\n a = nd.ones(shape=(LARGE_X, SMALL_Y))\n a[LARGE_X-1:LARGE_X] = 1000\n assert np.sum(a[-1].asnumpy() == 1000) == a.shape[1]\n\n\ndef test_expand_dims():\n a = nd.ones(shape=(LARGE_X, SMALL_Y))\n res = nd.expand_dims(a, axis=1)\n assert res.shape == (a.shape[0], 1, a.shape[1])\n\n\ndef test_squeeze():\n a = nd.ones(shape=(LARGE_X, SMALL_Y))\n data = nd.expand_dims(a, axis=1)\n res = nd.squeeze(data)\n assert res.shape == a.shape\n\n\ndef test_broadcast_div():\n a = nd.ones(shape=(LARGE_X, SMALL_Y))\n b = nd.ones(shape=(LARGE_X, 1)) * 2\n res = a / b\n assert np.sum(res[-1].asnumpy() == 0.5) == a.shape[1]\n\n\ndef test_Dense(ctx=mx.cpu(0)):\n data = mx.nd.ones(shape=(50*1000*1000, 100))\n linear = gluon.nn.Dense(100)\n linear.initialize(ctx=ctx)\n res = linear(data)\n res.wait_to_read()\n assert res.shape == (50000000, 100)\n\n\ndef test_where():\n a = nd.ones(shape=(LARGE_X, SMALL_Y))\n b = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y)\n res = nd.where(b > 100, a, b)\n assert np.sum(res[-1].asnumpy() == 1) == b.shape[1]\n csr_cond = nd.sparse.cast_storage(b < 10, 'csr')\n res = nd.sparse.where(csr_cond, a, b)\n assert np.sum(res[0].asnumpy() == 1) == 10\n\n\ndef test_pick():\n a = mx.nd.ones(shape=(256 * 35, 1024 * 1024))\n b = mx.nd.ones(shape=(256 * 35, ))\n res = mx.nd.pick(a, b)\n assert res.shape == b.shape\n\n\ndef test_depthtospace():\n def numpy_depth_to_space(x, blocksize):\n b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]\n tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])\n tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])\n y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])\n return y\n\n shape_inp = (LARGE_X, 8, 4, 2)\n data = rand_ndarray(shape_inp, 'default')\n data_np = data.asnumpy()\n expected = numpy_depth_to_space(data_np, 2)\n output = mx.nd.depth_to_space(data, 2)\n assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)\n\n\ndef test_spacetodepth():\n def numpy_space_to_depth(x, blocksize):\n b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]\n tmp = np.reshape(x, [b, c, h // blocksize, 
blocksize, w // blocksize, blocksize])\n tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])\n y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])\n return y\n\n shape_inp = (LARGE_X, 2, 8, 4)\n data = rand_ndarray(shape_inp, 'default')\n data_np = data.asnumpy()\n expected = numpy_space_to_depth(data_np, 2)\n output = mx.nd.space_to_depth(data, 2)\n assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)\n\n@with_seed()\ndef test_diag():\n a_np = np.random.random((LARGE_X, SMALL_Y)).astype(np.float32)\n a = mx.nd.array(a_np)\n\n # k == 0\n r = mx.nd.diag(a)\n assert_almost_equal(r.asnumpy(), np.diag(a_np))\n\n # k == 1\n k = 1\n r = mx.nd.diag(a, k=k)\n assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))\n\n # k == -1\n k = -1\n r = mx.nd.diag(a, k=k)\n assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))\n\n # random k\n k = np.random.randint(-min(LARGE_X, SMALL_Y) + 1, min(LARGE_X, SMALL_Y))\n r = mx.nd.diag(a, k=k)\n assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))\n\n\n@with_seed()\ndef test_ravel_multi_index():\n x1, y1 = rand_coord_2d((LARGE_X - 100), LARGE_X, 10, SMALL_Y)\n x2, y2 = rand_coord_2d((LARGE_X - 200), LARGE_X, 9, SMALL_Y)\n x3, y3 = rand_coord_2d((LARGE_X - 300), LARGE_X, 8, SMALL_Y)\n indices_2d = [[x1, x2, x3], [y1, y2, y3]]\n idx = mx.nd.ravel_multi_index(mx.nd.array(indices_2d, dtype=np.int64), shape=(LARGE_X, SMALL_Y))\n idx_numpy = np.ravel_multi_index(indices_2d, (LARGE_X, SMALL_Y))\n assert np.sum(1 for i in range(idx.size) if idx[i] == idx_numpy[i]) == 3\n\n\n@with_seed()\ndef test_unravel_index():\n x1, y1 = rand_coord_2d((LARGE_X - 100), LARGE_X, 10, SMALL_Y)\n x2, y2 = rand_coord_2d((LARGE_X - 200), LARGE_X, 9, SMALL_Y)\n x3, y3 = rand_coord_2d((LARGE_X - 300), LARGE_X, 8, SMALL_Y)\n original_2d_indices = [[x1, x2, x3], [y1, y2, y3]]\n idx_numpy = np.ravel_multi_index(original_2d_indices, (LARGE_X, SMALL_Y))\n indices_2d = mx.nd.unravel_index(mx.nd.array(idx_numpy, dtype=np.int64), shape=(LARGE_X, SMALL_Y))\n assert (indices_2d.asnumpy() == np.array(original_2d_indices)).all()\n\n\ndef create_2d_tensor(rows, columns, dtype=np.int64):\n a = np.arange(0, rows).reshape(rows, 1)\n b = np.broadcast_to(a, shape=(a.shape[0], columns))\n return nd.array(b, dtype=dtype)\n\n\ndef test_transpose():\n b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y)\n t = b.T\n assert t.shape == (SMALL_Y, LARGE_X)\n assert np.sum(t[:, -1].asnumpy() == (LARGE_X - 1)) == b.shape[1]\n\n\ndef test_swapaxes():\n b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y)\n t = nd.swapaxes(b, dim1=0, dim2=1)\n assert t.shape == (SMALL_Y, LARGE_X)\n assert np.sum(t[:, -1].asnumpy() == (LARGE_X - 1)) == b.shape[1]\n\n\ndef test_flip():\n b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y)\n t = nd.flip(b, axis=0)\n assert t.shape == (LARGE_X, SMALL_Y)\n assert np.sum(t[-1, :].asnumpy() == 0) == b.shape[1]\n\n\ndef test_softmax():\n input_data = mx.nd.ones((SMALL_Y, LARGE_X))\n true_output = np.full((SMALL_Y, LARGE_X), (1 / SMALL_Y))\n output = nd.softmax(input_data, axis=0)\n assert_almost_equal(output.asnumpy(), true_output, rtol=1e-5, atol=1e-5)\n\n\ndef test_argsort():\n b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y)\n s = nd.argsort(b, axis=0, is_ascend=False, dtype=np.int64)\n mx.nd.waitall()\n assert (s[0].asnumpy() == (LARGE_X - 1)).all()\n\n\ndef test_sort():\n b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y)\n s = nd.sort(b, axis=0, is_ascend=False)\n assert np.sum(s[-1][SMALL_Y//2:SMALL_Y].asnumpy() == 0).all()\n s = nd.sort(b, is_ascend=False)\n 
assert np.sum(s[0].asnumpy() == 0).all()\n\n\ndef test_topk():\n b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y)\n k = nd.topk(b, k=10, axis=0, dtype=np.int64)\n assert np.sum(k.asnumpy() == (LARGE_X - 1)) == SMALL_Y\n ind, val = mx.nd.topk(b, k=3, axis=0, dtype=np.int64, ret_typ=\"both\", is_ascend=False)\n assert np.all(ind == val)\n b = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X)\n l = nd.topk(b, k=1, axis=-1, dtype=np.int64, ret_typ=\"value\")\n assert l.sum() == np.sum(np.arange(0, SMALL_Y))\n\n\ndef test_add():\n a = nd.ones(shape=(LARGE_X, SMALL_Y))\n b = nd.ones(shape=(LARGE_X, SMALL_Y))\n c = b\n c = c.__add__(a)\n assert c[0][-1] == 2\n assert c.shape == a.shape\n\n\ndef test_sub():\n a = 3*nd.ones(shape=(LARGE_X, SMALL_Y))\n b = nd.ones(shape=(LARGE_X, SMALL_Y))\n c = b\n c = c.__sub__(a)\n assert c[0][-1] == -2\n assert c.shape == a.shape\n\n\ndef test_rsub():\n a = 3*nd.ones(shape=(LARGE_X, SMALL_Y))\n b = nd.ones(shape=(LARGE_X, SMALL_Y))\n c = b\n c = c.__rsub__(a)\n assert c[0][-1] == 2\n assert c.shape == a.shape\n\n\ndef test_neg():\n a = nd.ones(shape=(LARGE_X, SMALL_Y))\n c = a\n c = c.__neg__()\n assert c[0][-1] == -1\n assert c.shape == a.shape\n\n\ndef test_mul():\n a = 2*nd.ones(shape=(LARGE_X, SMALL_Y))\n b = 3*nd.ones(shape=(LARGE_X, SMALL_Y))\n c = b\n c = c.__mul__(a)\n assert c[0][-1] == 6\n assert c.shape == a.shape\n\n\ndef test_div():\n a = 2*nd.ones(shape=(LARGE_X, SMALL_Y))\n b = 3*nd.ones(shape=(LARGE_X, SMALL_Y))\n c = b\n c = c.__div__(a)\n assert c[0][-1] == 3/2\n assert c.shape == a.shape\n\n\ndef test_rdiv():\n a = 2*nd.ones(shape=(LARGE_X, SMALL_Y))\n b = 3*nd.ones(shape=(LARGE_X, SMALL_Y))\n c = b\n c = c.__rdiv__(a)\n assert c[0][-1] == 2/3\n assert c.shape == a.shape\n\n\ndef test_mod():\n a = 2*nd.ones(shape=(LARGE_X, SMALL_Y))\n b = 3*nd.ones(shape=(LARGE_X, SMALL_Y))\n c = b\n c = c.__mod__(a)\n assert c[0][-1] == 1\n assert c.shape == a.shape\n\n\ndef test_rmod():\n a = 2*nd.ones(shape=(LARGE_X, SMALL_Y))\n b = 3*nd.ones(shape=(LARGE_X, SMALL_Y))\n c = b\n c = c.__rmod__(a)\n assert c[0][-1] == 2\n assert c.shape == a.shape\n\n\ndef test_imod():\n a = 2*nd.ones(shape=(LARGE_X, SMALL_Y))\n b = 3*nd.ones(shape=(LARGE_X, SMALL_Y))\n c = b\n c = c.__imod__(a)\n assert c[0][-1] == 1\n assert c.shape == a.shape\n\n\ndef test_pow():\n a = 2*nd.ones(shape=(LARGE_X, SMALL_Y))\n b = 3*nd.ones(shape=(LARGE_X, SMALL_Y))\n c = b\n c = c.__pow__(a)\n assert c[0][-1] == 9\n assert c.shape == a.shape\n\n\ndef test_rpow():\n a = 2*nd.ones(shape=(LARGE_X, SMALL_Y))\n b = 3*nd.ones(shape=(LARGE_X, SMALL_Y))\n c = b\n c = c.__rpow__(a)\n assert c[0][-1] == 8\n assert c.shape == a.shape\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n" ]
[ [ "numpy.full", "numpy.array", "numpy.reshape", "numpy.transpose", "numpy.diag", "numpy.arange", "numpy.all", "numpy.random.random", "numpy.ravel_multi_index", "numpy.broadcast_to" ] ]
mrbeann/MultiBench
[ "7da0a493b8e2cd857a5c22c0be04748ab7487494" ]
[ "datasets/affect/get_data.py" ]
[ "import os\nimport sys\nfrom typing import *\nimport pickle\nimport h5py\nimport numpy as np\nfrom numpy.core.numeric import zeros_like\nfrom torch.nn.functional import pad\nfrom torch.nn import functional as F\n\nsys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))\nimport torch\nimport torchtext as text\nfrom collections import defaultdict\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom robustness.text_robust import text_robustness\nfrom robustness.timeseries_robust import timeseries_robustness\n\nnp.seterr(divide='ignore', invalid='ignore')\n\ndef drop_entry(dataset):\n drop = []\n for ind, k in enumerate(dataset[\"text\"]):\n if k.sum() == 0:\n drop.append(ind)\n # for ind, k in enumerate(dataset[\"vision\"]):\n # if k.sum() == 0:\n # if ind not in drop:\n # drop.append(ind)\n # for ind, k in enumerate(dataset[\"audio\"]):\n # if k.sum() == 0:\n # if ind not in drop:\n # drop.append(ind)\n # print(drop)\n for modality in list(dataset.keys()):\n dataset[modality] = np.delete(dataset[modality], drop, 0)\n return dataset\n\n\ndef z_norm(dataset, max_seq_len=50):\n processed = {}\n text = dataset['text'][:, :max_seq_len, :]\n vision = dataset['vision'][:, :max_seq_len, :]\n audio = dataset['audio'][:, :max_seq_len, :]\n for ind in range(dataset[\"text\"].shape[0]):\n vision[ind] = np.nan_to_num(\n (vision[ind] - vision[ind].mean(0, keepdims=True)) / (np.std(vision[ind], axis=0, keepdims=True)))\n audio[ind] = np.nan_to_num(\n (audio[ind] - audio[ind].mean(0, keepdims=True)) / (np.std(audio[ind], axis=0, keepdims=True)))\n text[ind] = np.nan_to_num(\n (text[ind] - text[ind].mean(0, keepdims=True)) / (np.std(text[ind], axis=0, keepdims=True)))\n\n processed['vision'] = vision\n processed['audio'] = audio\n processed['text'] = text\n processed['labels'] = dataset['labels']\n return processed\n\n\ndef get_rawtext(path, data_kind, vids):\n if data_kind == 'hdf5':\n f = h5py.File(path, 'r')\n else:\n with open(path, 'rb') as f_r:\n f = pickle.load(f_r)\n text_data = []\n new_vids = []\n\n for vid in vids:\n text = []\n # (id, seg) = re.match(r'([-\\w]*)_(\\w+)', vid).groups()\n # vid_id = '{}[{}]'.format(id, seg)\n vid_id = vid\n try:\n for word in f['words'][vid_id]['features']:\n if word[0] != b'sp':\n text.append(word[0].decode('utf-8'))\n text_data.append(' '.join(text))\n new_vids.append(vid_id)\n except:\n print(\"missing\", vid, vid_id)\n return text_data, new_vids\n\n\ndef get_word2id(text_data, vids):\n word2id = defaultdict(lambda: len(word2id))\n UNK = word2id['unk']\n data_processed = dict()\n for i, segment in enumerate(text_data):\n words = []\n _words = segment.split()\n for word in _words:\n words.append(word2id[word])\n words = np.asarray(words)\n data_processed[vids[i]] = words\n\n def return_unk():\n return UNK\n\n word2id.default_factory = return_unk\n return data_processed, word2id\n\n\ndef get_word_embeddings(word2id, save=False):\n vec = text.vocab.GloVe(name='840B', dim=300)\n tokens = []\n for w, _ in word2id.items():\n tokens.append(w)\n # print('Vocab Length: {}'.format(len(tokens)))\n ret = vec.get_vecs_by_tokens(tokens, lower_case_backup=True)\n return ret\n\n\ndef glove_embeddings(text_data, vids, paddings=50):\n data_prod, w2id = get_word2id(text_data, vids)\n word_embeddings_looks_up = get_word_embeddings(w2id)\n looks_up = word_embeddings_looks_up.numpy()\n embedd_data = []\n for vid in vids:\n d = data_prod[vid]\n tmp = []\n look_up = [looks_up[x] for x in d]\n # Padding with zeros at the front\n 
# TODO: fix some segs have more than 50 words (FIXed)\n if len(d) > paddings:\n for x in d[:paddings]:\n tmp.append(looks_up[x])\n else:\n for i in range(paddings - len(d)):\n tmp.append(np.zeros(300, ))\n for x in d:\n tmp.append(looks_up[x])\n # try:\n # tmp = [looks_up[x] for x in d]\n # except:\n # print(d)\n embedd_data.append(np.array(tmp))\n return np.array(embedd_data)\n\n\nclass Affectdataset(Dataset):\n\n def __init__(self, data: Dict, flatten_time_series: bool, aligned: bool = True, task: str = None, max_pad=False, max_pad_num=50) -> None:\n self.dataset = data\n self.flatten = flatten_time_series\n self.aligned = aligned\n self.task = task\n self.max_pad = max_pad\n self.max_pad_num = max_pad_num\n\n def __getitem__(self, ind):\n\n # vision = torch.tensor(vision)\n # audio = torch.tensor(audio)\n # text = torch.tensor(text)\n\n vision = torch.tensor(self.dataset['vision'][ind])\n audio = torch.tensor(self.dataset['audio'][ind])\n text = torch.tensor(self.dataset['text'][ind])\n\n # print(vision.shape)\n # print(audio.shape)\n # print(text.shape)\n\n if self.aligned:\n try:\n start = text.nonzero(as_tuple=False)[0][0]\n except:\n print(text, ind)\n exit()\n vision = vision[start:].float()\n audio = audio[start:].float()\n text = text[start:].float()\n else:\n vision = vision[vision.nonzero()[0][0]:].float()\n audio = audio[audio.nonzero()[0][0]:].float()\n text = text[text.nonzero()[0][0]:].float()\n\n # z-normalize data\n vision = torch.nan_to_num((vision - vision.mean(0, keepdims=True)) / (torch.std(vision, axis=0, keepdims=True)))\n audio = torch.nan_to_num((audio - audio.mean(0, keepdims=True)) / (torch.std(audio, axis=0, keepdims=True)))\n text = torch.nan_to_num((text - text.mean(0, keepdims=True)) / (torch.std(text, axis=0, keepdims=True)))\n\n def get_class(flag, data_type='mosi'):\n if data_type in ['mosi', 'mosei']:\n if flag >= 0:\n return 1\n else:\n return 0\n else:\n return flag\n\n label = torch.tensor(get_class(self.dataset['labels'][ind])).long() if self.task == \"classification\" else torch.tensor(\n self.dataset['labels'][ind]).float()\n\n if self.flatten:\n return [vision.flatten(), audio.flatten(), text.flatten(), ind, \\\n label]\n else:\n if self.max_pad:\n tmp = [vision, audio, text, ind, label]\n for i in range(len(tmp) - 2):\n tmp[i] = F.pad(tmp[i], (0, 0, 0, self.max_pad_num - tmp[i].shape[0]))\n else:\n tmp = [vision, audio, text, ind, label]\n return tmp\n\n def __len__(self):\n return self.dataset['vision'].shape[0]\n\n\ndef get_dataloader(\n filepath: str, batch_size: int = 32, max_seq_len=50, max_pad=False, train_shuffle: bool = True,\n num_workers: int = 2, flatten_time_series: bool = False, task=None, robust_test=True,\n raw_path='/home/paul/MultiBench/mosi.hdf5') -> DataLoader:\n with open(filepath, \"rb\") as f:\n alldata = pickle.load(f)\n\n processed_dataset = {'train': {}, 'test': {}, 'valid': {}}\n alldata['train'] = drop_entry(alldata['train'])\n alldata['valid'] = drop_entry(alldata['valid'])\n alldata['test'] = drop_entry(alldata['test'])\n\n process = eval(\"process_2\") if max_pad else eval(\"process_1\")\n\n for dataset in alldata:\n processed_dataset[dataset] = alldata[dataset]\n\n train = DataLoader(Affectdataset(processed_dataset['train'], flatten_time_series, task=task, max_pad=max_pad, max_pad_num=max_seq_len), \\\n shuffle=train_shuffle, num_workers=num_workers, batch_size=batch_size, \\\n collate_fn=process)\n valid = DataLoader(Affectdataset(processed_dataset['valid'], flatten_time_series, task=task, max_pad=max_pad, 
max_pad_num=max_seq_len), \\\n shuffle=False, num_workers=num_workers, batch_size=batch_size, \\\n collate_fn=process)\n # test = DataLoader(Affectdataset(processed_dataset['test'], flatten_time_series, task=task), \\\n # shuffle=False, num_workers=num_workers, batch_size=batch_size, \\\n # collate_fn=process)\n if robust_test:\n vids = [id for id in alldata['test']['id']]\n\n file_type = raw_path.split('.')[-1] # hdf5\n rawtext, vids = get_rawtext(raw_path, file_type, vids)\n\n # Add text noises\n robust_text = []\n robust_text_numpy = []\n for i in range(10):\n test = dict()\n test['vision'] = alldata['test'][\"vision\"]\n test['audio'] = alldata['test'][\"audio\"]\n test['text'] = glove_embeddings(text_robustness(rawtext, noise_level=i / 10), vids)\n test['labels'] = alldata['test'][\"labels\"]\n test = drop_entry(test)\n\n robust_text_numpy.append(test['text'])\n\n robust_text.append(\n DataLoader(Affectdataset(test, flatten_time_series, task=task, max_pad=max_pad, max_pad_num=max_seq_len), shuffle=False, num_workers=num_workers,\n batch_size=batch_size, collate_fn=process))\n\n # Add visual noises\n robust_vision = []\n for i in range(10):\n test = dict()\n test['vision'] = timeseries_robustness([alldata['test']['vision'].copy()], noise_level=i / 10, rand_drop=False)[0]\n # print('vision shape: {}'.format(test['vision'].shape))\n test['audio'] = alldata['test'][\"audio\"].copy()\n test['text'] = alldata['test']['text'].copy()\n test['labels'] = alldata['test'][\"labels\"]\n test = drop_entry(test)\n print('test entries: {}'.format(test['vision'].shape))\n\n robust_vision.append(\n DataLoader(Affectdataset(test, flatten_time_series, task=task, max_pad=max_pad, max_pad_num=max_seq_len), shuffle=False, num_workers=num_workers,\n batch_size=batch_size, collate_fn=process))\n\n # Add audio noises\n robust_audio = []\n for i in range(10):\n test = dict()\n test['vision'] = alldata['test'][\"vision\"].copy()\n test['audio'] = timeseries_robustness([alldata['test']['audio'].copy()], noise_level=i / 10, rand_drop=False)[0]\n test['text'] = alldata['test']['text'].copy()\n test['labels'] = alldata['test'][\"labels\"]\n test = drop_entry(test)\n print('test entries: {}'.format(test['vision'].shape))\n\n robust_audio.append(\n DataLoader(Affectdataset(test, flatten_time_series, task=task, max_pad=max_pad, max_pad_num=max_seq_len), shuffle=False, num_workers=num_workers,\n batch_size=batch_size, collate_fn=process))\n\n # Add timeseries noises\n\n # for i, text in enumerate(robust_text_numpy):\n # print(text.shape)\n # alldata_test = timeseries_robustness([alldata['test']['vision'], alldata['test']['audio'], text], noise_level=i/10)\n # test.append(alldata_test)\n\n robust_timeseries = []\n # alldata['test'] = drop_entry(alldata['test'])\n for i in range(10):\n robust_timeseries_tmp = timeseries_robustness(\n [alldata['test']['vision'].copy(), alldata['test']['audio'].copy(), alldata['test']['text'].copy()],\n noise_level=i / 10, rand_drop=False)\n # print('shape: {}'.format(robust_timeseries_tmp[1].shape))\n test = dict()\n test['vision'] = robust_timeseries_tmp[0]\n test['audio'] = robust_timeseries_tmp[1]\n test['text'] = robust_timeseries_tmp[2]\n test['labels'] = alldata['test']['labels']\n test = drop_entry(test)\n print('test entries: {}'.format(test['vision'].shape))\n\n robust_timeseries.append(\n DataLoader(Affectdataset(test, flatten_time_series, task=task, max_pad=max_pad, max_pad_num=max_seq_len), shuffle=False, num_workers=num_workers,\n batch_size=batch_size, collate_fn=process))\n 
test_robust_data = dict()\n test_robust_data['robust_text'] = robust_text\n test_robust_data['robust_vision'] = robust_vision\n test_robust_data['robust_audio'] = robust_audio\n test_robust_data['robust_timeseries'] = robust_timeseries\n return train, valid, test_robust_data\n else:\n test = dict()\n test['all'] = [DataLoader(Affectdataset(processed_dataset['test'], flatten_time_series, task=task, max_pad=max_pad, max_pad_num=max_seq_len), \\\n shuffle=False, num_workers=num_workers, batch_size=batch_size, \\\n collate_fn=process)]\n return train, valid, test\n\ndef process_1(inputs: List):\n processed_input = []\n processed_input_lengths = []\n inds = []\n labels = []\n\n for i in range(len(inputs[0]) - 2):\n feature = []\n for sample in inputs:\n feature.append(sample[i])\n processed_input_lengths.append(torch.as_tensor([v.size(0) for v in feature]))\n pad_seq = pad_sequence(feature, batch_first=True)\n processed_input.append(pad_seq)\n\n for sample in inputs:\n # print(sample[-1].shape)\n inds.append(sample[-2])\n # if len(sample[-2].shape) > 2:\n # labels.append(torch.where(sample[-2][:, 1] == 1)[0])\n # else:\n labels.append(sample[-1])\n\n return processed_input, processed_input_lengths, \\\n torch.tensor(inds).view(len(inputs), 1), torch.tensor(labels).view(len(inputs), 1)\n\n\ndef process_2(inputs: List):\n processed_input = []\n processed_input_lengths = []\n inds = []\n labels = []\n\n for i in range(len(inputs[0]) - 2):\n feature = []\n for sample in inputs:\n feature.append(sample[i])\n processed_input_lengths.append(torch.as_tensor([v.size(0) for v in feature]))\n pad_seq = pad_sequence(feature, batch_first=True)\n processed_input.append(pad_seq)\n\n for sample in inputs:\n # print(sample[-1].shape)\n inds.append(sample[-2])\n # if len(sample[-2].shape) > 2:\n # labels.append(torch.where(sample[-2][:, 1] == 1)[0])\n # else:\n labels.append(sample[-1])\n\n return processed_input[0], processed_input[1], processed_input[2], torch.tensor(labels).view(len(inputs), 1)\n\n\nif __name__ == '__main__':\n traindata, validdata, test_robust = \\\n get_dataloader('/home/paul/MultiBench/mosi_data.pkl', robust_test=False, max_pad=False)\n\n keys = list(test_robust.keys())\n # print(keys)\n\n # test_robust[keys[0]][1]\n for batch in test_robust[keys[0]][0]:\n for b in batch:\n print(b.shape)\n print(b)\n # print(b[0].shape)\n # print(batch[1])\n # print(batch[-1])\n break\n\n\n" ]
[ [ "numpy.array", "numpy.delete", "numpy.asarray", "numpy.zeros", "torch.nn.utils.rnn.pad_sequence", "torch.std", "numpy.seterr", "numpy.std", "torch.tensor", "torch.nn.functional.pad" ] ]
Himydata/himydata-python
[ "6bd57fcee8e48c926d8f887a2635b4c28210e483" ]
[ "himydata/hmd/utils/datasets.py" ]
[ "import sqlalchemy as sa\nimport pandas as pd\nfrom himydata.hmd.utils.datastores import Datastore\n\n\nclass Dataset(object):\n\n def __init__(self, hmd_dataset, name):\n \"\"\"\n :param hmd_dataset: class Api of hmddataset\n :param name: dataset name\n \"\"\"\n self.hmd_dataset = hmd_dataset\n self.name = name\n\n self.conf = eval(self.__get_config())\n self.engine = sa.create_engine(self.conf['config'])\n\n def set_name(self, name):\n \"\"\"\n :param name: dataset name\n \"\"\"\n self.name = name\n\n def __get_config(self):\n \"\"\"private class, used to return the config necessary to make a direct sqlAlchemy connection to the database\"\"\"\n return self.hmd_dataset.get_config(self.name)\n\n def get_dataset_sql_name(self):\n \"\"\"\n :return: dataset name as stored in database\n \"\"\"\n if not self.engine.has_table(self.conf['name']):\n return None\n\n return self.conf['name']\n\n def get_dataset_as_dataframe(self):\n \"\"\"\n :return: pandas dataframe\n \"\"\"\n if not self.engine.has_table(self.conf['name']):\n return None\n\n return pd.read_sql(\"SELECT * FROM %s\" % self.conf['name'], self.engine)\n\n def get_engine(self):\n \"\"\"\n :return: SQLAlchemy engine\n \"\"\"\n if not self.engine.has_table(self.conf['name']):\n return None\n\n return self.engine\n\n def get_dataset_table(self):\n \"\"\"\n :return: SQLAlchemy table object\n \"\"\"\n if not self.engine.has_table(self.conf['name']):\n return None\n\n metadata = sa.MetaData()\n sql_tabel = sa.Table(self.conf['name'], metadata, autoload=True, autoload_with=self.engine)\n\n return sql_tabel\n\n def get_query_as_list(self, query):\n \"\"\"\n :param query: SqlAlchemy query\n :return: list with results\n \"\"\"\n if not self.engine.has_table(self.conf['name']):\n return None\n\n connection = self.engine.connect()\n result_proxy = connection.execute(query)\n result_set = result_proxy.fetchall()\n result_proxy.close()\n return result_set\n\n def get_query_as_dataframe(self, query):\n \"\"\"\n :param query: SqlAlchemy query\n :return: pandas dataframe with query results\n \"\"\"\n if not self.engine.has_table(self.conf['name']):\n return None\n\n return pd.read_sql_query(query, self.engine)\n\n def get_datastore(self):\n datastore = Datastore(self.hmd_dataset, self.name)\n return datastore\n" ]
[ [ "pandas.read_sql", "pandas.read_sql_query" ] ]
Kyungpyo-Kim/object_classification
[ "17bba66a119719548b2f07710c4de5e2bc726eaa" ]
[ "scripts/utils/eulerangles.py" ]
[ "# -*- coding: utf-8 -*-\n\n# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the NiBabel package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n''' Module implementing Euler angle rotations and their conversions\nSee:\n* http://en.wikipedia.org/wiki/Rotation_matrix\n* http://en.wikipedia.org/wiki/Euler_angles\n* http://mathworld.wolfram.com/EulerAngles.html\nSee also: *Representing Attitude with Euler Angles and Quaternions: A\nReference* (2006) by James Diebel. A cached PDF link last found here:\nhttp://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.110.5134\nEuler's rotation theorem tells us that any rotation in 3D can be\ndescribed by 3 angles. Let's call the 3 angles the *Euler angle vector*\nand call the angles in the vector :math:`alpha`, :math:`beta` and\n:math:`gamma`. The vector is [ :math:`alpha`,\n:math:`beta`. :math:`gamma` ] and, in this description, the order of the\nparameters specifies the order in which the rotations occur (so the\nrotation corresponding to :math:`alpha` is applied first).\nIn order to specify the meaning of an *Euler angle vector* we need to\nspecify the axes around which each of the rotations corresponding to\n:math:`alpha`, :math:`beta` and :math:`gamma` will occur.\nThere are therefore three axes for the rotations :math:`alpha`,\n:math:`beta` and :math:`gamma`; let's call them :math:`i` :math:`j`,\n:math:`k`.\nLet us express the rotation :math:`alpha` around axis `i` as a 3 by 3\nrotation matrix `A`. Similarly :math:`beta` around `j` becomes 3 x 3\nmatrix `B` and :math:`gamma` around `k` becomes matrix `G`. Then the\nwhole rotation expressed by the Euler angle vector [ :math:`alpha`,\n:math:`beta`. :math:`gamma` ], `R` is given by::\n R = np.dot(G, np.dot(B, A))\nSee http://mathworld.wolfram.com/EulerAngles.html\nThe order :math:`G B A` expresses the fact that the rotations are\nperformed in the order of the vector (:math:`alpha` around axis `i` =\n`A` first).\nTo convert a given Euler angle vector to a meaningful rotation, and a\nrotation matrix, we need to define:\n* the axes `i`, `j`, `k`\n* whether a rotation matrix should be applied on the left of a vector to\n be transformed (vectors are column vectors) or on the right (vectors\n are row vectors).\n* whether the rotations move the axes as they are applied (intrinsic\n rotations) - compared the situation where the axes stay fixed and the\n vectors move within the axis frame (extrinsic)\n* the handedness of the coordinate system\nSee: http://en.wikipedia.org/wiki/Rotation_matrix#Ambiguities\nWe are using the following conventions:\n* axes `i`, `j`, `k` are the `z`, `y`, and `x` axes respectively. Thus\n an Euler angle vector [ :math:`alpha`, :math:`beta`. 
:math:`gamma` ]\n in our convention implies a :math:`alpha` radian rotation around the\n `z` axis, followed by a :math:`beta` rotation around the `y` axis,\n followed by a :math:`gamma` rotation around the `x` axis.\n* the rotation matrix applies on the left, to column vectors on the\n right, so if `R` is the rotation matrix, and `v` is a 3 x N matrix\n with N column vectors, the transformed vector set `vdash` is given by\n ``vdash = np.dot(R, v)``.\n* extrinsic rotations - the axes are fixed, and do not move with the\n rotations.\n* a right-handed coordinate system\nThe convention of rotation around ``z``, followed by rotation around\n``y``, followed by rotation around ``x``, is known (confusingly) as\n\"xyz\", pitch-roll-yaw, Cardan angles, or Tait-Bryan angles.\n'''\n\nimport math\n\nimport sys\nif sys.version_info >= (3,0):\n from functools import reduce\n\nimport numpy as np\n\n\n_FLOAT_EPS_4 = np.finfo(float).eps * 4.0\n\n\ndef euler2mat(z=0, y=0, x=0):\n ''' Return matrix for rotations around z, y and x axes\n Uses the z, then y, then x convention above\n Parameters\n ----------\n z : scalar\n Rotation angle in radians around z-axis (performed first)\n y : scalar\n Rotation angle in radians around y-axis\n x : scalar\n Rotation angle in radians around x-axis (performed last)\n Returns\n -------\n M : array shape (3,3)\n Rotation matrix giving same rotation as for given angles\n Examples\n --------\n >>> zrot = 1.3 # radians\n >>> yrot = -0.1\n >>> xrot = 0.2\n >>> M = euler2mat(zrot, yrot, xrot)\n >>> M.shape == (3, 3)\n True\n The output rotation matrix is equal to the composition of the\n individual rotations\n >>> M1 = euler2mat(zrot)\n >>> M2 = euler2mat(0, yrot)\n >>> M3 = euler2mat(0, 0, xrot)\n >>> composed_M = np.dot(M3, np.dot(M2, M1))\n >>> np.allclose(M, composed_M)\n True\n You can specify rotations by named arguments\n >>> np.all(M3 == euler2mat(x=xrot))\n True\n When applying M to a vector, the vector should column vector to the\n right of M. If the right hand side is a 2D array rather than a\n vector, then each column of the 2D array represents a vector.\n >>> vec = np.array([1, 0, 0]).reshape((3,1))\n >>> v2 = np.dot(M, vec)\n >>> vecs = np.array([[1, 0, 0],[0, 1, 0]]).T # giving 3x2 array\n >>> vecs2 = np.dot(M, vecs)\n Rotations are counter-clockwise.\n >>> zred = np.dot(euler2mat(z=np.pi/2), np.eye(3))\n >>> np.allclose(zred, [[0, -1, 0],[1, 0, 0], [0, 0, 1]])\n True\n >>> yred = np.dot(euler2mat(y=np.pi/2), np.eye(3))\n >>> np.allclose(yred, [[0, 0, 1],[0, 1, 0], [-1, 0, 0]])\n True\n >>> xred = np.dot(euler2mat(x=np.pi/2), np.eye(3))\n >>> np.allclose(xred, [[1, 0, 0],[0, 0, -1], [0, 1, 0]])\n True\n Notes\n -----\n The direction of rotation is given by the right-hand rule (orient\n the thumb of the right hand along the axis around which the rotation\n occurs, with the end of the thumb at the positive end of the axis;\n curl your fingers; the direction your fingers curl is the direction\n of rotation). 
Therefore, the rotations are counterclockwise if\n looking along the axis of rotation from positive to negative.\n '''\n Ms = []\n if z:\n cosz = math.cos(z)\n sinz = math.sin(z)\n Ms.append(np.array(\n [[cosz, -sinz, 0],\n [sinz, cosz, 0],\n [0, 0, 1]]))\n if y:\n cosy = math.cos(y)\n siny = math.sin(y)\n Ms.append(np.array(\n [[cosy, 0, siny],\n [0, 1, 0],\n [-siny, 0, cosy]]))\n if x:\n cosx = math.cos(x)\n sinx = math.sin(x)\n Ms.append(np.array(\n [[1, 0, 0],\n [0, cosx, -sinx],\n [0, sinx, cosx]]))\n if Ms:\n return reduce(np.dot, Ms[::-1])\n return np.eye(3)\n\n\ndef mat2euler(M, cy_thresh=None):\n ''' Discover Euler angle vector from 3x3 matrix\n Uses the conventions above.\n Parameters\n ----------\n M : array-like, shape (3,3)\n cy_thresh : None or scalar, optional\n threshold below which to give up on straightforward arctan for\n estimating x rotation. If None (default), estimate from\n precision of input.\n Returns\n -------\n z : scalar\n y : scalar\n x : scalar\n Rotations in radians around z, y, x axes, respectively\n Notes\n -----\n If there was no numerical error, the routine could be derived using\n Sympy expression for z then y then x rotation matrix, which is::\n [ cos(y)*cos(z), -cos(y)*sin(z), sin(y)],\n [cos(x)*sin(z) + cos(z)*sin(x)*sin(y), cos(x)*cos(z) - sin(x)*sin(y)*sin(z), -cos(y)*sin(x)],\n [sin(x)*sin(z) - cos(x)*cos(z)*sin(y), cos(z)*sin(x) + cos(x)*sin(y)*sin(z), cos(x)*cos(y)]\n with the obvious derivations for z, y, and x\n z = atan2(-r12, r11)\n y = asin(r13)\n x = atan2(-r23, r33)\n Problems arise when cos(y) is close to zero, because both of::\n z = atan2(cos(y)*sin(z), cos(y)*cos(z))\n x = atan2(cos(y)*sin(x), cos(x)*cos(y))\n will be close to atan2(0, 0), and highly unstable.\n The ``cy`` fix for numerical instability below is from: *Graphics\n Gems IV*, Paul Heckbert (editor), Academic Press, 1994, ISBN:\n 0123361559. Specifically it comes from EulerAngles.c by Ken\n Shoemake, and deals with the case where cos(y) is close to zero:\n See: http://www.graphicsgems.org/\n The code appears to be licensed (from the website) as \"can be used\n without restrictions\".\n '''\n M = np.asarray(M)\n if cy_thresh is None:\n try:\n cy_thresh = np.finfo(M.dtype).eps * 4\n except ValueError:\n cy_thresh = _FLOAT_EPS_4\n r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat\n # cy: sqrt((cos(y)*cos(z))**2 + (cos(x)*cos(y))**2)\n cy = math.sqrt(r33*r33 + r23*r23)\n if cy > cy_thresh: # cos(y) not close to zero, standard form\n z = math.atan2(-r12, r11) # atan2(cos(y)*sin(z), cos(y)*cos(z))\n y = math.atan2(r13, cy) # atan2(sin(y), cy)\n x = math.atan2(-r23, r33) # atan2(cos(y)*sin(x), cos(x)*cos(y))\n else: # cos(y) (close to) zero, so x -> 0.0 (see above)\n # so r21 -> sin(z), r22 -> cos(z) and\n z = math.atan2(r21, r22)\n y = math.atan2(r13, cy) # atan2(sin(y), cy)\n x = 0.0\n return z, y, x\n\n\ndef euler2quat(z=0, y=0, x=0):\n ''' Return quaternion corresponding to these Euler angles\n Uses the z, then y, then x convention above\n Parameters\n ----------\n z : scalar\n Rotation angle in radians around z-axis (performed first)\n y : scalar\n Rotation angle in radians around y-axis\n x : scalar\n Rotation angle in radians around x-axis (performed last)\n Returns\n -------\n quat : array shape (4,)\n Quaternion in w, x, y z (real, then vector) format\n Notes\n -----\n We can derive this formula in Sympy using:\n 1. Formula giving quaternion corresponding to rotation of theta radians\n about arbitrary axis:\n http://mathworld.wolfram.com/EulerParameters.html\n 2. 
Generated formulae from 1.) for quaternions corresponding to\n theta radians rotations about ``x, y, z`` axes\n 3. Apply quaternion multiplication formula -\n http://en.wikipedia.org/wiki/Quaternions#Hamilton_product - to\n formulae from 2.) to give formula for combined rotations.\n '''\n z = z/2.0\n y = y/2.0\n x = x/2.0\n cz = math.cos(z)\n sz = math.sin(z)\n cy = math.cos(y)\n sy = math.sin(y)\n cx = math.cos(x)\n sx = math.sin(x)\n return np.array([\n cx*cy*cz - sx*sy*sz,\n cx*sy*sz + cy*cz*sx,\n cx*cz*sy - sx*cy*sz,\n cx*cy*sz + sx*cz*sy])\n\n\ndef quat2euler(q):\n ''' Return Euler angles corresponding to quaternion `q`\n Parameters\n ----------\n q : 4 element sequence\n w, x, y, z of quaternion\n Returns\n -------\n z : scalar\n Rotation angle in radians around z-axis (performed first)\n y : scalar\n Rotation angle in radians around y-axis\n x : scalar\n Rotation angle in radians around x-axis (performed last)\n Notes\n -----\n It's possible to reduce the amount of calculation a little, by\n combining parts of the ``quat2mat`` and ``mat2euler`` functions, but\n the reduction in computation is small, and the code repetition is\n large.\n '''\n # delayed import to avoid cyclic dependencies\n import nibabel.quaternions as nq\n return mat2euler(nq.quat2mat(q))\n\n\ndef euler2angle_axis(z=0, y=0, x=0):\n ''' Return angle, axis corresponding to these Euler angles\n Uses the z, then y, then x convention above\n Parameters\n ----------\n z : scalar\n Rotation angle in radians around z-axis (performed first)\n y : scalar\n Rotation angle in radians around y-axis\n x : scalar\n Rotation angle in radians around x-axis (performed last)\n Returns\n -------\n theta : scalar\n angle of rotation\n vector : array shape (3,)\n axis around which rotation occurs\n Examples\n --------\n >>> theta, vec = euler2angle_axis(0, 1.5, 0)\n >>> print(theta)\n 1.5\n >>> np.allclose(vec, [0, 1, 0])\n True\n '''\n # delayed import to avoid cyclic dependencies\n import nibabel.quaternions as nq\n return nq.quat2angle_axis(euler2quat(z, y, x))\n\n\ndef angle_axis2euler(theta, vector, is_normalized=False):\n ''' Convert angle, axis pair to Euler angles\n Parameters\n ----------\n theta : scalar\n angle of rotation\n vector : 3 element sequence\n vector specifying axis for rotation.\n is_normalized : bool, optional\n True if vector is already normalized (has norm of 1). Default\n False\n Returns\n -------\n z : scalar\n y : scalar\n x : scalar\n Rotations in radians around z, y, x axes, respectively\n Examples\n --------\n >>> z, y, x = angle_axis2euler(0, [1, 0, 0])\n >>> np.allclose((z, y, x), 0)\n True\n Notes\n -----\n It's possible to reduce the amount of calculation a little, by\n combining parts of the ``angle_axis2mat`` and ``mat2euler``\n functions, but the reduction in computation is small, and the code\n repetition is large.\n '''\n # delayed import to avoid cyclic dependencies\n import nibabel.quaternions as nq\n M = nq.angle_axis2mat(theta, vector, is_normalized)\n return mat2euler(M)" ]
[ [ "numpy.finfo", "numpy.array", "numpy.asarray", "numpy.eye" ] ]
awesome-archive/snorkel
[ "d68bf18cf9ee0cec5958ab7802059a0c77c34b9e" ]
[ "test/classification/training/test_trainer.py" ]
[ "import copy\nimport tempfile\nimport unittest\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom snorkel.classification import (\n DictDataLoader,\n DictDataset,\n MultitaskClassifier,\n Operation,\n Task,\n Trainer,\n)\nfrom snorkel.classification.training.loggers import LogWriter, TensorBoardWriter\n\nTASK_NAMES = [\"task1\", \"task2\"]\nbase_config = {\"n_epochs\": 1, \"progress_bar\": False}\nNUM_EXAMPLES = 6\nBATCH_SIZE = 2\nBATCHES_PER_EPOCH = NUM_EXAMPLES / BATCH_SIZE\n\n\ndef create_dataloader(task_name=\"task\", split=\"train\"):\n X = torch.FloatTensor([[i, i] for i in range(NUM_EXAMPLES)])\n Y = torch.ones(NUM_EXAMPLES).long()\n\n dataset = DictDataset(\n name=\"dataset\", split=split, X_dict={\"data\": X}, Y_dict={task_name: Y}\n )\n\n dataloader = DictDataLoader(dataset, batch_size=BATCH_SIZE)\n return dataloader\n\n\ndef create_task(task_name, module_suffixes=(\"\", \"\")):\n module1_name = f\"linear1{module_suffixes[0]}\"\n module2_name = f\"linear2{module_suffixes[1]}\"\n\n module_pool = nn.ModuleDict(\n {\n module1_name: nn.Sequential(nn.Linear(2, 10), nn.ReLU()),\n module2_name: nn.Linear(10, 2),\n }\n )\n\n op1 = Operation(module_name=module1_name, inputs=[(\"_input_\", \"data\")])\n op2 = Operation(module_name=module2_name, inputs=[op1.name])\n\n op_sequence = [op1, op2]\n\n task = Task(name=task_name, module_pool=module_pool, op_sequence=op_sequence)\n\n return task\n\n\ndataloaders = [create_dataloader(task_name) for task_name in TASK_NAMES]\ntasks = [\n create_task(TASK_NAMES[0], module_suffixes=[\"A\", \"A\"]),\n create_task(TASK_NAMES[1], module_suffixes=[\"A\", \"B\"]),\n]\nmodel = MultitaskClassifier([tasks[0]])\n\n\nclass TrainerTest(unittest.TestCase):\n def test_trainer_onetask(self):\n \"\"\"Train a single-task model\"\"\"\n trainer = Trainer(**base_config)\n trainer.fit(model, [dataloaders[0]])\n\n def test_trainer_twotask(self):\n \"\"\"Train a model with overlapping modules and flows\"\"\"\n multitask_model = MultitaskClassifier(tasks)\n trainer = Trainer(**base_config)\n trainer.fit(multitask_model, dataloaders)\n\n def test_trainer_errors(self):\n dataloader = copy.deepcopy(dataloaders[0])\n\n # No train split\n trainer = Trainer(**base_config)\n dataloader.dataset.split = \"valid\"\n with self.assertRaisesRegex(ValueError, \"Cannot find any dataloaders\"):\n trainer.fit(model, [dataloader])\n\n # Unused split\n trainer = Trainer(**base_config, valid_split=\"val\")\n with self.assertRaisesRegex(ValueError, \"Dataloader splits must be\"):\n trainer.fit(model, [dataloader])\n\n def test_checkpointer_init(self):\n with tempfile.TemporaryDirectory() as temp_dir:\n more_config = {\n \"checkpointing\": True,\n \"checkpointer_config\": {\"checkpoint_dir\": temp_dir},\n \"log_writer_config\": {\"log_dir\": temp_dir},\n }\n trainer = Trainer(**base_config, **more_config, logging=True)\n trainer.fit(model, [dataloaders[0]])\n self.assertIsNotNone(trainer.checkpointer)\n\n broken_config = {\n \"checkpointing\": True,\n \"checkpointer_config\": {\"checkpoint_dir\": None},\n \"log_writer_config\": {\"log_dir\": temp_dir},\n }\n with self.assertRaises(TypeError):\n trainer = Trainer(**base_config, **broken_config, logging=False)\n trainer.fit(model, [dataloaders[0]])\n\n def test_log_writer_init(self):\n with tempfile.TemporaryDirectory() as temp_dir:\n log_writer_config = {\"log_dir\": temp_dir}\n trainer = Trainer(\n **base_config,\n logging=True,\n log_writer=\"json\",\n log_writer_config=log_writer_config,\n )\n trainer.fit(model, 
[dataloaders[0]])\n self.assertIsInstance(trainer.log_writer, LogWriter)\n\n log_writer_config = {\"log_dir\": temp_dir}\n trainer = Trainer(\n **base_config,\n logging=True,\n log_writer=\"tensorboard\",\n log_writer_config=log_writer_config,\n )\n trainer.fit(model, [dataloaders[0]])\n self.assertIsInstance(trainer.log_writer, TensorBoardWriter)\n\n log_writer_config = {\"log_dir\": temp_dir}\n with self.assertRaisesRegex(ValueError, \"Unrecognized writer\"):\n trainer = Trainer(\n **base_config,\n logging=True,\n log_writer=\"foo\",\n log_writer_config=log_writer_config,\n )\n trainer.fit(model, [dataloaders[0]])\n\n def test_optimizer_init(self):\n trainer = Trainer(**base_config, optimizer=\"sgd\")\n trainer.fit(model, [dataloaders[0]])\n self.assertIsInstance(trainer.optimizer, optim.SGD)\n\n trainer = Trainer(**base_config, optimizer=\"adam\")\n trainer.fit(model, [dataloaders[0]])\n self.assertIsInstance(trainer.optimizer, optim.Adam)\n\n trainer = Trainer(**base_config, optimizer=\"adamax\")\n trainer.fit(model, [dataloaders[0]])\n self.assertIsInstance(trainer.optimizer, optim.Adamax)\n\n with self.assertRaisesRegex(ValueError, \"Unrecognized optimizer\"):\n trainer = Trainer(**base_config, optimizer=\"foo\")\n trainer.fit(model, [dataloaders[0]])\n\n def test_scheduler_init(self):\n trainer = Trainer(**base_config, lr_scheduler=\"constant\")\n trainer.fit(model, [dataloaders[0]])\n self.assertIsNone(trainer.lr_scheduler)\n\n trainer = Trainer(**base_config, lr_scheduler=\"linear\")\n trainer.fit(model, [dataloaders[0]])\n self.assertIsInstance(trainer.lr_scheduler, optim.lr_scheduler.LambdaLR)\n\n trainer = Trainer(**base_config, lr_scheduler=\"exponential\")\n trainer.fit(model, [dataloaders[0]])\n self.assertIsInstance(trainer.lr_scheduler, optim.lr_scheduler.ExponentialLR)\n\n trainer = Trainer(**base_config, lr_scheduler=\"step\")\n trainer.fit(model, [dataloaders[0]])\n self.assertIsInstance(trainer.lr_scheduler, optim.lr_scheduler.StepLR)\n\n with self.assertRaisesRegex(ValueError, \"Unrecognized lr scheduler\"):\n trainer = Trainer(**base_config, lr_scheduler=\"foo\")\n trainer.fit(model, [dataloaders[0]])\n\n def test_warmup(self):\n lr_scheduler_config = {\"warmup_steps\": 1, \"warmup_unit\": \"batches\"}\n trainer = Trainer(**base_config, lr_scheduler_config=lr_scheduler_config)\n trainer.fit(model, [dataloaders[0]])\n self.assertEqual(trainer.warmup_steps, 1)\n\n lr_scheduler_config = {\"warmup_steps\": 1, \"warmup_unit\": \"epochs\"}\n trainer = Trainer(**base_config, lr_scheduler_config=lr_scheduler_config)\n trainer.fit(model, [dataloaders[0]])\n self.assertEqual(trainer.warmup_steps, BATCHES_PER_EPOCH)\n\n lr_scheduler_config = {\"warmup_percentage\": 1 / BATCHES_PER_EPOCH}\n trainer = Trainer(**base_config, lr_scheduler_config=lr_scheduler_config)\n trainer.fit(model, [dataloaders[0]])\n self.assertEqual(trainer.warmup_steps, 1)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.nn.Linear", "torch.nn.ReLU", "torch.ones" ] ]
gracetian6/mai21-learned-smartphone-isp
[ "487368d0abd860f070f3fc4f2836352857b10ad6" ]
[ "ckpt2pb.py" ]
[ "#################################################\n# Convert checkpoint to frozen graph (protobuf) #\n#################################################\n\nimport argparse\nimport tensorflow as tf\n\n\ndef freeze_graph(input_checkpoint,output_graph,output_node_names):\n \"\"\"Freeze model weights to get the pb file\n\n Args:\n input_checkpoint: path to input checkpoint.\n output_graph: path to output pb file.\n\n \"\"\"\n\n # output name in the model graph (may need to check it using tensorboard)\n saver = tf.compat.v1.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=True)\n \n with tf.compat.v1.Session() as sess:\n saver.restore(sess, input_checkpoint) # restore the model parameters\n output_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants( # freeze the parameters\n sess=sess,\n input_graph_def=sess.graph_def,\n output_node_names=output_node_names.split(\",\")) # seperate multiple output names using \",\"\n \n with tf.io.gfile.GFile(output_graph, \"wb\") as f: # save the model\n f.write(output_graph_def.SerializeToString()) \n print(\"%d ops in the final graph.\" % len(output_graph_def.node)) # obtain node #\n\n\ndef _parse_argument():\n \"\"\"Return arguments for Model Freezer for NeuroPilot Model Hub.\"\"\"\n parser = argparse.ArgumentParser(\n description='Model Freezer for NeuroPilot Model Hub.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n '--in_path', help='Path to input checkpoint.', type=str, default='model.ckpt', required=True)\n parser.add_argument(\n '--out_path', help='Path to the output pb.', type=str, default='model.pb', required=True)\n parser.add_argument(\n '--out_nodes', help='Output node names.', type=str, default='generator/add_308', required=True)\n return parser.parse_args()\n\n\ndef main(args):\n \"\"\"Entry point of Model Freezer Top Level Wrapper for NeuroPilot Model Hub.\n\n Args:\n args: A `argparse.ArgumentParser` includes arguments for processing.\n\n Raises:\n ValueError: If process type is wrong.\n \"\"\"\n freeze_graph(args.in_path, args.out_path, args.out_nodes)\n\nif __name__ == '__main__':\n arguments = _parse_argument()\n main(arguments)\n" ]
[ [ "tensorflow.compat.v1.Session", "tensorflow.compat.v1.train.import_meta_graph", "tensorflow.io.gfile.GFile" ] ]
mraspaud/pyproj
[ "57eeaf50d1ac0e24ed3a4351a896d8bbdf747430" ]
[ "test/test_transformer.py" ]
[ "from distutils.version import LooseVersion\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_almost_equal\n\nimport pyproj\nfrom pyproj import Proj, Transformer, itransform, transform\nfrom pyproj.enums import TransformDirection\nfrom pyproj.exceptions import ProjError\nfrom pyproj.transformer import AreaOfInterest, TransformerGroup\n\n\ndef test_tranform_wgs84_to_custom():\n custom_proj = pyproj.Proj(\n \"+proj=geos +lon_0=0.000000 +lat_0=0 +h=35807.414063\"\n \" +a=6378.169000 +b=6356.583984\"\n )\n wgs84 = pyproj.Proj(\"+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\")\n lat, lon = 51.04715, 3.23406\n xx, yy = pyproj.transform(wgs84, custom_proj, lon, lat)\n assert \"{:.3f} {:.3f}\".format(xx, yy) == \"212.623 4604.975\"\n\n\ndef test_transform_wgs84_to_alaska():\n with pytest.warns(FutureWarning):\n lat_lon_proj = pyproj.Proj(init=\"epsg:4326\", preserve_units=False)\n alaska_aea_proj = pyproj.Proj(init=\"epsg:2964\", preserve_units=False)\n test = (-179.72638, 49.752533)\n xx, yy = pyproj.transform(lat_lon_proj, alaska_aea_proj, *test)\n assert \"{:.3f} {:.3f}\".format(xx, yy) == \"-1824924.495 330822.800\"\n\n\ndef test_illegal_transformation():\n # issue 202\n with pytest.warns(FutureWarning):\n p1 = pyproj.Proj(init=\"epsg:4326\")\n p2 = pyproj.Proj(init=\"epsg:3857\")\n xx, yy = pyproj.transform(\n p1, p2, (-180, -180, 180, 180, -180), (-90, 90, 90, -90, -90)\n )\n assert np.all(np.isinf(xx))\n assert np.all(np.isinf(yy))\n with pytest.raises(ProjError):\n pyproj.transform(\n p1, p2, (-180, -180, 180, 180, -180), (-90, 90, 90, -90, -90), errcheck=True\n )\n\n\ndef test_lambert_conformal_transform():\n # issue 207\n with pytest.warns(FutureWarning):\n Midelt = pyproj.Proj(init=\"epsg:26191\")\n WGS84 = pyproj.Proj(init=\"epsg:4326\")\n\n E = 567623.931\n N = 256422.787\n h = 1341.467\n\n Long1, Lat1, H1 = pyproj.transform(Midelt, WGS84, E, N, h, radians=False)\n assert_almost_equal((Long1, Lat1, H1), (-4.6753456, 32.902199, 1341.467), decimal=5)\n\n\ndef test_equivalent_crs():\n transformer = Transformer.from_crs(\"epsg:4326\", 4326, skip_equivalent=True)\n assert transformer._transformer.projections_equivalent\n assert transformer._transformer.projections_exact_same\n assert transformer._transformer.skip_equivalent\n\n\ndef test_equivalent_crs__disabled():\n transformer = Transformer.from_crs(\"epsg:4326\", 4326)\n assert not transformer._transformer.skip_equivalent\n assert transformer._transformer.projections_equivalent\n assert transformer._transformer.projections_exact_same\n\n\ndef test_equivalent_crs__different():\n transformer = Transformer.from_crs(\"epsg:4326\", 3857, skip_equivalent=True)\n assert transformer._transformer.skip_equivalent\n assert not transformer._transformer.projections_equivalent\n assert not transformer._transformer.projections_exact_same\n\n\ndef test_equivalent_proj():\n with pytest.warns(UserWarning):\n proj_to = pyproj.Proj(4326).crs.to_proj4()\n with pytest.warns(FutureWarning):\n transformer = Transformer.from_proj(\n \"+init=epsg:4326\", proj_to, skip_equivalent=True\n )\n assert transformer._transformer.skip_equivalent\n assert transformer._transformer.projections_equivalent\n assert not transformer._transformer.projections_exact_same\n\n\ndef test_equivalent_proj__disabled():\n with pytest.warns(UserWarning):\n transformer = Transformer.from_proj(3857, pyproj.Proj(3857).crs.to_proj4())\n assert not transformer._transformer.skip_equivalent\n assert transformer._transformer.projections_equivalent\n assert not 
transformer._transformer.projections_exact_same\n\n\ndef test_equivalent_proj__different():\n transformer = Transformer.from_proj(3857, 4326, skip_equivalent=True)\n assert transformer._transformer.skip_equivalent\n assert not transformer._transformer.projections_equivalent\n assert not transformer._transformer.projections_exact_same\n\n\ndef test_equivalent_pipeline():\n transformer = Transformer.from_pipeline(\n \"+proj=pipeline +step +proj=longlat +ellps=WGS84 +step \"\n \"+proj=unitconvert +xy_in=rad +xy_out=deg\"\n )\n assert not transformer._transformer.skip_equivalent\n assert not transformer._transformer.projections_equivalent\n assert not transformer._transformer.projections_exact_same\n\n\ndef test_4d_transform():\n transformer = Transformer.from_pipeline(\"+init=ITRF2008:ITRF2000\")\n assert_almost_equal(\n transformer.transform(\n xx=3513638.19380, yy=778956.45250, zz=5248216.46900, tt=2008.75\n ),\n (3513638.1999428216, 778956.4532640711, 5248216.453456361, 2008.75),\n )\n\n\ndef test_2d_with_time_transform():\n transformer = Transformer.from_pipeline(\"+init=ITRF2008:ITRF2000\")\n assert_almost_equal(\n transformer.transform(xx=3513638.19380, yy=778956.45250, tt=2008.75),\n (3513638.1999428216, 778956.4532640711, 2008.75),\n )\n\n\ndef test_4d_transform_crs_obs1():\n transformer = Transformer.from_proj(7789, 8401)\n assert_almost_equal(\n transformer.transform(\n xx=3496737.2679, yy=743254.4507, zz=5264462.9620, tt=2019.0\n ),\n (3496737.757717311, 743253.9940103051, 5264462.701132784, 2019.0),\n )\n\n\ndef test_4d_transform_orginal_crs_obs1():\n assert_almost_equal(\n transform(7789, 8401, x=3496737.2679, y=743254.4507, z=5264462.9620, tt=2019.0),\n (3496737.757717311, 743253.9940103051, 5264462.701132784, 2019.0),\n )\n\n\ndef test_4d_transform_crs_obs2():\n transformer = Transformer.from_proj(4896, 7930)\n assert_almost_equal(\n transformer.transform(\n xx=3496737.2679, yy=743254.4507, zz=5264462.9620, tt=2019.0\n ),\n (3496737.7857162016, 743254.0394113371, 5264462.643659916, 2019.0),\n )\n\n\ndef test_2d_with_time_transform_crs_obs2():\n transformer = Transformer.from_proj(4896, 7930)\n assert_almost_equal(\n transformer.transform(xx=3496737.2679, yy=743254.4507, tt=2019.0),\n (3496737.4105305015, 743254.1014318303, 2019.0),\n )\n\n\ndef test_2d_with_time_transform_original_crs_obs2():\n assert_almost_equal(\n transform(4896, 7930, x=3496737.2679, y=743254.4507, tt=2019.0),\n (3496737.4105305015, 743254.1014318303, 2019.0),\n )\n\n\ndef test_4d_itransform():\n transformer = Transformer.from_pipeline(\"+init=ITRF2008:ITRF2000\")\n assert_almost_equal(\n list(\n transformer.itransform(\n [(3513638.19380, 778956.45250, 5248216.46900, 2008.75)]\n )\n ),\n [(3513638.1999428216, 778956.4532640711, 5248216.453456361, 2008.75)],\n )\n\n\ndef test_3d_time_itransform():\n transformer = Transformer.from_pipeline(\"+init=ITRF2008:ITRF2000\")\n assert_almost_equal(\n list(\n transformer.itransform(\n [(3513638.19380, 778956.45250, 2008.75)], time_3rd=True\n )\n ),\n [(3513638.1999428216, 778956.4532640711, 2008.75)],\n )\n\n\ndef test_4d_itransform_orginal_crs_obs1():\n assert_almost_equal(\n list(\n itransform(7789, 8401, [(3496737.2679, 743254.4507, 5264462.9620, 2019.0)])\n ),\n [(3496737.757717311, 743253.9940103051, 5264462.701132784, 2019.0)],\n )\n\n\ndef test_2d_with_time_itransform_original_crs_obs2():\n assert_almost_equal(\n list(\n itransform(4896, 7930, [(3496737.2679, 743254.4507, 2019.0)], time_3rd=True)\n ),\n [(3496737.4105305015, 743254.1014318303, 2019.0)],\n 
)\n\n\ndef test_itransform_time_3rd_invalid():\n\n with pytest.raises(ValueError, match=\"'time_3rd' is only valid for 3 coordinates.\"):\n list(\n itransform(\n 7789,\n 8401,\n [(3496737.2679, 743254.4507, 5264462.9620, 2019.0)],\n time_3rd=True,\n )\n )\n with pytest.raises(ValueError, match=\"'time_3rd' is only valid for 3 coordinates.\"):\n list(itransform(7789, 8401, [(3496737.2679, 743254.4507)], time_3rd=True))\n\n\ndef test_transform_no_error():\n with pytest.warns(FutureWarning):\n pj = Proj(init=\"epsg:4555\")\n pjx, pjy = pj(116.366, 39.867)\n transform(pj, Proj(4326), pjx, pjy, radians=True, errcheck=True)\n\n\ndef test_itransform_no_error():\n with pytest.warns(FutureWarning):\n pj = Proj(init=\"epsg:4555\")\n pjx, pjy = pj(116.366, 39.867)\n list(itransform(pj, Proj(4326), [(pjx, pjy)], radians=True, errcheck=True))\n\n\ndef test_transform_no_exception():\n # issue 249\n with pytest.warns(FutureWarning):\n transformer = Transformer.from_proj(\"+init=epsg:4326\", \"+init=epsg:27700\")\n transformer.transform(1.716073972, 52.658007833, errcheck=True)\n transformer.itransform([(1.716073972, 52.658007833)], errcheck=True)\n\n\ndef test_transform__out_of_bounds():\n with pytest.warns(FutureWarning):\n transformer = Transformer.from_proj(\"+init=epsg:4326\", \"+init=epsg:27700\")\n if LooseVersion(pyproj.proj_version_str) >= LooseVersion(\"7.0.0\"):\n with pytest.raises(pyproj.exceptions.ProjError):\n transformer.transform(100000, 100000, errcheck=True)\n else:\n assert np.all(np.isinf(transformer.transform(100000, 100000, errcheck=True)))\n\n\ndef test_transform_radians():\n with pytest.warns(FutureWarning):\n WGS84 = pyproj.Proj(\"+init=EPSG:4326\")\n ECEF = pyproj.Proj(proj=\"geocent\", ellps=\"WGS84\", datum=\"WGS84\")\n assert_almost_equal(\n pyproj.transform(\n ECEF, WGS84, -2704026.010, -4253051.810, 3895878.820, radians=True\n ),\n (-2.137113493845668, 0.6613203738996222, -20.531156923621893),\n )\n\n assert_almost_equal(\n pyproj.transform(\n WGS84,\n ECEF,\n -2.137113493845668,\n 0.6613203738996222,\n -20.531156923621893,\n radians=True,\n ),\n (-2704026.010, -4253051.810, 3895878.820),\n )\n\n\ndef test_itransform_radians():\n with pytest.warns(FutureWarning):\n WGS84 = pyproj.Proj(\"+init=EPSG:4326\")\n ECEF = pyproj.Proj(proj=\"geocent\", ellps=\"WGS84\", datum=\"WGS84\")\n assert_almost_equal(\n list(\n pyproj.itransform(\n ECEF, WGS84, [(-2704026.010, -4253051.810, 3895878.820)], radians=True\n )\n ),\n [(-2.137113493845668, 0.6613203738996222, -20.531156923621893)],\n )\n\n assert_almost_equal(\n list(\n pyproj.itransform(\n WGS84,\n ECEF,\n [(-2.137113493845668, 0.6613203738996222, -20.531156923621893)],\n radians=True,\n )\n ),\n [(-2704026.010, -4253051.810, 3895878.820)],\n )\n\n\ndef test_4d_transform__inverse():\n transformer = Transformer.from_pipeline(\"+init=ITRF2008:ITRF2000\")\n assert_almost_equal(\n transformer.transform(\n xx=3513638.1999428216,\n yy=778956.4532640711,\n zz=5248216.453456361,\n tt=2008.75,\n direction=TransformDirection.INVERSE,\n ),\n (3513638.19380, 778956.45250, 5248216.46900, 2008.75),\n )\n\n\ndef test_transform_direction():\n forward_transformer = Transformer.from_crs(4326, 3857)\n inverse_transformer = Transformer.from_crs(3857, 4326)\n assert inverse_transformer.transform(\n -33, 24, direction=TransformDirection.INVERSE\n ) == forward_transformer.transform(-33, 24)\n ident_transformer = Transformer.from_crs(4326, 3857)\n ident_transformer.transform(-33, 24, direction=TransformDirection.IDENT) == (\n -33,\n 24,\n )\n\n\ndef 
test_always_xy__transformer():\n transformer = Transformer.from_crs(2193, 4326, always_xy=True)\n assert_almost_equal(\n transformer.transform(1625350, 5504853),\n (173.29964730317386, -40.60674802693758),\n )\n\n\ndef test_always_xy__transform():\n assert_almost_equal(\n transform(2193, 4326, 1625350, 5504853, always_xy=True),\n (173.29964730317386, -40.60674802693758),\n )\n\n\ndef test_always_xy__itransform():\n assert_almost_equal(\n list(itransform(2193, 4326, [(1625350, 5504853)], always_xy=True)),\n [(173.29964730317386, -40.60674802693758)],\n )\n\n\ndef test_transform_direction__string():\n forward_transformer = Transformer.from_crs(4326, 3857)\n inverse_transformer = Transformer.from_crs(3857, 4326)\n assert inverse_transformer.transform(\n -33, 24, direction=\"INVERSE\"\n ) == forward_transformer.transform(-33, 24, direction=\"FORWARD\")\n ident_transformer = Transformer.from_crs(4326, 3857)\n ident_transformer.transform(-33, 24, direction=\"IDENT\") == (-33, 24)\n\n\ndef test_transform_direction__string_lowercase():\n forward_transformer = Transformer.from_crs(4326, 3857)\n inverse_transformer = Transformer.from_crs(3857, 4326)\n assert inverse_transformer.transform(\n -33, 24, direction=\"inverse\"\n ) == forward_transformer.transform(-33, 24, direction=\"forward\")\n ident_transformer = Transformer.from_crs(4326, 3857)\n ident_transformer.transform(-33, 24, direction=\"ident\") == (-33, 24)\n\n\ndef test_transform_direction__invalid():\n transformer = Transformer.from_crs(4326, 3857)\n with pytest.raises(ValueError, match=\"Invalid value\"):\n transformer.transform(-33, 24, direction=\"WHEREVER\")\n\n\ndef test_from_pipeline__non_transform_input():\n with pytest.raises(ProjError, match=\"Input is not a transformation\"):\n Transformer.from_pipeline(\"epsg:4326\")\n\n\ndef test_non_supported_initialization():\n with pytest.raises(ProjError, match=\"Transformer must be initialized using\"):\n Transformer()\n\n\ndef test_pj_info_properties():\n transformer = Transformer.from_crs(4326, 3857)\n assert transformer.name == \"pipeline\"\n assert transformer.description == \"Popular Visualisation Pseudo-Mercator\"\n assert transformer.definition.startswith(\"proj=pipeline\")\n assert transformer.has_inverse\n assert transformer.accuracy == 0\n\n\ndef test_to_wkt():\n transformer = Transformer.from_crs(4326, 3857)\n assert transformer.to_wkt().startswith(\n 'CONVERSION[\"Popular Visualisation Pseudo-Mercator\"'\n )\n\n\ndef test_str():\n assert str(Transformer.from_crs(4326, 3857)).startswith(\"proj=pipeline\")\n\n\ndef test_repr():\n assert repr(Transformer.from_crs(7789, 8401)) == (\n \"<Transformation Transformer: helmert>\\n\"\n \"Description: ITRF2014 to ETRF2014 (1)\\n\"\n \"Area of Use:\\n\"\n \"- name: Europe - ETRS89\\n\"\n \"- bounds: (-16.1, 32.88, 40.18, 84.17)\"\n )\n\n assert repr(Transformer.from_crs(4326, 3857)) == (\n \"<Conversion Transformer: pipeline>\\n\"\n \"Description: Popular Visualisation Pseudo-Mercator\\n\"\n \"Area of Use:\\n\"\n \"- name: World\\n\"\n \"- bounds: (-180.0, -90.0, 180.0, 90.0)\"\n )\n\n assert repr(Transformer.from_crs(4326, 26917)) == (\n \"<Unknown Transformer: unknown>\\n\"\n \"Description: unavailable until proj_trans is called\\n\"\n \"Area of Use:\\n- undefined\"\n )\n\n\ndef test_to_json_dict():\n transformer = Transformer.from_crs(4326, 3857)\n json_dict = transformer.to_json_dict()\n assert json_dict[\"type\"] == \"Conversion\"\n\n\ndef test_to_json():\n transformer = Transformer.from_crs(4326, 3857)\n json_data = 
transformer.to_json()\n assert \"Conversion\" in json_data\n assert \"\\n\" not in json_data\n\n\ndef test_to_json__pretty():\n transformer = Transformer.from_crs(4326, 3857)\n json_data = transformer.to_json(pretty=True)\n assert \"Conversion\" in json_data\n assert json_data.startswith('{\\n \"')\n\n\ndef test_to_json__pretty__indenation():\n transformer = Transformer.from_crs(4326, 3857)\n json_data = transformer.to_json(pretty=True, indentation=4)\n assert \"Conversion\" in json_data\n assert json_data.startswith('{\\n \"')\n\n\ndef test_transformer__operations():\n transformer = Transformer.from_crs(28356, 7856)\n assert [op.name for op in transformer.operations] == [\n \"Inverse of Map Grid of Australia zone 56\",\n \"GDA94 to GDA2020 (1)\",\n \"Map Grid of Australia zone 56\",\n ]\n\n\ndef test_transformer__operations_missing():\n assert Transformer.from_crs(7789, 8401).operations == ()\n\n\ndef test_transformer__operations__scope_remarks():\n transformer = Transformer.from_crs(28356, 7856)\n assert transformer.scope is None\n assert [op.scope for op in transformer.operations] == [\n None,\n \"Conformal transformation of GDA94 coordinates that have been derived through \"\n \"GNSS CORS.\",\n None,\n ]\n assert [str(op.remarks)[:5] for op in transformer.operations] == [\n \"None\",\n \"Scale\",\n \"None\",\n ]\n\n\ndef test_transformer_group():\n trans_group = TransformerGroup(7789, 8401)\n assert len(trans_group.transformers) == 2\n assert trans_group.transformers[0].name == \"helmert\"\n assert trans_group.transformers[1].description == (\"ITRF2014 to ETRF2014 (2)\")\n assert not trans_group.unavailable_operations\n assert trans_group.best_available\n\n\ndef test_transformer_group__unavailable(aoi_data_directory):\n trans_group = TransformerGroup(4326, 2964)\n assert len(trans_group.unavailable_operations) == 1\n assert (\n trans_group.unavailable_operations[0].name\n == \"Inverse of NAD27 to WGS 84 (33) + Alaska Albers\"\n )\n assert len(trans_group.transformers) == 8\n assert trans_group.best_available\n\n\ndef test_transform_group__missing_best(aoi_data_directory):\n with pytest.warns(FutureWarning):\n lat_lon_proj = pyproj.Proj(init=\"epsg:4326\", preserve_units=False)\n alaska_aea_proj = pyproj.Proj(init=\"epsg:2964\", preserve_units=False)\n\n with pytest.warns(\n UserWarning, match=\"Best transformation is not available due to missing Grid\"\n ):\n trans_group = pyproj.transformer.TransformerGroup(\n lat_lon_proj.crs, alaska_aea_proj.crs\n )\n\n assert not trans_group.best_available\n assert len(trans_group.transformers) == 37\n assert len(trans_group.unavailable_operations) == 41\n\n\ndef test_transform_group__area_of_interest(aoi_data_directory):\n with pytest.warns(\n UserWarning, match=\"Best transformation is not available due to missing Grid\"\n ):\n trans_group = TransformerGroup(\n 4326, 2964, area_of_interest=AreaOfInterest(-136.46, 49.0, -60.72, 83.17)\n )\n assert (\n trans_group.transformers[0].description\n == \"Inverse of NAD27 to WGS 84 (13) + Alaska Albers\"\n )\n\n\ndef test_transformer_group__get_transform_crs():\n tg = TransformerGroup(\"epsg:4258\", \"epsg:7415\")\n if LooseVersion(pyproj.proj_version_str) >= LooseVersion(\"6.3.1\"):\n assert len(tg.transformers) == 1\n else:\n assert len(tg.transformers) == 4\n\n\ndef test_transformer__area_of_interest(aoi_data_directory):\n transformer = Transformer.from_crs(\n 4326, 2964, area_of_interest=AreaOfInterest(-136.46, 49.0, -60.72, 83.17)\n )\n assert transformer.description == \"Inverse of NAD27 to WGS 84 
(13) + Alaska Albers\"\n\n\ndef test_transformer_proj__area_of_interest(aoi_data_directory):\n transformer = Transformer.from_proj(\n 4326, 2964, area_of_interest=AreaOfInterest(-136.46, 49.0, -60.72, 83.17)\n )\n assert transformer.description == \"Inverse of NAD27 to WGS 84 (13) + Alaska Albers\"\n\n\ndef test_transformer__area_of_interest__invalid(aoi_data_directory):\n with pytest.raises(ProjError):\n Transformer.from_crs(\n 4326, 2964, area_of_interest=(-136.46, 49.0, -60.72, 83.17)\n )\n\n\ndef test_transformer_group__area_of_interest__invalid(aoi_data_directory):\n with pytest.raises(ProjError):\n TransformerGroup(4326, 2964, area_of_interest=(-136.46, 49.0, -60.72, 83.17))\n\n\ndef test_transformer_equals():\n assert Transformer.from_crs(28356, 7856) == Transformer.from_crs(28356, 7856)\n\n\[email protected](\n \"comparison\",\n [Transformer.from_pipeline(\"+proj=pipeline +ellps=GRS80 +step +proj=cart\"), 22],\n)\ndef test_transformer_not_equals(comparison):\n assert Transformer.from_crs(28356, 7856) != comparison\n\n\[email protected](\n \"pipeline_str\",\n [\n \"+proj=pipeline +ellps=GRS80 +step +proj=cart\",\n \"+proj=pipeline +step +proj=unitconvert +xy_in=deg \"\n \"+xy_out=rad +ellps=GRS80 +step +proj=cart\",\n ],\n)\ndef test_pipeline_transform(pipeline_str):\n trans = Transformer.from_pipeline(pipeline_str)\n assert_almost_equal(\n trans.transform(50, 25, 0),\n (3717892.6072086394, 4430811.87152035, 2679074.4628772778),\n )\n\n\[email protected](\n \"pipeline_str\",\n [\n \"+proj=pipeline +ellps=GRS80 +step +proj=cart\",\n \"+proj=pipeline +step +proj=unitconvert +xy_in=deg \"\n \"+xy_out=rad +ellps=GRS80 +step +proj=cart\",\n ],\n)\ndef test_pipeline_itransform(pipeline_str):\n trans = Transformer.from_pipeline(pipeline_str)\n assert_almost_equal(\n list(trans.itransform([(50, 25, 0)])),\n [(3717892.6072086394, 4430811.87152035, 2679074.4628772778)],\n )\n\n\ndef test_pipeline_radian_transform_warning():\n trans = Transformer.from_pipeline(\"+proj=pipeline +ellps=GRS80 +step +proj=cart\")\n with pytest.warns(UserWarning):\n trans.transform(0.1, 0.1, 0, radians=True)\n" ]
[ [ "numpy.testing.assert_almost_equal", "numpy.isinf" ] ]
owerbat/scikit-learn_bench
[ "972efac3779578865424515db2897c4b8c71307a" ]
[ "cuml_bench/dbscan.py" ]
[ "# ===============================================================================\r\n# Copyright 2020-2021 Intel Corporation\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ===============================================================================\r\n\r\nimport argparse\r\n\r\nimport bench\r\nfrom cuml import DBSCAN\r\nfrom sklearn.metrics.cluster import davies_bouldin_score\r\n\r\n\r\nparser = argparse.ArgumentParser(description='cuML DBSCAN benchmark')\r\nparser.add_argument('-e', '--eps', '--epsilon', type=float, default=10.,\r\n help='Radius of neighborhood of a point')\r\nparser.add_argument('-m', '--min-samples', default=5, type=int,\r\n help='The minimum number of samples required in a '\r\n 'neighborhood to consider a point a core point')\r\nparams = bench.parse_args(parser)\r\n\r\n# Load generated data\r\nX, _, _, _ = bench.load_data(params)\r\n\r\n# Create our clustering object\r\ndbscan = DBSCAN(eps=params.eps,\r\n min_samples=params.min_samples)\r\n\r\n# Time fit\r\ntime, _ = bench.measure_function_time(dbscan.fit, X, params=params)\r\nlabels = dbscan.labels_\r\n\r\nX_host = bench.convert_to_numpy(X)\r\nlabels_host = bench.convert_to_numpy(labels)\r\n\r\nacc = davies_bouldin_score(X_host, labels_host)\r\nparams.n_clusters = len(set(labels_host)) - (1 if -1 in labels_host else 0)\r\n\r\nbench.print_output(library='cuml', algorithm='dbscan', stages=['training'],\r\n params=params, functions=['DBSCAN'], times=[time],\r\n metrics=[acc], metric_type='davies_bouldin_score', data=[X],\r\n alg_instance=dbscan)\r\n" ]
[ [ "sklearn.metrics.cluster.davies_bouldin_score" ] ]
Siddharth-Shrivastava7/DANNet
[ "8db10056a4e445d24fc899505923615457cae5b7" ]
[ "eval_new_val.py" ]
[ "import os\nimport torch\nimport numpy as np\n\nfrom PIL import Image\nimport torch.nn as nn\nfrom torch.utils import data\n\nfrom network import *\nfrom dataset.zurich_night_dataset import zurich_night_DataSet\nfrom dataset.acdc_dataset import acdc_dataset\nfrom configs.test_2_config import get_arguments\nfrom tqdm import tqdm\n\n\npalette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,\n 220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,\n 0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]\nzero_pad = 256 * 3 - len(palette)\nfor i in range(zero_pad):\n palette.append(0)\n\n\ndef colorize_mask(mask):\n new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')\n new_mask.putpalette(palette)\n return new_mask\n\n\ndef main():\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n device = torch.device(\"cuda\")\n\n\n args = get_arguments()\n if not os.path.exists(args.save):\n os.makedirs(args.save)\n\n if args.model == 'PSPNet':\n model = PSPNet(num_classes=args.num_classes)\n if args.model == 'DeepLab':\n model = Deeplab(num_classes=args.num_classes)\n if args.model == 'RefineNet':\n model = RefineNet(num_classes=args.num_classes, imagenet=False)\n\n saved_state_dict = torch.load(args.restore_from)\n model_dict = model.state_dict()\n saved_state_dict = {k: v for k, v in saved_state_dict.items() if k in model_dict}\n model_dict.update(saved_state_dict)\n model.load_state_dict(saved_state_dict)\n\n lightnet = LightNet()\n saved_state_dict = torch.load(args.restore_from_light)\n model_dict = lightnet.state_dict()\n saved_state_dict = {k: v for k, v in saved_state_dict.items() if k in model_dict}\n model_dict.update(saved_state_dict)\n lightnet.load_state_dict(saved_state_dict)\n\n model = model.to(device)\n lightnet = lightnet.to(device)\n model.eval()\n lightnet.eval()\n\n testloader = data.DataLoader(zurich_night_DataSet(args.data_dir, args.data_list, set=args.set))\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)\n\n weights = torch.log(torch.FloatTensor(\n [0.36869696, 0.06084986, 0.22824049, 0.00655399, 0.00877272, 0.01227341, 0.00207795, 0.0055127, 0.15928651,\n 0.01157818, 0.04018982, 0.01218957, 0.00135122, 0.06994545, 0.00267456, 0.00235192, 0.00232904, 0.00098658,\n 0.00413907])).cuda()\n weights = (torch.mean(weights) - weights) / torch.std(weights) * args.std + 1.0\n\n for index, batch in tqdm(enumerate(testloader)):\n if index % 10 == 0:\n print('%d processd' % index)\n image, name = batch\n image = image.to(device)\n with torch.no_grad():\n r = lightnet(image)\n enhancement = image + r\n if args.model == 'RefineNet':\n output2 = model(enhancement)\n else:\n _, output2 = model(enhancement)\n\n weights_prob = weights.expand(output2.size()[0], output2.size()[3], output2.size()[2], 19)\n weights_prob = weights_prob.transpose(1, 3)\n output2 = output2 * weights_prob\n output = interp(output2).cpu().data[0].numpy()\n\n output = output.transpose(1,2,0)\n # print(output.shape)\n # torch.save(output, 'out.pt')\n # print(name)\n name = name[0].split('/')[-1].replace('.png','.pt')\n # print(name)\n flname = os.path.join(args.save, name)\n torch.save(output, flname)\n # print('done')\n \n # output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)\n\n # output_col = colorize_mask(output)\n # output = Image.fromarray(output)\n\n # ###### get the enhanced image\n # # enhancement = enhancement.cpu().data[0].numpy().transpose(1,2,0)\n # # enhancement = 
enhancement*mean_std[1]+mean_std[0]\n # # enhancement = (enhancement-enhancement.min())/(enhancement.max()-enhancement.min())\n # # enhancement = enhancement[:, :, ::-1]*255 # change to BGR\n # # enhancement = Image.fromarray(enhancement.astype(np.uint8))\n\n # ###### get the light\n # # light = r.cpu().data[0].numpy().transpose(1,2,0)\n # # light = (light-light.min())/(light.max()-light.min())\n # # light = light[:, :, ::-1]*255 # change to BGR\n # # light = Image.fromarray(light.astype(np.uint8))\n\n\n # name = name[0].split('/')[-1]\n # output.save('%s/%s' % (args.save, name))\n # output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))\n # # enhancement.save('%s/%s_enhancement.png' % (args.save, name.split('.')[0]))\n # # light.save('%s/%s_light.png' % (args.save, name.split('.')[0]))\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.device", "torch.save", "torch.no_grad", "torch.FloatTensor", "torch.std", "torch.nn.Upsample", "torch.load", "torch.mean" ] ]
cyclone923/blocks-world
[ "808127e6b4fde2a9cb499cf6934db7ff73e2f534" ]
[ "fosae/view_plot.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nfrom fosae.get_view_data import MAX_N\n\ndef dis(preds, preds_next):\n for a,b in zip(preds, preds_next):\n print(a-b)\n print(\"-\"*10)\n\ndata = np.load(\"fosae/block_data/block_data.npy\")\npreds = np.load(\"fosae/block_data/block_preds.npy\")\n\ndata_next = np.load(\"fosae/block_data/block_data_next.npy\")\npreds_next = np.load(\"fosae/block_data/block_preds_next.npy\")\n\naction = np.load(\"fosae/block_data/change.npy\")\n\nfig, axs = plt.subplots(5, MAX_N, figsize=(8, 6))\nfor _, ax in np.ndenumerate(axs):\n ax.set_xticks([])\n ax.set_yticks([])\nplt.gca()\n\ndef show_img(ax, arr):\n ax.imshow(np.transpose(arr, (1,2,0)))\n\nwhile True:\n for one_data, one_data_next, one_p, one_p_nt, one_a in zip(\n data, data_next, preds, preds_next, action\n ):\n for i, (d, d_nt) in enumerate(zip(one_data, one_data_next)):\n show_img(axs[0,i], d)\n show_img(axs[1,i], d_nt)\n\n\n axs[2,0].imshow(one_p, cmap='gray')\n axs[3,0].imshow(one_p_nt, cmap='gray')\n axs[4,0].imshow(one_a, cmap='gray')\n print(np.abs(0.5-one_p) > 0.49)\n print(\"-\"*20)\n print(np.abs(0.5-one_p_nt) > 0.49)\n print(\"-\"*20)\n print(one_a)\n print(\"-\"*20)\n\n plt.pause(0.2)\n # a = 1\n\n\n\n" ]
[ [ "numpy.ndenumerate", "numpy.load", "matplotlib.pyplot.subplots", "numpy.transpose", "matplotlib.pyplot.pause", "numpy.abs", "matplotlib.pyplot.gca" ] ]
ajkhattak/amanzi
[ "fed8cae6af3f9dfa5984381d34b98401c3b47655" ]
[ "test_suites/benchmarking/chemistry/non_grid_aligned/non_grid_aligned.py" ]
[ "# plots calcium concentration along x at last time step \n# benchmark: compares to pflotran simulation results\n# author: S.Molins - Sept. 2013\n\nimport os\nimport sys\nimport h5py\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm as cm\n\n# break output arrays in chunks\ndef get_chunks(arr, chunk_size = 10):\n chunks = [ arr[start:start+chunk_size] for start in range(0, len(arr), chunk_size)]\n return chunks\n\n# ----------- AMANZI + ALQUIMIA -----------------------------------------------------------------\n\ndef GetXYZ_Amanzi(path,root,comp,nodesx=121,nodesy=101):\n\n # open amanzi concentration and mesh files\n dataname = os.path.join(path,root+\"_data.h5\")\n amanzi_file = h5py.File(dataname,'r')\n\n meshname = os.path.join(path,root+\"_mesh.h5\")\n amanzi_mesh = h5py.File(meshname,'r')\n\n # nodal x, y\n x = [r[0] for r in amanzi_mesh['0']['Mesh']['Nodes']]\n y = [r[1] for r in amanzi_mesh['0']['Mesh']['Nodes']]\n\n # element x\n x_ = get_chunks(x,chunk_size=nodesx)\n x_amanzi = np.array( [np.diff(xcoord[0:nodesx])/2+xcoord[0:nodesx-1] for xcoord in x_[0:-1]] )\n\n # element y\n y_ = get_chunks(y,chunk_size=nodesx)\n y_amanzi = np.diff(y_,axis=0)/2 + y_[0:-1]\n y_amanzi = np.array( [ycoord[0:-1] for ycoord in y_amanzi] )\n\n # element data for x, y -- not sure why this thing is transposed\n time = max(amanzi_file[comp].keys())\n v = [v[0] for v in amanzi_file[comp][time]]\n z_amanzi = np.array( get_chunks(v,chunk_size=nodesy-1) )\n z_amanzi = z_amanzi.T\n\n amanzi_file.close()\n amanzi_mesh.close()\n \n return (x_amanzi, y_amanzi, z_amanzi)\n\n# Main -------------------------------------------------------------------------------------\nif __name__ == \"__main__\":\n\n import run_amanzi_standard\n\n # root name for problem\n root = \"non_grid_aligned\"\n nodesx = 121\n nodesy = 101\n\n local_path = \"\" \n\n # subplots\n plt.subplots(1,figsize=(11,8))\n \n # Amanzi + Native chemistry\n try:\n input_file = os.path.join(\"non_grid_aligned-u.xml\")\n path_to_amanzi = \"output-u\"\n run_amanzi_standard.run_amanzi(input_file, 1, [\"calcite_dbs.bgd\", input_file], path_to_amanzi)\n \n comp = 'mineral_volume_fractions.cell.Calcite vol frac'\n x_native, y_native, z_native = GetXYZ_Amanzi(path_to_amanzi,root,comp,nodesx=nodesx,nodesy=nodesy)\n\n except Exception:\n pass \n \n extent = [0.0, 0.60, 0.0, 0.50]\n \n # plot adjustments\n #plt.subplots_adjust(left=0.0,bottom=0.15,right=0.99,top=0.90)\n plt.suptitle(\"Amanzi 2D Non-grid Aligned Flow and Transport\",fontsize=20)\n plt.tick_params(axis='both', which='major', labelsize=20)\n\n plt.xlabel(\"X (m)\",fontsize=20)\n plt.ylabel(\"Y (m)\",fontsize=20)\n\n plt.imshow(z_native, vmin=z_native.min(), vmax=z_native.max(), origin='lower',extent=extent,cmap=cm.bwr)\n cbar = plt.colorbar(shrink=.8, format='%.2e')\n\n cbar.ax.tick_params(axis='both', which='both', labelsize=20)\n cbar.ax.set_ylabel('Calcite volume\\nfraction [-]',fontsize=20,rotation=0)\n cbar.ax.yaxis.set_label_coords(4., 1.15)\n\n # plt.show()\n plt.savefig(local_path+\"non_grid_aligned_2d.png\",format=\"png\")\n\n \n" ]
[ [ "numpy.array", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.subplots", "numpy.diff", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.ylabel" ] ]
ledell/StackNet
[ "c6ee1f5b7c21479be362c33f404debbbdc37aed6" ]
[ "lib/python/SklearnDecisionRegressor.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n\nCopyright (c) 2017 Marios Michailidis\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\nCreated on Wed Aug 02 16:49:28 2017\n\n@author: Marios Michailidis\n\n\nSupplementary python script to perform sklearn's DecisionTreeRegressor, to berun in conjuction with \nStackNet's SklearnDecisionTreeRegressor.java class\n\nParameters:\n \nbootstrap=True,\ncriterion='gini'\nmax_depth=None\nmax_features=1.0\nmax_leaf_nodes=0\nmin_impurity_split=1e-07,\nmin_samples_leaf=1\nmin_samples_split=2\nmin_weight_fraction_leaf=0.0\nn_jobs=1\nrandom_state=None\nverbose=0\n\n\"\"\"\nimport sys\nfrom sklearn.tree import DecisionTreeRegressor\nimport os\nfrom sklearn.externals import joblib\nfrom sklearn.datasets import load_svmlight_file\nimport numpy as np\n\n\n\n\"\"\"\nconf_name: name with parameters\nparams : python dictionary with parameters and types\nreturn: several filenames and a new parameters's file with the actual values\n\"\"\"\n\ndef read_file_end_return_parameters(conf_name, params):\n new_params={}\n Use_dense=False\n columns=0\n task=\"\"\n model_name=\"\"\n data_name=\"\"\n prediction_name=\"\"\n\n if not os.path.isfile(conf_name):\n raise Exception(\" %s config file does not exist... 
\" % (conf_name)) \n\n f_file=open(conf_name, \"r\")\n for line in f_file:\n line=line.replace(\"\\n\",\"\").replace(\"\\r\",\"\")\n splits=line.split(\"=\")\n if len(splits)!=2:\n raise Exception(\" this (%s) line in %s config file has the wrong format..the corerct format should be: parameter=value \" % (line,conf_name)) \n parameter=splits[0]\n value=splits[1]\n if parameter==\"task\":\n task=value\n elif parameter==\"usedense\":\n if value.lower()==\"true\":\n Use_dense=True\n else :\n Use_dense=False \n elif parameter==\"columns\": \n try:\n columns=int(value) \n except:\n raise Exception(\" Parameter %s is expecting a int value but the current could nto be converted: %s \" % (parameter,value)) \n \n elif parameter==\"model\": \n model_name=value \n elif parameter==\"data\": \n data_name=value \n elif parameter==\"prediction\": \n prediction_name=value \n else : # it must be a model parameter\n #search if parameter is in the file\n if parameter not in params:\n raise Exception(\" Parameter %s is not recognised \" % (parameter)) \n else :\n paramaeter_type=params[parameter]\n if paramaeter_type==\"bool\":\n if value.lower()==\"true\":\n new_params[parameter]=True\n else :\n new_params[parameter]=False\n elif paramaeter_type==\"str\":\n new_params[parameter]=value \n elif paramaeter_type==\"float\":\n try:\n new_params[parameter]=float(value)\n except:\n raise Exception(\" Parameter %s is expecting a float value but the current could nto be converted: %s \" % (parameter,value)) \n elif paramaeter_type==\"int\":\n try:\n new_params[parameter]=int(value)\n except:\n raise Exception(\" Parameter %s is expecting a int value but the current could nto be converted: %s \" % (parameter,value)) \n #special condition for 'max_leaf_nodes' parameter\n if parameter==\"max_leaf_nodes\" and new_params[parameter]<=0:\n new_params[parameter]=None\n else :\n raise Exception(\" Parameter type %s is not recognised \" % (paramaeter_type)) \n f_file.close()\n \n return Use_dense,task,model_name,data_name,prediction_name,columns, new_params \n\n\"\"\"\nLoads svmlight data\nfname: filename to load\nreturns X, y\n\"\"\" \n\ndef get_data(fname,cols):\n data = load_svmlight_file(fname,n_features =cols)\n return data[0], data[1]\n \n#main method to get executed when calling python\ndef main():\n \n\n config_file=\"\" \n acceptable_parameters={\"bootstrap\" : \"bool\" ,\n \"criterion\" : \"str\" ,\n \"max_depth\" : \"int\" ,\n \"max_features\" : \"float\" , \n \"max_leaf_nodes\" : \"int\" , \n \"min_impurity_split\" : \"float\" , \n \"min_samples_leaf\" : \"int\" ,\n \"min_samples_split\" : \"int\" , \n \"min_weight_fraction_leaf\" : \"float\" , \n \"random_state\" : \"int\" , \n \"verbose\" : \"int\" \n }\n \n arguments=sys.argv\n print (\"arguments: \",arguments )\n if len(arguments)!=2:\n raise Exception(\" was expecting only one argument pointing to the config file... process will terminate\")\n\n else :\n config_file=arguments[1] \n dense,task_type,model_file,data_file,prediction_file,column, model_parameters=read_file_end_return_parameters(config_file, acceptable_parameters) \n #sanity checks\n if task_type not in [\"train\",\"predict\"]:\n raise Exception(\"task needs to be either train or predict, here it was %s ... \" % (task_type)) \n if model_file==\"\":\n raise Exception(\"model file cannot be empty\") \n if data_file==\"\":\n raise Exception(\"data file file cannot be empty\") \n if not os.path.isfile(data_file):\n raise Exception(\" %s data file does not exist... 
\" % (data_file)) \n if task_type==\"predict\" and prediction_file==\"\":\n raise Exception(\"prediction file cannot be empty when task=predict\") \n if len(model_parameters)==0:\n raise Exception(\"model parameters cannot be empty\") \n if column<1:\n raise Exception(\"columns cannot be less than 1...\") \n \n ################### Model training ###############\n if task_type ==\"train\":\n X,y=get_data(data_file, column) #load data\n model=DecisionTreeRegressor(**model_parameters) # set model parameters\n if dense: #convert to dense - useful if the data does nto have high dimensionality .\n #Also sklearn models are not optimzied for sparse data in tree-cased algos\n X=X.toarray()\n model.fit(X,y) #fitting model\n joblib.dump((model) , model_file)\n if not os.path.isfile(model_file):\n raise Exception(\" %s model file could not be exported - check permissions ... \" % (model_file)) \n \n sys.exit(-1)# exit script\n ################### predicting ############### \n else :\n if not os.path.isfile(model_file):\n raise Exception(\" %s model file could not be imported \" % (model_file)) \n X,y=get_data(data_file, column) #load data\n if dense: #convert to dense - useful if the data does nto have high dimensionality .\n #Also sklearn models are not optimzied for sparse data in tree-cased algos\n X=X.toarray() \n model=joblib.load(model_file)\n preds=model.predict(X)\n np.savetxt(prediction_file, preds, delimiter=\",\", fmt='%.9f')\n if not os.path.isfile(prediction_file):\n raise Exception(\" %s prediction file could not be exported - check permissions ... \" % (prediction_file)) \n sys.exit(-1)# exit script \n \nif __name__==\"__main__\":\n main()\n \n\n\n" ]
[ [ "numpy.savetxt", "sklearn.datasets.load_svmlight_file", "sklearn.externals.joblib.dump", "sklearn.externals.joblib.load", "sklearn.tree.DecisionTreeRegressor" ] ]
RAF96/method-optimization-resit
[ "75f87067942dbd0eafe092c1831d3267c01e3c3a" ]
[ "src/main/optim_methods/adam.py" ]
[ "import numpy as np\n\nfrom src.main.optim_methods.interface_method_optim import InterfaceMethodOptim, InterfaceOptimAnswer\nfrom src.main.stop_conditions.common import NumIterStopCondition, InterfaceStopCondition\n\n\nclass Adam(InterfaceMethodOptim):\n\n def __init__(self, *args,\n beta1: float,\n beta2: float,\n lr: float,\n **kwargs\n ):\n super().__init__(*args, **kwargs)\n F = self._w.shape[0]\n self._beta1 = beta1\n self._beta2 = beta2\n self._mw = np.zeros((F, 1))\n self._uw = np.zeros((F, 1))\n self._lr = lr\n\n def step(self):\n S, F = self._X.shape\n grad = self._function.loss_gradient(self._w, self._X, self._y)\n assert grad.shape == (F, 1)\n self._w = self._algo(self._w, grad)\n\n # For RELEASE version should be deleted\n loss = self._function.loss(self._w, self._X, self._y)\n self._tensorboard_writer.add_scalar('loss', loss, self._learning_step)\n\n def get_answer(self):\n return InterfaceOptimAnswer(self._w_start, self._w, self._function)\n\n # MOCK. probably should be rewritten\n def _get_stop_condition(self, F) -> InterfaceStopCondition:\n return NumIterStopCondition(F)\n\n def _algo(self, w, grad):\n \"\"\"\n https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam\n \"\"\"\n beta1 = self._beta1\n beta2 = self._beta2\n eta = self._lr\n mw = self._mw\n uw = self._uw\n\n new_mw = beta1 * mw + (1 - beta1) * grad\n new_uw = beta2 * uw + (1 - beta2) * grad ** 2\n\n _mw = new_mw / (1 - beta1)\n _uw = new_uw / (1 - beta2)\n\n new_w = w - eta * _mw / np.sqrt(_uw + self._eps_for_zero_division)\n\n return new_w\n" ]
[ [ "numpy.sqrt", "numpy.zeros" ] ]
Coder107AI/ai
[ "7865c83472c189c1b1de73af3f8ba270b48a2418" ]
[ "train.py" ]
[ "import argparse\nimport numpy as np\nimport torch\nimport torchvision\nimport os\nimport time\n\nfrom collections import OrderedDict\nfrom torch import nn\nfrom torch import optim\nfrom torch.autograd import Variable\nfrom torchvision import datasets, models, transforms\n\ndef main():\n args = get_arguments()\n data_dir = args.data_path\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n train_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])])\n\n valid_transforms = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])])\n\n test_transforms = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])])\n train_data = datasets.ImageFolder(train_dir, transform=train_transforms)\n valid_data = datasets.ImageFolder(valid_dir, transform=valid_transforms)\n test_data = datasets.ImageFolder(test_dir ,transform = test_transforms)\n trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)\n validloader = torch.utils.data.DataLoader(valid_data, batch_size = 32,shuffle = True)\n testloader = torch.utils.data.DataLoader(test_data, batch_size = 20, shuffle = True)\n dropout=0.5\n hidden_layer1 = 120\n lr = 0.001\n model = models.densenet121(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n from collections import OrderedDict\n classifier = nn.Sequential(OrderedDict([\n ('dropout',nn.Dropout(dropout)),\n ('inputs', nn.Linear(1024, hidden_layer1)),\n ('relu1', nn.ReLU()),\n ('hidden_layer1', nn.Linear(hidden_layer1, 90)),\n ('relu2',nn.ReLU()),\n ('hidden_layer2',nn.Linear(90,80)),\n ('relu3',nn.ReLU()),\n ('hidden_layer3',nn.Linear(80,102)),\n ('output', nn.LogSoftmax(dim=1))]))\n model.classifier = classifier\n criterion = nn.NLLLoss()\n optimizer = optim.Adam(model.classifier.parameters(), lr )\n model.cuda()\n model = model\n optimizer = optimizer\n training(trainloader=trainloader, validloader=validloader, optimizer=optimizer, criterion=criterion, model=model,epochs= args.epochs, print_every=3)\n check_accuracy_on_test(model, testloader)\n model.class_to_idx = train_data.class_to_idx\n model.cpu\n torch.save({'hidden_layer1':120,\n 'state_dict':model.state_dict(),\n 'class_to_idx':model.class_to_idx,\n 'model':model,\n 'classifier': classifier,\n 'optimizer': optimizer.state_dict()},\n 'checkpoint.pth')\n\n\ndef training(trainloader, validloader, optimizer, criterion, model, print_every, epochs, steps=0):\n loss_show=[]\n model.to('cuda')\n since = time.time()\n count = 0\n print(\"Started The Training: \")\n for e in range(epochs):\n running_loss = 0\n for ii, (inputs, labels) in enumerate(trainloader):\n steps += 1\n inputs,labels = inputs.to('cuda'), labels.to('cuda')\n\n optimizer.zero_grad()\n outputs = model.forward(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n if steps % print_every == 0:\n model.eval()\n accuracy=0\n validation_loss = 0\n for ii, (inputs2,labels2) in enumerate(validloader):\n optimizer.zero_grad()\n inputs2, labels2 = inputs2.to('cuda:0') , labels2.to('cuda:0')\n model.to('cuda:0')\n 
with torch.no_grad(): \n outputs = model.forward(inputs2)\n ps = torch.exp(outputs).data\n equality = (labels2.data == ps.max(1)[1])\n validation_loss += criterion(outputs, labels2).data[0]\n accuracy += equality.type_as(torch.FloatTensor()).mean()\n accuracy = accuracy / len(validloader)\n count += 1\n print(\"{}. Epoch: {}/{}\\n -------------------\\n\".format(count, e+1, epochs),\n \"Training Loss: {:.4f}\\n\".format(running_loss/print_every),\n \"Validation Loss: {:.4f}\\n\".format(validation_loss/len(validloader)),\n \"Validation Accuracy: {:.4f}\\n\".format(accuracy))\n running_loss = 0\n print(\"Finished\")\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n\ndef check_accuracy_on_test(model,testloader): \n correct = 0\n total = 0\n model.to('cuda:0')\n with torch.no_grad():\n for data in testloader:\n images, labels = data\n images, labels = images.to('cuda'), labels.to('cuda')\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n print('Accuracy of the network on the test images: %d %%' % (100 * correct / total))\n \ndef get_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--save_dir\", action=\"store\", dest=\"save_dir\", default=\".\" , help = \"Set directory to save checkpoints\")\n parser.add_argument(\"--model\", action=\"store\", dest=\"model\", default=\"densenet121\" , help = \"The architechture is already set to densenet121\")\n parser.add_argument(\"--learning_rate\", action=\"store\", dest=\"lr\", default=0.001 , help = \"Set learning rate\")\n parser.add_argument(\"--hidden_units\", action=\"store\", dest=\"hidden_units\", default=512 , help = \"Set number of hidden units\")\n parser.add_argument(\"--epochs\", action=\"store\", dest=\"epochs\", default=10 , help = \"Set number of epochs\")\n parser.add_argument(\"--gpu\", action=\"store_true\", dest=\"cuda\", default=False , help = \"Use CUDA for training\")\n parser.add_argument('data_path', action=\"store\")\n return parser.parse_args()\n\nmain()" ]
[ [ "torch.nn.NLLLoss", "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.LogSoftmax", "torch.max", "torch.no_grad", "torch.FloatTensor", "torch.nn.ReLU", "torch.utils.data.DataLoader", "torch.exp" ] ]
saadmoumad/Sounds-Classification
[ "f8a2235804592208091d201ecf00c363e85adc2d" ]
[ "util/data_gen.py" ]
[ "from audio_preprocessing import preprocess\nimport pandas as pd\n\nclass Gen_Dataset(Dataset):\n def __init__(self, base_dir, meta_path, eval=False):\n self.eval = eval\n #self.base_dir = os.path.join(base_dir,'test') if self.test else os.path.join(base_dir,'train')\n #self.csv_path = os.path.join(meta_path,'test.csv') if self.test else os.path.join(meta_path,'train.csv')\n self.base_dir = base_dir\n self.csv_path = meta_path\n \n self.file_names = []\n self.labels = []\n \n self.preproces = preprocess(self.base_dir, configuration_dict.get('sample_rate'))\n self.spec_len = configuration_dict.get('spec_len')\n \n csvData = pd.read_csv(self.csv_path)\n \n self.start_indx = 114 if self.eval else 0\n self.end_indx = len(csvData) if self.eval else 114\n \n \n for i in range(self.start_indx, self.end_indx):\n self.file_names.append(csvData.iloc[i, 0])\n try:\n self.labels.append(csvData.iloc[i, 1]) \n except AttributeError:\n pass \n \n\n \n def __getitem__(self, index):\n audio_path = os.path.join(self.base_dir,self.file_names[index]+'.wav')\n mfcc_spec = self.preproces.get_audio_MFCC(audio_path, self.spec_len, normalisation=False)\n \n #if self.test:\n # return mfcc_spec, self.file_names[index]\n \n return mfcc_spec, self.labels[index]\n \n def __len__(self):\n return len(self.file_names)" ]
[ [ "pandas.read_csv" ] ]
niteshjha08/Advanced-Lane-Lines
[ "8937183559acb7999be2b7a0684ed10beffe9797" ]
[ "src/detect_lane.py" ]
[ "import cv2\nimport numpy as np\nimport pickle\nfrom binary_tuner import mag_sobel,abs_sobel_mag, dir_sobel, get_sobel_mag, hls_thresh,color_thresh\nfrom perspective_transformations import perspective_transform, inv_perspective_transform, undistort_img, get_perspective_mtx,\\\n get_inv_perspective_mtx, get_distortion_measure\nimport matplotlib.pyplot as plt\nfrom moviepy.editor import VideoFileClip\n\n# Loading parameters stored in binary files\nM = get_perspective_mtx()\nMinv = get_inv_perspective_mtx()\nret, mtx, dist, rvecs, tvecs = get_distortion_measure()\n# Initialising fit coefficients\nleft_coeff=np.zeros((3,1))\nright_coeff=np.zeros((3,1))\n# Flag to check if frame is the first in a video\nfirst_run=True\n\n\n# Generate binary images using Sobel magnitude and HLS thresholding\ndef get_binary(img):\n mag_sob=mag_sobel(img,(64,255))\n color_bin=color_thresh(img)\n bin_img=np.zeros_like(mag_sob)\n bin_img[(mag_sob == 255)| ((color_bin == 255))] = 255\n return bin_img\n\n\n# Obtaining histogram of lower half of binary image for lane base location\ndef get_histogram(img):\n half_img=img[int(img.shape[0]/2):,:]\n histogram=np.sum(half_img,axis=0)\n return histogram\n\n\n# Function to run whole pipeline\ndef process_video():\n cap=cv2.VideoCapture('./../project_video.mp4')\n while(cv2.waitKey(10)!=ord('q')):\n ret,img=cap.read()\n process_img(img)\n\n\n# Generate video file as annotated output of raw video\ndef get_video():\n white_output='project_video_final_2ndtry.mp4'\n clip1 = VideoFileClip(\"./../project_video.mp4\")\n white_clip = clip1.fl_image(process_img) # NOTE: this function expects color images!!\n white_clip.write_videofile(white_output, audio=False)\n\n\n# Visualisation for lane finding using histograms of images as successively they get cropped vertically\n# This approach was tested but not used in the current pipeline.\ndef draw_boxes(img):\n margin=100\n nwindows=9\n window_size=80\n left_indices=[]\n right_indices=[]\n shape=img.shape\n copy=img.copy()\n for window in range(nwindows):\n copy=copy[:shape[0]-window*window_size,:]\n hist=get_histogram(copy)\n hist_left=hist[:int(hist.shape[0]/2)]\n hist_right=hist[int(hist.shape[0]/2):]\n left_max=np.argmax(hist_left)\n right_max=np.argmax(hist_right)\n left_indices.append(left_max)\n right_indices.append(right_max)\n\n for window in range(nwindows):\n cv2.rectangle(img,(left_indices[window]-int(window_size/2),shape[0]-window*window_size),\\\n (left_indices[window]+int(window_size/2),shape[0]-(window+1)*window_size),255,3)\n cv2.rectangle(img, (int(hist.shape[0]/2)+right_indices[window] - int(window_size / 2), shape[0]-window * window_size), \\\n (int(hist.shape[0]/2)+right_indices[window] + int(window_size / 2),shape[0]-(window+1)*window_size), 255, 3)\n cv2.imshow('img',img)\n cv2.waitKey()\n\n\n# Generate search locations using windows which slide horizontally as it moves up the image\ndef sliding_window(img):\n global left_coeff,right_coeff\n left_indices=[] # Save white pixels on left half of image\n right_indices=[] # Save white pixels on right half of image\n # +\\- 100 pixels are checked for non zero pixels\n margin=100\n # Image is divided into 9 parts along the vertical\n nwindows=9\n\n shape = img.shape\n window_height=shape[0]//nwindows\n minpix=50\n hist=get_histogram(img)\n midpoint = hist.shape[0] // 2\n # Find location of left lane base\n leftx_base=np.argmax(hist[:midpoint])\n # Find location of the right lane base\n rightx_base=midpoint + np.argmax(hist[midpoint:])\n\n 
imgcolor=np.dstack((img,img,img))\n # These variables will contain locations of all non-zero pixels\n nonzero=img.nonzero()\n nonzerox = np.array(nonzero[1])\n nonzeroy = np.array(nonzero[0])\n\n leftx_current=leftx_base\n rightx_current=rightx_base\n\n for window in range(nwindows):\n # Defining window points within which pixels will be counted\n left_x_low = leftx_current - margin\n left_x_high = leftx_current + margin\n y_high=shape[0] - window * window_height\n y_low = shape[0] - (window + 1) * window_height\n right_x_low = rightx_current - margin\n right_x_high = rightx_current + margin\n # Visualising search windows\n cv2.rectangle(imgcolor,(left_x_low,y_low),(left_x_high,y_high),(255,255,255),2)\n cv2.rectangle(imgcolor, (right_x_low, y_low), (right_x_high, y_high), (255, 0, 0), 2)\n\n # Storing all nonzeros indices that lie within the window defined.\n # It stores the index of array which contains index of nonzero pixels\n left_nonzeros_in_window=((nonzerox>=left_x_low) & (nonzerox < left_x_high) & (nonzeroy>=y_low)\n & (nonzeroy < y_high)).nonzero()[0]\n\n right_nonzeros_in_window = ((nonzerox >= right_x_low) & (nonzerox <= right_x_high) & (nonzeroy >= y_low)\n & (nonzeroy < y_high)).nonzero()[0]\n # Check if sufficient pixels are found to shift window location, else it stays the same\n if(len(left_nonzeros_in_window)>minpix):\n leftx_current= np.int(np.mean(nonzerox[left_nonzeros_in_window]))\n if(len(right_nonzeros_in_window)>minpix):\n rightx_current= np.int(np.mean(nonzerox[right_nonzeros_in_window]))\n\n left_indices.append(left_nonzeros_in_window)\n right_indices.append(right_nonzeros_in_window)\n\n left_indices=np.concatenate(left_indices)\n right_indices=np.concatenate(right_indices)\n # return values of pixel location which are non zero and also lie within the search window margin.\n leftx = nonzerox[left_indices]\n lefty = nonzeroy[left_indices]\n rightx = nonzerox[right_indices]\n righty = nonzeroy[right_indices]\n return leftx,lefty,rightx,righty,imgcolor\n\n# Takes img and searches around the polynomial which was fit in the previous fit, thus eliminating histogram based sliding window search\ndef search_around_poly(img):\n\n global left_coeff,right_coeff\n nonzero=img.nonzero()\n nonzerox=nonzero[1]\n nonzeroy=nonzero[0]\n # Defines margin width within which search will happen around the previous fit\n margin = 100\n left_lane_inds=((nonzerox > nonzeroy**2*left_coeff[0] + nonzeroy*left_coeff[1]+left_coeff[2]-margin)&\\\n (nonzerox < nonzeroy**2*left_coeff[0] + nonzeroy*left_coeff[1]+left_coeff[2]+margin)).nonzero()[0]\n right_lane_inds=((nonzerox > nonzeroy**2*right_coeff[0] + nonzeroy*right_coeff[1]+right_coeff[2]-margin)&\\\n (nonzerox < nonzeroy**2*right_coeff[0] + nonzeroy*right_coeff[1]+right_coeff[2]+margin)).nonzero()[0]\n # return values in this margin which had non zero pixels\n leftx=nonzerox[left_lane_inds]\n lefty=nonzeroy[left_lane_inds]\n rightx=nonzerox[right_lane_inds]\n righty=nonzeroy[right_lane_inds]\n return leftx,lefty,rightx,righty,img\n\n\n# Visualise the search margins defined above, (in search_around_poly())\ndef draw_current_lanes(img):\n global left_coeff,right_coeff\n ys=np.linspace(0,img.shape[0]-1,img.shape[0])\n xleft=ys**2*left_coeff[0]+ys*left_coeff[1]+left_coeff[2]\n xright=ys**2*right_coeff[0]+ys*right_coeff[1]+right_coeff[2]\n left_pairs = [(xleft[i], ys[i]) for i in range(len(xleft))]\n right_pairs = [(xright[i], ys[i]) for i in range(len(xright))]\n left_pairs = np.array(left_pairs).astype(np.int32)\n left_pairs = 
left_pairs.reshape(-1, 1, 2)\n right_pairs = np.array(right_pairs).astype(np.int32)\n right_pairs = right_pairs.reshape(-1, 1, 2)\n\n blank = np.zeros_like(img)\n blank_color = np.dstack((blank, blank, blank))\n left_lmargin=np.copy(left_pairs).T\n left_rmargin = np.copy(left_pairs).T\n left_lmargin[0]=left_lmargin[0]-100\n left_lmargin = left_lmargin.T\n left_rmargin[0]=left_rmargin[0]+100\n left_rmargin=left_rmargin.T\n\n right_lmargin = np.copy(right_pairs).T\n right_rmargin = np.copy(right_pairs).T\n right_lmargin[0] = right_lmargin[0] - 100\n right_lmargin = right_lmargin.T\n right_rmargin[0] = right_rmargin[0] + 100\n right_rmargin = right_rmargin.T\n left_margin=fill_lane(left_lmargin,left_rmargin,blank_color,(0,0,255))\n both_margins=fill_lane(right_lmargin,right_rmargin,left_margin,(0,0,255))\n return both_margins\n\n\n# Fits 2 degree polynomial using (x,y) pairs of lane pixels\ndef fit_line(leftx,lefty,rightx,righty,img):\n global left_coeff, right_coeff\n left_coeff=np.polyfit(lefty,leftx,2)\n right_coeff=np.polyfit(righty,rightx,2)\n\n lefty_arr=np.linspace(0,img.shape[1]-1,img.shape[1])\n leftx_arr=left_coeff[0]* (lefty_arr**2) + left_coeff[1]*lefty_arr + left_coeff[2]\n left_pairs=[(leftx_arr[i],lefty_arr[i]) for i in range(len(leftx_arr))]\n\n righty_arr = np.linspace(0, img.shape[1]-1, img.shape[1])\n rightx_arr = right_coeff[0] * (righty_arr ** 2) + right_coeff[1] * righty_arr + right_coeff[2]\n right_pairs = [(rightx_arr[i], righty_arr[i]) for i in range(len(rightx_arr))]\n # Finding lane center and vehicle offset\n lane_center=((left_coeff[0]* (700**2) + left_coeff[1]*700 + left_coeff[2])+(right_coeff[0] * (700 ** 2) + right_coeff[1] * 700 + right_coeff[2]))/2\n img_center=img.shape[1]/2\n lane_offset=img_center-lane_center\n\n left_pairs=np.array(left_pairs).astype(np.int32)\n left_pairs=left_pairs.reshape(-1,1,2)\n right_pairs=np.array(right_pairs).astype(np.int32)\n right_pairs=right_pairs.reshape(-1,1,2)\n # Drawing fit polynomial on the lanes\n cv2.polylines(img,[left_pairs],False,(0,255,0),5)\n cv2.polylines(img,[right_pairs],False,(0,255,0),5)\n R_left, R_right=radius_curvature(leftx,lefty,rightx,righty)\n return img,R_left,R_right,left_pairs,right_pairs,lane_offset\n\n\n# Calculate radius of curvature of lane\ndef radius_curvature(leftx,lefty,rightx,righty):\n # Scale factors:\n # 3.7m width = 700 px\n # 30m length = 720 px\n scalex= 3.7/700\n scaley=30/720\n # closest 'y' to the car, lowermost point of the image\n y=719\n\n left_fit_coeff=np.polyfit(lefty*scaley,leftx*scalex,2)\n right_fit_coeff=np.polyfit(righty*scaley,rightx*scalex,2)\n R_left=(1+(2*left_fit_coeff[0]*y*scaley + left_fit_coeff[1])**2)**(3/2)/(2*left_fit_coeff[0])\n R_right=(1+(2*right_fit_coeff[0]*y*scaley + right_fit_coeff[1])**2)**(3/2)/(2*right_fit_coeff[0])\n return R_left,R_right\n\n\n# Fill the lane found using fit_line() in the given color\ndef fill_lane(left_pairs,right_pairs,img,color=(0,255,0)):\n pts = np.array([left_pairs, np.flipud(right_pairs)])\n pts = np.concatenate(pts)\n cv2.fillPoly(img, [pts], color)\n return img\n\n\n# Pipeline for a frame of a video\ndef process_img(img):\n global first_run\n #img=cv2.imread('./../test_images/test1.jpg)\n\n #As moviepy Videoclip function uses RGB scheme, and image is processed in BGR, conversion takes place.\n img=cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\n # Undistortion\n undist=undistort_img(img,mtx,dist)\n # Binary image generation\n bin_img=get_binary(undist)\n\n blank_img = np.zeros_like(bin_img)\n 
blank_color=np.dstack((blank_img,blank_img,blank_img))\n # Perspective transformation of binary image\n warped=perspective_transform(bin_img,M)\n\n box = undist.copy()\n # For only the first frame of a vide, histogram based sliding search takes place, subsequently, previous fits are used to search\n # for new lane lines\n if(first_run):\n leftx,lefty,rightx,righty,imgbox = sliding_window(warped)\n\n else:\n #Visualising the search margin from previous fit\n search_margin = draw_current_lanes(warped)\n # Searching in margin for new fit\n leftx, lefty, rightx, righty, imgbox = search_around_poly(warped)\n\n #Fitting 2 degree polynomial\n imgcolor,R_left,R_right,left_pairs,right_pairs, lane_offset=fit_line(leftx,lefty,rightx,righty,imgbox)\n # Averaging left and right lane radius of curvature\n Rad_curve=(R_left+R_right)/2\n # Filling the detected lane in green(default)\n filled=fill_lane(left_pairs,right_pairs,blank_color)\n # Projecting the image back onto the original perspective\n filled_rev_warp=inv_perspective_transform(filled,Minv)\n # Weighted sum of this with undistorted image for transparency\n final = cv2.addWeighted(box, 1, filled_rev_warp, 0.3, 0)\n\n # Visualising search_around_poly(). Can comment whole block if not needed (line 297-line 306)\n if(first_run==False):\n warped_color=np.dstack((warped,warped,warped))\n bin_color=np.dstack((bin_img,bin_img,bin_img))\n print(search_margin.shape)\n print(warped.shape)\n search_margin_persp = inv_perspective_transform(search_margin, Minv)\n cv2.imshow('search_margin',search_margin_persp)\n marginimg=cv2.addWeighted(warped_color,1,search_margin,0.4,0)\n maginpersp=cv2.addWeighted(bin_color,1,search_margin_persp,0.3,0)\n cv2.imshow('marginimg',marginimg)\n\n # Converting pixel to m\n lane_offset= lane_offset * 3.7/685\n # For annotated output\n if(lane_offset<0):\n offset_direction=\"left\"\n else:\n offset_direction=\"right\"\n if(Rad_curve<0):\n sign=\"(-)\"\n else:\n sign=\"\"\n Rad_curve=np.abs(Rad_curve)\n lane_offset=np.abs(lane_offset)\n cv2.putText(final, \"Radius of curvature={0}{1:.2f}m\".format(sign,Rad_curve), (50, 50),\n cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)\n cv2.putText(final, \"Vehicle is {0:.2f}m {1} of center\".format(lane_offset,offset_direction), (50, 100),\n cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 2)\n final=cv2.cvtColor(final,cv2.COLOR_BGR2RGB)\n cv2.imshow('final annotated image', final)\n # To make subsequent frames use search_around_poly() instead of sliding_windows()\n first_run=False\n return final\n\n\nif __name__=='__main__':\n # To only display video frame by frame. Also use cv2.waitKey() in process_img() to pause frames\n process_video()\n # Use this to generate video file. Edit source and output names first\n #get_video()\n" ]
[ [ "numpy.concatenate", "numpy.zeros_like", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.copy", "numpy.mean", "numpy.flipud", "numpy.argmax", "numpy.polyfit", "numpy.abs", "numpy.dstack", "numpy.linspace" ] ]
huminghao16/MTMSN
[ "4f64a1d1cedab0d74e24ecf939b44c75137c4399" ]
[ "drop/drop_utils.py" ]
[ "import json\r\nimport copy\r\nimport string\r\nimport itertools\r\nimport numpy as np\r\nfrom random import choice\r\nfrom decimal import Decimal\r\nfrom typing import Any, Dict, List, Tuple, Callable\r\nimport collections\r\nfrom collections import defaultdict\r\n\r\nfrom allennlp.common.file_utils import cached_path\r\nfrom allennlp.tools.squad_eval import metric_max_over_ground_truths\r\nfrom allennlp.data.tokenizers import Token, Tokenizer, WordTokenizer\r\nfrom allennlp.data.dataset_readers.reading_comprehension.util import IGNORED_TOKENS, STRIPPED_CHARACTERS\r\n\r\nimport torch\r\nimport bert.tokenization as tokenization\r\nfrom squad.squad_utils import _get_best_indexes, get_final_text, _compute_softmax\r\nfrom squad.squad_evaluate import f1_score as calculate_f1\r\nfrom drop.w2n import word_to_num\r\nfrom drop.beam_search import beam_search\r\nfrom drop.drop_eval import (get_metrics as drop_em_and_f1, answer_json_to_strings)\r\n\r\n\r\nsign_remap = {0: 0, 1: 1, 2: -1}\r\n\r\n\r\nclass DropExample(object):\r\n def __init__(self,\r\n qas_id,\r\n question_tokens,\r\n passage_tokens,\r\n numbers_in_passage=None,\r\n number_indices=None,\r\n answer_type=None,\r\n number_of_answer=None,\r\n passage_spans=None,\r\n question_spans=None,\r\n add_sub_expressions=None,\r\n counts=None,\r\n negations=None,\r\n answer_annotations=None\r\n ):\r\n self.qas_id = qas_id\r\n self.question_tokens = question_tokens\r\n self.passage_tokens = passage_tokens\r\n self.numbers_in_passage = numbers_in_passage\r\n self.number_indices = number_indices\r\n self.answer_type = answer_type\r\n self.number_of_answer = number_of_answer\r\n self.passage_spans = passage_spans\r\n self.question_spans = question_spans\r\n self.add_sub_expressions = add_sub_expressions\r\n self.counts = counts\r\n self.negations = negations\r\n self.answer_annotations = answer_annotations\r\n\r\n def __str__(self):\r\n return self.__repr__()\r\n\r\n def __repr__(self):\r\n s = \"\"\r\n s += \"qas_id: %s\" % (tokenization.printable_text(self.qas_id))\r\n s += \", \\nquestion: %s\" % (\" \".join(self.question_tokens))\r\n s += \", \\npassage: %s\" % (\" \".join(self.passage_tokens))\r\n if self.numbers_in_passage:\r\n s += \", \\nnumbers_in_passage: {}\".format(self.numbers_in_passage)\r\n if self.number_indices:\r\n s += \", \\nnumber_indices: {}\".format(self.number_indices)\r\n if self.answer_type:\r\n s += \", \\nanswer_type: {}\".format(self.answer_type)\r\n if self.number_of_answer:\r\n s += \", \\nnumber_of_answer: {}\".format(self.number_of_answer)\r\n if self.passage_spans:\r\n s += \", \\npassage_spans: {}\".format(self.passage_spans)\r\n if self.question_spans:\r\n s += \", \\nquestion_spans: {}\".format(self.question_spans)\r\n if self.add_sub_expressions:\r\n s += \", \\nadd_sub_expressions: {}\".format(self.add_sub_expressions)\r\n if self.counts:\r\n s += \", \\ncounts: {}\".format(self.counts)\r\n if self.negations:\r\n s += \", \\nnegations: {}\".format(self.negations)\r\n if self.answer_annotations:\r\n s += \", \\nanswer_annotations: {}\".format(self.answer_annotations)\r\n return s\r\n\r\n\r\nclass InputFeatures(object):\r\n def __init__(self,\r\n unique_id,\r\n example_index,\r\n tokens,\r\n que_token_to_orig_map,\r\n doc_token_to_orig_map,\r\n input_ids,\r\n input_mask,\r\n segment_ids,\r\n number_indices,\r\n start_indices=None,\r\n end_indices=None,\r\n number_of_answers=None,\r\n add_sub_expressions=None,\r\n input_counts=None,\r\n negations=None):\r\n self.unique_id = unique_id\r\n self.example_index = 
example_index\r\n self.tokens = tokens\r\n self.que_token_to_orig_map = que_token_to_orig_map\r\n self.doc_token_to_orig_map = doc_token_to_orig_map\r\n self.input_ids = input_ids\r\n self.input_mask = input_mask\r\n self.segment_ids = segment_ids\r\n self.number_indices = number_indices\r\n self.start_indices = start_indices\r\n self.end_indices = end_indices\r\n self.number_of_answers = number_of_answers\r\n self.add_sub_expressions = add_sub_expressions\r\n self.input_counts = input_counts\r\n self.negations = negations\r\n\r\n def __str__(self):\r\n return self.__repr__()\r\n\r\n def __repr__(self):\r\n s = \"\"\r\n s += \"unique_id: %s\" % (self.unique_id)\r\n s += \", \\nnumber_indices: {}\".format(self.number_indices)\r\n if self.start_indices:\r\n s += \", \\nstart_indices: {}\".format(self.start_indices)\r\n if self.end_indices:\r\n s += \", \\nend_indices: {}\".format(self.end_indices)\r\n if self.number_of_answers:\r\n s += \", \\nnumber_of_answers: {}\".format(self.number_of_answers)\r\n if self.add_sub_expressions:\r\n s += \", \\nadd_sub_expressions: {}\".format(self.add_sub_expressions)\r\n if self.input_counts:\r\n s += \", \\ninput_counts: {}\".format(self.input_counts)\r\n if self.negations:\r\n s += \", \\nnegations: {}\".format(self.negations)\r\n return s\r\n\r\n\r\nWORD_NUMBER_MAP = {\"zero\": 0, \"one\": 1, \"two\": 2, \"three\": 3, \"four\": 4,\r\n \"five\": 5, \"six\": 6, \"seven\": 7, \"eight\": 8,\r\n \"nine\": 9, \"ten\": 10, \"eleven\": 11, \"twelve\": 12,\r\n \"thirteen\": 13, \"fourteen\": 14, \"fifteen\": 15,\r\n \"sixteen\": 16, \"seventeen\": 17, \"eighteen\": 18, \"nineteen\": 19}\r\n\r\n\r\ndef split_token_by_delimiter(token: Token, delimiter: str) -> List[Token]:\r\n split_tokens = []\r\n char_offset = token.idx\r\n for sub_str in token.text.split(delimiter):\r\n if sub_str:\r\n split_tokens.append(Token(text=sub_str, idx=char_offset))\r\n char_offset += len(sub_str)\r\n split_tokens.append(Token(text=delimiter, idx=char_offset))\r\n char_offset += len(delimiter)\r\n if split_tokens:\r\n split_tokens.pop(-1)\r\n char_offset -= len(delimiter)\r\n return split_tokens\r\n else:\r\n return [token]\r\n\r\n\r\ndef split_tokens_by_hyphen(tokens: List[Token]) -> List[Token]:\r\n hyphens = [\"-\", \"–\", \"~\"]\r\n new_tokens: List[Token] = []\r\n\r\n for token in tokens:\r\n if any(hyphen in token.text for hyphen in hyphens):\r\n unsplit_tokens = [token]\r\n split_tokens: List[Token] = []\r\n for hyphen in hyphens:\r\n for unsplit_token in unsplit_tokens:\r\n if hyphen in token.text:\r\n split_tokens += split_token_by_delimiter(unsplit_token, hyphen)\r\n else:\r\n split_tokens.append(unsplit_token)\r\n unsplit_tokens, split_tokens = split_tokens, []\r\n new_tokens += unsplit_tokens\r\n else:\r\n new_tokens.append(token)\r\n\r\n return new_tokens\r\n\r\n\r\ndef extend_number_magnitude(number, next_token):\r\n if next_token == \"hundred\":\r\n number *= 100\r\n elif next_token == \"thousand\":\r\n number *= 1000\r\n elif next_token == \"million\":\r\n number *= 1000000\r\n elif next_token == \"billion\":\r\n number *= 1000000000\r\n elif next_token == \"thousand\":\r\n number *= 1000000000000\r\n return number\r\n\r\n\r\nclass DropReader(object):\r\n def __init__(self,\r\n debug: bool = False,\r\n tokenizer: Tokenizer = None,\r\n include_more_numbers: bool = False,\r\n skip_when_all_empty: List[str] = None,\r\n max_number_of_answer: int = 8,\r\n max_number_count: int = 10,\r\n logger = None) -> None:\r\n super().__init__()\r\n self.debug = debug\r\n 
self._tokenizer = tokenizer or WordTokenizer()\r\n self.include_more_numbers = include_more_numbers\r\n self.max_number_of_answer = max_number_of_answer\r\n self.max_number_count = max_number_count\r\n self.skip_when_all_empty = skip_when_all_empty if skip_when_all_empty is not None else []\r\n for item in self.skip_when_all_empty:\r\n assert item in [\"passage_span\", \"question_span\", \"addition_subtraction\", \"counting\", \"negation\"], \\\r\n f\"Unsupported skip type: {item}\"\r\n self.logger = logger\r\n\r\n def _read(self, file_path: str):\r\n # if `file_path` is a URL, redirect to the cache\r\n file_path = cached_path(file_path)\r\n self.logger.info(\"Reading file at %s\", file_path)\r\n with open(file_path) as dataset_file:\r\n dataset = json.load(dataset_file)\r\n examples, skip_count = [], 0\r\n for passage_id, passage_info in dataset.items():\r\n passage_text = passage_info[\"passage\"]\r\n passage_tokens = self._tokenizer.tokenize(passage_text)\r\n passage_tokens = split_tokens_by_hyphen(passage_tokens)\r\n for question_answer in passage_info[\"qa_pairs\"]:\r\n question_id = question_answer[\"query_id\"]\r\n question_text = question_answer[\"question\"].strip()\r\n answer_annotations = []\r\n if \"answer\" in question_answer:\r\n answer_annotations.append(question_answer[\"answer\"])\r\n if \"validated_answers\" in question_answer:\r\n answer_annotations += question_answer[\"validated_answers\"]\r\n\r\n example = self.text_to_example(question_text, passage_text, question_id, answer_annotations, passage_tokens)\r\n if example is not None:\r\n examples.append(example)\r\n else:\r\n skip_count += 1\r\n if self.debug and len(examples) > 100:\r\n break\r\n self.logger.info(f\"Skipped {skip_count} examples, kept {len(examples)} examples.\")\r\n return examples\r\n\r\n def text_to_example(self, # type: ignore\r\n question_text: str,\r\n passage_text: str,\r\n question_id: str,\r\n answer_annotations: List[Dict] = None,\r\n passage_tokens: List[Token] = None):\r\n if not passage_tokens:\r\n passage_tokens = self._tokenizer.tokenize(passage_text)\r\n passage_tokens = split_tokens_by_hyphen(passage_tokens)\r\n question_tokens = self._tokenizer.tokenize(question_text)\r\n question_tokens = split_tokens_by_hyphen(question_tokens)\r\n\r\n answer_type: str = None\r\n answer_texts: List[str] = []\r\n number_of_answer: int = None\r\n if answer_annotations:\r\n # Currently we only use the first annotated answer here, but actually this doesn't affect\r\n # the training, because we only have one annotation for the train set.\r\n answer_type, answer_texts = self.extract_answer_info_from_annotation(answer_annotations[0])\r\n number_of_answer = self.max_number_of_answer if len(answer_texts) > self.max_number_of_answer else len(answer_texts)\r\n\r\n # Tokenize the answer text in order to find the matched span based on token\r\n tokenized_answer_texts = []\r\n for answer_text in answer_texts:\r\n answer_tokens = self._tokenizer.tokenize(answer_text)\r\n answer_tokens = split_tokens_by_hyphen(answer_tokens)\r\n tokenized_answer_texts.append(answer_tokens)\r\n\r\n numbers_in_passage = [0]\r\n number_indices = [-1]\r\n for token_index, token in enumerate(passage_tokens):\r\n number = self.convert_word_to_number(token.text, self.include_more_numbers)\r\n if number is not None:\r\n numbers_in_passage.append(number)\r\n number_indices.append(token_index)\r\n\r\n valid_passage_spans = \\\r\n self.find_valid_spans(passage_tokens, tokenized_answer_texts) if tokenized_answer_texts else []\r\n 
valid_question_spans = \\\r\n self.find_valid_spans(question_tokens, tokenized_answer_texts) if tokenized_answer_texts else []\r\n number_of_answer = None if valid_passage_spans == [] and valid_question_spans == [] else number_of_answer\r\n\r\n target_numbers = []\r\n # `answer_texts` is a list of valid answers.\r\n for answer_text in answer_texts:\r\n number = self.convert_word_to_number(answer_text, self.include_more_numbers)\r\n if number is not None:\r\n target_numbers.append(number)\r\n\r\n valid_signs_for_add_sub_expressions = self.find_valid_add_sub_expressions(numbers_in_passage,\r\n target_numbers,\r\n max_number_of_numbers_to_consider=3)\r\n\r\n # Currently we only support count number 0 ~ 9\r\n numbers_for_count = list(range(self.max_number_count))\r\n valid_counts = self.find_valid_counts(numbers_for_count, target_numbers)\r\n\r\n valid_negations = self.find_valid_negations(numbers_in_passage, target_numbers)\r\n\r\n type_to_answer_map = {\"passage_span\": valid_passage_spans,\r\n \"question_span\": valid_question_spans,\r\n \"addition_subtraction\": valid_signs_for_add_sub_expressions,\r\n \"counting\": valid_counts,\r\n \"negation\": valid_negations}\r\n\r\n if self.skip_when_all_empty \\\r\n and not any(type_to_answer_map[skip_type] for skip_type in self.skip_when_all_empty):\r\n return None\r\n\r\n return DropExample(\r\n qas_id=question_id,\r\n question_tokens=[token.text for token in question_tokens],\r\n passage_tokens=[token.text for token in passage_tokens],\r\n numbers_in_passage=numbers_in_passage,\r\n number_indices=number_indices,\r\n answer_type=answer_type,\r\n number_of_answer=number_of_answer,\r\n passage_spans=valid_passage_spans,\r\n question_spans=valid_question_spans,\r\n add_sub_expressions=valid_signs_for_add_sub_expressions,\r\n counts=valid_counts,\r\n negations=valid_negations,\r\n answer_annotations=answer_annotations)\r\n\r\n @staticmethod\r\n def extract_answer_info_from_annotation(answer_annotation: Dict[str, Any]) -> Tuple[str, List[str]]:\r\n answer_type = None\r\n if answer_annotation[\"spans\"]:\r\n answer_type = \"spans\"\r\n elif answer_annotation[\"number\"]:\r\n answer_type = \"number\"\r\n elif any(answer_annotation[\"date\"].values()):\r\n answer_type = \"date\"\r\n\r\n answer_content = answer_annotation[answer_type] if answer_type is not None else None\r\n\r\n answer_texts: List[str] = []\r\n if answer_type is None: # No answer\r\n pass\r\n elif answer_type == \"spans\":\r\n # answer_content is a list of string in this case\r\n answer_texts = answer_content\r\n elif answer_type == \"date\":\r\n # answer_content is a dict with \"month\", \"day\", \"year\" as the keys\r\n date_tokens = [answer_content[key]\r\n for key in [\"month\", \"day\", \"year\"] if key in answer_content and answer_content[key]]\r\n answer_texts = date_tokens\r\n elif answer_type == \"number\":\r\n # answer_content is a string of number\r\n answer_texts = [answer_content]\r\n return answer_type, answer_texts\r\n\r\n @staticmethod\r\n def convert_word_to_number(word: str, try_to_include_more_numbers=False, normalized_tokens=None, token_index=None):\r\n \"\"\"\r\n Currently we only support limited types of conversion.\r\n \"\"\"\r\n if try_to_include_more_numbers:\r\n # strip all punctuations from the sides of the word, except for the negative sign\r\n punctruations = string.punctuation.replace('-', '')\r\n word = word.strip(punctruations)\r\n # some words may contain the comma as deliminator\r\n word = word.replace(\",\", \"\")\r\n # word2num will convert hundred, 
thousand ... to number, but we skip it.\r\n if word in [\"hundred\", \"thousand\", \"million\", \"billion\", \"trillion\"]:\r\n return None\r\n try:\r\n number = word_to_num(word)\r\n except ValueError:\r\n try:\r\n number = int(word)\r\n except ValueError:\r\n try:\r\n number = float(word)\r\n except ValueError:\r\n number = None\r\n if number is not None and normalized_tokens is not None and token_index is not None:\r\n if token_index < len(normalized_tokens) - 1:\r\n next_token = normalized_tokens[token_index + 1]\r\n if next_token in [\"hundred\", \"thousand\", \"million\", \"billion\", \"trillion\"]:\r\n number = extend_number_magnitude(number, next_token)\r\n return number\r\n else:\r\n no_comma_word = word.replace(\",\", \"\")\r\n if no_comma_word in WORD_NUMBER_MAP:\r\n number = WORD_NUMBER_MAP[no_comma_word]\r\n else:\r\n try:\r\n number = int(no_comma_word)\r\n except ValueError:\r\n number = None\r\n return number\r\n\r\n @staticmethod\r\n def find_valid_spans(passage_tokens: List[Token],\r\n answer_texts: List[List[Token]]) -> List[Tuple[int, int]]:\r\n normalized_tokens = [token.text.lower().strip(STRIPPED_CHARACTERS) for token in passage_tokens]\r\n word_positions: Dict[str, List[int]] = defaultdict(list)\r\n for i, token in enumerate(normalized_tokens):\r\n word_positions[token].append(i)\r\n spans = []\r\n for answer_text in answer_texts:\r\n answer_tokens = [token.text.lower().strip(STRIPPED_CHARACTERS) for token in answer_text]\r\n num_answer_tokens = len(answer_tokens)\r\n if answer_tokens[0] not in word_positions:\r\n continue\r\n for span_start in word_positions[answer_tokens[0]]:\r\n span_end = span_start # span_end is _inclusive_\r\n answer_index = 1\r\n while answer_index < num_answer_tokens and span_end + 1 < len(normalized_tokens):\r\n token = normalized_tokens[span_end + 1]\r\n if answer_tokens[answer_index].strip(STRIPPED_CHARACTERS) == token:\r\n answer_index += 1\r\n span_end += 1\r\n elif token in IGNORED_TOKENS:\r\n span_end += 1\r\n else:\r\n break\r\n if num_answer_tokens == answer_index:\r\n spans.append((span_start, span_end))\r\n return spans\r\n\r\n @staticmethod\r\n def find_valid_add_sub_expressions(numbers: List[int],\r\n targets: List[int],\r\n max_number_of_numbers_to_consider: int = 2) -> List[List[int]]:\r\n valid_signs_for_add_sub_expressions = []\r\n decimal_targets = [Decimal(x).quantize(Decimal('0.00')) for x in targets]\r\n # TODO: Try smaller numbers?'\r\n for number_of_numbers_to_consider in range(2, max_number_of_numbers_to_consider + 1):\r\n possible_signs = list(itertools.product((-1, 1), repeat=number_of_numbers_to_consider))\r\n for number_combination in itertools.combinations(enumerate(numbers), number_of_numbers_to_consider):\r\n indices = [it[0] for it in number_combination]\r\n values = [it[1] for it in number_combination]\r\n for signs in possible_signs:\r\n eval_value = sum(sign * value for sign, value in zip(signs, values))\r\n decimal_eval_value = Decimal(eval_value).quantize(Decimal('0.00'))\r\n if decimal_eval_value in decimal_targets and min(indices) != 0:\r\n labels_for_numbers = [0] * len(numbers) # 0 represents ``not included''.\r\n for index, sign in zip(indices, signs):\r\n labels_for_numbers[index] = 1 if sign == 1 else 2 # 1 for positive, 2 for negative\r\n if labels_for_numbers not in valid_signs_for_add_sub_expressions:\r\n valid_signs_for_add_sub_expressions.append(labels_for_numbers)\r\n return valid_signs_for_add_sub_expressions\r\n\r\n @staticmethod\r\n def find_valid_negations(numbers: List[int], targets: 
List[int]) -> List[List[int]]:\r\n valid_negations = []\r\n decimal_targets = [Decimal(x).quantize(Decimal('0.00')) for x in targets]\r\n for index, number in enumerate(numbers):\r\n decimal_negating_number = Decimal(100 - number).quantize(Decimal('0.00'))\r\n if number > 0 and number < 100 and decimal_negating_number in decimal_targets:\r\n labels_for_numbers = [0] * len(numbers)\r\n labels_for_numbers[index] = 1\r\n valid_negations.append(labels_for_numbers)\r\n return valid_negations\r\n\r\n @staticmethod\r\n def find_valid_counts(count_numbers: List[int], targets: List[int]) -> List[int]:\r\n valid_indices = []\r\n for index, number in enumerate(count_numbers):\r\n if number in targets:\r\n valid_indices.append(index)\r\n return valid_indices\r\n\r\n\r\ndef convert_answer_spans(spans, orig_to_tok_index, all_len, all_tokens):\r\n tok_start_positions, tok_end_positions = [], []\r\n for span in spans:\r\n start_position, end_position = span[0], span[1]\r\n tok_start_position = orig_to_tok_index[start_position]\r\n if end_position + 1 >= len(orig_to_tok_index):\r\n tok_end_position = all_len - 1\r\n else:\r\n tok_end_position = orig_to_tok_index[end_position + 1] - 1\r\n if tok_start_position < len(all_tokens) and tok_end_position < len(all_tokens):\r\n tok_start_positions.append(tok_start_position)\r\n tok_end_positions.append(tok_end_position)\r\n return tok_start_positions, tok_end_positions\r\n\r\n\r\ndef convert_examples_to_features(examples, tokenizer, max_seq_length, is_train, answering_abilities=None, logger=None):\r\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\r\n\r\n unique_id = 1000000000\r\n skip_count, truncate_count = 0, 0\r\n\r\n features = []\r\n for (example_index, example) in enumerate(examples):\r\n que_tok_to_orig_index = []\r\n que_orig_to_tok_index = []\r\n all_que_tokens = []\r\n for (i, token) in enumerate(example.question_tokens):\r\n que_orig_to_tok_index.append(len(all_que_tokens))\r\n sub_tokens = tokenizer.tokenize(token)\r\n for sub_token in sub_tokens:\r\n que_tok_to_orig_index.append(i)\r\n all_que_tokens.append(sub_token)\r\n\r\n doc_tok_to_orig_index = []\r\n doc_orig_to_tok_index = []\r\n all_doc_tokens = []\r\n for (i, token) in enumerate(example.passage_tokens):\r\n doc_orig_to_tok_index.append(len(all_doc_tokens))\r\n if i in example.number_indices:\r\n doc_tok_to_orig_index.append(i)\r\n all_doc_tokens.append(token)\r\n else:\r\n sub_tokens = tokenizer.tokenize(token)\r\n for sub_token in sub_tokens:\r\n doc_tok_to_orig_index.append(i)\r\n all_doc_tokens.append(sub_token)\r\n\r\n # The -3 accounts for [CLS], [SEP] and [SEP]\r\n # Truncate the passage according to the max sequence length\r\n max_tokens_for_doc = max_seq_length - len(all_que_tokens) - 3\r\n all_doc_len = len(all_doc_tokens)\r\n if all_doc_len > max_tokens_for_doc:\r\n all_doc_tokens = all_doc_tokens[:max_tokens_for_doc]\r\n truncate_count += 1\r\n\r\n query_tok_start_positions, query_tok_end_positions = \\\r\n convert_answer_spans(example.question_spans, que_orig_to_tok_index, len(all_que_tokens), all_que_tokens)\r\n\r\n passage_tok_start_positions, passage_tok_end_positions = \\\r\n convert_answer_spans(example.passage_spans, doc_orig_to_tok_index, all_doc_len, all_doc_tokens)\r\n\r\n tok_number_indices = []\r\n for index in example.number_indices:\r\n if index != -1:\r\n tok_index = doc_orig_to_tok_index[index]\r\n if tok_index < len(all_doc_tokens):\r\n tok_number_indices.append(tok_index)\r\n else:\r\n tok_number_indices.append(-1)\r\n\r\n tokens = []\r\n 
que_token_to_orig_map = {}\r\n doc_token_to_orig_map = {}\r\n segment_ids = []\r\n tokens.append(\"[CLS]\")\r\n segment_ids.append(0)\r\n for i in range(len(all_que_tokens)):\r\n que_token_to_orig_map[len(tokens)] = que_tok_to_orig_index[i]\r\n tokens.append(all_que_tokens[i])\r\n segment_ids.append(0)\r\n tokens.append(\"[SEP]\")\r\n segment_ids.append(0)\r\n\r\n for i in range(len(all_doc_tokens)):\r\n doc_token_to_orig_map[len(tokens)] = doc_tok_to_orig_index[i]\r\n tokens.append(all_doc_tokens[i])\r\n segment_ids.append(1)\r\n tokens.append(\"[SEP]\")\r\n segment_ids.append(1)\r\n\r\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\r\n\r\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\r\n # tokens are attended to.\r\n input_mask = [1] * len(input_ids)\r\n\r\n number_indices = []\r\n doc_offset = len(all_que_tokens) + 2\r\n que_offset = 1\r\n for tok_number_index in tok_number_indices:\r\n if tok_number_index != -1:\r\n number_index = tok_number_index + doc_offset\r\n number_indices.append(number_index)\r\n else:\r\n number_indices.append(-1)\r\n\r\n start_indices, end_indices, add_sub_expressions, input_counts, negations, number_of_answers = [], [], [], [], [], []\r\n if is_train:\r\n # For distant supervision, we annotate the positions of all answer spans\r\n if passage_tok_start_positions != [] and passage_tok_end_positions !=[]:\r\n for tok_start_position, tok_end_position in zip(passage_tok_start_positions, passage_tok_end_positions):\r\n start_position = tok_start_position + doc_offset\r\n end_position = tok_end_position + doc_offset\r\n start_indices.append(start_position)\r\n end_indices.append(end_position)\r\n elif query_tok_start_positions != [] and query_tok_end_positions !=[]:\r\n for tok_start_position, tok_end_position in zip(query_tok_start_positions, query_tok_end_positions):\r\n start_position = tok_start_position + que_offset\r\n end_position = tok_end_position + que_offset\r\n start_indices.append(start_position)\r\n end_indices.append(end_position)\r\n\r\n # Weakly-supervised for addition-subtraction\r\n if example.add_sub_expressions != []:\r\n for add_sub_expression in example.add_sub_expressions:\r\n # Since we have truncated the passage, the expression should also be truncated\r\n if sum(add_sub_expression[:len(number_indices)]) >= 2:\r\n assert len(add_sub_expression[:len(number_indices)]) == len(number_indices)\r\n add_sub_expressions.append(add_sub_expression[:len(number_indices)])\r\n\r\n # Weakly-supervised for counting\r\n for count in example.counts:\r\n input_counts.append(count)\r\n\r\n # Weeakly-supervised for negation\r\n if example.negations != []:\r\n for negation in example.negations:\r\n if sum(negation[:len(number_indices)]) == 1:\r\n assert len(negation[:len(number_indices)]) == len(number_indices)\r\n negations.append(negation[:len(number_indices)])\r\n\r\n is_impossible = True\r\n if \"span_extraction\" in answering_abilities and start_indices != [] and end_indices != []:\r\n is_impossible = False\r\n assert example.number_of_answer is not None\r\n number_of_answers.append(example.number_of_answer - 1)\r\n\r\n if \"negation\" in answering_abilities and negations != []:\r\n is_impossible = False\r\n\r\n if \"addition_subtraction\" in answering_abilities and add_sub_expressions != []:\r\n is_impossible = False\r\n\r\n if \"counting\" in answering_abilities and input_counts != []:\r\n is_impossible = False\r\n\r\n if start_indices == [] and end_indices == [] and number_of_answers == []:\r\n start_indices.append(-1)\r\n 
end_indices.append(-1)\r\n number_of_answers.append(-1)\r\n\r\n if negations == []:\r\n negations.append([-1] * len(number_indices))\r\n\r\n if add_sub_expressions == []:\r\n add_sub_expressions.append([-1] * len(number_indices))\r\n\r\n if input_counts == []:\r\n input_counts.append(-1)\r\n\r\n if not is_impossible:\r\n features.append(InputFeatures(\r\n unique_id=unique_id,\r\n example_index=example_index,\r\n tokens=tokens,\r\n que_token_to_orig_map=que_token_to_orig_map,\r\n doc_token_to_orig_map=doc_token_to_orig_map,\r\n input_ids=input_ids,\r\n input_mask=input_mask,\r\n segment_ids=segment_ids,\r\n number_indices=number_indices,\r\n start_indices=start_indices,\r\n end_indices=end_indices,\r\n number_of_answers=number_of_answers,\r\n add_sub_expressions=add_sub_expressions,\r\n input_counts=input_counts,\r\n negations=negations))\r\n unique_id += 1\r\n else:\r\n skip_count += 1\r\n else:\r\n features.append(InputFeatures(\r\n unique_id=unique_id,\r\n example_index=example_index,\r\n tokens=tokens,\r\n que_token_to_orig_map=que_token_to_orig_map,\r\n doc_token_to_orig_map=doc_token_to_orig_map,\r\n input_ids=input_ids,\r\n input_mask=input_mask,\r\n segment_ids=segment_ids,\r\n number_indices=number_indices))\r\n unique_id += 1\r\n\r\n if len(features) % 5000 == 0:\r\n logger.info(\"Processing features: %d\" % (len(features)))\r\n\r\n logger.info(f\"Skipped {skip_count} features, truncated {truncate_count} features, kept {len(features)} features.\")\r\n return features\r\n\r\n\r\ndef wrapped_get_final_text(example, feature, start_index, end_index, do_lower_case, verbose_logging, logger):\r\n if start_index in feature.doc_token_to_orig_map and end_index in feature.doc_token_to_orig_map:\r\n orig_doc_start = feature.doc_token_to_orig_map[start_index]\r\n orig_doc_end = feature.doc_token_to_orig_map[end_index]\r\n orig_tokens = example.passage_tokens[orig_doc_start:(orig_doc_end + 1)]\r\n elif start_index in feature.que_token_to_orig_map and end_index in feature.que_token_to_orig_map:\r\n orig_que_start = feature.que_token_to_orig_map[start_index]\r\n orig_que_end = feature.que_token_to_orig_map[end_index]\r\n orig_tokens = example.question_tokens[orig_que_start:(orig_que_end + 1)]\r\n else:\r\n return None\r\n\r\n tok_tokens = feature.tokens[start_index:(end_index + 1)]\r\n tok_text = \" \".join(tok_tokens)\r\n\r\n # De-tokenize WordPieces that have been split off.\r\n tok_text = tok_text.replace(\" ##\", \"\")\r\n tok_text = tok_text.replace(\"##\", \"\")\r\n\r\n # Clean whitespace\r\n tok_text = tok_text.strip()\r\n tok_text = \" \".join(tok_text.split())\r\n orig_text = \" \".join(orig_tokens)\r\n\r\n final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging, logger)\r\n return final_text\r\n\r\n\r\ndef add_sub_beam_search(example, feature, result, is_training, beam_size, max_count):\r\n number_sign_logits = result['number_sign_logits'] # [L, 3]\r\n number_mask = result['number_mask'] # [L]\r\n number_indices_list, sign_indices_list, scores_list = beam_search(number_sign_logits, number_mask, beam_size, max_count)\r\n\r\n number_sign_labels = []\r\n if is_training:\r\n if number_indices_list != [] and sign_indices_list != []:\r\n for number_indices, sign_indices in zip(number_indices_list, sign_indices_list):\r\n pred_answer = sum([example.numbers_in_passage[number_index] * sign_remap[sign_index]\r\n for number_index, sign_index in zip(number_indices, sign_indices)])\r\n pred_answer = float(Decimal(pred_answer).quantize(Decimal('0.0000')))\r\n 
ground_truth_answer_strings = [answer_json_to_strings(annotation)[0] for annotation in\r\n example.answer_annotations]\r\n exact_match, _ = metric_max_over_ground_truths(\r\n drop_em_and_f1, str(pred_answer), ground_truth_answer_strings)\r\n number_sign_labels.append(exact_match)\r\n\r\n # Pad to fixed length\r\n for number_indices, sign_indices in zip(number_indices_list, sign_indices_list):\r\n while len(number_indices) < max_count:\r\n number_indices.append(-1)\r\n sign_indices.append(-1)\r\n\r\n while len(number_indices_list) < beam_size:\r\n number_indices_list.append([-1] * max_count)\r\n sign_indices_list.append([-1] * max_count)\r\n scores_list.append(0)\r\n if is_training:\r\n number_sign_labels.append(0)\r\n\r\n # Add ground truth expressions if there is no positive label\r\n if is_training and max(number_sign_labels) == 0:\r\n gold_number_indices, gold_sign_indices = [], []\r\n add_sub_expression = choice(feature.add_sub_expressions)\r\n for number_index, sign_index in enumerate(add_sub_expression):\r\n if sign_index > 0 and number_mask[number_index]:\r\n gold_number_indices.append(number_index)\r\n gold_sign_indices.append(sign_index)\r\n while len(gold_number_indices) < max_count:\r\n gold_number_indices.append(-1)\r\n gold_sign_indices.append(-1)\r\n number_indices_list[-1] = gold_number_indices\r\n sign_indices_list[-1] = gold_sign_indices\r\n number_sign_labels[-1] = 1\r\n\r\n return number_indices_list, sign_indices_list, number_sign_labels, scores_list\r\n\r\n\r\ndef batch_annotate_candidates(all_examples, batch_features, batch_results, answering_abilities,\r\n is_training, beam_size, max_count):\r\n \"\"\"Annotate top-k candidate answers into features.\"\"\"\r\n unique_id_to_result = {}\r\n for result in batch_results:\r\n unique_id_to_result[result['unique_id']] = result\r\n\r\n batch_number_indices, batch_sign_indices, batch_sign_labels, batch_scores = [], [], [], []\r\n for (feature_index, feature) in enumerate(batch_features):\r\n example = all_examples[feature.example_index]\r\n result = unique_id_to_result[feature.unique_id]\r\n\r\n number_indices, sign_indices, sign_labels, scores = None, None, None, None\r\n if is_training:\r\n if feature.add_sub_expressions != [[-1] * len(feature.number_indices)]:\r\n number_indices, sign_indices, sign_labels, scores = add_sub_beam_search(example, feature, result,\r\n is_training, beam_size, max_count)\r\n else:\r\n predicted_ability = result['predicted_ability']\r\n predicted_ability_str = answering_abilities[predicted_ability]\r\n if predicted_ability_str == \"addition_subtraction\":\r\n number_indices, sign_indices, sign_labels, scores = add_sub_beam_search(example, feature, result,\r\n is_training, beam_size, max_count)\r\n\r\n if number_indices is None and sign_indices is None and sign_labels is None and scores is None:\r\n number_indices, sign_indices, sign_labels, scores = [], [], [], []\r\n while len(number_indices) < beam_size:\r\n number_indices.append([-1] * max_count)\r\n sign_indices.append([-1] * max_count)\r\n sign_labels.append(0)\r\n scores.append(0)\r\n\r\n batch_number_indices.append(number_indices)\r\n batch_sign_indices.append(sign_indices)\r\n batch_sign_labels.append(sign_labels)\r\n batch_scores.append(scores)\r\n return batch_number_indices, batch_sign_indices, batch_sign_labels, batch_scores\r\n\r\n\r\ndef write_predictions(all_examples, all_features, all_results, answering_abilities, drop_metrics, length_heuristic,\r\n n_best_size, max_answer_length, do_lower_case, verbose_logging, logger):\r\n 
\"\"\"Write final predictions to the json file.\"\"\"\r\n\r\n example_index_to_features = collections.defaultdict(list)\r\n for feature in all_features:\r\n example_index_to_features[feature.example_index].append(feature)\r\n\r\n unique_id_to_result = {}\r\n for result in all_results:\r\n unique_id_to_result[result['unique_id']] = result\r\n\r\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\r\n \"PrelimPrediction\",\r\n [\"start_index\", \"end_index\", \"start_logit\", \"end_logit\", \"rerank_logit\", \"heuristic_logit\"])\r\n\r\n all_nbest_json = collections.OrderedDict()\r\n for (example_index, example) in enumerate(all_examples):\r\n features = example_index_to_features[example_index]\r\n assert len(features) == 1\r\n\r\n feature = features[0]\r\n result = unique_id_to_result[feature.unique_id]\r\n predicted_ability = result['predicted_ability']\r\n predicted_ability_str = answering_abilities[predicted_ability]\r\n nbest_json, predicted_answers = [], []\r\n if predicted_ability_str == \"addition_subtraction\":\r\n max_prob, best_answer = 0, None\r\n sign_rerank_probs = _compute_softmax(result['sign_rerank_logits'])\r\n for number_indices, sign_indices, rerank_prob, prob in zip(result['number_indices2'], result['sign_indices'], sign_rerank_probs, result['sign_probs']):\r\n pred_answer = sum([sign_remap[sign_index] * example.numbers_in_passage[number_index] for sign_index, number_index in zip(sign_indices, number_indices) if sign_index != -1 and number_index != -1])\r\n pred_answer = str(float(Decimal(pred_answer).quantize(Decimal('0.0000'))))\r\n if rerank_prob*prob > max_prob:\r\n max_prob = rerank_prob*prob\r\n best_answer = pred_answer\r\n assert best_answer is not None\r\n predicted_answers.append(best_answer)\r\n output = collections.OrderedDict()\r\n output[\"text\"] = str(best_answer)\r\n output[\"type\"] = \"addition_subtraction\"\r\n nbest_json.append(output)\r\n elif predicted_ability_str == \"counting\":\r\n predicted_answers.append(str(result['predicted_count']))\r\n output = collections.OrderedDict()\r\n output[\"text\"] = str(result['predicted_count'])\r\n output[\"type\"] = \"counting\"\r\n nbest_json.append(output)\r\n elif predicted_ability_str == \"negation\":\r\n index = np.argmax(result['predicted_negations'])\r\n pred_answer = 100 - example.numbers_in_passage[index]\r\n pred_answer = float(Decimal(pred_answer).quantize(Decimal('0.0000')))\r\n predicted_answers.append(str(pred_answer))\r\n output = collections.OrderedDict()\r\n output[\"text\"] = str(pred_answer)\r\n output[\"type\"] = \"negation\"\r\n nbest_json.append(output)\r\n elif predicted_ability_str == \"span_extraction\":\r\n number_of_spans = result['predicted_spans']\r\n prelim_predictions = []\r\n start_indexes = _get_best_indexes(result['start_logits'], n_best_size)\r\n end_indexes = _get_best_indexes(result['end_logits'], n_best_size)\r\n for start_index in start_indexes:\r\n for end_index in end_indexes:\r\n # We could hypothetically create invalid predictions, e.g., predict\r\n # that the start of the span is in the question. 
We throw out all\r\n # invalid predictions.\r\n if start_index >= len(feature.tokens):\r\n continue\r\n if end_index >= len(feature.tokens):\r\n continue\r\n if start_index not in feature.que_token_to_orig_map and start_index not in feature.doc_token_to_orig_map:\r\n continue\r\n if end_index not in feature.que_token_to_orig_map and start_index not in feature.doc_token_to_orig_map:\r\n continue\r\n if end_index < start_index:\r\n continue\r\n length = end_index - start_index + 1\r\n if length > max_answer_length:\r\n continue\r\n\r\n start_logit = result['start_logits'][start_index]\r\n end_logit = result['end_logits'][end_index]\r\n heuristic_logit = start_logit + end_logit \\\r\n - length_heuristic * (end_index - start_index + 1)\r\n prelim_predictions.append(\r\n _PrelimPrediction(\r\n start_index=start_index,\r\n end_index=end_index,\r\n start_logit=start_logit,\r\n end_logit=end_logit,\r\n rerank_logit=0,\r\n heuristic_logit=heuristic_logit))\r\n\r\n prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.heuristic_logit), reverse=True)\r\n\r\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\r\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\", \"start_index\", \"end_index\", \"rerank_logit\", \"heuristic_logit\"])\r\n\r\n seen_predictions = {}\r\n nbest = []\r\n for i, pred_i in enumerate(prelim_predictions):\r\n if len(nbest) >= n_best_size:\r\n break\r\n\r\n final_text = wrapped_get_final_text(example, feature, pred_i.start_index, pred_i.end_index,\r\n do_lower_case, verbose_logging, logger)\r\n if final_text in seen_predictions or final_text is None:\r\n continue\r\n\r\n seen_predictions[final_text] = True\r\n nbest.append(\r\n _NbestPrediction(\r\n text=final_text,\r\n start_logit=pred_i.start_logit,\r\n end_logit=pred_i.end_logit,\r\n start_index=pred_i.start_index,\r\n end_index=pred_i.end_index,\r\n rerank_logit=pred_i.rerank_logit,\r\n heuristic_logit=pred_i.heuristic_logit\r\n ))\r\n\r\n # filter out redundant candidates\r\n if (i + 1) < len(prelim_predictions):\r\n indexes = []\r\n for j, pred_j in enumerate(prelim_predictions[(i + 1):]):\r\n filter_text = wrapped_get_final_text(example, feature, pred_j.start_index, pred_j.end_index,\r\n do_lower_case, verbose_logging, logger)\r\n if filter_text is None:\r\n indexes.append(i + j + 1)\r\n else:\r\n if calculate_f1(final_text, filter_text) > 0:\r\n indexes.append(i + j + 1)\r\n [prelim_predictions.pop(index - k) for k, index in enumerate(indexes)]\r\n\r\n # In very rare edge cases we could have no valid predictions. 
So we\r\n # just create a nonce prediction in this case to avoid failure.\r\n if not nbest:\r\n nbest.append(\r\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0, start_index=0.0, end_index=0.0,\r\n rerank_logit=0., heuristic_logit=0.))\r\n\r\n assert len(nbest) >= 1\r\n\r\n for i, entry in enumerate(nbest):\r\n if i > number_of_spans:\r\n break\r\n predicted_answers.append(entry.text)\r\n output = collections.OrderedDict()\r\n output[\"text\"] = entry.text\r\n output[\"type\"] = \"span_extraction\"\r\n nbest_json.append(output)\r\n else:\r\n raise ValueError(f\"Unsupported answer ability: {predicted_ability_str}\")\r\n\r\n assert len(nbest_json) >= 1 and len(predicted_answers) >= 1\r\n if example.answer_annotations:\r\n drop_metrics(predicted_answers, example.answer_annotations)\r\n all_nbest_json[example.qas_id] = nbest_json\r\n\r\n exact_match, f1_score = drop_metrics.get_metric(reset=True)\r\n return all_nbest_json, {'em': exact_match, 'f1': f1_score}\r\n\r\n\r\nclass ListBatcher(object):\r\n def get_epoch(self, data: List):\r\n raise NotImplementedError()\r\n\r\n def get_batch_size(self):\r\n \"\"\" Return the batch size \"\"\"\r\n raise NotImplementedError()\r\n\r\n def epoch_size(self, n_elements):\r\n raise NotImplementedError()\r\n\r\n\r\nclass ExampleLenKey(object):\r\n def __call__(self, d: DropExample):\r\n return len(d.passage_tokens) + len(d.question_tokens)\r\n\r\n\r\nclass FeatureLenKey(object):\r\n def __call__(self, d: InputFeatures):\r\n return len(d.input_ids)\r\n\r\n\r\nclass ClusteredBatcher(ListBatcher):\r\n def __init__(self,\r\n batch_size: int,\r\n clustering: Callable,\r\n truncate_batches=False):\r\n self.batch_size = batch_size\r\n self.clustering = clustering\r\n self.truncate_batches = truncate_batches\r\n\r\n def get_batch_size(self):\r\n return self.batch_size\r\n\r\n def get_epoch(self, data: List):\r\n data = sorted(data, key=self.clustering)\r\n n_batches = len(data) // self.batch_size\r\n intervals = [(i * self.batch_size, (i + 1) * self.batch_size) for i in range(0, n_batches)]\r\n remainder = len(data) % self.batch_size\r\n if self.truncate_batches and remainder > 0:\r\n intervals.append((len(data) - remainder, len(data)))\r\n np.random.shuffle(intervals)\r\n for i, j in intervals:\r\n yield data[i:j]\r\n\r\n def epoch_size(self, n_elements):\r\n size = n_elements // self.batch_size\r\n if self.truncate_batches and (n_elements % self.batch_size) > 0:\r\n size += 1\r\n return size\r\n\r\n\r\nclass FixedOrderBatcher(ListBatcher):\r\n def __init__(self, batch_size: int, truncate_batches=False):\r\n self.batch_size = batch_size\r\n self.truncate_batches = truncate_batches\r\n\r\n def get_batch_size(self):\r\n return self.batch_size\r\n\r\n def get_epoch(self, data: List):\r\n n_batches = len(data) // self.batch_size\r\n for i in range(n_batches):\r\n yield data[i*self.batch_size:(i + 1)*self.batch_size]\r\n if self.truncate_batches and (len(data) % self.batch_size) > 0:\r\n yield data[self.batch_size * (len(data) // self.batch_size):]\r\n\r\n def epoch_size(self, n_elements):\r\n size = n_elements // self.batch_size\r\n if self.truncate_batches and (n_elements % self.batch_size) > 0:\r\n size += 1\r\n return size\r\n\r\n\r\ndef get_tensors_list(batch, is_train, gra_acc_steps, max_seq_length):\r\n input_len = np.array([len(feature.input_ids) for feature in batch], dtype='int32')\r\n max_input_len = input_len.max()\r\n mini_batch_size = int(len(batch) / gra_acc_steps)\r\n\r\n batchs_list, tensors_list = [], []\r\n if max_input_len > 
max_seq_length / gra_acc_steps and mini_batch_size > 0:\r\n mini_batching = ClusteredBatcher(mini_batch_size, FeatureLenKey(), truncate_batches=True)\r\n for mini_batch in mini_batching.get_epoch(batch):\r\n tensors_list.append(get_tensors(mini_batch, is_train))\r\n batchs_list.append(mini_batch)\r\n else:\r\n tensors_list.append(get_tensors(batch, is_train))\r\n batchs_list.append(batch)\r\n return batchs_list, tensors_list\r\n\r\n\r\ndef get_tensors(batch, is_train):\r\n input_len = np.array([len(feature.input_ids) for feature in batch], dtype='int32')\r\n max_input_len = input_len.max()\r\n\r\n number_indices_len = np.array([len(feature.number_indices) for feature in batch], dtype='int32')\r\n max_number_indices_len = number_indices_len.max()\r\n\r\n if is_train:\r\n start_indices_len = np.array([len(feature.start_indices) for feature in batch], dtype='int32')\r\n max_start_indices_len = start_indices_len.max()\r\n\r\n input_counts_len = np.array([len(feature.input_counts) for feature in batch], dtype='int32')\r\n max_input_counts_len = input_counts_len.max()\r\n\r\n number_of_answers_len = np.array([len(feature.number_of_answers) for feature in batch], dtype='int32')\r\n max_number_of_answers_len = number_of_answers_len.max()\r\n\r\n add_sub_combination_len, negation_combination_len = [], []\r\n for feature in batch:\r\n add_sub_combination_len.append(len(feature.add_sub_expressions))\r\n negation_combination_len.append(len(feature.negations))\r\n max_add_sub_combination_len = np.array(add_sub_combination_len).max()\r\n max_negation_combination_len = np.array(negation_combination_len).max()\r\n\r\n input_ids_list, input_mask_list, segment_ids_list, number_indices_list = [], [], [], []\r\n if is_train:\r\n start_indices_list, end_indices_list, number_of_answers_list, input_counts_list, add_sub_expressions_list, \\\r\n negations_list = [], [], [], [], [], []\r\n for feature in batch:\r\n input_ids = copy.deepcopy(feature.input_ids)\r\n input_mask = copy.deepcopy(feature.input_mask)\r\n segment_ids = copy.deepcopy(feature.segment_ids)\r\n # Zero-pad up to the max mini-batch sequence length.\r\n while len(input_ids) < max_input_len:\r\n input_ids.append(0)\r\n input_mask.append(0)\r\n segment_ids.append(0)\r\n\r\n input_ids_list.append(input_ids)\r\n input_mask_list.append(input_mask)\r\n segment_ids_list.append(segment_ids)\r\n\r\n number_indices = copy.deepcopy(feature.number_indices)\r\n while len(number_indices) < max_number_indices_len:\r\n number_indices.append(-1)\r\n\r\n number_indices_list.append(number_indices)\r\n\r\n if is_train:\r\n start_indices = copy.deepcopy(feature.start_indices)\r\n end_indices = copy.deepcopy(feature.end_indices)\r\n number_of_answers = copy.deepcopy(feature.number_of_answers)\r\n input_counts = copy.deepcopy(feature.input_counts)\r\n add_sub_expressions = copy.deepcopy(feature.add_sub_expressions)\r\n negations = copy.deepcopy(feature.negations)\r\n\r\n while len(start_indices) < max_start_indices_len:\r\n start_indices.append(-1)\r\n end_indices.append(-1)\r\n\r\n while len(input_counts) < max_input_counts_len:\r\n input_counts.append(-1)\r\n\r\n while len(number_of_answers) < max_number_of_answers_len:\r\n number_of_answers.append(-1)\r\n\r\n new_add_sub_expressions = []\r\n for add_sub_expression in add_sub_expressions:\r\n while len(add_sub_expression) < max_number_indices_len:\r\n add_sub_expression.append(-1)\r\n new_add_sub_expressions.append(add_sub_expression)\r\n\r\n while len(new_add_sub_expressions) < max_add_sub_combination_len:\r\n 
new_add_sub_expressions.append([-1] * max_number_indices_len)\r\n\r\n new_negations = []\r\n for negation in negations:\r\n while len(negation) < max_number_indices_len:\r\n negation.append(-1)\r\n new_negations.append(negation)\r\n\r\n while len(new_negations) < max_negation_combination_len:\r\n new_negations.append([-1] * max_number_indices_len)\r\n\r\n start_indices_list.append(start_indices)\r\n end_indices_list.append(end_indices)\r\n number_of_answers_list.append(number_of_answers)\r\n input_counts_list.append(input_counts)\r\n add_sub_expressions_list.append(new_add_sub_expressions)\r\n negations_list.append(new_negations)\r\n\r\n batch_input_ids = torch.tensor(input_ids_list, dtype=torch.long)\r\n batch_input_mask = torch.tensor(input_mask_list, dtype=torch.long)\r\n batch_segment_ids = torch.tensor(segment_ids_list, dtype=torch.long)\r\n batch_number_indices = torch.tensor(number_indices_list, dtype=torch.long)\r\n\r\n if is_train:\r\n batch_start_indices = torch.tensor(start_indices_list, dtype=torch.long)\r\n batch_end_indices = torch.tensor(end_indices_list, dtype=torch.long)\r\n batch_number_of_answers = torch.tensor(number_of_answers_list, dtype=torch.long)\r\n batch_input_counts = torch.tensor(input_counts_list, dtype=torch.long)\r\n batch_add_sub_expressions = torch.tensor(add_sub_expressions_list, dtype=torch.long)\r\n batch_negations = torch.tensor(negations_list, dtype=torch.long)\r\n return batch_input_ids, batch_input_mask, batch_segment_ids, batch_number_indices, batch_start_indices, \\\r\n batch_end_indices, batch_number_of_answers, batch_input_counts, batch_add_sub_expressions, batch_negations\r\n else:\r\n return batch_input_ids, batch_input_mask, batch_segment_ids, batch_number_indices\r\n" ]
[ [ "numpy.array", "torch.tensor", "numpy.argmax", "numpy.random.shuffle" ] ]
archon159/elsa
[ "b9e680bd972bcf9630bc8465e33abe42d3824c85" ]
[ "pretrain/datasets/datasets.py" ]
[ "import os\n\nimport numpy as np\nimport torch\nfrom torch.utils.data.dataset import Subset\nfrom torchvision import datasets, transforms\nimport json\nfrom utils.utils import set_random_seed\n\nDATA_PATH = '~/data/'\nIMAGENET_PATH = '~/data/ImageNet'\n\n\nCIFAR10_SUPERCLASS = list(range(10)) # one class\nIMAGENET_SUPERCLASS = list(range(30)) # one class\n\nCIFAR100_SUPERCLASS = [\n [4, 31, 55, 72, 95],\n [1, 33, 67, 73, 91],\n [54, 62, 70, 82, 92],\n [9, 10, 16, 29, 61],\n [0, 51, 53, 57, 83],\n [22, 25, 40, 86, 87],\n [5, 20, 26, 84, 94],\n [6, 7, 14, 18, 24],\n [3, 42, 43, 88, 97],\n [12, 17, 38, 68, 76],\n [23, 34, 49, 60, 71],\n [15, 19, 21, 32, 39],\n [35, 63, 64, 66, 75],\n [27, 45, 77, 79, 99],\n [2, 11, 36, 46, 98],\n [28, 30, 44, 78, 93],\n [37, 50, 65, 74, 80],\n [47, 52, 56, 59, 96],\n [8, 13, 48, 58, 90],\n [41, 69, 81, 85, 89],\n]\n\n\nclass MultiDataTransform(object):\n def __init__(self, transform):\n self.transform1 = transform\n self.transform2 = transform\n\n def __call__(self, sample):\n x1 = self.transform1(sample)\n x2 = self.transform2(sample)\n return x1, x2\n\n\nclass MultiDataTransformList(object):\n def __init__(self, transform, clean_trasform, sample_num):\n self.transform = transform\n self.clean_transform = clean_trasform\n self.sample_num = sample_num\n\n def __call__(self, sample):\n set_random_seed(0)\n\n sample_list = []\n for i in range(self.sample_num):\n sample_list.append(self.transform(sample))\n\n return sample_list, self.clean_transform(sample)\n\n\ndef get_transform(image_size=None):\n # Note: data augmentation is implemented in the layers\n # Hence, we only define the identity transformation here\n if image_size: # use pre-specified image size\n train_transform = transforms.Compose([\n transforms.Resize((image_size[0], image_size[1])),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ])\n test_transform = transforms.Compose([\n transforms.Resize((image_size[0], image_size[1])),\n transforms.ToTensor(),\n ])\n else: # use default image size\n train_transform = transforms.Compose([\n transforms.ToTensor(),\n ])\n test_transform = transforms.ToTensor()\n\n return train_transform, test_transform\n\n\ndef get_subset_with_len(dataset, length, shuffle=False):\n set_random_seed(0)\n dataset_size = len(dataset)\n\n index = np.arange(dataset_size)\n if shuffle:\n np.random.shuffle(index)\n\n index = torch.from_numpy(index[0:length])\n subset = Subset(dataset, index)\n\n assert len(subset) == length\n\n return subset\n\n\ndef get_transform_imagenet():\n\n train_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ])\n test_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n ])\n\n train_transform = MultiDataTransform(train_transform)\n\n return train_transform, test_transform\n\n\ndef get_dataset(P, dataset, test_only=False, image_size=None, download=False, eval=False):\n if dataset in ['imagenet', 'cub', 'stanford_dogs', 'flowers102',\n 'places365', 'food_101', 'caltech_256', 'dtd', 'pets']:\n if eval:\n train_transform, test_transform = get_simclr_eval_transform_imagenet(P.ood_samples,\n P.resize_factor, P.resize_fix)\n else:\n train_transform, test_transform = get_transform_imagenet()\n else:\n train_transform, test_transform = get_transform(image_size=image_size)\n\n if dataset == 'cifar10':\n image_size = (32, 32, 3)\n n_classes = 10\n train_set = 
datasets.CIFAR10(DATA_PATH, train=True, download=download, transform=train_transform)\n test_set = datasets.CIFAR10(DATA_PATH, train=False, download=download, transform=test_transform)\n\n elif dataset == 'cifar100':\n image_size = (32, 32, 3)\n n_classes = 100\n train_set = datasets.CIFAR100(DATA_PATH, train=True, download=download, transform=train_transform)\n test_set = datasets.CIFAR100(DATA_PATH, train=False, download=download, transform=test_transform)\n\n elif dataset == 'svhn':\n assert test_only and image_size is not None\n test_set = datasets.SVHN(DATA_PATH, split='test', download=download, transform=test_transform)\n\n elif dataset == 'lsun_resize':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'LSUN_resize')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n\n elif dataset == 'lsun_fix':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'LSUN_fix')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n\n elif dataset == 'imagenet_resize':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'Imagenet_resize')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n\n elif dataset == 'imagenet_fix':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'Imagenet_fix')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n\n elif dataset == 'imagenet':\n image_size = (224, 224, 3)\n n_classes = 30\n train_dir = os.path.join(IMAGENET_PATH, 'one_class_train')\n test_dir = os.path.join(IMAGENET_PATH, 'one_class_test')\n train_set = datasets.ImageFolder(train_dir, transform=train_transform)\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n\n elif dataset == 'stanford_dogs':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'stanford_dogs')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n test_set = get_subset_with_len(test_set, length=3000, shuffle=True)\n\n elif dataset == 'cub':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'cub200')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n test_set = get_subset_with_len(test_set, length=3000, shuffle=True)\n\n elif dataset == 'flowers102':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'flowers102')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n test_set = get_subset_with_len(test_set, length=3000, shuffle=True)\n\n elif dataset == 'places365':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'places365')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n test_set = get_subset_with_len(test_set, length=3000, shuffle=True)\n\n elif dataset == 'food_101':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'food-101', 'images')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n test_set = get_subset_with_len(test_set, length=3000, shuffle=True)\n\n elif dataset == 'caltech_256':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'caltech-256')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n test_set = get_subset_with_len(test_set, length=3000, shuffle=True)\n\n elif dataset == 'dtd':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'dtd', 'images')\n 
test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n test_set = get_subset_with_len(test_set, length=3000, shuffle=True)\n\n elif dataset == 'pets':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'pets')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n test_set = get_subset_with_len(test_set, length=3000, shuffle=True)\n\n else:\n raise NotImplementedError()\n\n if test_only:\n return test_set\n else:\n return train_set, test_set, image_size, n_classes\n\n\ndef get_superclass_list(dataset):\n if dataset == 'cifar10':\n return CIFAR10_SUPERCLASS\n elif dataset == 'cifar100':\n return CIFAR100_SUPERCLASS\n elif dataset == 'imagenet':\n return IMAGENET_SUPERCLASS\n else:\n raise NotImplementedError()\n\n\ndef get_subclass_dataset(dataset, classes):\n if not isinstance(classes, list):\n classes = [classes]\n\n indices = []\n for idx, tgt in enumerate(dataset.targets):\n if tgt in classes:\n indices.append(idx)\n\n dataset = Subset(dataset, indices)\n return dataset\n\ndef get_subclass_contaminated_dataset(dataset, normal_classes, known_outlier_classes, ratio_known_normal, ratio_known_outlier, ratio_pollution):\n \n \n outlier_classes = list(set(dataset.targets))\n \n for normal_cls in normal_classes:\n outlier_classes.remove(normal_cls)\n \n idx_normal = np.argwhere(np.isin(dataset.targets, normal_classes)).flatten()\n idx_outlier = np.argwhere(np.isin(dataset.targets, outlier_classes)).flatten()\n idx_known_outlier_candidates = np.argwhere(np.isin(dataset.targets, known_outlier_classes)).flatten()\n\n n_normal = len(idx_normal)\n\n # Solve system of linear equations to obtain respective number of samples\n a = np.array([[1, 1, 0, 0],\n [(1-ratio_known_normal), -ratio_known_normal, -ratio_known_normal, -ratio_known_normal],\n [-ratio_known_outlier, -ratio_known_outlier, -ratio_known_outlier, (1-ratio_known_outlier)],\n [0, -ratio_pollution, (1-ratio_pollution), 0]])\n b = np.array([n_normal, 0, 0, 0])\n x = np.linalg.solve(a, b)\n\n # Get number of samples\n n_known_normal = int(x[0])\n n_unlabeled_normal = int(x[1])\n n_unlabeled_outlier = int(x[2])\n n_known_outlier = int(x[3])\n \n print(\"# of known normal: \", n_known_normal)\n print(\"# of known outlier: \", n_known_outlier)\n\n # Sample indices\n perm_normal = np.random.permutation(n_normal)\n perm_outlier = np.random.permutation(len(idx_outlier))\n perm_known_outlier = np.random.permutation(len(idx_known_outlier_candidates))\n\n idx_known_normal = idx_normal[perm_normal[:n_known_normal]].tolist()\n idx_unlabeled_normal = idx_normal[perm_normal[n_known_normal:n_known_normal+n_unlabeled_normal]].tolist()\n idx_unlabeled_outlier = idx_outlier[perm_outlier[:n_unlabeled_outlier]].tolist()\n idx_known_outlier = idx_known_outlier_candidates[perm_known_outlier[:n_known_outlier]].tolist()\n\n # Get original class labels\n labels_known_normal = np.array(dataset.targets)[idx_known_normal].tolist() \n labels_unlabeled_normal = np.array(dataset.targets)[idx_unlabeled_normal].tolist()\n labels_unlabeled_outlier = np.array(dataset.targets)[idx_unlabeled_outlier].tolist()\n labels_known_outlier = np.array(dataset.targets)[idx_known_outlier].tolist()\n\n\n # Get semi-supervised setting labels\n semi_labels_known_normal = np.ones(n_known_normal).astype(np.int32).tolist()\n semi_labels_unlabeled_normal = np.zeros(n_unlabeled_normal).astype(np.int32).tolist()\n semi_labels_unlabeled_outlier = np.zeros(n_unlabeled_outlier).astype(np.int32).tolist()\n semi_labels_known_outlier = 
(-np.ones(n_known_outlier).astype(np.int32)).tolist()\n\n # Create final lists\n list_idx = idx_known_normal + idx_unlabeled_normal + idx_unlabeled_outlier + idx_known_outlier\n list_labels = labels_known_normal + labels_unlabeled_normal + labels_unlabeled_outlier + labels_known_outlier\n list_semi_labels = (semi_labels_known_normal + semi_labels_unlabeled_normal + semi_labels_unlabeled_outlier\n + semi_labels_known_outlier)\n print(\"# of training set: \", len(list_idx))\n \n dataset = Subset(dataset, list_idx)\n dataset.targets = list_semi_labels\n \n return dataset\n\n\ndef get_simclr_eval_transform_imagenet(sample_num, resize_factor, resize_fix):\n\n resize_scale = (resize_factor, 1.0) # resize scaling factor\n if resize_fix: # if resize_fix is True, use same scale\n resize_scale = (resize_factor, resize_factor)\n\n transform = transforms.Compose([\n transforms.Resize(256),\n transforms.RandomResizedCrop(224, scale=resize_scale),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ])\n\n clean_trasform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n ])\n\n transform = MultiDataTransformList(transform, clean_trasform, sample_num)\n\n return transform, transform\n\n\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.random.permutation", "numpy.ones", "numpy.random.shuffle", "torch.from_numpy", "numpy.arange", "numpy.linalg.solve", "torch.utils.data.dataset.Subset", "numpy.isin" ] ]
JIABI/GhostShiftAddNet
[ "870c38248fa1df23ec1262b6690e20c437d1d5d4" ]
[ "adder/check.py" ]
[ "import torch as tt\nfrom torch import nn\nfrom torch.utils.cpp_extension import load\nimport torch.nn.functional as F\n\n#adder_cuda = load(\n# 'adder_cuda', ['adder_cuda.cpp', 'adder_cuda_kernel.cu'], verbose=True)\n\nimport adder, quantize\n#from .adder import Adder2D\n#from .adder_slow import adder2d, adder2d_function\nfrom adder import Adder2D\nfrom adder_slow import adder2d, adder2d_function\n# help(adder_cuda)``1 1`\n\ndef check_forward():\n batch_size = 1\n in_channels = 64\n out_channels = 64\n # in_channels = 1\n # out_channels = 1\n in_size = 256\n # in_size = 3\n kernel_size = 3\n padding = 1\n stride = 1\n out_size = (in_size + 2 * padding - kernel_size) // stride + 1\n print(out_size)\n\n input = tt.randn(batch_size, in_channels, in_size, in_size).cuda()\n weight = tt.randn(out_channels, in_channels, kernel_size, kernel_size).cuda()\n bias = tt.randn(out_channels).cuda()\n # output = tt.randn(batch_size, out_channels, out_size, out_size).cuda()\n\n adder_ref = adder2d(in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n bias = True).cuda()\n adder_ref.adder.data.copy_(weight)\n adder_ref.b.data.copy_(bias)\n\n adder = Adder2D(in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n bias = True,\n eta = 0.2).cuda()\n adder.weight.data.copy_(weight)\n adder.b.data.copy_(bias)\n\n # adder_cuda.forward(input,\n # weight,\n # # bias,\n # output,\n # kernel_size, kernel_size,\n # stride, stride,\n # padding, padding)\n adder(input)\n adder_ref(input)\n input.clone()\n weight.clone()\n # output.clone()\n\n # print(input)\n # print(weight)\n # print(\"our output: \", output)\n # out_ref = adder2d_function(input, weight, stride, padding)\n # print(\"addernet ref: \", out_ref)\n # print(\"by hand no bias: \", -(input - weight).abs().sum())\n # print(F.conv2d(input, weight, bias, padding=padding))\n # out_ref = F.conv2d(input, weight, bias, padding=padding)\n\n import time\n\n time_b = time.time()\n # adder_cuda.forward(input,\n # weight,\n # # bias,\n # output,\n # kernel_size, kernel_size,\n # stride, stride,\n # padding, padding)\n output = adder(input)\n time_e = time.time()\n print(\"hand_written_conv: {:.4f}us\".format((time_e - time_b) * 1e6))\n\n time_b = time.time()\n # out_ref = F.conv2d(input, weight, bias, padding=padding)\n # out_ref = adder2d_function(input, weight, stride, padding)\n out_ref = adder_ref(input)\n time_e = time.time()\n print(\"builtin_conv: {:.4f}us\".format((time_e - time_b) * 1e6))\n\n print(\"max error: {:.3e}\".format(float((out_ref - output).abs().max())))\n\n time_b = time.time()\n # adder_cuda.forward(input,\n # weight,\n # # bias,\n # output,\n # kernel_size, kernel_size,\n # stride, stride,\n # padding, padding)\n output = adder(input)\n time_e = time.time()\n print(\"hand_written_conv: {:.4f}us\".format((time_e - time_b) * 1e6))\n\n time_b = time.time()\n # out_ref = F.conv2d(input, weight, bias, padding=padding)\n # out_ref = adder2d_function(input, weight, stride, padding)\n out_ref = adder_ref(input)\n time_e = time.time()\n print(\"builtin_conv: {:.4f}us\".format((time_e - time_b) * 1e6))\n\n print(\"max error: {:.3e}\".format(float((out_ref - output).abs().max())))\n\n\ndef check_grad_in():\n batch_size = 1\n in_channels = 64\n out_channels = 64\n in_size = 128\n kernel_size = 3\n padding = 1\n stride = 1\n # batch_size = 1\n # in_channels = 1\n # out_channels = 1\n # in_size = 2\n # kernel_size = 2\n # padding = 0\n # stride = 1\n out_size = (in_size + 2 * padding - kernel_size) // stride + 1\n print(out_size)\n\n 
input = tt.randn(batch_size, in_channels, in_size, in_size).cuda()\n grad_input = tt.randn(batch_size, in_channels, in_size, in_size).cuda()\n input.requires_grad = True\n weight = tt.randn(out_channels, in_channels, kernel_size, kernel_size).cuda()\n weight.requires_grad = True\n bias = tt.randn(out_channels).cuda()\n grad_output = tt.randn(batch_size, out_channels, out_size, out_size).cuda()\n\n adder_ref = adder2d(in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n bias = True).cuda()\n adder_ref.adder.data.copy_(weight)\n adder_ref.b.data.copy_(bias)\n\n adder = Adder2D(in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n bias = True,\n eta = 0.2).cuda()\n adder.weight.data.copy_(weight)\n adder.b.data.copy_(bias)\n\n # outref = F.conv2d(input, weight, bias, padding=padding)\n # out_ref = adder2d_function(input, weight, stride, padding)\n out_ref = adder_ref(input)\n out_ref.backward(grad_output)\n\n grad_clone = input.grad.clone()\n input.grad.zero_()\n\n # adder_cuda.backward_input(grad_output,\n # input,\n # weight,\n # grad_input,\n # kernel_size, kernel_size,\n # stride, stride,\n # padding, padding)\n output = adder(input)\n output.backward(grad_output)\n grad_input = input.grad.clone()\n\n # print(\"input\")\n # print(input)\n # print(\"weight ref\")\n # print(adder_ref.adder)\n # print(\"weight our\")\n # print(adder.weight)\n # print(\"output ref\")\n # print(out_ref)\n # print(\"output our\")\n # print(output)\n # print(\"grad output\")\n # print(grad_output)\n # print(\"grad_in ref\")\n # print(grad_clone)\n # print(\"grad_in our\")\n # print(grad_input)\n\n print(((grad_clone - grad_input)).abs().max())\n\n\ndef check_grad_weight():\n batch_size = 1\n in_channels = 6\n out_channels = 6\n in_size = 128\n kernel_size = 3\n padding = 1\n stride = 1\n # batch_size = 1\n # in_channels = 1\n # out_channels = 1\n # in_size = 1\n # kernel_size = 1\n # padding = 0\n # stride = 1\n out_size = (in_size + 2 * padding - kernel_size) // stride + 1\n print(out_size)\n\n input = tt.randn(batch_size, in_channels, in_size, in_size).cuda()\n input.requires_grad = True\n weight = tt.randn(out_channels, in_channels, kernel_size, kernel_size).cuda()\n weight.requires_grad = True\n bias = tt.randn(out_channels).cuda()\n grad_output = tt.randn(batch_size, out_channels, out_size, out_size).cuda()\n\n adder_ref = adder2d(in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n bias = True).cuda()\n adder_ref.adder.data.copy_(weight)\n adder_ref.b.data.copy_(bias)\n\n adder = Adder2D(in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n bias = True,\n eta = 0.2).cuda()\n adder.weight.data.copy_(weight)\n adder.b.data.copy_(bias)\n\n # outref = F.conv2d(input, weight, bias, padding=padding)\n # out_ref = adder2d_function(input, weight, stride, padding)\n out_ref = adder_ref(input)\n out_ref.backward(grad_output, retain_graph=True)\n grad_clone = adder_ref.adder.grad.clone()\n adder_ref.adder.grad.zero_()\n\n # grad_weight = weight.clone()\n # adder_cuda.backward_weight(grad_output,\n # input,\n # weight,\n # grad_weight,\n # kernel_size, kernel_size,\n # stride, stride,\n # padding, padding)\n output = adder(input)\n output.backward(grad_output, retain_graph=True)\n grad_weight = adder.weight.grad.clone()\n adder.weight.grad.zero_()\n\n # print(\"input\")\n # print(input)\n # print(\"weight\")\n # print(weight)\n # print(\"output ref\")\n # print(out_ref)\n # # print(\"output our\", output)\n # print(\"grad output\")\n # print(grad_output)\n # 
print(\"grad_weight ref\")\n # print(grad_clone)\n # print(\"grad_weight our\")\n # print(grad_weight)\n\n eps = 1e-6\n print(((grad_clone - grad_weight) / (grad_clone.abs() + eps)).abs().max())\n\n import time\n time_b = time.time()\n # adder_cuda.backward_weight(grad_output,\n # input,\n # weight,\n # grad_weight,\n # kernel_size, kernel_size,\n # stride, stride,\n # padding, padding)\n output.backward(grad_output, retain_graph=True)\n time_e = time.time()\n print(\"hand_written_conv: {:.4f}us\".format((time_e - time_b) * 1e6))\n\n # outref = F.conv2d(input, weight, bias, padding=padding)\n # out_ref = adder2d_function(input, weight, stride, padding)\n time_b = time.time()\n out_ref.backward(grad_output, retain_graph=True)\n time_e = time.time()\n print(\"builtin_conv: {:.4f}us\".format((time_e - time_b) * 1e6))\n\n time_b = time.time()\n # adder_cuda.backward_weight(grad_output,\n # input,\n # weight,\n # grad_weight,\n # kernel_size, kernel_size,\n # stride, stride,\n # padding, padding)\n output.backward(grad_output, retain_graph=True)\n time_e = time.time()\n print(\"hand_written_conv: {:.4f}us\".format((time_e - time_b) * 1e6))\n\n # outref = F.conv2d(input, weight, bias, padding=padding)\n # out_ref = adder2d_function(input, weight, stride, padding)\n time_b = time.time()\n out_ref.backward(grad_output, retain_graph=True)\n time_e = time.time()\n print(\"builtin_conv: {:.4f}us\".format((time_e - time_b) * 1e6))\n\n time_b = time.time()\n # adder_cuda.backward_weight(grad_output,\n # input,\n # weight,\n # grad_weight,\n # kernel_size, kernel_size,\n # stride, stride,\n # padding, padding)\n output.backward(grad_output, retain_graph=True)\n time_e = time.time()\n print(\"hand_written_conv: {:.4f}us\".format((time_e - time_b) * 1e6))\n\n # outref = F.conv2d(input, weight, bias, padding=padding)\n # out_ref = adder2d_function(input, weight, stride, padding)\n time_b = time.time()\n out_ref.backward(grad_output, retain_graph=True)\n time_e = time.time()\n print(\"builtin_conv: {:.4f}us\".format((time_e - time_b) * 1e6))\n\n\ndef check_naive_clone():\n batch_size = 1\n in_channels = 1\n out_channels = 1\n in_size = 3\n kernel_size = 1\n padding = 0\n stride = 1\n out_size = (in_size + 2 * padding - kernel_size) // stride + 1\n print(out_size)\n\n input = tt.randn(batch_size, in_channels, in_size, in_size).cuda()\n weight = tt.randn(out_channels, in_channels, kernel_size, kernel_size).cuda()\n # bias = tt.randn(out_channels).cuda()\n output = tt.randn(batch_size, out_channels, out_size, out_size).cuda()\n\n result = adder_cuda.forward(input,\n weight,\n # bias,\n output,\n kernel_size, kernel_size,\n stride, stride,\n padding, padding)\n print(result)\n input.clone()\n weight.clone()\n # bias.clone()\n output.clone()\n\n # F.conv2d(input, weight, bias, padding=padding)\n\n # input.clone()\n\n\nif __name__ == '__main__':\n check_forward()\n check_grad_in()\n check_grad_weight()\n # check_naive_clone()\n" ]
[ [ "torch.randn" ] ]
hangpy/tacotron
[ "35c8873d088a11b624add9ad65ffb108ee4e4a0f" ]
[ "train.py" ]
[ "import argparse\nfrom datetime import datetime\nimport math\nimport os\nimport subprocess\nimport time\nimport tensorflow as tf\nimport traceback\n\nfrom datasets.datafeeder import DataFeeder\nfrom hparams import hparams, hparams_debug_string\nfrom models import create_model\nfrom text import sequence_to_text\nfrom util import audio, infolog, plot, ValueWindow\nlog = infolog.log\n\n\ndef get_git_commit():\n subprocess.check_output(['git', 'diff-index', '--quiet', 'HEAD']) # Verify client is clean\n commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()[:10]\n log('Git commit: %s' % commit)\n return commit\n\n\ndef add_stats(model):\n with tf.variable_scope('stats') as scope:\n tf.summary.histogram('linear_outputs', model.linear_outputs)\n tf.summary.histogram('linear_targets', model.linear_targets)\n tf.summary.histogram('mel_outputs', model.mel_outputs)\n tf.summary.histogram('mel_targets', model.mel_targets)\n tf.summary.scalar('loss_mel', model.mel_loss)\n tf.summary.scalar('loss_linear', model.linear_loss)\n tf.summary.scalar('learning_rate', model.learning_rate)\n tf.summary.scalar('loss', model.loss)\n gradient_norms = [tf.norm(grad) for grad in model.gradients]\n tf.summary.histogram('gradient_norm', gradient_norms)\n tf.summary.scalar('max_gradient_norm', tf.reduce_max(gradient_norms))\n return tf.summary.merge_all()\n\n\ndef time_string():\n return datetime.now().strftime('%Y-%m-%d %H:%M')\n\n\ndef train(log_dir, args):\n commit = get_git_commit() if args.git else 'None'\n checkpoint_path = os.path.join(log_dir, 'model.ckpt')\n # args.input tacotron\\\\assets\n if(args.target == 'benedict'):\n target_voice = 'Benedict'\n elif(args.target == 'ljspeech'):\n target_voice = 'LJSpeech-1.1'\n elif(args.target == 'blizzard'):\n target_voice = 'Blizzard2012'\n elif (args.target == 'test'):\n target_voice = 'Test'\n\n input_path = os.path.join(args.base_dir, args.input, target_voice, 'training', 'train.txt')\n log('Checkpoint path: %s' % checkpoint_path)\n log('Loading training data from: %s' % input_path)\n log('Using model: %s' % args.model)\n log(hparams_debug_string())\n\n # Set up DataFeeder:\n coord = tf.train.Coordinator()\n with tf.variable_scope('datafeeder') as scope:\n feeder = DataFeeder(coord, input_path, hparams)\n\n # Set up model:\n global_step = tf.Variable(0, name='global_step', trainable=False)\n with tf.variable_scope('model') as scope:\n model = create_model(args.model, hparams)\n model.initialize(feeder.inputs, feeder.input_lengths, feeder.mel_targets, feeder.linear_targets)\n model.add_loss()\n model.add_optimizer(global_step)\n stats = add_stats(model)\n\n # Bookkeeping:\n step = 0\n time_window = ValueWindow(100)\n loss_window = ValueWindow(100)\n saver = tf.train.Saver(max_to_keep=5, keep_checkpoint_every_n_hours=2)\n\n # Train!\n with tf.Session() as sess:\n try:\n summary_writer = tf.summary.FileWriter(log_dir, sess.graph)\n sess.run(tf.global_variables_initializer())\n\n if args.restore_step:\n # Restore from a checkpoint if the user requested it.\n restore_path = '%s-%d' % (checkpoint_path, args.restore_step)\n saver.restore(sess, restore_path)\n log('Resuming from checkpoint: %s at commit: %s' % (restore_path, commit), slack=True)\n else:\n log('Starting new training run at commit: %s' % commit, slack=True)\n\n feeder.start_in_session(sess)\n\n while not coord.should_stop():\n start_time = time.time()\n step, loss, opt = sess.run([global_step, model.loss, model.optimize])\n time_window.append(time.time() - start_time)\n 
loss_window.append(loss)\n message = 'Step %-7d [%.03f sec/step, loss=%.05f, avg_loss=%.05f]' % (\n step, time_window.average, loss, loss_window.average)\n log(message, slack=(step % args.checkpoint_interval == 0))\n\n if loss > 100 or math.isnan(loss):\n log('Loss exploded to %.05f at step %d!' % (loss, step), slack=True)\n raise Exception('Loss Exploded')\n\n if step % args.summary_interval == 0:\n log('Writing summary at step: %d' % step)\n summary_writer.add_summary(sess.run(stats), step)\n\n if step % args.checkpoint_interval == 0:\n log('Saving checkpoint to: %s-%d' % (checkpoint_path, step))\n saver.save(sess, checkpoint_path, global_step=step)\n log('Saving audio and alignment...')\n input_seq, spectrogram, alignment = sess.run([\n model.inputs[0], model.linear_outputs[0], model.alignments[0]])\n waveform = audio.inv_spectrogram(spectrogram.T)\n audio.save_wav(waveform, os.path.join(log_dir, 'step-%d-audio.wav' % step))\n plot.plot_alignment(alignment, os.path.join(log_dir, 'step-%d-align.png' % step),\n info='%s, %s, %s, step=%d, loss=%.5f' % (args.model, commit, time_string(), step, loss))\n log('Input: %s' % sequence_to_text(input_seq))\n\n except Exception as e:\n log('Exiting due to exception: %s' % e, slack=True)\n traceback.print_exc()\n coord.request_stop(e)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--base_dir', default=os.path.abspath(''))\n parser.add_argument('--input', default='assets')\n parser.add_argument('--target', required=True, choices=['ljspeech', 'benedict', 'blizzard', 'test'])\n parser.add_argument('--model', default='tacotron')\n parser.add_argument('--name', help='Name of the run. Used for logging. Defaults to model name.')\n parser.add_argument('--hparams', default='',\n help='Hyperparameter overrides as a comma-separated list of name=value pairs')\n parser.add_argument('--restore_step', type=int, help='Global step to restore from checkpoint.')\n parser.add_argument('--summary_interval', type=int, default=100,\n help='Steps between running summary ops.')\n parser.add_argument('--checkpoint_interval', type=int, default=1000,\n help='Steps between writing checkpoints.')\n parser.add_argument('--slack_url', help='Slack webhook URL to get periodic reports.')\n parser.add_argument('--tf_log_level', type=int, default=1, help='Tensorflow C++ log level.')\n parser.add_argument('--git', action='store_true', help='If set, verify that the client is clean.')\n args = parser.parse_args()\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)\n run_name = args.name or args.model\n # recording directory\n log_dir = os.path.join(args.base_dir, 'logs-%s' % args.target)\n os.makedirs(log_dir, exist_ok=True)\n infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)\n hparams.parse(args.hparams)\n train(log_dir, args)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.train.Coordinator", "tensorflow.summary.scalar", "tensorflow.summary.histogram", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.Variable", "tensorflow.norm", "tensorflow.variable_scope", "tensorflow.reduce_max", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.global_variables_initializer" ] ]
vshulyak/simd-structts
[ "c06972fc2395fffb559376b1d198135ad551eba5" ]
[ "src/simd_structts/base/model.py" ]
[ "import numpy as np\nfrom statsmodels.tsa.statespace.tools import companion_matrix\n\n\nclass BaseModel:\n \"\"\"A base for all models which takes care of all initialization\n procedures.\"\"\"\n\n def __init__(\n self,\n endog,\n level=False,\n trend=False,\n seasonal=None,\n freq_seasonal=None,\n cycle=False,\n autoregressive=None,\n exog=None,\n irregular=False,\n stochastic_level=False,\n stochastic_trend=False,\n stochastic_seasonal=True,\n stochastic_freq_seasonal=None,\n stochastic_cycle=False,\n damped_cycle=False,\n cycle_period_bounds=None,\n mle_regression=True,\n use_exact_diffuse=False,\n ):\n self.endog = endog\n\n # cycle NA\n assert cycle is False\n assert stochastic_cycle is False\n assert damped_cycle is False\n assert cycle_period_bounds is None\n # autoregressive NA\n assert autoregressive is None\n # irregular NA\n assert irregular is False\n\n assert exog is None or exog.ndim in (2, 3)\n assert (\n mle_regression is False\n ), \"MLE is not supported for estimating params currently\"\n\n self.mle_regression = mle_regression\n\n # Model options\n self.level = level\n self.trend = trend\n self.seasonal_periods = seasonal if seasonal is not None else 0\n self.seasonal = self.seasonal_periods > 0\n if freq_seasonal:\n self.freq_seasonal_periods = [d[\"period\"] for d in freq_seasonal]\n self.freq_seasonal_harmonics = [\n d.get(\"harmonics\", int(np.floor(d[\"period\"] / 2)))\n for d in freq_seasonal\n ]\n else:\n self.freq_seasonal_periods = []\n self.freq_seasonal_harmonics = []\n self.freq_seasonal = any(x > 0 for x in self.freq_seasonal_periods)\n self.cycle = cycle\n self.ar_order = autoregressive if autoregressive is not None else 0\n self.autoregressive = self.ar_order > 0\n self.irregular = irregular\n\n self.stochastic_level = stochastic_level\n self.stochastic_trend = stochastic_trend\n self.stochastic_seasonal = stochastic_seasonal\n if stochastic_freq_seasonal is None:\n self.stochastic_freq_seasonal = [True] * len(self.freq_seasonal_periods)\n else:\n if len(stochastic_freq_seasonal) != len(freq_seasonal):\n raise ValueError(\n \"Length of stochastic_freq_seasonal must equal length\"\n \" of freq_seasonal: {!r} vs {!r}\".format(\n len(stochastic_freq_seasonal), len(freq_seasonal)\n )\n )\n self.stochastic_freq_seasonal = stochastic_freq_seasonal\n self.stochastic_cycle = stochastic_cycle\n\n self.k_series = endog.shape[0]\n self.nobs = endog.shape[1]\n\n # Exogenous component\n if exog is None:\n self.k_exog = 0\n elif exog.ndim == 2:\n self.k_exog = exog.shape[1]\n elif exog.ndim == 3:\n self.k_exog = exog.shape[2]\n else:\n raise\n self.exog = exog\n\n self.regression = self.k_exog > 0\n\n # Model parameters\n self._k_seasonal_states = (self.seasonal_periods - 1) * self.seasonal\n self._k_freq_seas_states = (\n sum(2 * h for h in self.freq_seasonal_harmonics) * self.freq_seasonal\n )\n self._k_cycle_states = self.cycle * 2\n\n self.k_states = k_states = (\n self.level\n + self.trend\n + self._k_seasonal_states\n + self._k_freq_seas_states\n + self._k_cycle_states\n + self.ar_order\n + (not self.mle_regression) * self.k_exog\n )\n\n # initial SSM matrices\n self.transition = np.zeros((k_states, k_states))\n self.design = np.zeros((1, k_states))\n self.state_cov = np.zeros((self.k_series, k_states, k_states))\n\n # Initialized later\n self.obs_cov = np.array([[0.0]])\n\n # Initialized later\n self.initial_value = None\n self.initial_covariance = None\n\n self.setup()\n\n self.k_endog = 1\n self.selection = np.eye(self.k_states)[:, :, np.newaxis]\n 
self.obs_intercept = np.array([[0.0]])\n self.state_intercept = np.array([[0.0]] * self.k_states)\n self.time_invariant = self.design.ndim < 3\n\n \"\"\"\n A better definition if all matrices are time varying\n self.time_invariant = (\n self.design.shape[2] == 1 and\n self.obs_cov.shape[2] == 1 and\n self.transition.shape[2] == 1 and\n self.selection.shape[2] == 1 and\n self.state_cov.shape[2] == 1)\n \"\"\"\n\n def initialize_fixed(self, obs_cov=0, initial_state_cov=1e6):\n\n for series_idx in range(self.k_series):\n\n offset = 0\n\n # level\n self.state_cov[series_idx, offset, offset] = 1\n\n # trend\n if self.trend:\n offset += 1\n self.state_cov[series_idx, offset, offset] = 1\n\n # seasonal\n if self.seasonal:\n offset += 1\n self.state_cov[series_idx, offset, offset] = 1\n\n # account for added seasonal components\n offset += self._k_seasonal_states - 1\n\n # freq_seasonal\n for _ in range(self._k_freq_seas_states):\n offset += 1\n self.state_cov[series_idx, offset, offset] = 1\n\n self.obs_cov[0, 0] = obs_cov\n self.initial_value = np.zeros(self.k_states)\n self.initial_covariance = np.eye(self.k_states) * initial_state_cov\n\n def initialize_approx_diffuse(self, obs_cov=0, initial_state_cov=1e6):\n\n # sigma_epsilon = 2.0 # affects the measurement error\n # sigma_xi = 1.0 # affects the local level\n # sigma_omega = 1.0 # affects the seasonality\n # self.state_cov[0,0] = sigma_xi ** 2\n # self.state_cov[1,1] = sigma_omega ** 2\n # self.obs_cov[0,0] = sigma_epsilon ** 2\n\n from statsmodels.tsa.filters.hp_filter import hpfilter\n\n for series_idx in range(self.k_series):\n\n # Eliminate missing data to estimate starting parameters\n endog = self.endog[series_idx, :]\n exog = (\n self.exog[series_idx, ...]\n if self.exog is not None and self.exog.ndim == 3\n else self.exog\n )\n if np.any(np.isnan(endog)):\n mask = ~np.isnan(endog).squeeze()\n endog = endog[mask]\n if exog is not None:\n # WARN: currently unused\n exog = exog[mask]\n\n # Level / trend variances\n # (Use the HP filter to get initial estimates of variances)\n _start_params = {}\n\n resid, trend1 = hpfilter(endog)\n\n if self.stochastic_trend:\n cycle2, trend2 = hpfilter(trend1)\n _start_params[\"trend_var\"] = np.std(trend2) ** 2\n if self.stochastic_level:\n _start_params[\"level_var\"] = np.std(cycle2) ** 2\n elif self.stochastic_level:\n _start_params[\"level_var\"] = np.std(trend1) ** 2\n\n # The variance of the residual term can be used for all variances,\n # just to get something in the right order of magnitude.\n var_resid = np.var(resid)\n\n # Seasonal\n if self.stochastic_seasonal:\n _start_params[\"seasonal_var\"] = var_resid\n\n # Frequency domain seasonal\n if self.stochastic_freq_seasonal:\n _start_params[\"freq_seasonal_var\"] = var_resid\n\n offset = 0\n\n # level\n self.state_cov[series_idx, offset, offset] = _start_params[\"level_var\"]\n\n # trend\n if self.trend:\n offset += 1\n self.state_cov[series_idx, offset, offset] = _start_params[\"trend_var\"]\n\n # seasonal\n if self.seasonal:\n offset += 1\n self.state_cov[series_idx, offset, offset] = _start_params[\n \"seasonal_var\"\n ]\n\n # account for added seasonal components\n offset += self._k_seasonal_states - 1\n\n # freq_seasonal\n for _ in range(self._k_freq_seas_states):\n offset += 1\n self.state_cov[series_idx, offset, offset] = _start_params[\n \"freq_seasonal_var\"\n ]\n\n self.obs_cov[0, 0] = obs_cov\n self.initial_value = np.zeros(self.k_states)\n self.initial_covariance = np.eye(self.k_states) * initial_state_cov\n\n # self.state_cov = 
[self.state_cov, self.state_cov]\n # self.obs_cov = [self.obs_cov, self.obs_cov]\n\n def __str__(self):\n return (\n \"Transition:\\n\"\n + str(self.transition)\n + \"\\nDesign:\\n\"\n + str(self.design)\n + \"\\nState cov:\\n\"\n + str(self.state_cov)\n + \"\\nObs cov:\\n\"\n + str(self.obs_cov)\n )\n\n def setup(self):\n \"\"\"Setup the structural time series representation.\"\"\"\n # Initialize the ordered sets of parameters\n # self.parameters = {}\n # self.parameters_obs_intercept = {}\n # self.parameters_obs_cov = {}\n # self.parameters_transition = {}\n # self.parameters_state_cov = {}\n\n # Initialize the fixed components of the state space matrices,\n i = 0 # state offset\n j = 0 # state covariance offset\n\n # if self.irregular:\n # self.parameters_obs_cov['irregular_var'] = 1\n if self.level:\n self.design[0, i] = 1.0\n self.transition[i, i] = 1.0\n if self.trend:\n self.transition[i, i + 1] = 1.0\n if self.stochastic_level:\n # self.ssm['selection', i, j] = 1.\n # self.parameters_state_cov['level_var'] = 1\n j += 1\n i += 1\n if self.trend:\n self.transition[i, i] = 1.0\n if self.stochastic_trend:\n # self.ssm['selection', i, j] = 1.\n # self.parameters_state_cov['trend_var'] = 1\n j += 1\n i += 1\n if self.seasonal:\n n = self.seasonal_periods - 1\n self.design[0, i] = 1.0\n self.transition[i : i + n, i : i + n] = companion_matrix(\n np.r_[1, [1] * n]\n ).transpose()\n if self.stochastic_seasonal:\n # self.ssm['selection', i, j] = 1.\n # self.parameters_state_cov['seasonal_var'] = 1\n j += 1\n i += n\n if self.freq_seasonal:\n for ix, h in enumerate(self.freq_seasonal_harmonics):\n # These are the \\gamma_jt and \\gamma^*_jt terms in D&K (3.8)\n n = 2 * h\n p = self.freq_seasonal_periods[ix]\n lambda_p = 2 * np.pi / float(p)\n\n t = 0 # frequency transition matrix offset\n for block in range(1, h + 1):\n # ibid. eqn (3.7)\n self.design[0, i + t] = 1.0\n\n # ibid. eqn (3.8)\n cos_lambda_block = np.cos(lambda_p * block)\n sin_lambda_block = np.sin(lambda_p * block)\n trans = np.array(\n [\n [cos_lambda_block, sin_lambda_block],\n [-sin_lambda_block, cos_lambda_block],\n ]\n )\n trans_s = np.s_[i + t : i + t + 2]\n self.transition[trans_s, trans_s] = trans\n t += 2\n\n # freq_seasonal is always stochastic\n\n j += n\n i += n\n\n # exog regression\n if self.regression:\n\n # add exog to the design matrix (3d matrices are a special case in our KF)\n if self.exog.ndim == 2:\n self.design = np.repeat(\n self.design[np.newaxis, :, :], self.nobs, axis=0\n )\n self.design[:, 0, i : i + self.k_exog] = self.exog\n elif self.exog.ndim == 3:\n self.design = np.repeat(\n self.design[np.newaxis, :, :], self.nobs, axis=0\n )\n self.design = np.repeat(\n self.design[np.newaxis, :, :], self.k_series, axis=0\n )\n self.design[:, :, 0, i : i + self.k_exog] = self.exog\n else:\n raise\n\n self.transition[i : i + self.k_exog, i : i + self.k_exog] = np.eye(\n self.k_exog\n )\n\n i += self.k_exog\n\n def filter(self):\n raise NotImplementedError\n\n def smooth(self):\n raise NotImplementedError\n" ]
[ [ "numpy.array", "numpy.isnan", "numpy.sin", "numpy.zeros", "numpy.eye", "numpy.std", "numpy.cos", "numpy.repeat", "numpy.var", "numpy.floor" ] ]
taimir/pixel-rnn-lasagne
[ "98ad36800b0d6865f97c236db6dc51101d8b96e1" ]
[ "layers/diag_lstm.py" ]
[ "import theano\nimport theano.tensor as T\nimport lasagne\nfrom layers.skew import skew, unskew\n\n\nclass DiagLSTMLayer(lasagne.layers.Layer):\n def __init__(self, incoming, K_ss=lasagne.init.GlorotUniform(), backwards=False, **kwargs):\n super(DiagLSTMLayer, self).__init__(incoming, **kwargs)\n\n self.K_ss = self.add_param(\n K_ss, (self.input_shape[1], self.input_shape[1] // 4, 2), name=\"K_ss\")\n self.b = self.add_param(lasagne.init.Constant(0.), (1,),\n name=\"K_ss_bias\", regularizable=False)\n self.backwards = backwards\n\n def get_output_shape_for(self, input_shape):\n return input_shape[0], input_shape[1] // 4, input_shape[2], input_shape[3]\n\n def get_output_for(self, input_to_state, **kwargs):\n # skew the input in the right direction\n if self.backwards:\n input_to_state = input_to_state[:, :, :, ::-1]\n\n skewed = skew(input_to_state)\n K_ss = self.K_ss\n b = self.b\n batch_size = self.input_shape[0]\n in_chan_dim = self.input_shape[1] // 4\n height = self.input_shape[2]\n\n def process_column(x, c_prev, h_prev):\n # dim (batch_size x in_chan_dim x height)\n column_in = lasagne.layers.InputLayer(\n input_var=h_prev, shape=(batch_size, in_chan_dim, height))\n\n # OK, conv1d with filter_size (2,) puts the value at the second position of the conv.\n # Which is ok for me, as long as I process the columns from top to bottom.\n convolved_states = lasagne.layers.Conv1DLayer(incoming=column_in, num_filters=4 * in_chan_dim,\n filter_size=(2,),\n W=K_ss, b=b,\n pad=\"full\",\n nonlinearity=lasagne.nonlinearities.identity,\n flip_filters=False)\n convolved_states = lasagne.layers.get_output(convolved_states)\n # \"full\" adds one unneeded element at the end for filter_size=2\n convolved_states = convolved_states[:, :, :-1]\n\n # the input x is already convolved at this point\n lstm_parts = convolved_states + x\n\n o = T.nnet.sigmoid(lstm_parts[:, 0:in_chan_dim])\n f = T.nnet.sigmoid(lstm_parts[:, in_chan_dim:2 * in_chan_dim])\n i = T.nnet.sigmoid(lstm_parts[:, 2 * in_chan_dim:3 * in_chan_dim])\n g = T.tanh(lstm_parts[:, 3 * in_chan_dim:])\n\n c = (f * c_prev) + (i * g)\n h = o * T.tanh(c)\n\n return c, h # dims of both are: (batch_size x in_chan_dim x height)\n\n column_shape = (skewed.shape[0], skewed.shape[1] // 4, skewed.shape[2])\n outputs, updates = theano.scan(fn=process_column,\n sequences=skewed.dimshuffle((3, 0, 1, 2)),\n outputs_info=[T.zeros(column_shape, dtype=theano.config.floatX),\n T.zeros(column_shape, dtype=theano.config.floatX)],\n allow_gc=True)\n _, hs = outputs\n hs = hs.dimshuffle((1, 2, 3, 0))\n hs = unskew(hs)\n if self.backwards:\n # we need to reverse the columns again\n hs = hs[:, :, :, ::-1]\n return hs\n\n\nif __name__ == \"__main__\":\n import numpy as np\n\n in_tensor = T.tensor3(\"in\")\n in_layer = lasagne.layers.InputLayer(input_var=in_tensor, shape=(1, 1, 5))\n out = lasagne.layers.Conv1DLayer(incoming=in_layer, num_filters=1,\n filter_size=(2,),\n W=np.ones((1, 1, 2), dtype=np.float32), b=lasagne.init.Constant(0.),\n pad=\"valid\",\n nonlinearity=lasagne.nonlinearities.identity,\n flip_filters=False)\n out_tensor = lasagne.layers.get_output(out)\n f = theano.function(inputs=[in_tensor], outputs=out_tensor)\n\n print(f(np.array([0, 1, 2, 3, 4, 5], dtype=np.float32).reshape((1, 1, 6))))\n # TODO: test the LSTM layer\n" ]
[ [ "numpy.array", "numpy.ones" ] ]
stimsonc/Unit2_thermal_predictor
[ "390ad2c72ce469bf7e484c6c431fcbb1367f7636" ]
[ "pages/data.py" ]
[ "# Imports from 3rd party libraries\nimport dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\n# My imports\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\nfrom joblib import load\ndectree_model = load('assets/dectree.joblib')\n\n# Imports from this application\nfrom app import app\n\n# Load census dataframe\nfile = 'https://raw.githubusercontent.com/stimsonc/Unit2_thermal_predictor/master/census.csv'\ncensus = pd.read_csv(file)\n\n# Small df with only features, target, results \ndef wrangle(df, model):\n # Columns to include from original dataframe\n cols = ['Hispanic', 'Drive', 'MeanCommute', 'Black', 'PublicWork', 'Latitude', 'Longitude', 'to_spring_cat']\n \n # Predictions\n X = df[['Hispanic', 'Black', 'Drive', 'MeanCommute', 'PublicWork']]\n y_pred = model.predict(X)\n # Maximum probabilty for each row\n y_prob = model.predict_proba(X)\n probs = []\n for i in range(len(y_prob)):\n probs.append(max(y_prob[i]))\n \n new = df[cols]\n # Column of preciction = true/false\n new['Prediction'] = y_pred\n # Whether the prediction is correct or not\n correct = new['to_spring_cat'] == new['Prediction']\n new['Correct'] = correct\n # Column of probabiliities\n new['Probability'] = probs\n # Rename column\n new.rename(columns={\"to_spring_cat\": \"True\"}, inplace=True)\n return new\n\nwrangle_df = census.copy()\ndash_df = wrangle(wrangle_df, dectree_model)\n\n# Display random sample of dash_df\nimport plotly.graph_objects as go\nn=12\nsample = dash_df.sample(n=n)\n\nfig = go.Figure(data=[go.Table(\n header=dict(values=list(sample.columns),\n fill_color='paleturquoise',\n align='left'),\n cells=dict(values=[sample.Hispanic, sample.Drive, sample.MeanCommute, sample.Black, sample.PublicWork, sample.Longitude, sample.Latitude, sample['True'], sample.Prediction, sample.Correct, sample.Probability],\n fill_color='lavender',\n align='left'))\n])\n\ncolumn1 = dbc.Col(\n [\n dcc.Graph(figure=fig),\n ]\n)\n\nlayout = dbc.Row([column1])" ]
[ [ "pandas.read_csv" ] ]
Mdlglobal-atlassian-net/federated
[ "7797df103bf965a9d0cd70e20ae61066650382d9" ]
[ "tensorflow_federated/python/research/utils/checkpoint_utils.py" ]
[ "# Copyright 2019, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Save or load a nested structure.\"\"\"\n\nimport os.path\nimport re\n\nfrom absl import logging\nimport tensorflow as tf\n\n\ndef get_serial_number(export_dir, prefix='ckpt_'):\n r\"\"\"Get the integer component of a checkpoint directory name.\n\n Args:\n export_dir: A checkpoint directory.\n prefix: Common prefix shared by all checkpoint directories.\n\n Returns:\n The number extracted from the checkpoint directory, or -1 if the directory\n is not formatted correctly.\n \"\"\"\n matcher = re.match(r'^{}(?P<num>\\d+)$'.format(prefix),\n os.path.basename(export_dir))\n return int(matcher.group('num')) if matcher else -1\n\n\ndef latest_checkpoint(root_output_dir, prefix='ckpt_'):\n r\"\"\"Get the latest checkpoint name.\n\n Searches `root_output_dir` for directories matching the regular expression\n `prefix_\\d+$` and returns the directory with the largest integer suffix.\n\n Args:\n root_output_dir: The directory where all checkpoints stored.\n prefix: The common prefix shared by all checkpoint directories.\n\n Returns:\n Dirname of the latest checkpoint. If there are no checkpoints (or\n root_output_dir does not exist), returns None.\n \"\"\"\n if not tf.io.gfile.exists(root_output_dir):\n return None\n checkpoints = tf.io.gfile.glob(\n os.path.join(root_output_dir, '{}*'.format(prefix)))\n if not checkpoints:\n return None\n return max(checkpoints, key=lambda ckpt: get_serial_number(ckpt, prefix))\n\n\ndef save(obj, export_dir, prefix=None):\n r\"\"\"Save a nested structure to `export_dir`.\n\n Note: to be compatible with `latest_checkpoint`, the basename of `export_dir`\n must follow the regular expression pattern `<prefix>\\d+`, where the final\n digit matcher determines the ordering of the checkpoints.\n\n Args:\n obj: A nested structure which `tf.convert_to_tensor` supports.\n export_dir: A directory in which to write the state.\n prefix: The common prefix shared by all checkpoint directories. If provided,\n we will fail if the export directory doesn't match this prefix. 
If not\n provided, no check will be performed.\n\n Raises:\n ValueError: If `prefix` is provided and `export_dir` doesn't use the prefix.\n \"\"\"\n if prefix is not None and get_serial_number(export_dir, prefix) < 0:\n raise ValueError('Checkpoint dir \"{}\" is not named like \"{}XXXX!'.format(\n export_dir, prefix))\n\n model = tf.Module()\n model.obj = tf.nest.flatten(obj)\n model.build_obj_fn = tf.function(lambda: model.obj, input_signature=())\n\n # First write to a temporary directory.\n temp_export_dir = os.path.join(\n os.path.dirname(export_dir), '.temp_' + os.path.basename(export_dir))\n try:\n tf.io.gfile.rmtree(temp_export_dir)\n except tf.errors.NotFoundError:\n pass\n tf.io.gfile.makedirs(temp_export_dir)\n tf.saved_model.save(model, temp_export_dir, signatures={})\n\n # Rename the temp directory to the final location atomically.\n tf.io.gfile.rename(temp_export_dir, export_dir)\n logging.info('Checkpoint saved to: %s', export_dir)\n\n\ndef load(export_dir, obj_template):\n \"\"\"Load a nested structure from `export_dir`.\n\n Args:\n export_dir: The directory to load from.\n obj_template: An object that provides the nested structure to mimic.\n\n Returns:\n Loaded nested structure.\n\n Raises:\n FileNotFoundError: No such file or directory.\n \"\"\"\n if tf.io.gfile.exists(export_dir):\n loaded = tf.compat.v2.saved_model.load(export_dir)\n\n flat_obj = loaded.build_obj_fn()\n obj = tf.nest.pack_sequence_as(obj_template, flat_obj)\n\n logging.info('Checkpoint loaded from: %s', export_dir)\n else:\n raise FileNotFoundError('No such file or directory: %s' % export_dir)\n\n return obj\n" ]
[ [ "tensorflow.nest.pack_sequence_as", "tensorflow.io.gfile.rename", "tensorflow.io.gfile.rmtree", "tensorflow.function", "tensorflow.io.gfile.makedirs", "tensorflow.nest.flatten", "tensorflow.io.gfile.exists", "tensorflow.saved_model.save", "tensorflow.compat.v2.saved_model.load", "tensorflow.Module" ] ]
zhiru-liu/microbiome_evolution
[ "5a08fbf41357d845236e3ff46c31315929d2b649" ]
[ "utils/hmm.py" ]
[ "import numpy as np\nfrom hmmlearn.base import _BaseHMM\nfrom scipy.stats import poisson, bernoulli\nimport os\nimport config\n\n\nclass PoissonHMM(_BaseHMM):\n def __init__(self,\n init_means=None, n_components=1,\n startprob_prior=1.0, transmat_prior=1.0,\n algorithm=\"viterbi\", random_state=None,\n n_iter=10, tol=1e-2, verbose=False,\n params=\"stm\", init_params=\"stm\"):\n _BaseHMM.__init__(self, n_components,\n startprob_prior=startprob_prior,\n transmat_prior=transmat_prior,\n algorithm=algorithm,\n random_state=random_state,\n n_iter=n_iter, tol=tol, verbose=verbose,\n params=params, init_params=init_params)\n self.init_means = init_means\n\n def _init(self, X, lengths=None):\n super(PoissonHMM, self)._init(X, lengths=lengths)\n _n_samples, n_features = X.shape\n if n_features != 1:\n raise ValueError(\"Only supporting 1d Poisson for our purpose. \"\n \"Input data must have shape (n_samples, 1)\")\n if self.init_means is not None:\n self.means_ = np.squeeze(np.array(self.init_means))\n else:\n raise ValueError(\"Must supply the initial means for Poisson\")\n return\n\n def _check(self):\n super(PoissonHMM, self)._check()\n # checking the shape of means of Poisson\n if self.means_.shape != (self.n_components, ):\n raise ValueError(\"Means must have shape (n_components, ),\"\n \"actual shape: {}\".format(self.means_.shape))\n return\n\n def _generate_sample_from_state(self, state, random_state=None):\n return\n\n def _compute_log_likelihood(self, X):\n n_samples = X.shape[0]\n logp = np.zeros(shape=(n_samples, self.n_components))\n for i in range(self.n_components):\n logp[:, i] = np.squeeze(poisson.logpmf(X, self.means_[i]))\n return logp\n\n def _initialize_sufficient_statistics(self):\n stats = super(PoissonHMM, self)._initialize_sufficient_statistics()\n stats['sum_p'] = np.zeros(self.n_components)\n stats['sum_px'] = np.zeros(self.n_components)\n return stats\n\n def _accumulate_sufficient_statistics(self, stats, X, framelogprob,\n posteriors, fwdlattice, bwdlattice):\n super(PoissonHMM, self)._accumulate_sufficient_statistics(\n stats, X, framelogprob, posteriors, fwdlattice, bwdlattice)\n if 'm' in self.params:\n stats['sum_p'] += np.transpose(np.sum(posteriors, axis=0))\n stats['sum_px'] += np.squeeze(np.dot(np.transpose(posteriors), X))\n return\n\n def _do_mstep(self, stats):\n super(PoissonHMM, self)._do_mstep(stats)\n if 'm' in self.params:\n self.means_ = np.divide(stats['sum_px'], stats['sum_p'])\n return\n\n\nclass ClosePairHMM(_BaseHMM):\n def __init__(self,\n species_name=None, block_size=1,\n transfer_emissions=np.array([0.1]),\n transfer_rate=1e-2, clonal_emission=1e-3,\n transfer_length=5e2, transition_prior=None,\n algorithm=\"viterbi\", n_iter=10, tol=1e-2,\n verbose=False, params=\"m\"):\n if species_name is not None:\n self.transfer_emissions, self.transition_prior = self.get_empirical_emissions(\n species_name, block_size)\n else:\n self._init_emissions_manual(transfer_emissions, transition_prior)\n n_components = 1 + len(self.transfer_emissions)\n # normalizing the transition prior\n self.transition_prior = self.transition_prior.astype(np.float32) / np.sum(self.transition_prior)\n self.transfer_rate = transfer_rate\n self.clonal_emission = clonal_emission\n self.exit_rate = 1. 
/ transfer_length # rate of leaving the transferred state\n self.all_emissions = np.concatenate([[self.clonal_emission], self.transfer_emissions])\n\n _BaseHMM.__init__(self, n_components,\n algorithm=algorithm,\n n_iter=n_iter, tol=tol, verbose=verbose,\n params=params)\n\n def get_empirical_emissions(self, species_name, block_size):\n path = os.path.join(config.hmm_data_directory, species_name + '.csv')\n if not os.path.exists(path):\n raise ValueError(\"No empirical data found for {}\".format(species_name))\n dat = np.loadtxt(path)\n prob_has_snp = 1 - np.power(1 - dat[0, :], block_size)\n return prob_has_snp, dat[1, :]\n\n def _init_emissions_manual(self, transfer_emissions, transition_prior):\n if transfer_emissions is not None:\n if transition_prior is not None:\n if len(transition_prior) != len(transfer_emissions):\n raise ValueError(\"Transition prior must have the same length as transfer emissions\")\n if (transition_prior < 0).any():\n raise ValueError(\"Transition prior must be all positive\")\n self.transfer_emissions = transfer_emissions\n self.transition_prior = transition_prior\n else:\n print(\"No transition prior provided. Assuming uniform transition probability\")\n self.transfer_emissions = transfer_emissions\n self.transition_prior = np.ones(transfer_emissions.shape)\n else:\n raise ValueError(\"Please provide either the species name for empirical emission rates \"\n \"or relevant parameters directly\")\n\n def _init(self, X, lengths=None):\n init = 1. / self.n_components\n self.startprob_ = np.zeros(self.n_components)\n self.startprob_[0] = 1 # always starts from clonal state\n\n # transmat is very sparse; no transitions between the recombined/transferred states\n self.transmat_ = np.zeros((self.n_components, self.n_components))\n # transitions from the recombined state\n self.transmat_[1:, 0] = self.exit_rate\n self.transmat_[np.diag_indices(self.n_components)] = 1 - self.exit_rate\n # transitions from the clonal state\n self.transmat_[0, 0] = 1 - self.transfer_rate\n self.transmat_[0, 1:] = self.transfer_rate * self.transition_prior\n\n _n_samples, n_features = X.shape\n if n_features != 1:\n raise ValueError(\"Only supports binned 1d genome as input data\")\n return\n\n def _compute_log_likelihood(self, X):\n # each observation will be either \"snp in bin / 1\" or \"no snp in bin/ 0\"\n # so the emission simply follows bernoulli RV\n # logp = np.zeros((X.shape[0], self.transfer_emissions.shape[0] + 1))\n # logp[:, 0] = np.squeeze(bernoulli.logpmf(X, self.clonal_emission))\n # logp[:, 1:] = bernoulli.logpmf(X, self.transfer_emissions)\n # clonal_logp = poisson.logpmf(X, self.clonal_emission)\n # transfer_logp = poisson.logpmf(X, self.transfer_emissions)\n # logp = np.hstack([clonal_logp, transfer_logp])\n logp = np.log(1 - self.all_emissions + np.outer(X, 2 * self.all_emissions - 1))\n return logp\n\n def _initialize_sufficient_statistics(self):\n # TODO may need to implement for inferring wall clock time\n return\n\n def _accumulate_sufficient_statistics(self, stats, X, framelogprob,\n posteriors, fwdlattice, bwdlattice):\n # TODO may need to implement for inferring wall clock time\n print(\"Skipping\")\n return\n\n def _do_mstep(self, stats):\n # TODO may need to implement for inferring wall clock time\n print(\"skipping m step\")\n return\n" ]
[ [ "numpy.concatenate", "numpy.divide", "numpy.array", "numpy.diag_indices", "numpy.zeros", "numpy.sum", "numpy.ones", "scipy.stats.poisson.logpmf", "numpy.loadtxt", "numpy.transpose", "numpy.power", "numpy.outer" ] ]
ByrdOfAFeather/Catawba-County-ArcGis
[ "8688918c00218a0a283bbdd7ed9d252e9a91a885" ]
[ "Accountability Report/data_classes.py" ]
[ "import xlrd\nfrom data_transformation_functions import setup_nc_dataframe, setup_dicts, remove_section\nfrom sklearn.preprocessing import LabelEncoder, PolynomialFeatures, StandardScaler\nfrom sklearn.model_selection import train_test_split\n\n\nclass NCDatabase:\n\tdef __init__(self):\n\t\tself.report = xlrd.open_workbook('Databases/acctsumm15.xlsx').sheet_by_index(0)\n\t\tself.overall = setup_dicts()\n\t\tself.overall_dataframe = self.overall[0]\n\t\tself.overall_grades = self.overall[1]\n\t\tself.database = setup_nc_dataframe(self.overall_grades, self.overall_dataframe)\n\n\tdef classification_setup(self, target_subject='Math', score_threshold=None):\n\t\t\"\"\"Sets up the NC Database for classification based on input\n\t\t:param target_subject: Target subject, valid options are \"Math\", \"English\", or \"Biology\"\n\t\t:param score_threshold: Optional to split the database into two classes, below and above the threshold\"\"\"\n\n\t\t# Given a score threshold: there are only two classes, one less than the score and one greater than the score\n\t\tif score_threshold:\n\t\t\tself.database.loc[self.database[target_subject] < score_threshold, target_subject] = 0\n\t\t\tself.database.loc[self.database[target_subject] >= score_threshold, target_subject] = 1\n\t\t\n\t\telse:\n\t\t\t# Splits into 8 classes\n\t\t\tself.database[target_subject][(self.database[target_subject] < 14)] = 0\n\t\t\tself.database[target_subject][(self.database[target_subject] >= 14) & (self.database[target_subject] < 25)] = 1\n\t\t\tself.database[target_subject][(self.database[target_subject] >= 25) & (self.database[target_subject] < 37)] = 2\n\t\t\tself.database[target_subject][(self.database[target_subject] >= 37) & (self.database[target_subject] < 50)] = 3\n\t\t\tself.database[target_subject][(self.database[target_subject] >= 50) & (self.database[target_subject] < 63)] = 4\n\t\t\tself.database[target_subject][(self.database[target_subject] >= 63) & (self.database[target_subject] < 75)] = 5\n\t\t\tself.database[target_subject][(self.database[target_subject] >= 75) & (self.database[target_subject] < 87.5)] = 6\n\t\t\tself.database[target_subject][(self.database[target_subject] >= 87.5) & (self.database[target_subject] < 100)] = 7\n\n\t\t# Sets up an encoder to encode school names\n\t\tx_plot_encoder = LabelEncoder()\n\t\t# Gets the full y-value vector\n\t\ty = self.database[target_subject].values.astype(float)\n\t\t# Removes the irrelevant sections of the original data set\n\t\tx = remove_section(self.database, ['Biology', 'Math', 'English', 'StateNamePublicSchoolLatestavailableyear',\n\t\t 'LocationAddress1PublicSchool201415', 'LocationCityPublicSchool201415',\n\t\t 'LocationZIPPublicSchool201415', 'TitleISchoolStatusPublicSchool201415',\n\t\t 'LowestGradeOfferedPublicSchool201415',\n\t\t 'HighestGradeOfferedPublicSchool201415', 'District',\n\t\t 'Grades912StudentsPublicSchool201415',\n\t\t 'Grade12offeredPublicSchool201415',\n\t\t 'Grade11offeredPublicSchool201415',\n\t\t 'Grade10offeredPublicSchool201415',\n\t\t 'Grade9offeredPublicSchool201415'])\n\n\t\t# Gets a dataset without the names of the schools\n\t\tx_without_school_names = remove_section(x, ['SchoolNamePublicSchool201415'])\n\t\t# Gets training and validation sets\n\t\tx_train, x_test, y_train, y_test = train_test_split(x, y, train_size=.7, random_state=225530)\n\n\t\t# Fits an encoder to the school names in the training set\n\t\tx_train.SchoolNamePublicSchool201415 = x_plot_encoder.fit_transform(x_train.SchoolNamePublicSchool201415)\n\n\t\t# gets the 
integer values of the school names as they are encoded\n\t\tschool_encoded_train = x_train.SchoolNamePublicSchool201415.astype(int)\n\n\t\t# removes the school names from the training set\n\t\tx_train = remove_section(x_train, ['SchoolNamePublicSchool201415'])\n\n\t\t# creates a standard scaler and fits it to x_train\n\t\tka = StandardScaler().fit(x_train)\n\t\t# scales x_train\n\t\tx_train = ka.transform(x_train)\n\n\t\t# Does the previous steps to the testing set\n\t\tx_test.SchoolNamePublicSchool201415 = x_plot_encoder.fit_transform(x_test.SchoolNamePublicSchool201415)\n\t\tschool_encoded_test = x_test.SchoolNamePublicSchool201415\n\t\tx_test = remove_section(x_test, ['SchoolNamePublicSchool201415',])\n\t\tx_test = ka.transform(x_test)\n\n\t\t# writes the database out to a csv\n\t\ttry:\n\t\t\tx.to_csv('Databases/classification.csv')\n\t\texcept IOError:\n\t\t\tprint(\"Error writing database to file! Continuing...\")\n\n\t\t# Returns the segmented values for model building functions\n\t\treturn x_without_school_names, y, x_train, school_encoded_train, y_train, x_test, school_encoded_test, y_test\n\n\tdef regression_setup(self, target_subject='Math', degree=2):\n\t\t\"\"\"Setups NC Database for regression\n\t\t:param target_subject: Target subject, valid options are \"Math\", \"English\", or \"Biology\"\n\t\t:param degree: Optional definition to declare the degree of polynomial features\"\"\"\n\n\t\t# sets up target values\n\t\ty = self.database[target_subject].astype(float).values\n\n\t\t# Removes irrelevant values\n\t\tx = remove_section(self.database, ['Biology', 'Math', 'English', 'StateNamePublicSchoolLatestavailableyear',\n\t\t 'LocationAddress1PublicSchool201415', 'LocationCityPublicSchool201415',\n\t\t 'LocationZIPPublicSchool201415', 'TitleISchoolStatusPublicSchool201415',\n\t\t 'LowestGradeOfferedPublicSchool201415',\n\t\t 'HighestGradeOfferedPublicSchool201415', 'District',\n\t\t 'Grades912StudentsPublicSchool201415',\n\t\t 'Grade12offeredPublicSchool201415',\n\t\t 'Grade11offeredPublicSchool201415',\n\t\t 'Grade10offeredPublicSchool201415',\n\t\t 'Grade9offeredPublicSchool201415'])\n\t\t# Creates an encoder\n\t\tx_plot_encoder = LabelEncoder()\n\n\t\t# Gets rid of schools names and splits the data sets\n\t\tx_without_school_names = remove_section(x, ['SchoolNamePublicSchool201415'])\n\t\tx_train, x_test, y_train, y_test = train_test_split(x, y, train_size=.7, random_state=225)\n\n\t\t# Fits the encoder to the school names and remove the sections\n\t\tx_train.SchoolNamePublicSchool201415 = x_plot_encoder.fit_transform(x_train.SchoolNamePublicSchool201415)\n\t\tschool_encoded_train = x_train.SchoolNamePublicSchool201415.astype(int)\n\t\tx_train = remove_section(x_train, ['SchoolNamePublicSchool201415'])\n\n\t\t# Creates polynomial features\n\t\tx_train = PolynomialFeatures(degree).fit_transform(x_train)\n\t\tka = StandardScaler().fit(x_train)\n\t\tx_train = ka.transform(x_train)\n\n\t\t# sets the same features up on the test set\n\t\tx_test.SchoolNamePublicSchool201415 = x_plot_encoder.fit_transform(x_test.SchoolNamePublicSchool201415)\n\t\tschool_encoded_test = x_test.SchoolNamePublicSchool201415\n\t\tx_test = remove_section(x_test, ['SchoolNamePublicSchool201415',])\n\t\tx_test = PolynomialFeatures(degree).fit_transform(x_test)\n\t\tx_test = ka.transform(x_test)\n\n\t\t# Saves a copy of the current database\n\t\ttry:\n\t\t\tx.to_csv('Databases/regression.csv')\n\t\texcept IOError:\n\t\t\tprint(\"Failed to save the database to file! 
Continuing....\")\n\t\t\n\t\treturn x_without_school_names, y, x_train, school_encoded_train, y_train, x_test, school_encoded_test, y_test\n" ]
[ [ "sklearn.model_selection.train_test_split", "sklearn.preprocessing.LabelEncoder", "sklearn.preprocessing.PolynomialFeatures", "sklearn.preprocessing.StandardScaler" ] ]
aishifugi/generating-sound-with-neural-networks
[ "4e71d22683edb9bd56aa46de3f022f4e1dec1cf1" ]
[ "11 Implementing VAE/code/analysis.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom autoencoder import VAE\nfrom train import load_mnist\n\n\ndef select_images(images, labels, num_images=10):\n sample_images_index = np.random.choice(range(len(images)), num_images)\n sample_images = images[sample_images_index]\n sample_labels = labels[sample_images_index]\n return sample_images, sample_labels\n\n\ndef plot_reconstructed_images(images, reconstructed_images):\n fig = plt.figure(figsize=(15, 3))\n num_images = len(images)\n for i, (image, reconstructed_image) in enumerate(zip(images, reconstructed_images)):\n image = image.squeeze()\n ax = fig.add_subplot(2, num_images, i + 1)\n ax.axis(\"off\")\n ax.imshow(image, cmap=\"gray_r\")\n reconstructed_image = reconstructed_image.squeeze()\n ax = fig.add_subplot(2, num_images, i + num_images + 1)\n ax.axis(\"off\")\n ax.imshow(reconstructed_image, cmap=\"gray_r\")\n plt.show()\n\n\ndef plot_images_encoded_in_latent_space(latent_representations, sample_labels):\n plt.figure(figsize=(10, 10))\n plt.scatter(latent_representations[:, 0],\n latent_representations[:, 1],\n cmap=\"rainbow\",\n c=sample_labels,\n alpha=0.5,\n s=2)\n plt.colorbar()\n plt.show()\n\n\nif __name__ == \"__main__\":\n autoencoder = VAE.load(\"model\")\n x_train, y_train, x_test, y_test = load_mnist()\n\n num_sample_images_to_show = 8\n sample_images, _ = select_images(x_test, y_test, num_sample_images_to_show)\n reconstructed_images, _ = autoencoder.reconstruct(sample_images)\n plot_reconstructed_images(sample_images, reconstructed_images)\n\n num_images = 6000\n sample_images, sample_labels = select_images(x_test, y_test, num_images)\n _, latent_representations = autoencoder.reconstruct(sample_images)\n plot_images_encoded_in_latent_space(latent_representations, sample_labels)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.scatter", "matplotlib.pyplot.figure" ] ]
kamalkraj/TAPAS-TF2
[ "d4ecc09d418d8de5481220b44548e437c77af9bc" ]
[ "tapas/models/segmented_tensor.py" ]
[ "# coding=utf-8\n# Copyright 2019 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Lint as: python3\n\"\"\"A tensor supporting reduction over irregularly grouped entries.\n\nA segmented tensor is a tensor with a set of indices {0, ..., num_segments - 1}\nand an index map that assigns an index to each element of the tensor. Two\nelements with the same index are considered grouped together. The set of all\nelements with index `k` is called the segment over k.\n\nSegmented tensors support reductions over segments (reduce_mean, reduce_sum,\netc.). A typical example is performing reductions over table cells:\n\n```\n# Prepare a tf.Tensor with table values.\nvalues = ...\n\n# Prepare the table indices, either by rows or columns. The shape of `row_ids`\n# and `col_ids` has to be a prefix of the shape of `values`.\nrow_index = segmented_tensor.IndexMap(indices=row_ids, num_segments=max_rows)\ncol_index = segmented_tensor.IndexMap(indices=col_ids, num_segments=max_cols)\n\n# Combine the indices to get a table indexed by cell. The result has\n# `num_segments` equal to row_ids * col_ids.\ncell_index = segmented_tensor.ProductIndexMap(row_index, col_index)\n\n# Compute the averages per cell. The result is a `Tensor` with shape\n# [max_rows * max_cols, ..] together with an index map on it. The index map is\n# equal to range(max_rows * max_cols).\ncell_averages, _ = segmented_tensor.reduce_mean(values, cell_index)\n\n# Gather the results to get back a Tensor with the same shape as `cell_index`.\n# If there are multiple elements in the same cell they will have the same value.\ntoken_to_its_cell_average = segmented_tensor.gather(cell_averages, cell_index)\n```\n\nBatching is supported by setting `batch_dims`. The first `batch_dims` dimensions\nwill be treated as the batch. Elements of different batches are never grouped\ntogether, not even if they have the same index.\n\"\"\"\n\nimport tensorflow as tf\n\n\nclass IndexMap(object):\n \"\"\"Index grouping entries within a tensor.\"\"\"\n\n def __init__(self, indices, num_segments, batch_dims=0):\n \"\"\"Creates an index.\n\n Args:\n indices: <int32> Tensor of indices, same shape as `values`.\n num_segments: <int32> Scalar tensor, the number of segments. All elements\n in a batched segmented tensor must have the same number of segments\n (although many segments can be empty).\n batch_dims: Python integer, the number of batch dimensions. The first\n `batch_dims` dimensions of a SegmentedTensor are treated as batch\n dimensions. 
Segments in different batch elements are always distinct\n even if they have the same index.\n \"\"\"\n self.indices = tf.convert_to_tensor(indices)\n self.num_segments = tf.convert_to_tensor(num_segments)\n self.batch_dims = batch_dims\n\n def batch_shape(self):\n return tf.shape(self.indices)[:self.batch_dims]\n\n\nclass ProductIndexMap(IndexMap):\n \"\"\"The product of two indices.\"\"\"\n\n def __init__(self, outer_index, inner_index):\n \"\"\"Combines indices i and j into pairs (i, j).\n\n The result is an index where each segment (i, j) is the intersection of\n segments i and j. For example if the inputs represent table cells indexed by\n respectively rows and columns the output will be a table indexed by\n (row, column) pairs, i.e. by cell.\n\n The implementation combines indices {0, .., n - 1} and {0, .., m - 1} into\n {0, .., nm - 1}. The output has `num_segments` equal to\n `outer_index.num_segements` * `inner_index.num_segments`.\n\n Args:\n outer_index: IndexMap.\n inner_index: IndexMap, must have the same shape as `outer_index`.\n \"\"\"\n if outer_index.batch_dims != inner_index.batch_dims:\n raise ValueError('outer_index.batch_dims and inner_index.batch_dims '\n 'must be the same.')\n\n super(ProductIndexMap, self).__init__(\n indices=(inner_index.indices +\n outer_index.indices * inner_index.num_segments),\n num_segments=inner_index.num_segments * outer_index.num_segments,\n batch_dims=inner_index.batch_dims)\n self.outer_index = outer_index\n self.inner_index = inner_index\n\n def project_outer(self, index):\n \"\"\"Projects an index with the same index set onto the outer components.\"\"\"\n return IndexMap(\n indices=tf.math.floordiv(index.indices, self.inner_index.num_segments),\n num_segments=self.outer_index.num_segments,\n batch_dims=index.batch_dims)\n\n def project_inner(self, index):\n \"\"\"Projects an index with the same index set onto the inner components.\"\"\"\n return IndexMap(\n indices=tf.math.floormod(index.indices, self.inner_index.num_segments),\n num_segments=self.inner_index.num_segments,\n batch_dims=index.batch_dims)\n\n\ndef gather(values, index, name='segmented_gather'):\n \"\"\"Gathers from `values` using the index map.\n\n For each element in the domain of the index map this operation looks up a\n value for that index in `values`. Two elements from the same segment always\n get assigned the same value.\n\n Args:\n values: [B1, ..., Bn, num_segments, V1, ...] Tensor with segment values.\n index: [B1, ..., Bn, I1, ..., Ik] IndexMap.\n name: Name for the TensorFlow operation.\n\n Returns:\n [B1, ..., Bn, I1, ..., Ik, V1, ...] Tensor with the gathered values.\n \"\"\"\n return tf.gather(\n values, index.indices, batch_dims=index.batch_dims, name=name)\n\n\ndef flatten(index, name='segmented_flatten'):\n \"\"\"Flattens a batched index map to a 1d index map.\n\n This operation relabels the segments to keep batch elements distinct. The k-th\n batch element will have indices shifted by `num_segments` * (k - 1). 
The\n result is a tensor with `num_segments` multiplied by the number of elements\n in the batch.\n\n Args:\n index: IndexMap to flatten.\n name: Name for the TensorFlow operation.\n\n Returns:\n The flattened IndexMap.\n \"\"\"\n batch_size = tf.reduce_prod(index.batch_shape())\n offset = tf.range(batch_size) * index.num_segments\n offset = tf.reshape(offset, index.batch_shape())\n for _ in range(index.batch_dims, index.indices.shape.rank):\n offset = tf.expand_dims(offset, -1)\n\n indices = offset + index.indices\n return IndexMap(\n indices=tf.reshape(indices, [-1]),\n num_segments=index.num_segments * batch_size,\n batch_dims=0)\n\n\ndef range_index_map(batch_shape, num_segments, name='range_index_map'):\n \"\"\"Constructs an index map equal to range(num_segments).\"\"\"\n batch_shape = tf.convert_to_tensor(batch_shape)\n batch_shape.shape.assert_has_rank(1)\n num_segments = tf.convert_to_tensor(num_segments)\n num_segments.shape.assert_has_rank(0)\n\n indices = tf.range(num_segments)\n shape = tf.concat([\n tf.ones_like(batch_shape, dtype=tf.int32),\n tf.expand_dims(num_segments, axis=0)\n ],\n axis=0)\n indices = tf.reshape(indices, shape)\n multiples = tf.concat([batch_shape, [1]], axis=0)\n indices = tf.tile(indices, multiples)\n return IndexMap(\n indices=indices,\n num_segments=num_segments,\n batch_dims=batch_shape.shape.as_list()[0])\n\n\ndef _segment_reduce(values, index, segment_reduce_fn, name):\n \"\"\"Applies a segment reduction segment-wise.\"\"\"\n\n # Flatten the batch dimensions, as segments ops do not support batching.\n # However if `values` has extra dimensions to the right keep them\n # unflattened. Segmented ops support vector-valued operations.\n flat_index = flatten(index)\n vector_shape = tf.shape(values)[index.indices.shape.rank:]\n flattened_shape = tf.concat([[-1], vector_shape], axis=0)\n flat_values = tf.reshape(values, flattened_shape)\n segment_means = segment_reduce_fn(\n data=flat_values,\n segment_ids=flat_index.indices,\n num_segments=flat_index.num_segments)\n\n # Unflatten the values.\n new_shape = tf.concat(\n [index.batch_shape(), [index.num_segments], vector_shape], axis=0)\n output_values = tf.reshape(segment_means, new_shape)\n output_index = range_index_map(index.batch_shape(), index.num_segments)\n return output_values, output_index\n\n\ndef reduce_mean(values, index, name='segmented_reduce_mean'):\n \"\"\"Averages a tensor over its segments.\n\n Outputs 0 for empty segments.\n\n This operations computes the mean over segments, with support for:\n - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in\n a batch can have different indices.\n - Vectorization using the last dimension [V1, V2, ...]. If they are present\n the output will be a mean of vectors rather than scalars.\n\n Only the middle dimensions [I1, ..., Ik] are reduced by the operation.\n\n Args:\n values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to be\n averaged.\n index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments.\n name: Name for the TensorFlow ops.\n\n Returns:\n A pair (output_values, output_index) where `output_values` is a tensor\n of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..] 
and `index` is an\n IndexMap with shape [B1, B2, ..., Bn, num_segments].\n \"\"\"\n return _segment_reduce(values, index, tf.math.unsorted_segment_mean, name)\n\n\ndef reduce_sum(values, index, name='segmented_reduce_sum'):\n \"\"\"Sums a tensor over its segments.\n\n Outputs 0 for empty segments.\n\n This operations computes the sum over segments, with support for:\n - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in\n a batch can have different indices.\n - Vectorization using the last dimension [V1, V2, ...]. If they are present\n the output will be a sum of vectors rather than scalars.\n\n Only the middle dimensions [I1, ..., Ik] are reduced by the operation.\n\n Args:\n values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to be\n averaged.\n index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments.\n name: Name for the TensorFlow ops.\n\n Returns:\n A pair (output_values, output_index) where `output_values` is a tensor\n of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..] and `index` is an\n IndexMap with shape [B1, B2, ..., Bn, num_segments].\n \"\"\"\n return _segment_reduce(values, index, tf.math.unsorted_segment_sum, name)\n\n\ndef reduce_max(values, index, name='segmented_reduce_max'):\n \"\"\"Computes the maximum over segments.\n\n This operations computes the maximum over segments, with support for:\n - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in\n a batch can have different indices.\n - Vectorization using the last dimension [V1, V2, ...]. If they are present\n the output will be an element-wise maximum of vectors rather than scalars.\n\n Only the middle dimensions [I1, ..., Ik] are reduced by the operation.\n\n Args:\n values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to be\n averaged.\n index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments.\n name: Name for the TensorFlow ops.\n\n Returns:\n A pair (output_values, output_index) where `output_values` is a tensor\n of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..] and `index` is an\n IndexMap with shape [B1, B2, ..., Bn, num_segments].\n \"\"\"\n return _segment_reduce(values, index, tf.math.unsorted_segment_max, name)\n\n\ndef reduce_min(values, index, name='segmented_reduce_min'):\n \"\"\"Computes the minimum over segments.\"\"\"\n return _segment_reduce(values, index, tf.math.unsorted_segment_min, name)\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.range", "tensorflow.shape", "tensorflow.concat", "tensorflow.expand_dims", "tensorflow.tile", "tensorflow.ones_like", "tensorflow.reshape", "tensorflow.math.floordiv", "tensorflow.gather", "tensorflow.math.floormod" ] ]
jacke121/X-Detector
[ "a24e370a5acb6f5c29cd5db81fa4270f2697b8c1" ]
[ "dataset/pascal_voc.py" ]
[ "# --------------------------------------------------------\n# RON\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Kong Tao\n# date Nov.18, 2016\n# --------------------------------------------------------\nfrom voc_eval import voc_eval\nimport datasets\nimport datasets.pascal_voc\nimport os\nimport datasets.imdb\nimport xml.dom.minidom as minidom\nimport numpy as np\nimport scipy.sparse\nimport scipy.io as sio\n# import utils.cython_bbox\nimport pickle\nimport subprocess\n# from fast_rcnn.config import cfg\nimport cv2\nimport matplotlib.pyplot as plt\nimport copy\nimport numpy.random as npr\n\nclass pascal_voc(datasets.imdb):\n def __init__(self, image_set, year, devkit_path=None):\n datasets.imdb.__init__(self, 'voc_' + year + '_' + image_set)\n self._year = year\n self._image_set = image_set\n self._devkit_path = self._get_default_path() if devkit_path is None \\\n else devkit_path\n self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)\n self._classes = ('__background__', # always index 0\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n\n self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n self._image_ext = '.jpg'\n \n self._image_index = self._load_image_set_index()\n \n # Default to roidb handler\n self._roidb_handler = self.gt_roidb\n self._comp_id = 'comp4'\n # PASCAL specific config options\n self.config = {'cleanup' : True,\n 'use_salt' : True}\n\n assert os.path.exists(self._devkit_path), \\\n 'VOCdevkit path does not exist: {}'.format(self._devkit_path)\n assert os.path.exists(self._data_path), \\\n 'Path does not exist: {}'.format(self._data_path)\n\n def image_path_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return self.image_path_from_index(self._image_index[i])\n\n def image_path_from_index(self, index):\n \"\"\"\n Construct an image path from the image's \"index\" identifier.\n \"\"\"\n image_path = os.path.join(self._data_path, 'JPEGImages',\n index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path\n\n def _load_image_set_index(self):\n \"\"\"\n Load the indexes listed in this dataset's image set file.\n \"\"\"\n # Example path to image set file:\n # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt\n image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index\n \n def _get_default_path(self):\n \"\"\"\n Return the default path where PASCAL VOC is expected to be installed.\n \"\"\"\n return os.path.join('data', 'VOCdevkit' + self._year)\n\n def gt_roidb(self):\n \"\"\"\n Return the database of ground-truth regions of interest.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n gt_roidb = [self._load_pascal_annotation(index) for index in self._image_index]\n \n for i in range(len(self._image_index)):\n gt_roidb[i]['image'] = self.image_path_at(i)\n\n return gt_roidb \n\n def _load_pascal_annotation(self, index):\n \"\"\"\n Load image and bounding boxes info from XML file in the PASCAL VOC\n format.\n \"\"\"\n filename = 
os.path.join(self._data_path, 'Annotations', index + '.xml')\n def get_data_from_tag(node, tag):\n return node.getElementsByTagName(tag)[0].childNodes[0].data\n\n with open(filename) as f:\n data = minidom.parseString(f.read())\n\n objs = data.getElementsByTagName('object')\n num_objs = len(objs)\n #print self.num_classes\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # Load object bounding boxes into a data frame.\n for ix, obj in enumerate(objs):\n # Make pixel indexes 0-based\n x1 = float(get_data_from_tag(obj, 'xmin')) - 1\n y1 = float(get_data_from_tag(obj, 'ymin')) - 1\n x2 = float(get_data_from_tag(obj, 'xmax')) - 1\n y2 = float(get_data_from_tag(obj, 'ymax')) - 1\n cls = self._class_to_ind[\n str(get_data_from_tag(obj, \"name\")).lower().strip()]\n boxes[ix, :] = [x1, y1, x2, y2]\n gt_classes[ix] = cls\n overlaps[ix, cls] = 1.0\n\n return {'boxes' : boxes,\n 'gt_classes': gt_classes,\n 'gt_overlaps' : overlaps,\n 'flipped' : False}\n\n def _get_voc_results_file_template(self):\n # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt\n filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'\n path = os.path.join(\n self._devkit_path,\n 'results',\n 'VOC' + self._year,\n 'Main',\n filename)\n return path\n def _get_comp_id(self):\n comp_id = self._comp_id\n return comp_id\n\n def _write_voc_results_file(self, all_boxes):\n for cls_ind, cls in enumerate(self.classes):\n if cls == '__background__':\n continue\n print('Writing {} VOC results file'.format(cls))\n filename = self._get_voc_results_file_template().format(cls)\n with open(filename, 'wt') as f:\n for im_ind, index in enumerate(self.image_index):\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n # the VOCdevkit expects 1-based indices\n for k in range(dets.shape[0]):\n f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.\n format(index, dets[k, -1],\n dets[k, 0] + 1, dets[k, 1] + 1,\n dets[k, 2] + 1, dets[k, 3] + 1))\n\n\n def _do_matlab_eval(self, comp_id, output_dir='output'):\n rm_results = self.config['cleanup']\n\n path = os.path.join(os.path.dirname(__file__),\n 'VOCdevkit-matlab-wrapper')\n cmd = 'cd {} && '.format(path)\n cmd += '{:s} -nodisplay -nodesktop '.format(datasets.MATLAB)\n cmd += '-r \"dbstop if error; '\n cmd += 'voc_eval(\\'{:s}\\',\\'{:s}\\',\\'{:s}\\',\\'{:s}\\',{:d}); quit;\"' \\\n .format(self._devkit_path, comp_id,\n self._image_set, output_dir, int(rm_results))\n print('Running:\\n{}'.format(cmd))\n status = subprocess.call(cmd, shell=True)\n\n def evaluate_detections(self, all_boxes, output_dir):\n comp_id = self._write_voc_results_file(all_boxes)\n #self._do_matlab_eval(comp_id, output_dir)\n self._do_python_eval(output_dir)\n \n def _do_python_eval(self, output_dir = 'output'):\n annopath = os.path.join(\n self._devkit_path,\n 'VOC' + self._year,\n 'Annotations',\n '{:s}.xml')\n imagesetfile = os.path.join(\n self._devkit_path,\n 'VOC' + self._year,\n 'ImageSets',\n 'Main',\n self._image_set + '.txt')\n cachedir = os.path.join(self._devkit_path, 'annotations_cache')\n aps = []\n # The PASCAL VOC metric changed in 2010\n use_07_metric = True if int(self._year) < 2010 else False\n print('VOC07 metric? 
' + ('Yes' if use_07_metric else 'No'))\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n for i, cls in enumerate(self._classes):\n if cls == '__background__':\n continue\n filename = self._get_voc_results_file_template().format(cls)\n rec, prec, ap = voc_eval(\n filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,\n use_07_metric=use_07_metric)\n aps += [ap]\n print('AP for {} = {:.4f}'.format(cls, ap))\n with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:\n pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)\n print('Mean AP = {:.4f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('Results:')\n for ap in aps:\n print('{:.3f}'.format(ap))\n print('{:.3f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('')\n print('--------------------------------------------------------------')\n print('Results computed with the **unofficial** Python eval code.')\n print('Results should be very close to the official MATLAB eval code.')\n print('Recompute with `./tools/reval.py --matlab ...` for your paper.')\n print('-- Thanks, The Management')\n print('--------------------------------------------------------------')\n\n def competition_mode(self, on):\n if on:\n self.config['use_salt'] = False\n self.config['cleanup'] = False\n else:\n self.config['use_salt'] = True\n self.config['cleanup'] = True\n\nif __name__ == '__main__':\n d = datasets.pascal_voc('trainval', '2007')\n res = d.roidb\n from IPython import embed; embed()\n" ]
[ [ "numpy.mean", "numpy.zeros" ] ]
markop159/plugin.video.flixtor
[ "b5aedf559772e2e50ed28be576e6962080d03c93" ]
[ "resources/lib/js2py/base.py" ]
[ "'''Most important file in Js2Py implementation: PyJs class - father of all PyJs objects'''\nfrom copy import copy\nimport re\n\nfrom .translators.friendly_nodes import REGEXP_CONVERTER\nfrom .utils.injector import fix_js_args\nfrom types import FunctionType, ModuleType, GeneratorType, BuiltinFunctionType, MethodType, BuiltinMethodType\nfrom math import floor, log10\nimport traceback\ntry:\n import numpy\n NUMPY_AVAILABLE = True\nexcept:\n NUMPY_AVAILABLE = False\n\n# python 3 support\nfrom .utils import six\nif six.PY3:\n basestring = str\n long = int\n xrange = range\n unicode = str\n\n\ndef str_repr(s):\n if six.PY2:\n return repr(s.encode('utf-8'))\n else:\n return repr(s)\n\n\ndef MakeError(name, message):\n \"\"\"Returns PyJsException with PyJsError inside\"\"\"\n return JsToPyException(ERRORS[name](Js(message)))\n\n\ndef to_python(val):\n if not isinstance(val, PyJs):\n return val\n if isinstance(val, PyJsUndefined) or isinstance(val, PyJsNull):\n return None\n elif isinstance(val, PyJsNumber):\n # this can be either float or long/int better to assume its int/long when a whole number...\n v = val.value\n try:\n i = int(v) if v == v else v # nan...\n return v if i != v else i\n except:\n return v\n elif isinstance(val, (PyJsString, PyJsBoolean)):\n return val.value\n elif isinstance(val, PyObjectWrapper):\n return val.__dict__['obj']\n elif isinstance(val, PyJsArray) and val.CONVERT_TO_PY_PRIMITIVES:\n return to_list(val)\n elif isinstance(val, PyJsObject) and val.CONVERT_TO_PY_PRIMITIVES:\n return to_dict(val)\n else:\n return JsObjectWrapper(val)\n\n\ndef to_dict(js_obj,\n known=None): # fixed recursion error in self referencing objects\n res = {}\n if known is None:\n known = {}\n if js_obj in known:\n return known[js_obj]\n known[js_obj] = res\n for k in js_obj:\n name = k.value\n input = js_obj.get(name)\n output = to_python(input)\n if isinstance(output, JsObjectWrapper):\n if output._obj.Class == 'Object':\n output = to_dict(output._obj, known)\n known[input] = output\n elif output._obj.Class in [\n 'Array', 'Int8Array', 'Uint8Array', 'Uint8ClampedArray',\n 'Int16Array', 'Uint16Array', 'Int32Array', 'Uint32Array',\n 'Float32Array', 'Float64Array'\n ]:\n output = to_list(output._obj)\n known[input] = output\n res[name] = output\n return res\n\n\ndef to_list(js_obj, known=None):\n res = len(js_obj) * [None]\n if known is None:\n known = {}\n if js_obj in known:\n return known[js_obj]\n known[js_obj] = res\n for k in js_obj:\n try:\n name = int(k.value)\n except:\n continue\n input = js_obj.get(str(name))\n output = to_python(input)\n if isinstance(output, JsObjectWrapper):\n if output._obj.Class in [\n 'Array', 'Int8Array', 'Uint8Array', 'Uint8ClampedArray',\n 'Int16Array', 'Uint16Array', 'Int32Array', 'Uint32Array',\n 'Float32Array', 'Float64Array', 'Arguments'\n ]:\n output = to_list(output._obj, known)\n known[input] = output\n elif output._obj.Class in ['Object']:\n output = to_dict(output._obj)\n known[input] = output\n res[name] = output\n return res\n\n\ndef HJs(val):\n if hasattr(val, '__call__'): #\n\n @Js\n def PyWrapper(this, arguments, var=None):\n args = tuple(to_python(e) for e in arguments.to_list())\n try:\n py_res = val.__call__(*args)\n except Exception as e:\n message = 'your Python function failed! 
'\n try:\n message += str(e)\n except:\n pass\n raise MakeError('Error', message)\n return py_wrap(py_res)\n\n try:\n PyWrapper.func_name = val.__name__\n except:\n pass\n return PyWrapper\n if isinstance(val, tuple):\n val = list(val)\n return Js(val)\n\n\ndef Js(val, Clamped=False):\n '''Converts Py type to PyJs type'''\n if isinstance(val, PyJs):\n return val\n elif val is None:\n return undefined\n elif isinstance(val, basestring):\n return PyJsString(val, StringPrototype)\n elif isinstance(val, bool):\n return true if val else false\n elif isinstance(val, float) or isinstance(val, int) or isinstance(\n val, long) or (NUMPY_AVAILABLE and isinstance(\n val,\n (numpy.int8, numpy.uint8, numpy.int16, numpy.uint16,\n numpy.int32, numpy.uint32, numpy.float32, numpy.float64))):\n # This is supposed to speed things up. may not be the case\n if val in NUM_BANK:\n return NUM_BANK[val]\n return PyJsNumber(float(val), NumberPrototype)\n elif isinstance(val, FunctionType):\n return PyJsFunction(val, FunctionPrototype)\n #elif isinstance(val, ModuleType):\n # mod = {}\n # for name in dir(val):\n # value = getattr(val, name)\n # if isinstance(value, ModuleType):\n # continue # prevent recursive module conversion\n # try:\n # jsval = HJs(value)\n # except RuntimeError:\n # print 'Could not convert %s to PyJs object!' % name\n # continue\n # mod[name] = jsval\n # return Js(mod)\n #elif isintance(val, ClassType):\n\n elif isinstance(val, dict): # convert to object\n temp = PyJsObject({}, ObjectPrototype)\n for k, v in six.iteritems(val):\n temp.put(Js(k), Js(v))\n return temp\n elif isinstance(val, (list, tuple)): #Convert to array\n return PyJsArray(val, ArrayPrototype)\n # convert to typedarray\n elif isinstance(val, JsObjectWrapper):\n return val.__dict__['_obj']\n elif NUMPY_AVAILABLE and isinstance(val, numpy.ndarray):\n if val.dtype == numpy.int8:\n return PyJsInt8Array(val, Int8ArrayPrototype)\n elif val.dtype == numpy.uint8 and not Clamped:\n return PyJsUint8Array(val, Uint8ArrayPrototype)\n elif val.dtype == numpy.uint8 and Clamped:\n return PyJsUint8ClampedArray(val, Uint8ClampedArrayPrototype)\n elif val.dtype == numpy.int16:\n return PyJsInt16Array(val, Int16ArrayPrototype)\n elif val.dtype == numpy.uint16:\n return PyJsUint16Array(val, Uint16ArrayPrototype)\n\n elif val.dtype == numpy.int32:\n return PyJsInt32Array(val, Int32ArrayPrototype)\n elif val.dtype == numpy.uint32:\n return PyJsUint16Array(val, Uint32ArrayPrototype)\n\n elif val.dtype == numpy.float32:\n return PyJsFloat32Array(val, Float32ArrayPrototype)\n elif val.dtype == numpy.float64:\n return PyJsFloat64Array(val, Float64ArrayPrototype)\n else: # try to convert to js object\n return py_wrap(val)\n #raise RuntimeError('Cant convert python type to js (%s)' % repr(val))\n #try:\n # obj = {}\n # for name in dir(val):\n # if name.startswith('_'): #dont wrap attrs that start with _\n # continue\n # value = getattr(val, name)\n # import types\n # if not isinstance(value, (FunctionType, BuiltinFunctionType, MethodType, BuiltinMethodType,\n # dict, int, basestring, bool, float, long, list, tuple)):\n # continue\n # obj[name] = HJs(value)\n # return Js(obj)\n #except:\n # raise RuntimeError('Cant convert python type to js (%s)' % repr(val))\n\n\ndef Type(val):\n try:\n return val.TYPE\n except:\n raise RuntimeError('Invalid type: ' + str(val))\n\n\ndef is_data_descriptor(desc):\n return desc and ('value' in desc or 'writable' in desc)\n\n\ndef is_accessor_descriptor(desc):\n return desc and ('get' in desc or 'set' in desc)\n\n\ndef 
is_generic_descriptor(desc):\n return desc and not (is_data_descriptor(desc)\n or is_accessor_descriptor(desc))\n\n\n##############################################################################\n\n\nclass PyJs(object):\n PRIMITIVES = frozenset(\n ['String', 'Number', 'Boolean', 'Undefined', 'Null'])\n TYPE = 'Object'\n Class = None\n extensible = True\n prototype = None\n own = {}\n GlobalObject = None\n IS_CHILD_SCOPE = False\n CONVERT_TO_PY_PRIMITIVES = False\n value = None\n buff = None\n\n def __init__(self, value=None, prototype=None, extensible=False):\n '''Constructor for Number String and Boolean'''\n # I dont think this is needed anymore\n # if self.Class=='String' and not isinstance(value, basestring):\n # raise TypeError\n # if self.Class=='Number':\n # if not isinstance(value, float):\n # if not (isinstance(value, int) or isinstance(value, long)):\n # raise TypeError\n # value = float(value)\n # if self.Class=='Boolean' and not isinstance(value, bool):\n # raise TypeError\n self.value = value\n self.extensible = extensible\n self.prototype = prototype\n self.own = {}\n self.buff = None\n\n def is_undefined(self):\n return self.Class == 'Undefined'\n\n def is_null(self):\n return self.Class == 'Null'\n\n def is_primitive(self):\n return self.TYPE in self.PRIMITIVES\n\n def is_object(self):\n return not self.is_primitive()\n\n def _type(self):\n return Type(self)\n\n def is_callable(self):\n return hasattr(self, 'call')\n\n def get_own_property(self, prop):\n return self.own.get(prop)\n\n def get_property(self, prop):\n cand = self.get_own_property(prop)\n if cand:\n return cand\n if self.prototype is not None:\n return self.prototype.get_property(prop)\n\n def update_array(self):\n for i in range(self.get('length').to_uint32()):\n self.put(str(i), Js(self.buff[i]))\n\n def get(self, prop): #external use!\n #prop = prop.value\n if self.Class == 'Undefined' or self.Class == 'Null':\n raise MakeError('TypeError',\n 'Undefined and null dont have properties (tried getting property %s)' % repr(prop))\n if not isinstance(prop, basestring):\n prop = prop.to_string().value\n if not isinstance(prop, basestring): raise RuntimeError('Bug')\n if NUMPY_AVAILABLE and prop.isdigit():\n if isinstance(self.buff, numpy.ndarray):\n self.update_array()\n cand = self.get_property(prop)\n if cand is None:\n return Js(None)\n if is_data_descriptor(cand):\n return cand['value']\n if cand['get'].is_undefined():\n return cand['get']\n return cand['get'].call(self)\n\n def can_put(self, prop): #to check\n desc = self.get_own_property(prop)\n if desc: #if we have this property\n if is_accessor_descriptor(desc):\n return desc['set'].is_callable(\n ) # Check if setter method is defined\n else: #data desc\n return desc['writable']\n if self.prototype is not None:\n return self.extensible\n inherited = self.get_property(prop)\n if inherited is None:\n return self.extensible\n if is_accessor_descriptor(inherited):\n return not inherited['set'].is_undefined()\n elif self.extensible:\n return inherited['writable']\n return False\n\n def put(self, prop, val, op=None): #external use!\n '''Just like in js: self.prop op= val\n for example when op is '+' it will be self.prop+=val\n op can be either None for simple assignment or one of:\n * / % + - << >> & ^ |'''\n if self.Class == 'Undefined' or self.Class == 'Null':\n raise MakeError('TypeError',\n 'Undefined and null don\\'t have properties (tried setting property %s)' % repr(prop))\n if not isinstance(prop, basestring):\n prop = prop.to_string().value\n if 
NUMPY_AVAILABLE and prop.isdigit():\n if self.Class == 'Int8Array':\n val = Js(numpy.int8(val.to_number().value))\n elif self.Class == 'Uint8Array':\n val = Js(numpy.uint8(val.to_number().value))\n elif self.Class == 'Uint8ClampedArray':\n if val < Js(numpy.uint8(0)):\n val = Js(numpy.uint8(0))\n elif val > Js(numpy.uint8(255)):\n val = Js(numpy.uint8(255))\n else:\n val = Js(numpy.uint8(val.to_number().value))\n elif self.Class == 'Int16Array':\n val = Js(numpy.int16(val.to_number().value))\n elif self.Class == 'Uint16Array':\n val = Js(numpy.uint16(val.to_number().value))\n elif self.Class == 'Int32Array':\n val = Js(numpy.int32(val.to_number().value))\n elif self.Class == 'Uint32Array':\n val = Js(numpy.uint32(val.to_number().value))\n elif self.Class == 'Float32Array':\n val = Js(numpy.float32(val.to_number().value))\n elif self.Class == 'Float64Array':\n val = Js(numpy.float64(val.to_number().value))\n if isinstance(self.buff, numpy.ndarray):\n self.buff[int(prop)] = int(val.to_number().value)\n #we need to set the value to the incremented one\n if op is not None:\n val = getattr(self.get(prop), OP_METHODS[op])(val)\n if not self.can_put(prop):\n return val\n own_desc = self.get_own_property(prop)\n if is_data_descriptor(own_desc):\n if self.Class in [\n 'Array', 'Int8Array', 'Uint8Array', 'Uint8ClampedArray',\n 'Int16Array', 'Uint16Array', 'Int32Array', 'Uint32Array',\n 'Float32Array', 'Float64Array'\n ]:\n self.define_own_property(prop, {'value': val})\n else:\n self.own[prop]['value'] = val\n return val\n desc = self.get_property(prop)\n if is_accessor_descriptor(desc):\n desc['set'].call(self, (val, ))\n else:\n new = {\n 'value': val,\n 'writable': True,\n 'configurable': True,\n 'enumerable': True\n }\n if self.Class in [\n 'Array', 'Int8Array', 'Uint8Array', 'Uint8ClampedArray',\n 'Int16Array', 'Uint16Array', 'Int32Array', 'Uint32Array',\n 'Float32Array', 'Float64Array'\n ]:\n self.define_own_property(prop, new)\n else:\n self.own[prop] = new\n return val\n\n def has_property(self, prop):\n return self.get_property(prop) is not None\n\n def delete(self, prop):\n if not isinstance(prop, basestring):\n prop = prop.to_string().value\n desc = self.get_own_property(prop)\n if desc is None:\n return Js(True)\n if desc['configurable']:\n del self.own[prop]\n return Js(True)\n return Js(False)\n\n def default_value(\n self, hint=None\n ): # made a mistake at the very early stage and made it to prefer string... caused lots! of problems\n order = ('valueOf', 'toString')\n if hint == 'String' or (hint is None and self.Class == 'Date'):\n order = ('toString', 'valueOf')\n for meth_name in order:\n method = self.get(meth_name)\n if method is not None and method.is_callable():\n cand = method.call(self)\n if cand.is_primitive():\n return cand\n raise MakeError('TypeError',\n 'Cannot convert object to primitive value')\n\n def define_own_property(self, prop,\n desc): #Internal use only. External through Object\n # prop must be a Py string. Desc is either a descriptor or accessor.\n #Messy method - raw translation from Ecma spec to prevent any bugs. 
# todo check this\n current = self.get_own_property(prop)\n\n extensible = self.extensible\n if not current: #We are creating a new property\n if not extensible:\n return False\n if is_data_descriptor(desc) or is_generic_descriptor(desc):\n DEFAULT_DATA_DESC = {\n 'value': undefined, #undefined\n 'writable': False,\n 'enumerable': False,\n 'configurable': False\n }\n DEFAULT_DATA_DESC.update(desc)\n self.own[prop] = DEFAULT_DATA_DESC\n else:\n DEFAULT_ACCESSOR_DESC = {\n 'get': undefined, #undefined\n 'set': undefined, #undefined\n 'enumerable': False,\n 'configurable': False\n }\n DEFAULT_ACCESSOR_DESC.update(desc)\n self.own[prop] = DEFAULT_ACCESSOR_DESC\n return True\n if not desc or desc == current: #We dont need to change anything.\n return True\n configurable = current['configurable']\n if not configurable: #Prevent changing configurable or enumerable\n if desc.get('configurable'):\n return False\n if 'enumerable' in desc and desc['enumerable'] != current[\n 'enumerable']:\n return False\n if is_generic_descriptor(desc):\n pass\n elif is_data_descriptor(current) != is_data_descriptor(desc):\n if not configurable:\n return False\n if is_data_descriptor(current):\n del current['value']\n del current['writable']\n current['set'] = undefined #undefined\n current['get'] = undefined #undefined\n else:\n del current['set']\n del current['get']\n current['value'] = undefined #undefined\n current['writable'] = False\n elif is_data_descriptor(current) and is_data_descriptor(desc):\n if not configurable:\n if not current['writable'] and desc.get('writable'):\n return False\n if not current['writable'] and 'value' in desc and current[\n 'value'] != desc['value']:\n return False\n elif is_accessor_descriptor(current) and is_accessor_descriptor(desc):\n if not configurable:\n if 'set' in desc and desc['set'] is not current['set']:\n return False\n if 'get' in desc and desc['get'] is not current['get']:\n return False\n current.update(desc)\n return True\n\n #these methods will work only for Number class\n def is_infinity(self):\n assert self.Class == 'Number'\n return self.value == float('inf') or self.value == -float('inf')\n\n def is_nan(self):\n assert self.Class == 'Number'\n return self.value != self.value #nan!=nan evaluates to true\n\n def is_finite(self):\n return not (self.is_nan() or self.is_infinity())\n\n #Type Conversions. to_type. 
All must return pyjs subclass instance\n\n def to_primitive(self, hint=None):\n if self.is_primitive():\n return self\n if hint is None and (\n self.Class == 'Number' or self.Class == 'Boolean'\n ): # favour number for Class== Number or Boolean default = String\n hint = 'Number'\n return self.default_value(hint)\n\n def to_boolean(self):\n typ = Type(self)\n if typ == 'Boolean': #no need to convert\n return self\n elif typ == 'Null' or typ == 'Undefined': #they are both always false\n return false\n elif typ == 'Number' or typ == 'String': #false only for 0, '' and NaN\n return Js(bool(\n self.value\n and self.value == self.value)) # test for nan (nan -> flase)\n else: #object - always true\n return true\n\n def to_number(self):\n typ = Type(self)\n if typ == 'Null': #null is 0\n return Js(0)\n elif typ == 'Undefined': # undefined is NaN\n return NaN\n elif typ == 'Boolean': # 1 for true 0 for false\n return Js(int(self.value))\n elif typ == 'Number': # or self.Class=='Number': # no need to convert\n return self\n elif typ == 'String':\n s = self.value.strip() #Strip white space\n if not s: # '' is simply 0\n return Js(0)\n if 'x' in s or 'X' in s[:3]: #hex (positive only)\n try: # try to convert\n num = int(s, 16)\n except ValueError: # could not convert > NaN\n return NaN\n return Js(num)\n sign = 1 #get sign\n if s[0] in '+-':\n if s[0] == '-':\n sign = -1\n s = s[1:]\n if s == 'Infinity': #Check for infinity keyword. 'NaN' will be NaN anyway.\n return Js(sign * float('inf'))\n try: #decimal try\n num = sign * float(s) # Converted\n except ValueError:\n return NaN # could not convert to decimal > return NaN\n return Js(num)\n else: #object - most likely it will be NaN.\n return self.to_primitive('Number').to_number()\n\n def to_string(self):\n typ = Type(self)\n if typ == 'Null':\n return Js('null')\n elif typ == 'Undefined':\n return Js('undefined')\n elif typ == 'Boolean':\n return Js('true') if self.value else Js('false')\n elif typ == 'Number': #or self.Class=='Number':\n return Js(unicode(js_dtoa(self.value)))\n elif typ == 'String':\n return self\n else: #object\n return self.to_primitive('String').to_string()\n\n def to_object(self):\n typ = self.TYPE\n if typ == 'Null' or typ == 'Undefined':\n raise MakeError('TypeError',\n 'undefined or null can\\'t be converted to object')\n elif typ == 'Boolean': # Unsure here... 
todo repair here\n return Boolean.create(self)\n elif typ == 'Number': #?\n return Number.create(self)\n elif typ == 'String': #?\n return String.create(self)\n else: #object\n return self\n\n def to_int32(self):\n num = self.to_number()\n if num.is_nan() or num.is_infinity():\n return 0\n int32 = int(num.value) % 2**32\n return int(int32 - 2**32 if int32 >= 2**31 else int32)\n\n def strict_equality_comparison(self, other):\n return PyJsStrictEq(self, other)\n\n def cok(self):\n \"\"\"Check object coercible\"\"\"\n if self.Class in ('Undefined', 'Null'):\n raise MakeError('TypeError',\n 'undefined or null can\\'t be converted to object')\n\n def to_int(self):\n num = self.to_number()\n if num.is_nan():\n return 0\n elif num.is_infinity():\n return 10**20 if num.value > 0 else -10**20\n return int(num.value)\n\n def to_uint32(self):\n num = self.to_number()\n if num.is_nan() or num.is_infinity():\n return 0\n return int(num.value) % 2**32\n\n def to_uint16(self):\n num = self.to_number()\n if num.is_nan() or num.is_infinity():\n return 0\n return int(num.value) % 2**16\n\n def to_int16(self):\n num = self.to_number()\n if num.is_nan() or num.is_infinity():\n return 0\n int16 = int(num.value) % 2**16\n return int(int16 - 2**16 if int16 >= 2**15 else int16)\n\n def same_as(self, other):\n typ = Type(self)\n if typ != other.Class:\n return False\n if typ == 'Undefined' or typ == 'Null':\n return True\n if typ == 'Boolean' or typ == 'Number' or typ == 'String':\n return self.value == other.value\n else: #object\n return self is other #Id compare.\n\n #Not to be used by translation (only internal use)\n def __getitem__(self, item):\n return self.get(\n str(item) if not isinstance(item, PyJs) else item.to_string().\n value)\n\n def __setitem__(self, item, value):\n self.put(\n str(item) if not isinstance(item, PyJs) else\n item.to_string().value, Js(value))\n\n def __len__(self):\n try:\n return self.get('length').to_uint32()\n except:\n raise TypeError(\n 'This object (%s) does not have length property' % self.Class)\n\n #Oprators-------------\n #Unary, other will be implemented as functions. 
Increments and decrements\n # will be methods of Number class\n def __neg__(self): #-u\n return Js(-self.to_number().value)\n\n def __pos__(self): #+u\n return self.to_number()\n\n def __invert__(self): #~u\n return Js(Js(~self.to_int32()).to_int32())\n\n def neg(self): # !u cant do 'not u' :(\n return Js(not self.to_boolean().value)\n\n def __nonzero__(self):\n return self.to_boolean().value\n\n def __bool__(self):\n return self.to_boolean().value\n\n def typeof(self):\n if self.is_callable():\n return Js('function')\n typ = Type(self).lower()\n if typ == 'null':\n typ = 'object'\n return Js(typ)\n\n #Bitwise operators\n # <<, >>, &, ^, |\n\n # <<\n def __lshift__(self, other):\n lnum = self.to_int32()\n rnum = other.to_uint32()\n shiftCount = rnum & 0x1F\n return Js(Js(lnum << shiftCount).to_int32())\n\n # >>\n def __rshift__(self, other):\n lnum = self.to_int32()\n rnum = other.to_uint32()\n shiftCount = rnum & 0x1F\n return Js(Js(lnum >> shiftCount).to_int32())\n\n # >>>\n def pyjs_bshift(self, other):\n lnum = self.to_uint32()\n rnum = other.to_uint32()\n shiftCount = rnum & 0x1F\n return Js(Js(lnum >> shiftCount).to_uint32())\n\n # &\n def __and__(self, other):\n lnum = self.to_int32()\n rnum = other.to_int32()\n return Js(Js(lnum & rnum).to_int32())\n\n # ^\n def __xor__(self, other):\n lnum = self.to_int32()\n rnum = other.to_int32()\n return Js(Js(lnum ^ rnum).to_int32())\n\n # |\n def __or__(self, other):\n lnum = self.to_int32()\n rnum = other.to_int32()\n return Js(Js(lnum | rnum).to_int32())\n\n # Additive operators\n # + and - are implemented here\n\n # +\n def __add__(self, other):\n a = self.to_primitive()\n b = other.to_primitive()\n if a.TYPE == 'String' or b.TYPE == 'String':\n return Js(a.to_string().value + b.to_string().value)\n a = a.to_number()\n b = b.to_number()\n return Js(a.value + b.value)\n\n # -\n def __sub__(self, other):\n return Js(self.to_number().value - other.to_number().value)\n\n #Multiplicative operators\n # *, / and % are implemented here\n\n # *\n def __mul__(self, other):\n return Js(self.to_number().value * other.to_number().value)\n\n # /\n def __div__(self, other):\n a = self.to_number().value\n b = other.to_number().value\n if b:\n return Js(a / b)\n if not a or a != a:\n return NaN\n return Infinity if a > 0 else -Infinity\n\n # %\n def __mod__(self, other):\n a = self.to_number().value\n b = other.to_number().value\n if abs(a) == float('inf') or not b:\n return NaN\n if abs(b) == float('inf'):\n return Js(a)\n pyres = Js(a % b) #different signs in python and javascript\n #python has the same sign as b and js has the same\n #sign as a.\n if a < 0 and pyres.value > 0:\n pyres.value -= abs(b)\n elif a > 0 and pyres.value < 0:\n pyres.value += abs(b)\n return Js(pyres)\n\n #Comparisons (I dont implement === and !== here, these\n # will be implemented as external functions later)\n # <, <=, !=, ==, >=, > are implemented here.\n\n def abstract_relational_comparison(self, other, self_first=True):\n ''' self<other if self_first else other<self.\n Returns the result of the question: is self smaller than other?\n in case self_first is false it returns the answer of:\n is other smaller than self.\n result is PyJs type: bool or undefined'''\n px = self.to_primitive('Number')\n py = other.to_primitive('Number')\n if not self_first: #reverse order\n px, py = py, px\n if not (px.Class == 'String' and py.Class == 'String'):\n px, py = px.to_number(), py.to_number()\n if px.is_nan() or py.is_nan():\n return undefined\n return Js(px.value < py.value) # same 
cmp algorithm\n else:\n # I am pretty sure that python has the same\n # string cmp algorithm but I have to confirm it\n return Js(px.value < py.value)\n\n #<\n def __lt__(self, other):\n res = self.abstract_relational_comparison(other, True)\n if res.is_undefined():\n return false\n return res\n\n #<=\n def __le__(self, other):\n res = self.abstract_relational_comparison(other, False)\n if res.is_undefined():\n return false\n return res.neg()\n\n #>=\n def __ge__(self, other):\n res = self.abstract_relational_comparison(other, True)\n if res.is_undefined():\n return false\n return res.neg()\n\n #>\n def __gt__(self, other):\n res = self.abstract_relational_comparison(other, False)\n if res.is_undefined():\n return false\n return res\n\n def abstract_equality_comparison(self, other):\n ''' returns the result of JS == compare.\n result is PyJs type: bool'''\n tx, ty = self.TYPE, other.TYPE\n if tx == ty:\n if tx == 'Undefined' or tx == 'Null':\n return true\n if tx == 'Number' or tx == 'String' or tx == 'Boolean':\n return Js(self.value == other.value)\n return Js(self is other) # Object\n elif (tx == 'Undefined' and ty == 'Null') or (ty == 'Undefined'\n and tx == 'Null'):\n return true\n elif tx == 'Number' and ty == 'String':\n return self.abstract_equality_comparison(other.to_number())\n elif tx == 'String' and ty == 'Number':\n return self.to_number().abstract_equality_comparison(other)\n elif tx == 'Boolean':\n return self.to_number().abstract_equality_comparison(other)\n elif ty == 'Boolean':\n return self.abstract_equality_comparison(other.to_number())\n elif (tx == 'String' or tx == 'Number') and other.is_object():\n return self.abstract_equality_comparison(other.to_primitive())\n elif (ty == 'String' or ty == 'Number') and self.is_object():\n return self.to_primitive().abstract_equality_comparison(other)\n else:\n return false\n\n #==\n def __eq__(self, other):\n return self.abstract_equality_comparison(other)\n\n #!=\n def __ne__(self, other):\n return self.abstract_equality_comparison(other).neg()\n\n #Other methods (instanceof)\n\n def instanceof(self, other):\n '''checks if self is instance of other'''\n if not hasattr(other, 'has_instance'):\n return false\n return other.has_instance(self)\n\n #iteration\n def __iter__(self):\n #Returns a generator of all own enumerable properties\n # since the size od self.own can change we need to use different method of iteration.\n # SLOW! 
New items will NOT show up.\n returned = {}\n if not self.IS_CHILD_SCOPE:\n cands = sorted(\n name for name in self.own if self.own[name]['enumerable'])\n else:\n cands = sorted(name for name in self.own)\n for cand in cands:\n check = self.own.get(cand)\n if check and check['enumerable']:\n yield Js(cand)\n\n def contains(self, other):\n if not self.is_object():\n raise MakeError(\n 'TypeError',\n \"You can\\'t use 'in' operator to search in non-objects\")\n return Js(self.has_property(other.to_string().value))\n\n #Other Special methods\n def __call__(self, *args):\n '''Call a property prop as a function (this will be global object).\n\n NOTE: dont pass this and arguments here, these will be added\n automatically!'''\n if not self.is_callable():\n raise MakeError('TypeError',\n '%s is not a function' % self.typeof())\n return self.call(self.GlobalObject, args)\n\n def create(self, *args):\n '''Generally not a constructor, raise an error'''\n raise MakeError('TypeError', '%s is not a constructor' % self.Class)\n\n def __unicode__(self):\n return self.to_string().value\n\n def __repr__(self):\n if self.Class == 'Object':\n res = []\n for e in self:\n res.append(str_repr(e.value) + ': ' + str_repr(self.get(e)))\n return '{%s}' % ', '.join(res)\n elif self.Class == 'String':\n return str_repr(self.value)\n elif self.Class in [\n 'Array', 'Int8Array', 'Uint8Array', 'Uint8ClampedArray',\n 'Int16Array', 'Uint16Array', 'Int32Array', 'Uint32Array',\n 'Float32Array', 'Float64Array'\n ]:\n res = []\n for e in self:\n res.append(repr(self.get(e)))\n return '[%s]' % ', '.join(res)\n else:\n val = str_repr(self.to_string().value)\n return val\n\n def _fuck_python3(\n self\n ): # hack to make object hashable in python 3 (__eq__ causes problems)\n return object.__hash__(self)\n\n def callprop(self, prop, *args):\n '''Call a property prop as a method (this will be self).\n\n NOTE: dont pass this and arguments here, these will be added\n automatically!'''\n if not isinstance(prop, basestring):\n prop = prop.to_string().value\n cand = self.get(prop)\n if not cand.is_callable():\n raise MakeError('TypeError',\n '%s is not a function (tried calling property %s of %s)' % (\n cand.typeof(), repr(prop), repr(self.Class)))\n return cand.call(self, args)\n\n def to_python(self):\n \"\"\"returns equivalent python object.\n for example if this object is javascript array then this method will return equivalent python array\"\"\"\n return to_python(self)\n\n def to_py(self):\n \"\"\"returns equivalent python object.\n for example if this object is javascript array then this method will return equivalent python array\"\"\"\n return self.to_python()\n\n\nif six.PY3:\n PyJs.__hash__ = PyJs._fuck_python3\n PyJs.__truediv__ = PyJs.__div__\n#Define some more classes representing operators:\n\n\ndef PyJsStrictEq(a, b):\n '''a===b'''\n tx, ty = Type(a), Type(b)\n if tx != ty:\n return false\n if tx == 'Undefined' or tx == 'Null':\n return true\n if a.is_primitive(): #string bool and number case\n return Js(a.value == b.value)\n if a.Class == b.Class == 'PyObjectWrapper':\n return Js(a.obj == b.obj)\n return Js(a is b) # object comparison\n\n\ndef PyJsStrictNeq(a, b):\n ''' a!==b'''\n return PyJsStrictEq(a, b).neg()\n\n\ndef PyJsBshift(a, b):\n \"\"\"a>>>b\"\"\"\n return a.pyjs_bshift(b)\n\n\ndef PyJsComma(a, b):\n return b\n\n\nfrom .internals.simplex import JsException as PyJsException, js_dtoa\nimport pyjsparser\npyjsparser.parser.ENABLE_JS2PY_ERRORS = lambda msg: MakeError('SyntaxError', msg)\n\n\nclass 
PyJsSwitchException(Exception):\n pass\n\n\nPyJs.MakeError = staticmethod(MakeError)\n\n\ndef JsToPyException(js):\n temp = PyJsException()\n temp.mes = js\n return temp\n\n\ndef PyExceptionToJs(py):\n return py.mes\n\n\n#Scope class it will hold all the variables accessible to user\nclass Scope(PyJs):\n Class = 'global'\n extensible = True\n IS_CHILD_SCOPE = True\n\n # todo speed up\n # in order to speed up this very important class the top scope should behave differently than\n # child scopes, child scope should not have this property descriptor thing because they cant be changed anyway\n # they are all confugurable= False\n\n def __init__(self, scope, closure=None):\n \"\"\"Doc\"\"\"\n self.prototype = closure\n if closure is None:\n # global, top level scope\n self.own = {}\n for k, v in six.iteritems(scope):\n # set all the global items\n self.define_own_property(\n k, {\n 'value': v,\n 'configurable': False,\n 'writable': False,\n 'enumerable': False\n })\n else:\n # not global, less powerful but faster closure.\n self.own = scope # simple dictionary which maps name directly to js object.\n\n def register(self, lval):\n # registered keeps only global registered variables\n if self.prototype is None:\n # define in global scope\n if lval in self.own:\n self.own[lval]['configurable'] = False\n else:\n self.define_own_property(\n lval, {\n 'value': undefined,\n 'configurable': False,\n 'writable': True,\n 'enumerable': True\n })\n elif lval not in self.own:\n # define in local scope since it has not been defined yet\n self.own[lval] = undefined # default value\n\n def registers(self, lvals):\n \"\"\"register multiple variables\"\"\"\n for lval in lvals:\n self.register(lval)\n\n def put(self, lval, val, op=None):\n if self.prototype is None:\n # global scope put, simple\n return PyJs.put(self, lval, val, op)\n else:\n # trying to put in local scope\n # we dont know yet in which scope we should place this var\n if lval in self.own:\n if op: # increment operation\n val = getattr(self.own[lval], OP_METHODS[op])(val)\n self.own[lval] = val\n return val\n else:\n #try to put in the lower scope since we cant put in this one (var wasn't registered)\n return self.prototype.put(lval, val, op)\n\n def force_own_put(self, prop, val, configurable=False):\n if self.prototype is None: # global scope\n self.own[prop] = {\n 'value': val,\n 'writable': True,\n 'enumerable': True,\n 'configurable': configurable\n }\n else:\n self.own[prop] = val\n\n def get(self, prop, throw=True):\n #note prop is always a Py String\n if not isinstance(prop, basestring):\n prop = prop.to_string().value\n if self.prototype is not None:\n # fast local scope\n cand = self.own.get(prop)\n if cand is None:\n return self.prototype.get(prop, throw)\n return cand\n # slow, global scope\n if prop not in self.own:\n if throw:\n raise MakeError('ReferenceError', '%s is not defined' % prop)\n return undefined\n return PyJs.get(self, prop)\n\n def delete(self, lval):\n if self.prototype is not None:\n if lval in self.own:\n return false\n return self.prototype.delete(lval)\n # we are in global scope here. 
Must exist and be configurable to delete\n if lval not in self.own:\n # this lval does not exist, why do you want to delete it???\n return true\n if self.own[lval]['configurable']:\n del self.own[lval]\n return true\n # not configurable, cant delete\n return false\n\n def pyimport(self, name, module):\n self.register(name)\n self.put(name, py_wrap(module))\n\n def __repr__(self):\n return u'[Object Global]'\n\n def to_python(self):\n return to_python(self)\n\n\nclass This(Scope):\n IS_CHILD_SCOPE = False\n\n def get(self, prop, throw=False):\n return Scope.get(self, prop, throw)\n\n\nclass JsObjectWrapper(object):\n def __init__(self, obj):\n self.__dict__['_obj'] = obj\n\n def __call__(self, *args):\n args = tuple(Js(e) for e in args)\n if '_prop_of' in self.__dict__:\n parent, meth = self.__dict__['_prop_of']\n return to_python(parent._obj.callprop(meth, *args))\n return to_python(self._obj(*args))\n\n def __getattr__(self, item):\n if item == 'new' and self._obj.is_callable():\n # return instance initializer\n def PyJsInstanceInit(*args):\n args = tuple(Js(e) for e in args)\n return self._obj.create(*args).to_python()\n\n return PyJsInstanceInit\n cand = to_python(self._obj.get(str(item)))\n # handling method calling... obj.meth(). Value of this in meth should be self\n if isinstance(cand, self.__class__):\n cand.__dict__['_prop_of'] = self, str(item)\n return cand\n\n def __setattr__(self, item, value):\n self._obj.put(str(item), Js(value))\n\n def __getitem__(self, item):\n cand = to_python(self._obj.get(str(item)))\n if isinstance(cand, self.__class__):\n cand.__dict__['_prop_of'] = self, str(item)\n return cand\n\n def __setitem__(self, item, value):\n self._obj.put(str(item), Js(value))\n\n def __iter__(self):\n if self._obj.Class in [\n 'Array', 'Int8Array', 'Uint8Array', 'Uint8ClampedArray',\n 'Int16Array', 'Uint16Array', 'Int32Array', 'Uint32Array',\n 'Float32Array', 'Float64Array'\n ]:\n return iter(self.to_list())\n elif self._obj.Class == 'Object':\n return iter(self.to_dict())\n else:\n raise MakeError('TypeError',\n '%s is not iterable in Python' % self._obj.Class)\n\n def __repr__(self):\n if self._obj.is_primitive() or self._obj.is_callable():\n return repr(self._obj)\n elif self._obj.Class in ('Array', 'Int8Array', 'Uint8Array',\n 'Uint8ClampedArray', 'Int16Array',\n 'Uint16Array', 'Int32Array', 'Uint32Array',\n 'Float32Array', 'Float64Array', 'Arguments'):\n return repr(self.to_list())\n return repr(self.to_dict())\n\n def __len__(self):\n return len(self._obj)\n\n def __nonzero__(self):\n return bool(self._obj)\n\n def __bool__(self):\n return bool(self._obj)\n\n def to_dict(self):\n return to_dict(self.__dict__['_obj'])\n\n def to_list(self):\n return to_list(self.__dict__['_obj'])\n\n\nclass PyObjectWrapper(PyJs):\n Class = 'PyObjectWrapper'\n\n def __init__(self, obj):\n self.obj = obj\n\n def get(self, prop):\n if not isinstance(prop, basestring):\n prop = prop.to_string().value\n try:\n if prop.isdigit():\n return py_wrap(self.obj[int(prop)])\n return py_wrap(getattr(self.obj, prop))\n except:\n return undefined\n\n def put(self, prop, val, op=None, throw=False):\n if not isinstance(prop, basestring):\n prop = prop.to_string().value\n try:\n if isinstance(op, bool):\n raise ValueError(\"Op must be a string\")\n elif op is not None:\n if op: # increment operation\n val = getattr(self.get(prop), OP_METHODS[op])(val)\n setattr(self.obj, prop, to_python(val))\n except AttributeError:\n raise MakeError('TypeError', 'Read only object probably...')\n return val\n\n def 
__call__(self, *args):\n py_args = tuple(to_python(e) for e in args)\n try:\n py_res = self.obj.__call__(*py_args)\n except Exception as e:\n message = 'your Python function failed! '\n try:\n message += str(e)\n except:\n pass\n raise MakeError('Error', message)\n return py_wrap(py_res)\n\n def callprop(self, prop, *args):\n py_args = tuple(to_python(e) for e in args)\n if not isinstance(prop, basestring):\n prop = prop.to_string().value\n return self.get(prop)(*py_args)\n\n def delete(self, prop):\n if not isinstance(prop, basestring):\n prop = prop.to_string().value\n try:\n if prop.isdigit():\n del self.obj[int(prop)]\n else:\n delattr(self.obj, prop)\n return true\n except:\n return false\n\n def __repr__(self):\n return 'PyObjectWrapper(%s)' % str(self.obj)\n\n def to_python(self):\n return self.obj\n\n def to_py(self):\n return self.obj\n\n\ndef py_wrap(py):\n if isinstance(py, (FunctionType, BuiltinFunctionType, MethodType,\n BuiltinMethodType, dict, int, str, bool, float, list,\n tuple, long, basestring)) or py is None:\n return HJs(py)\n return PyObjectWrapper(py)\n\n\n##############################################################################\n#Define types\n\n\n#Object\nclass PyJsObject(PyJs):\n Class = 'Object'\n\n def __init__(self, prop_descs={}, prototype=None, extensible=True):\n self.prototype = prototype\n self.extensible = extensible\n self.own = {}\n for prop, desc in six.iteritems(prop_descs):\n self.define_own_property(prop, desc)\n\n def __repr__(self):\n return repr(self.to_python().to_dict())\n\n\nObjectPrototype = PyJsObject()\n\n\n#Function\nclass PyJsFunction(PyJs):\n Class = 'Function'\n\n def __init__(self, func, prototype=None, extensible=True, source=None):\n cand = fix_js_args(func)\n has_scope = cand is func\n func = cand\n self.argcount = six.get_function_code(func).co_argcount - 2 - has_scope\n self.code = func\n self.source = source if source else '{ [python code] }'\n self.func_name = func.__name__ if not func.__name__.startswith(\n 'PyJs_anonymous') else ''\n self.extensible = extensible\n self.prototype = prototype\n self.own = {}\n #set own property length to the number of arguments\n self.define_own_property(\n 'length', {\n 'value': Js(self.argcount),\n 'writable': False,\n 'enumerable': False,\n 'configurable': False\n })\n\n if self.func_name:\n self.define_own_property(\n 'name', {\n 'value': Js(self.func_name),\n 'writable': False,\n 'enumerable': False,\n 'configurable': True\n })\n\n # set own prototype\n proto = Js({})\n # constructor points to this function\n proto.define_own_property(\n 'constructor', {\n 'value': self,\n 'writable': True,\n 'enumerable': False,\n 'configurable': True\n })\n self.define_own_property(\n 'prototype', {\n 'value': proto,\n 'writable': True,\n 'enumerable': False,\n 'configurable': False\n })\n\n def _set_name(self, name):\n '''name is py type'''\n if self.own.get('name'):\n self.func_name = name\n self.own['name']['value'] = Js(name)\n\n def construct(self, *args):\n proto = self.get('prototype')\n if not proto.is_object(): # set to standard prototype\n proto = ObjectPrototype\n obj = PyJsObject(prototype=proto)\n cand = self.call(obj, *args)\n return cand if cand.is_object() else obj\n\n def call(self, this, args=()):\n '''Calls this function and returns a result\n (converted to PyJs type so func can return python types)\n\n this must be a PyJs object and args must be a python tuple of PyJs objects.\n\n arguments object is passed automatically and will be equal to Js(args)\n (tuple converted to 
arguments object).You dont need to worry about number\n of arguments you provide if you supply less then missing ones will be set\n to undefined (but not present in arguments object).\n And if you supply too much then excess will not be passed\n (but they will be present in arguments object).\n '''\n if not hasattr(args, '__iter__'): #get rid of it later\n args = (args, )\n args = tuple(Js(e) for e in args) # this wont be needed later\n\n arguments = PyJsArguments(\n args, self) # tuple will be converted to arguments object.\n arglen = self.argcount #function expects this number of args.\n if len(args) > arglen:\n args = args[0:arglen]\n elif len(args) < arglen:\n args += (undefined, ) * (arglen - len(args))\n args += this, arguments #append extra params to the arg list\n try:\n return Js(self.code(*args))\n except NotImplementedError:\n raise\n except RuntimeError as e: # maximum recursion\n try:\n msg = e.message\n except:\n msg = repr(e)\n raise MakeError('RangeError', msg)\n\n def has_instance(self, other):\n # I am not sure here so instanceof may not work lol.\n if not other.is_object():\n return false\n proto = self.get('prototype')\n if not proto.is_object():\n raise TypeError(\n 'Function has non-object prototype in instanceof check')\n while True:\n other = other.prototype\n if not other: # todo make sure that the condition is not None or null\n return false\n if other is proto:\n return true\n\n def create(self, *args):\n proto = self.get('prototype')\n if not proto.is_object():\n proto = ObjectPrototype\n new = PyJsObject(prototype=proto)\n res = self.call(new, args)\n if res.is_object():\n return res\n return new\n\n\nclass PyJsBoundFunction(PyJsFunction):\n def __init__(self, target, bound_this, bound_args):\n self.target = target\n self.bound_this = bound_this\n self.bound_args = bound_args\n self.argcount = target.argcount\n self.code = target.code\n self.source = target.source\n self.func_name = target.func_name\n self.extensible = True\n self.prototype = FunctionPrototype\n self.own = {}\n # set own property length to the number of arguments\n self.define_own_property(\n 'length', {\n 'value': target.get('length') - Js(len(self.bound_args)),\n 'writable': False,\n 'enumerable': False,\n 'configurable': False\n })\n\n if self.func_name:\n self.define_own_property(\n 'name', {\n 'value': Js(self.func_name),\n 'writable': False,\n 'enumerable': False,\n 'configurable': True\n })\n\n # set own prototype\n proto = Js({})\n # constructor points to this function\n proto.define_own_property(\n 'constructor', {\n 'value': self,\n 'writable': True,\n 'enumerable': False,\n 'configurable': True\n })\n self.define_own_property(\n 'prototype', {\n 'value': proto,\n 'writable': True,\n 'enumerable': False,\n 'configurable': False\n })\n\n def call(self, this, args=()):\n return self.target.call(self.bound_this, self.bound_args + args)\n\n def has_instance(self, other):\n return self.target.has_instance(other)\n\n\nPyJs.PyJsBoundFunction = PyJsBoundFunction\n\nOP_METHODS = {\n '*': '__mul__',\n '/': '__div__',\n '%': '__mod__',\n '+': '__add__',\n '-': '__sub__',\n '<<': '__lshift__',\n '>>': '__rshift__',\n '&': '__and__',\n '^': '__xor__',\n '|': '__or__',\n '>>>': 'pyjs_bshift'\n}\n\n\ndef Empty():\n return Js(None)\n\n\n#Number\nclass PyJsNumber(PyJs): #Note i dont implement +0 and -0. 
Just 0.\n TYPE = 'Number'\n Class = 'Number'\n\n\nNumberPrototype = PyJsObject({}, ObjectPrototype)\nNumberPrototype.Class = 'Number'\nNumberPrototype.value = 0\n\nInfinity = PyJsNumber(float('inf'), NumberPrototype)\nNaN = PyJsNumber(float('nan'), NumberPrototype)\nPyJs.NaN = NaN\nPyJs.Infinity = Infinity\n\n# This dict aims to increase speed of string creation by storing character instances\nCHAR_BANK = {}\nNUM_BANK = {}\nPyJs.CHAR_BANK = CHAR_BANK\n\n\n#String\n# Different than implementation design in order to improve performance\n#for example I dont create separate property for each character in string, it would take ages.\nclass PyJsString(PyJs):\n TYPE = 'String'\n Class = 'String'\n extensible = False\n\n def __init__(self, value=None, prototype=None):\n '''Constructor for Number String and Boolean'''\n if not isinstance(value, basestring):\n raise TypeError # this will be internal error\n self.value = value\n self.prototype = prototype\n self.own = {}\n # this should be optimized because its mych slower than python str creation (about 50 times!)\n # Dont create separate properties for every index. Just\n self.own['length'] = {\n 'value': Js(len(value)),\n 'writable': False,\n 'enumerable': False,\n 'configurable': False\n }\n if len(value) == 1:\n CHAR_BANK[value] = self #, 'writable': False,\n # 'enumerable': True, 'configurable': False}\n\n def get(self, prop):\n if not isinstance(prop, basestring):\n prop = prop.to_string().value\n try:\n index = int(prop)\n if index < 0:\n return undefined\n char = self.value[index]\n if char not in CHAR_BANK:\n Js(char) # this will add char to CHAR BANK\n return CHAR_BANK[char]\n except Exception:\n pass\n return PyJs.get(self, prop)\n\n def can_put(self, prop):\n return False\n\n def __iter__(self):\n for i in xrange(len(self.value)):\n yield Js(i) # maybe create an int bank?\n\n\nStringPrototype = PyJsObject({}, ObjectPrototype)\nStringPrototype.Class = 'String'\nStringPrototype.value = ''\n\nCHAR_BANK[''] = Js('')\n\n\n#Boolean\nclass PyJsBoolean(PyJs):\n TYPE = 'Boolean'\n Class = 'Boolean'\n\n\nBooleanPrototype = PyJsObject({}, ObjectPrototype)\nBooleanPrototype.Class = 'Boolean'\nBooleanPrototype.value = False\n\ntrue = PyJsBoolean(True, BooleanPrototype)\nfalse = PyJsBoolean(False, BooleanPrototype)\n\n\n#Undefined\nclass PyJsUndefined(PyJs):\n TYPE = 'Undefined'\n Class = 'Undefined'\n\n def __init__(self):\n pass\n\n\nundefined = PyJsUndefined()\n\n\n#Null\nclass PyJsNull(PyJs):\n TYPE = 'Null'\n Class = 'Null'\n\n def __init__(self):\n pass\n\n\nnull = PyJsNull()\nPyJs.null = null\n\n\nclass PyJsArray(PyJs):\n Class = 'Array'\n\n def __init__(self, arr=[], prototype=None):\n self.extensible = True\n self.prototype = prototype\n self.own = {\n 'length': {\n 'value': Js(0),\n 'writable': True,\n 'enumerable': False,\n 'configurable': False\n }\n }\n for i, e in enumerate(arr):\n self.define_own_property(\n str(i), {\n 'value': Js(e),\n 'writable': True,\n 'enumerable': True,\n 'configurable': True\n })\n\n def define_own_property(self, prop, desc):\n old_len_desc = self.get_own_property('length')\n old_len = old_len_desc[\n 'value'].value # value is js type so convert to py.\n if prop == 'length':\n if 'value' not in desc:\n return PyJs.define_own_property(self, prop, desc)\n new_len = desc['value'].to_uint32()\n if new_len != desc['value'].to_number().value:\n raise MakeError('RangeError', 'Invalid range!')\n new_desc = dict((k, v) for k, v in six.iteritems(desc))\n new_desc['value'] = Js(new_len)\n if new_len >= old_len:\n return 
PyJs.define_own_property(self, prop, new_desc)\n if not old_len_desc['writable']:\n return False\n if 'writable' not in new_desc or new_desc['writable'] == True:\n new_writable = True\n else:\n new_writable = False\n new_desc['writable'] = True\n if not PyJs.define_own_property(self, prop, new_desc):\n return False\n if new_len < old_len:\n # not very efficient for sparse arrays, so using different method for sparse:\n if old_len > 30 * len(self.own):\n for ele in self.own.keys():\n if ele.isdigit() and int(ele) >= new_len:\n if not self.delete(\n ele\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n old_len = new_len\n else: # standard method\n while new_len < old_len:\n old_len -= 1\n if not self.delete(\n str(int(old_len))\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n if not new_writable:\n self.own['length']['writable'] = False\n return True\n elif prop.isdigit():\n index = int(int(prop) % 2**32)\n if index >= old_len and not old_len_desc['writable']:\n return False\n if not PyJs.define_own_property(self, prop, desc):\n return False\n if index >= old_len:\n old_len_desc['value'] = Js(index + 1)\n return True\n else:\n return PyJs.define_own_property(self, prop, desc)\n\n def to_list(self):\n return [\n self.get(str(e)) for e in xrange(self.get('length').to_uint32())\n ]\n\n def __repr__(self):\n return repr(self.to_python().to_list())\n\n\nclass PyJsArrayBuffer(PyJs):\n Class = 'ArrayBuffer'\n\n def __init__(self, arr=[], prototype=None):\n self.extensible = True\n self.prototype = prototype\n self.own = {\n 'length': {\n 'value': Js(0),\n 'writable': True,\n 'enumerable': False,\n 'configurable': False\n }\n }\n for i, e in enumerate(arr):\n self.define_own_property(\n str(i), {\n 'value': Js(e),\n 'writable': True,\n 'enumerable': True,\n 'configurable': True\n })\n\n def define_own_property(self, prop, desc):\n old_len_desc = self.get_own_property('length')\n old_len = old_len_desc[\n 'value'].value # value is js type so convert to py.\n if prop == 'length':\n if 'value' not in desc:\n return PyJs.define_own_property(self, prop, desc)\n new_len = desc['value'].to_uint32()\n if new_len != desc['value'].to_number().value:\n raise MakeError('RangeError', 'Invalid range!')\n new_desc = dict((k, v) for k, v in six.iteritems(desc))\n new_desc['value'] = Js(new_len)\n if new_len >= old_len:\n return PyJs.define_own_property(self, prop, new_desc)\n if not old_len_desc['writable']:\n return False\n if 'writable' not in new_desc or new_desc['writable'] == True:\n new_writable = True\n else:\n new_writable = False\n new_desc['writable'] = True\n if not PyJs.define_own_property(self, prop, new_desc):\n return False\n if new_len < old_len:\n # not very efficient for sparse arrays, so using different method for sparse:\n if old_len > 30 * len(self.own):\n for ele in self.own.keys():\n if ele.isdigit() and int(ele) >= new_len:\n if not self.delete(\n ele\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n old_len = new_len\n else: # standard method\n while new_len < old_len:\n old_len -= 1\n if 
not self.delete(\n str(int(old_len))\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n if not new_writable:\n self.own['length']['writable'] = False\n return True\n elif prop.isdigit():\n index = int(int(prop) % 2**32)\n if index >= old_len and not old_len_desc['writable']:\n return False\n if not PyJs.define_own_property(self, prop, desc):\n return False\n if index >= old_len:\n old_len_desc['value'] = Js(index + 1)\n return True\n else:\n return PyJs.define_own_property(self, prop, desc)\n\n def to_list(self):\n return [\n self.get(str(e)) for e in xrange(self.get('length').to_uint32())\n ]\n\n def __repr__(self):\n return repr(self.to_python().to_list())\n\n\nclass PyJsInt8Array(PyJs):\n Class = 'Int8Array'\n\n def __init__(self, arr=[], prototype=None):\n self.extensible = True\n self.prototype = prototype\n self.own = {\n 'length': {\n 'value': Js(0),\n 'writable': True,\n 'enumerable': False,\n 'configurable': False\n }\n }\n\n for i, e in enumerate(arr):\n self.define_own_property(\n str(i), {\n 'value': Js(e),\n 'writable': True,\n 'enumerable': True,\n 'configurable': True\n })\n\n def define_own_property(self, prop, desc):\n old_len_desc = self.get_own_property('length')\n old_len = old_len_desc[\n 'value'].value # value is js type so convert to py.\n if prop == 'length':\n if 'value' not in desc:\n return PyJs.define_own_property(self, prop, desc)\n new_len = desc['value'].to_uint32()\n if new_len != desc['value'].to_number().value:\n raise MakeError('RangeError', 'Invalid range!')\n new_desc = dict((k, v) for k, v in six.iteritems(desc))\n new_desc['value'] = Js(new_len)\n if new_len >= old_len:\n return PyJs.define_own_property(self, prop, new_desc)\n if not old_len_desc['writable']:\n return False\n if 'writable' not in new_desc or new_desc['writable'] == True:\n new_writable = True\n else:\n new_writable = False\n new_desc['writable'] = True\n if not PyJs.define_own_property(self, prop, new_desc):\n return False\n if new_len < old_len:\n # not very efficient for sparse arrays, so using different method for sparse:\n if old_len > 30 * len(self.own):\n for ele in self.own.keys():\n if ele.isdigit() and int(ele) >= new_len:\n if not self.delete(\n ele\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n old_len = new_len\n else: # standard method\n while new_len < old_len:\n old_len -= 1\n if not self.delete(\n str(int(old_len))\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n if not new_writable:\n self.own['length']['writable'] = False\n return True\n elif prop.isdigit():\n index = int(int(prop) % 2**32)\n if index >= old_len and not old_len_desc['writable']:\n return False\n if not PyJs.define_own_property(self, prop, desc):\n return False\n if index >= old_len:\n old_len_desc['value'] = Js(index + 1)\n return True\n else:\n return PyJs.define_own_property(self, prop, desc)\n\n def to_list(self):\n return [\n self.get(str(e)) for e in xrange(self.get('length').to_uint32())\n ]\n\n def __repr__(self):\n return repr(self.to_python().to_list())\n\n\nclass PyJsUint8Array(PyJs):\n 
Class = 'Uint8Array'\n\n def __init__(self, arr=[], prototype=None):\n self.extensible = True\n self.prototype = prototype\n self.own = {\n 'length': {\n 'value': Js(0),\n 'writable': True,\n 'enumerable': False,\n 'configurable': False\n }\n }\n\n for i, e in enumerate(arr):\n self.define_own_property(\n str(i), {\n 'value': Js(e),\n 'writable': True,\n 'enumerable': True,\n 'configurable': True\n })\n\n def define_own_property(self, prop, desc):\n old_len_desc = self.get_own_property('length')\n old_len = old_len_desc[\n 'value'].value # value is js type so convert to py.\n if prop == 'length':\n if 'value' not in desc:\n return PyJs.define_own_property(self, prop, desc)\n new_len = desc['value'].to_uint32()\n if new_len != desc['value'].to_number().value:\n raise MakeError('RangeError', 'Invalid range!')\n new_desc = dict((k, v) for k, v in six.iteritems(desc))\n new_desc['value'] = Js(new_len)\n if new_len >= old_len:\n return PyJs.define_own_property(self, prop, new_desc)\n if not old_len_desc['writable']:\n return False\n if 'writable' not in new_desc or new_desc['writable'] == True:\n new_writable = True\n else:\n new_writable = False\n new_desc['writable'] = True\n if not PyJs.define_own_property(self, prop, new_desc):\n return False\n if new_len < old_len:\n # not very efficient for sparse arrays, so using different method for sparse:\n if old_len > 30 * len(self.own):\n for ele in self.own.keys():\n if ele.isdigit() and int(ele) >= new_len:\n if not self.delete(\n ele\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n old_len = new_len\n else: # standard method\n while new_len < old_len:\n old_len -= 1\n if not self.delete(\n str(int(old_len))\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n if not new_writable:\n self.own['length']['writable'] = False\n return True\n elif prop.isdigit():\n index = int(int(prop) % 2**32)\n if index >= old_len and not old_len_desc['writable']:\n return False\n if not PyJs.define_own_property(self, prop, desc):\n return False\n if index >= old_len:\n old_len_desc['value'] = Js(index + 1)\n return True\n else:\n return PyJs.define_own_property(self, prop, desc)\n\n def to_list(self):\n return [\n self.get(str(e)) for e in xrange(self.get('length').to_uint32())\n ]\n\n def __repr__(self):\n return repr(self.to_python().to_list())\n\n\nclass PyJsUint8ClampedArray(PyJs):\n Class = 'Uint8ClampedArray'\n\n def __init__(self, arr=[], prototype=None):\n self.extensible = True\n self.prototype = prototype\n self.own = {\n 'length': {\n 'value': Js(0),\n 'writable': True,\n 'enumerable': False,\n 'configurable': False\n }\n }\n\n for i, e in enumerate(arr):\n self.define_own_property(\n str(i), {\n 'value': Js(e),\n 'writable': True,\n 'enumerable': True,\n 'configurable': True\n })\n\n def define_own_property(self, prop, desc):\n old_len_desc = self.get_own_property('length')\n old_len = old_len_desc[\n 'value'].value # value is js type so convert to py.\n if prop == 'length':\n if 'value' not in desc:\n return PyJs.define_own_property(self, prop, desc)\n new_len = desc['value'].to_uint32()\n if new_len != desc['value'].to_number().value:\n raise MakeError('RangeError', 'Invalid range!')\n new_desc = dict((k, v) for k, v in 
six.iteritems(desc))\n new_desc['value'] = Js(new_len)\n if new_len >= old_len:\n return PyJs.define_own_property(self, prop, new_desc)\n if not old_len_desc['writable']:\n return False\n if 'writable' not in new_desc or new_desc['writable'] == True:\n new_writable = True\n else:\n new_writable = False\n new_desc['writable'] = True\n if not PyJs.define_own_property(self, prop, new_desc):\n return False\n if new_len < old_len:\n # not very efficient for sparse arrays, so using different method for sparse:\n if old_len > 30 * len(self.own):\n for ele in self.own.keys():\n if ele.isdigit() and int(ele) >= new_len:\n if not self.delete(\n ele\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n old_len = new_len\n else: # standard method\n while new_len < old_len:\n old_len -= 1\n if not self.delete(\n str(int(old_len))\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n if not new_writable:\n self.own['length']['writable'] = False\n return True\n elif prop.isdigit():\n index = int(int(prop) % 2**32)\n if index >= old_len and not old_len_desc['writable']:\n return False\n if not PyJs.define_own_property(self, prop, desc):\n return False\n if index >= old_len:\n old_len_desc['value'] = Js(index + 1)\n return True\n else:\n return PyJs.define_own_property(self, prop, desc)\n\n def to_list(self):\n return [\n self.get(str(e)) for e in xrange(self.get('length').to_uint32())\n ]\n\n def __repr__(self):\n return repr(self.to_python().to_list())\n\n\nclass PyJsInt16Array(PyJs):\n Class = 'Int16Array'\n\n def __init__(self, arr=[], prototype=None):\n self.extensible = True\n self.prototype = prototype\n self.own = {\n 'length': {\n 'value': Js(0),\n 'writable': True,\n 'enumerable': False,\n 'configurable': False\n }\n }\n\n for i, e in enumerate(arr):\n self.define_own_property(\n str(i), {\n 'value': Js(e),\n 'writable': True,\n 'enumerable': True,\n 'configurable': True\n })\n\n def define_own_property(self, prop, desc):\n old_len_desc = self.get_own_property('length')\n old_len = old_len_desc[\n 'value'].value # value is js type so convert to py.\n if prop == 'length':\n if 'value' not in desc:\n return PyJs.define_own_property(self, prop, desc)\n new_len = desc['value'].to_uint32()\n if new_len != desc['value'].to_number().value:\n raise MakeError('RangeError', 'Invalid range!')\n new_desc = dict((k, v) for k, v in six.iteritems(desc))\n new_desc['value'] = Js(new_len)\n if new_len >= old_len:\n return PyJs.define_own_property(self, prop, new_desc)\n if not old_len_desc['writable']:\n return False\n if 'writable' not in new_desc or new_desc['writable'] == True:\n new_writable = True\n else:\n new_writable = False\n new_desc['writable'] = True\n if not PyJs.define_own_property(self, prop, new_desc):\n return False\n if new_len < old_len:\n # not very efficient for sparse arrays, so using different method for sparse:\n if old_len > 30 * len(self.own):\n for ele in self.own.keys():\n if ele.isdigit() and int(ele) >= new_len:\n if not self.delete(\n ele\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n 
old_len = new_len\n else: # standard method\n while new_len < old_len:\n old_len -= 1\n if not self.delete(\n str(int(old_len))\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n if not new_writable:\n self.own['length']['writable'] = False\n return True\n elif prop.isdigit():\n index = int(int(prop) % 2**32)\n if index >= old_len and not old_len_desc['writable']:\n return False\n if not PyJs.define_own_property(self, prop, desc):\n return False\n if index >= old_len:\n old_len_desc['value'] = Js(index + 1)\n return True\n else:\n return PyJs.define_own_property(self, prop, desc)\n\n def to_list(self):\n return [\n self.get(str(e)) for e in xrange(self.get('length').to_uint32())\n ]\n\n def __repr__(self):\n return repr(self.to_python().to_list())\n\n\nclass PyJsUint16Array(PyJs):\n Class = 'Uint16Array'\n\n def __init__(self, arr=[], prototype=None):\n self.extensible = True\n self.prototype = prototype\n self.own = {\n 'length': {\n 'value': Js(0),\n 'writable': True,\n 'enumerable': False,\n 'configurable': False\n }\n }\n\n for i, e in enumerate(arr):\n self.define_own_property(\n str(i), {\n 'value': Js(e),\n 'writable': True,\n 'enumerable': True,\n 'configurable': True\n })\n\n def define_own_property(self, prop, desc):\n old_len_desc = self.get_own_property('length')\n old_len = old_len_desc[\n 'value'].value # value is js type so convert to py.\n if prop == 'length':\n if 'value' not in desc:\n return PyJs.define_own_property(self, prop, desc)\n new_len = desc['value'].to_uint32()\n if new_len != desc['value'].to_number().value:\n raise MakeError('RangeError', 'Invalid range!')\n new_desc = dict((k, v) for k, v in six.iteritems(desc))\n new_desc['value'] = Js(new_len)\n if new_len >= old_len:\n return PyJs.define_own_property(self, prop, new_desc)\n if not old_len_desc['writable']:\n return False\n if 'writable' not in new_desc or new_desc['writable'] == True:\n new_writable = True\n else:\n new_writable = False\n new_desc['writable'] = True\n if not PyJs.define_own_property(self, prop, new_desc):\n return False\n if new_len < old_len:\n # not very efficient for sparse arrays, so using different method for sparse:\n if old_len > 30 * len(self.own):\n for ele in self.own.keys():\n if ele.isdigit() and int(ele) >= new_len:\n if not self.delete(\n ele\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n old_len = new_len\n else: # standard method\n while new_len < old_len:\n old_len -= 1\n if not self.delete(\n str(int(old_len))\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n if not new_writable:\n self.own['length']['writable'] = False\n return True\n elif prop.isdigit():\n index = int(int(prop) % 2**32)\n if index >= old_len and not old_len_desc['writable']:\n return False\n if not PyJs.define_own_property(self, prop, desc):\n return False\n if index >= old_len:\n old_len_desc['value'] = Js(index + 1)\n return True\n else:\n return PyJs.define_own_property(self, prop, desc)\n\n def to_list(self):\n return [\n self.get(str(e)) for e in xrange(self.get('length').to_uint32())\n ]\n\n 
def __repr__(self):\n return repr(self.to_python().to_list())\n\n\nclass PyJsInt32Array(PyJs):\n Class = 'Int32Array'\n\n def __init__(self, arr=[], prototype=None):\n self.extensible = True\n self.prototype = prototype\n self.own = {\n 'length': {\n 'value': Js(0),\n 'writable': True,\n 'enumerable': False,\n 'configurable': False\n }\n }\n\n for i, e in enumerate(arr):\n self.define_own_property(\n str(i), {\n 'value': Js(e),\n 'writable': True,\n 'enumerable': True,\n 'configurable': True\n })\n\n def define_own_property(self, prop, desc):\n old_len_desc = self.get_own_property('length')\n old_len = old_len_desc[\n 'value'].value # value is js type so convert to py.\n if prop == 'length':\n if 'value' not in desc:\n return PyJs.define_own_property(self, prop, desc)\n new_len = desc['value'].to_uint32()\n if new_len != desc['value'].to_number().value:\n raise MakeError('RangeError', 'Invalid range!')\n new_desc = dict((k, v) for k, v in six.iteritems(desc))\n new_desc['value'] = Js(new_len)\n if new_len >= old_len:\n return PyJs.define_own_property(self, prop, new_desc)\n if not old_len_desc['writable']:\n return False\n if 'writable' not in new_desc or new_desc['writable'] == True:\n new_writable = True\n else:\n new_writable = False\n new_desc['writable'] = True\n if not PyJs.define_own_property(self, prop, new_desc):\n return False\n if new_len < old_len:\n # not very efficient for sparse arrays, so using different method for sparse:\n if old_len > 30 * len(self.own):\n for ele in self.own.keys():\n if ele.isdigit() and int(ele) >= new_len:\n if not self.delete(\n ele\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n old_len = new_len\n else: # standard method\n while new_len < old_len:\n old_len -= 1\n if not self.delete(\n str(int(old_len))\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n if not new_writable:\n self.own['length']['writable'] = False\n return True\n elif prop.isdigit():\n index = int(int(prop) % 2**32)\n if index >= old_len and not old_len_desc['writable']:\n return False\n if not PyJs.define_own_property(self, prop, desc):\n return False\n if index >= old_len:\n old_len_desc['value'] = Js(index + 1)\n return True\n else:\n return PyJs.define_own_property(self, prop, desc)\n\n def to_list(self):\n return [\n self.get(str(e)) for e in xrange(self.get('length').to_uint32())\n ]\n\n def __repr__(self):\n return repr(self.to_python().to_list())\n\n\nclass PyJsUint32Array(PyJs):\n Class = 'Uint32Array'\n\n def __init__(self, arr=[], prototype=None):\n self.extensible = True\n self.prototype = prototype\n self.own = {\n 'length': {\n 'value': Js(0),\n 'writable': True,\n 'enumerable': False,\n 'configurable': False\n }\n }\n\n for i, e in enumerate(arr):\n self.define_own_property(\n str(i), {\n 'value': Js(e),\n 'writable': True,\n 'enumerable': True,\n 'configurable': True\n })\n\n def define_own_property(self, prop, desc):\n old_len_desc = self.get_own_property('length')\n old_len = old_len_desc[\n 'value'].value # value is js type so convert to py.\n if prop == 'length':\n if 'value' not in desc:\n return PyJs.define_own_property(self, prop, desc)\n new_len = desc['value'].to_uint32()\n if new_len != desc['value'].to_number().value:\n 
raise MakeError('RangeError', 'Invalid range!')\n new_desc = dict((k, v) for k, v in six.iteritems(desc))\n new_desc['value'] = Js(new_len)\n if new_len >= old_len:\n return PyJs.define_own_property(self, prop, new_desc)\n if not old_len_desc['writable']:\n return False\n if 'writable' not in new_desc or new_desc['writable'] == True:\n new_writable = True\n else:\n new_writable = False\n new_desc['writable'] = True\n if not PyJs.define_own_property(self, prop, new_desc):\n return False\n if new_len < old_len:\n # not very efficient for sparse arrays, so using different method for sparse:\n if old_len > 30 * len(self.own):\n for ele in self.own.keys():\n if ele.isdigit() and int(ele) >= new_len:\n if not self.delete(\n ele\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n old_len = new_len\n else: # standard method\n while new_len < old_len:\n old_len -= 1\n if not self.delete(\n str(int(old_len))\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n if not new_writable:\n self.own['length']['writable'] = False\n return True\n elif prop.isdigit():\n index = int(int(prop) % 2**32)\n if index >= old_len and not old_len_desc['writable']:\n return False\n if not PyJs.define_own_property(self, prop, desc):\n return False\n if index >= old_len:\n old_len_desc['value'] = Js(index + 1)\n return True\n else:\n return PyJs.define_own_property(self, prop, desc)\n\n def to_list(self):\n return [\n self.get(str(e)) for e in xrange(self.get('length').to_uint32())\n ]\n\n def __repr__(self):\n return repr(self.to_python().to_list())\n\n\nclass PyJsFloat32Array(PyJs):\n Class = 'Float32Array'\n\n def __init__(self, arr=[], prototype=None):\n self.extensible = True\n self.prototype = prototype\n self.own = {\n 'length': {\n 'value': Js(0),\n 'writable': True,\n 'enumerable': False,\n 'configurable': False\n }\n }\n\n for i, e in enumerate(arr):\n self.define_own_property(\n str(i), {\n 'value': Js(e),\n 'writable': True,\n 'enumerable': True,\n 'configurable': True\n })\n\n def define_own_property(self, prop, desc):\n old_len_desc = self.get_own_property('length')\n old_len = old_len_desc[\n 'value'].value # value is js type so convert to py.\n if prop == 'length':\n if 'value' not in desc:\n return PyJs.define_own_property(self, prop, desc)\n new_len = desc['value'].to_uint32()\n if new_len != desc['value'].to_number().value:\n raise MakeError('RangeError', 'Invalid range!')\n new_desc = dict((k, v) for k, v in six.iteritems(desc))\n new_desc['value'] = Js(new_len)\n if new_len >= old_len:\n return PyJs.define_own_property(self, prop, new_desc)\n if not old_len_desc['writable']:\n return False\n if 'writable' not in new_desc or new_desc['writable'] == True:\n new_writable = True\n else:\n new_writable = False\n new_desc['writable'] = True\n if not PyJs.define_own_property(self, prop, new_desc):\n return False\n if new_len < old_len:\n # not very efficient for sparse arrays, so using different method for sparse:\n if old_len > 30 * len(self.own):\n for ele in self.own.keys():\n if ele.isdigit() and int(ele) >= new_len:\n if not self.delete(\n ele\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n 
new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n old_len = new_len\n else: # standard method\n while new_len < old_len:\n old_len -= 1\n if not self.delete(\n str(int(old_len))\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n if not new_writable:\n self.own['length']['writable'] = False\n return True\n elif prop.isdigit():\n index = int(int(prop) % 2**32)\n if index >= old_len and not old_len_desc['writable']:\n return False\n if not PyJs.define_own_property(self, prop, desc):\n return False\n if index >= old_len:\n old_len_desc['value'] = Js(index + 1)\n return True\n else:\n return PyJs.define_own_property(self, prop, desc)\n\n def to_list(self):\n return [\n self.get(str(e)) for e in xrange(self.get('length').to_uint32())\n ]\n\n def __repr__(self):\n return repr(self.to_python().to_list())\n\n\nclass PyJsFloat64Array(PyJs):\n Class = 'Float64Array'\n\n def __init__(self, arr=[], prototype=None):\n self.extensible = True\n self.prototype = prototype\n self.own = {\n 'length': {\n 'value': Js(0),\n 'writable': True,\n 'enumerable': False,\n 'configurable': False\n }\n }\n\n for i, e in enumerate(arr):\n self.define_own_property(\n str(i), {\n 'value': Js(e),\n 'writable': True,\n 'enumerable': True,\n 'configurable': True\n })\n\n def define_own_property(self, prop, desc):\n old_len_desc = self.get_own_property('length')\n old_len = old_len_desc[\n 'value'].value # value is js type so convert to py.\n if prop == 'length':\n if 'value' not in desc:\n return PyJs.define_own_property(self, prop, desc)\n new_len = desc['value'].to_uint32()\n if new_len != desc['value'].to_number().value:\n raise MakeError('RangeError', 'Invalid range!')\n new_desc = dict((k, v) for k, v in six.iteritems(desc))\n new_desc['value'] = Js(new_len)\n if new_len >= old_len:\n return PyJs.define_own_property(self, prop, new_desc)\n if not old_len_desc['writable']:\n return False\n if 'writable' not in new_desc or new_desc['writable'] == True:\n new_writable = True\n else:\n new_writable = False\n new_desc['writable'] = True\n if not PyJs.define_own_property(self, prop, new_desc):\n return False\n if new_len < old_len:\n # not very efficient for sparse arrays, so using different method for sparse:\n if old_len > 30 * len(self.own):\n for ele in self.own.keys():\n if ele.isdigit() and int(ele) >= new_len:\n if not self.delete(\n ele\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n old_len = new_len\n else: # standard method\n while new_len < old_len:\n old_len -= 1\n if not self.delete(\n str(int(old_len))\n ): # if failed to delete set len to current len and reject.\n new_desc['value'] = Js(old_len + 1)\n if not new_writable:\n new_desc['writable'] = False\n PyJs.define_own_property(self, prop, new_desc)\n return False\n if not new_writable:\n self.own['length']['writable'] = False\n return True\n elif prop.isdigit():\n index = int(int(prop) % 2**32)\n if index >= old_len and not old_len_desc['writable']:\n return False\n if not PyJs.define_own_property(self, prop, desc):\n return False\n if index >= old_len:\n old_len_desc['value'] = Js(index + 1)\n return True\n else:\n return PyJs.define_own_property(self, prop, desc)\n\n def 
to_list(self):\n return [\n self.get(str(e)) for e in xrange(self.get('length').to_uint32())\n ]\n\n def __repr__(self):\n return repr(self.to_python().to_list())\n\n\nArrayPrototype = PyJsArray([], ObjectPrototype)\n\nArrayBufferPrototype = PyJsArrayBuffer([], ObjectPrototype)\n\nInt8ArrayPrototype = PyJsInt8Array([], ObjectPrototype)\n\nUint8ArrayPrototype = PyJsUint8Array([], ObjectPrototype)\n\nUint8ClampedArrayPrototype = PyJsUint8ClampedArray([], ObjectPrototype)\n\nInt16ArrayPrototype = PyJsInt16Array([], ObjectPrototype)\n\nUint16ArrayPrototype = PyJsUint16Array([], ObjectPrototype)\n\nInt32ArrayPrototype = PyJsInt32Array([], ObjectPrototype)\n\nUint32ArrayPrototype = PyJsUint32Array([], ObjectPrototype)\n\nFloat32ArrayPrototype = PyJsFloat32Array([], ObjectPrototype)\n\nFloat64ArrayPrototype = PyJsFloat64Array([], ObjectPrototype)\n\n\nclass PyJsArguments(PyJs):\n Class = 'Arguments'\n\n def __init__(self, args, callee):\n self.own = {}\n self.extensible = True\n self.prototype = ObjectPrototype\n self.define_own_property(\n 'length', {\n 'value': Js(len(args)),\n 'writable': True,\n 'enumerable': False,\n 'configurable': True\n })\n self.define_own_property(\n 'callee', {\n 'value': callee,\n 'writable': True,\n 'enumerable': False,\n 'configurable': True\n })\n for i, e in enumerate(args):\n self.put(str(i), Js(e))\n\n def to_list(self):\n return [\n self.get(str(e)) for e in xrange(self.get('length').to_uint32())\n ]\n\n\n#We can define function proto after number proto because func uses number in its init\nFunctionPrototype = PyJsFunction(Empty, ObjectPrototype)\nFunctionPrototype.own['name']['value'] = Js('')\n\n# I will not rewrite RegExp engine from scratch. I will use re because its much faster.\n# I have to only make sure that I am handling all the differences correctly.\nREGEXP_DB = {}\n\n\nclass PyJsRegExp(PyJs):\n Class = 'RegExp'\n extensible = True\n\n def __init__(self, regexp, prototype=None):\n\n self.prototype = prototype\n self.glob = False\n self.ignore_case = 0\n self.multiline = 0\n # self._cache = {'str':'NoStringEmpty23093',\n # 'iterator': None,\n # 'lastpos': -1,\n # 'matches': {}}\n flags = ''\n if not regexp[-1] == '/':\n #contains some flags (allowed are i, g, m\n spl = regexp.rfind('/')\n flags = set(regexp[spl + 1:])\n self.value = regexp[1:spl]\n if 'g' in flags:\n self.glob = True\n if 'i' in flags:\n self.ignore_case = re.IGNORECASE\n if 'm' in flags:\n self.multiline = re.MULTILINE\n else:\n self.value = regexp[1:-1]\n\n try:\n if self.value in REGEXP_DB:\n self.pat = REGEXP_DB[regexp]\n else:\n comp = 'None'\n # we have to check whether pattern is valid.\n # also this will speed up matching later\n # todo critical fix patter conversion etc. 
..!!!!!\n # ugly hacks porting js reg exp to py reg exp works in 99% of cases ;)\n possible_fixes = [(u'[]', u'[\\0]'), (u'[^]', u'[^\\0]'),\n (u'nofix1791', u'nofix1791')]\n reg = self.value\n for fix, rep in possible_fixes:\n comp = REGEXP_CONVERTER._interpret_regexp(reg, flags)\n #print 'reg -> comp', reg, '->', comp\n try:\n self.pat = re.compile(\n comp, self.ignore_case | self.multiline)\n #print reg, '->', comp\n break\n except:\n reg = reg.replace(fix, rep)\n # print 'Fix', fix, '->', rep, '=', reg\n else:\n raise\n REGEXP_DB[regexp] = self.pat\n except:\n #print 'Invalid pattern but fuck it', self.value, comp\n raise MakeError(\n 'SyntaxError',\n 'Invalid RegExp pattern: %s -> %s' % (repr(self.value),\n repr(comp)))\n # now set own properties:\n self.own = {\n 'source': {\n 'value': Js(self.value),\n 'enumerable': False,\n 'writable': False,\n 'configurable': False\n },\n 'global': {\n 'value': Js(self.glob),\n 'enumerable': False,\n 'writable': False,\n 'configurable': False\n },\n 'ignoreCase': {\n 'value': Js(bool(self.ignore_case)),\n 'enumerable': False,\n 'writable': False,\n 'configurable': False\n },\n 'multiline': {\n 'value': Js(bool(self.multiline)),\n 'enumerable': False,\n 'writable': False,\n 'configurable': False\n },\n 'lastIndex': {\n 'value': Js(0),\n 'enumerable': False,\n 'writable': True,\n 'configurable': False\n }\n }\n\n def match(self, string, pos):\n '''string is of course py string'''\n return self.pat.match(string, pos) # way easier :)\n # assert 0<=pos <= len(string)\n # if not pos:\n # return re.match(self.pat, string)\n # else:\n # if self._cache['str']==string:\n # if pos>self._cache['lastpos']:\n # for m in self._cache['iterator']:\n # start = m.start()\n # self._cache['lastpos'] = start\n # self._cache['matches'][start] = m\n # if start==pos:\n # return m\n # elif start>pos:\n # return None\n # self._cache['lastpos'] = len(string)\n # return None\n # else:\n # return self._cache['matches'].get(pos)\n # else:\n # self._cache['str'] = string\n # self._cache['matches'] = {}\n # self._cache['lastpos'] = -1\n # self._cache['iterator'] = re.finditer(self.pat, string)\n # return self.match(string, pos)\n\n\ndef JsRegExp(source):\n # Takes regexp literal!\n return PyJsRegExp(source, RegExpPrototype)\n\n\nRegExpPrototype = PyJsRegExp('/(?:)/', ObjectPrototype)\n\n####Exceptions:\ndefault_attrs = {'writable': True, 'enumerable': False, 'configurable': True}\n\n\ndef fill_in_props(obj, props, default_desc):\n for prop, value in props.items():\n default_desc['value'] = Js(value)\n obj.define_own_property(prop, default_desc)\n\n\nclass PyJsError(PyJs):\n Class = 'Error'\n extensible = True\n\n def __init__(self, message=None, prototype=None):\n self.prototype = prototype\n self.own = {}\n if message is not None:\n self.put('message', Js(message).to_string())\n self.own['message']['enumerable'] = False\n\n\nErrorPrototype = PyJsError(Js(''), ObjectPrototype)\n\n\n@Js\ndef Error(message):\n return PyJsError(None if message.is_undefined() else message,\n ErrorPrototype)\n\n\nError.create = Error\nerr = {'name': 'Error', 'constructor': Error}\nfill_in_props(ErrorPrototype, err, default_attrs)\nError.define_own_property(\n 'prototype', {\n 'value': ErrorPrototype,\n 'enumerable': False,\n 'writable': False,\n 'configurable': False\n })\n\n\ndef define_error_type(name):\n TypeErrorPrototype = PyJsError(None, ErrorPrototype)\n\n @Js\n def TypeError(message):\n return PyJsError(None if message.is_undefined() else message,\n TypeErrorPrototype)\n\n err = {'name': name, 
'constructor': TypeError}\n fill_in_props(TypeErrorPrototype, err, default_attrs)\n TypeError.define_own_property(\n 'prototype', {\n 'value': TypeErrorPrototype,\n 'enumerable': False,\n 'writable': False,\n 'configurable': False\n })\n ERRORS[name] = TypeError\n\n\nERRORS = {'Error': Error}\nERROR_NAMES = ['Eval', 'Type', 'Range', 'Reference', 'Syntax', 'URI']\n\nfor e in ERROR_NAMES:\n define_error_type(e + 'Error')\n\n##############################################################################\n# Import and fill prototypes here.\n\n\n#this works only for data properties\ndef fill_prototype(prototype, Class, attrs, constructor=False):\n for i in dir(Class):\n e = getattr(Class, i)\n if six.PY2:\n if hasattr(e, '__func__'):\n temp = PyJsFunction(e.__func__, FunctionPrototype)\n attrs = dict((k, v) for k, v in attrs.iteritems())\n attrs['value'] = temp\n prototype.define_own_property(i, attrs)\n else:\n if hasattr(e, '__call__') and not i.startswith('__'):\n temp = PyJsFunction(e, FunctionPrototype)\n attrs = dict((k, v) for k, v in attrs.items())\n attrs['value'] = temp\n prototype.define_own_property(i, attrs)\n if constructor:\n attrs['value'] = constructor\n prototype.define_own_property('constructor', attrs)\n\n\nPyJs.undefined = undefined\nPyJs.Js = staticmethod(Js)\n\nfrom .prototypes import jsfunction, jsobject, jsnumber, jsstring, jsboolean, jsarray, jsregexp, jserror, jsarraybuffer, jstypedarray\n\n#Object proto\nfill_prototype(ObjectPrototype, jsobject.ObjectPrototype, default_attrs)\n\n\n#Define __proto__ accessor (this cant be done by fill_prototype since)\n@Js\ndef __proto__():\n return this.prototype if this.prototype is not None else null\n\n\ngetter = __proto__\n\n\n@Js\ndef __proto__(val):\n if val.is_object():\n this.prototype = val\n\n\nsetter = __proto__\nObjectPrototype.define_own_property('__proto__', {\n 'set': setter,\n 'get': getter,\n 'enumerable': False,\n 'configurable': True\n})\n\n#Function proto\nfill_prototype(FunctionPrototype, jsfunction.FunctionPrototype, default_attrs)\n#Number proto\nfill_prototype(NumberPrototype, jsnumber.NumberPrototype, default_attrs)\n#String proto\nfill_prototype(StringPrototype, jsstring.StringPrototype, default_attrs)\n#Boolean proto\nfill_prototype(BooleanPrototype, jsboolean.BooleanPrototype, default_attrs)\n#Array proto\nfill_prototype(ArrayPrototype, jsarray.ArrayPrototype, default_attrs)\n# ArrayBuffer proto\nfill_prototype(ArrayBufferPrototype, jsarraybuffer.ArrayBufferPrototype,\n default_attrs)\n# Int8Array proto\nfill_prototype(Int8ArrayPrototype, jstypedarray.TypedArrayPrototype,\n default_attrs)\n# Uint8Array proto\nfill_prototype(Uint8ArrayPrototype, jstypedarray.TypedArrayPrototype,\n default_attrs)\n# Uint8ClampedArray proto\nfill_prototype(Uint8ClampedArrayPrototype, jstypedarray.TypedArrayPrototype,\n default_attrs)\n# Int16Array proto\nfill_prototype(Int16ArrayPrototype, jstypedarray.TypedArrayPrototype,\n default_attrs)\n# Uint16Array proto\nfill_prototype(Uint16ArrayPrototype, jstypedarray.TypedArrayPrototype,\n default_attrs)\n# Int32Array proto\nfill_prototype(Int32ArrayPrototype, jstypedarray.TypedArrayPrototype,\n default_attrs)\n# Uint32Array proto\nfill_prototype(Uint32ArrayPrototype, jstypedarray.TypedArrayPrototype,\n default_attrs)\n# Float32Array proto\nfill_prototype(Float32ArrayPrototype, jstypedarray.TypedArrayPrototype,\n default_attrs)\n# Float64Array proto\nfill_prototype(Float64ArrayPrototype, jstypedarray.TypedArrayPrototype,\n default_attrs)\n#Error proto\nfill_prototype(ErrorPrototype, 
jserror.ErrorPrototype, default_attrs)\n#RegExp proto\nfill_prototype(RegExpPrototype, jsregexp.RegExpPrototype, default_attrs)\n# add exec to regexpfunction (cant add it automatically because of its name :(\nRegExpPrototype.own['exec'] = RegExpPrototype.own['exec2']\ndel RegExpPrototype.own['exec2']\n\n#########################################################################\n# Constructors\n\n\n# String\n@Js\ndef String(st):\n if not len(arguments):\n return Js('')\n return arguments[0].to_string()\n\n\n@Js\ndef string_constructor():\n temp = PyJsObject(prototype=StringPrototype)\n temp.Class = 'String'\n #temp.TYPE = 'String'\n if not len(arguments):\n temp.value = ''\n else:\n temp.value = arguments[0].to_string().value\n for i, ch in enumerate(temp.value): # this will make things long...\n temp.own[str(i)] = {\n 'value': Js(ch),\n 'writable': False,\n 'enumerable': True,\n 'configurable': True\n }\n temp.own['length'] = {\n 'value': Js(len(temp.value)),\n 'writable': False,\n 'enumerable': False,\n 'configurable': False\n }\n return temp\n\n\nString.create = string_constructor\n\n# RegExp\nREG_EXP_FLAGS = ('g', 'i', 'm')\n\n\n@Js\ndef RegExp(pattern, flags):\n if pattern.Class == 'RegExp':\n if not flags.is_undefined():\n raise MakeError(\n 'TypeError',\n 'Cannot supply flags when constructing one RegExp from another'\n )\n # return unchanged\n return pattern\n #pattern is not a regexp\n if pattern.is_undefined():\n pattern = ''\n else:\n pattern = pattern.to_string().value\n # try:\n # pattern = REGEXP_CONVERTER._unescape_string(pattern.to_string().value)\n # except:\n # raise MakeError('SyntaxError', 'Invalid regexp')\n flags = flags.to_string().value if not flags.is_undefined() else ''\n for flag in flags:\n if flag not in REG_EXP_FLAGS:\n raise MakeError(\n 'SyntaxError',\n 'Invalid flags supplied to RegExp constructor \"%s\"' % flag)\n if len(set(flags)) != len(flags):\n raise MakeError(\n 'SyntaxError',\n 'Invalid flags supplied to RegExp constructor \"%s\"' % flags)\n pattern = '/%s/' % (pattern if pattern else '(?:)') + flags\n return JsRegExp(pattern)\n\n\nRegExp.create = RegExp\nPyJs.RegExp = RegExp\n\n# Number\n\n\n@Js\ndef Number():\n if len(arguments):\n return arguments[0].to_number()\n else:\n return Js(0)\n\n\n@Js\ndef number_constructor():\n temp = PyJsObject(prototype=NumberPrototype)\n temp.Class = 'Number'\n #temp.TYPE = 'Number'\n if len(arguments):\n temp.value = arguments[0].to_number().value\n else:\n temp.value = 0\n return temp\n\n\nNumber.create = number_constructor\n\n# Boolean\n\n\n@Js\ndef Boolean(value):\n return value.to_boolean()\n\n\n@Js\ndef boolean_constructor(value):\n temp = PyJsObject(prototype=BooleanPrototype)\n temp.Class = 'Boolean'\n #temp.TYPE = 'Boolean'\n temp.value = value.to_boolean().value\n return temp\n\n\nBoolean.create = boolean_constructor\n\n##############################################################################\n\n\ndef appengine(code):\n try:\n return translator.translate_js(code.decode('utf-8'))\n except:\n return traceback.format_exc()\n\n\nbuiltins = ('true', 'false', 'null', 'undefined', 'Infinity', 'NaN')\n\nscope = dict(zip(builtins, [eval(e) for e in builtins]))\n\nJS_BUILTINS = dict((k, v) for k, v in scope.items())\n\n# Fill in NUM_BANK\nfor e in xrange(-2**10, 2**14):\n NUM_BANK[e] = Js(e)\n\nif __name__ == '__main__':\n print(ObjectPrototype.get('toString').callprop('call'))\n print(FunctionPrototype.own)\n a = null - Js(49404)\n x = a.put('ser', Js('der'))\n print(Js(0) or Js('p') and 
Js(4.0000000000050000001))\n FunctionPrototype.put('Chuj', Js(409))\n for e in FunctionPrototype:\n print('Obk', e.get('__proto__').get('__proto__').get('__proto__'), e)\n import code\n s = Js(4)\n b = Js(6)\n\n s2 = Js(4)\n o = ObjectPrototype\n o.put('x', Js(100))\n var = Scope(scope)\n e = code.InteractiveConsole(globals())\n #e.raw_input = interactor\n e.interact()\n" ]
[ [ "numpy.uint8" ] ]
yuishihara/chainer-bear
[ "e22a009dd1f6a1dcef5bf18d0849b6fa3e7df0db" ]
[ "models/torch_uniform_init.py" ]
[ "from chainer.initializers.uniform import Uniform\nfrom chainer import initializer\n\nimport numpy as np\n\n\nclass HeUniformTorch(initializer.Initializer):\n \"\"\"\n Compute initial parameters with He initialization.\n \"\"\"\n\n def __init__(self, a=np.sqrt(5), dtype=None, **kwargs):\n super(HeUniformTorch, self).__init__(dtype)\n self._a = a\n\n def __call__(self, array):\n if self.dtype is not None:\n assert array.dtype == self.dtype\n fan_in, _ = initializer.get_fans(array.shape)\n gain = self._calculate_gain(self._a)\n std = gain / np.sqrt(fan_in)\n bound = np.sqrt(3.0) * std\n Uniform(scale=bound, dtype=self.dtype)(array)\n\n def _calculate_gain(self, a):\n return np.sqrt(2.0 / (1 + a**2))\n\n\nclass LinearBiasInitializerTorch(initializer.Initializer):\n \"\"\"\n Initializer same as pytorch's implementation\n \"\"\"\n\n def __init__(self, fan_in, dtype=None, **kwargs):\n super(LinearBiasInitializerTorch, self).__init__(dtype)\n self._fan_in = fan_in\n\n def __call__(self, array):\n bound = 1.0 / np.sqrt(self._fan_in)\n Uniform(scale=bound, dtype=self.dtype)(array)\n" ]
[ [ "numpy.sqrt" ] ]
CelineQiQi/lingvo
[ "4c6405a3c8b29764918dbfb599212dd7620ccf9c" ]
[ "lingvo/core/predictor_runner_base.py" ]
[ "# Lint as: python2, python3\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Interface for binaries built around predictor.\n\nTo use: subclass PredictorRunnerBase, implement the InputGenerator and RunBatch\nfunctions, and call Run().\n\nTo run on TPU, set:\n --device_type=tpu\n --xla_device=tpu\n --tf_master=url/to/tpu/server\n --inference_threads=num_tpu_cores\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport concurrent.futures\nimport os\nimport re\nimport threading\nimport time\n\nfrom absl import flags\nfrom lingvo.core import predictor\nfrom lingvo.core import py_utils\nimport tensorflow as tf\n\nflags.DEFINE_string(\n 'checkpoint', None, 'Either a checkpoint file to load,'\n ' or a directory containing multiple checkpoints, where'\n ' the latest checkpoint will be loaded.')\nflags.DEFINE_string(\n 'inference_graph', None, 'Path to an inference graph. '\n 'If not specified, will be inferred from the checkpoint path.')\nflags.DEFINE_string(\n 'inference_subgraph_name', '', 'The name of the inference subgraph to use. '\n 'Defaults to the default subgraph.')\nflags.DEFINE_enum('device_type', 'gpu', ['cpu', 'gpu', 'tpu'], 'Device type.')\nflags.DEFINE_string('tf_master', 'local', 'tf_master for predictor session.')\nflags.DEFINE_integer('inference_threads', '1', 'Number of inference threads.')\nflags.DEFINE_integer('batch_size', 64, 'Batch size.')\nflags.DEFINE_integer(\n 'prediction_step_interval', 3000, 'Number of steps between outputs. '\n 'Only meaningful if FLAGS.checkpoint is a directory.')\n\nflags.DEFINE_integer('max_inputs', 0, 'Only process the first n inputs.')\nflags.DEFINE_list(\n 'input_id_filter', [],\n 'If not empty, only process the input ids in the given list.')\nflags.DEFINE_string(\n 'output_dir', None, 'Output directory. '\n 'If FLAGS.checkpoint is a directory, a subdirectory will be created '\n 'for each checkpoint evaluated.')\nflags.DEFINE_integer(\n 'output_num_shards', 1, 'Each replica generates one shard of output '\n 'according to --output_shard_id.')\nflags.DEFINE_integer(\n 'output_shard_id', 0,\n 'The output shard id in range [0, output_num_shards - 1].')\n\nFLAGS = flags.FLAGS\n\n_RETRY_SLEEP_SECONDS = 10\n\n\nclass PredictorRunnerBase(object):\n \"\"\"Manages state for running predictor.\n\n Abstract class. 
Subclasses should override `InputGenerator` and `RunBatch`.\n Call `Subclass().Run()` in `main()` function to run.\n \"\"\"\n\n def __init__(self,\n checkpoint,\n output_dir,\n inference_graph=None,\n inference_subgraph_name='',\n device_type='cpu',\n output_num_shards=1,\n output_shard_id=0,\n max_inputs=0,\n input_id_filter=None,\n tf_master='local',\n inference_threads=1,\n batch_size=64,\n prediction_step_interval=3000):\n \"\"\"Constructor.\n\n Args:\n checkpoint: Either a checkpoint file to load, or a directory containing\n multiple checkpoints, where the latest checkpoint will be loaded.\n output_dir: Output directory. If `checkpoint` is a directory, a\n subdirectory will be created for each checkpoint evaluated.\n inference_graph: Path to an inference graph. If not specified, will be\n inferred from the checkpoint path.\n inference_subgraph_name: The name of the inference subgraph to use.\n Defaults to the default subgraph.\n device_type: Device type, either cpu, gpu, or tpu.\n output_num_shards: Each replica generates one shard of output according to\n `output_shard_id`.\n output_shard_id: The output shard id in range `[0, output_num_shards -\n 1]`.\n max_inputs: Only process the first n inputs. 0 means process all inputs.\n input_id_filter: If not empty, only process the input ids in the given\n list.\n tf_master: tf_master for predictor session.\n inference_threads: Number of inference threads.\n batch_size: Batch size.\n prediction_step_interval: Number of steps between outputs. Only meaningful\n if `checkpoint` is a directory.\n \"\"\"\n self._checkpoint = checkpoint\n self._output_dir = output_dir\n self._output_num_shards = output_num_shards\n self._output_shard_id = output_shard_id\n self._max_inputs = max_inputs\n input_id_filter = input_id_filter or []\n self._input_id_filter = [str(x) for x in input_id_filter]\n self._batch_size = batch_size\n self._prediction_step_interval = prediction_step_interval\n\n if device_type == 'tpu' and FLAGS.xla_device != 'tpu':\n raise ValueError('xla_device=tpu should be set with device_type=tpu!')\n\n while True:\n if tf.gfile.IsDirectory(self._checkpoint):\n if tf.train.latest_checkpoint(self._checkpoint):\n break\n elif tf.gfile.Exists(self._checkpoint + '.index'):\n break\n tf.logging.log_first_n(tf.logging.INFO,\n 'Waiting for checkpoint to be available.', 1)\n time.sleep(_RETRY_SLEEP_SECONDS)\n\n # Use saved inference graph.\n if inference_graph:\n self._inference_graph = inference_graph\n else:\n checkpoint_dir = self._checkpoint\n if not tf.gfile.IsDirectory(checkpoint_dir):\n checkpoint_dir = os.path.dirname(checkpoint_dir)\n logdir = os.path.dirname(checkpoint_dir)\n inference_graph_filename = 'inference.pbtxt'\n if device_type == 'tpu':\n inference_graph_filename = 'inference_tpu.pbtxt'\n self._inference_graph = os.path.join(logdir, 'inference_graphs',\n inference_graph_filename)\n self._predictor = predictor.Predictor(\n inference_graph=self._inference_graph,\n subgraph_name=inference_subgraph_name,\n device_type=device_type,\n tf_master=tf_master)\n self._threadpool = concurrent.futures.ThreadPoolExecutor(inference_threads)\n self._locks = [threading.Lock() for _ in range(inference_threads)]\n\n @classmethod\n def FromFlags(cls, **kwargs):\n \"\"\"Constructs an instance of this class from FLAGS.\"\"\"\n return cls(\n checkpoint=FLAGS.checkpoint,\n output_dir=FLAGS.output_dir,\n inference_graph=FLAGS.inference_graph,\n inference_subgraph_name=FLAGS.inference_subgraph_name,\n device_type=FLAGS.device_type,\n 
output_num_shards=FLAGS.output_num_shards,\n output_shard_id=FLAGS.output_shard_id,\n max_inputs=FLAGS.max_inputs,\n input_id_filter=FLAGS.input_id_filter,\n tf_master=FLAGS.tf_master,\n inference_threads=FLAGS.inference_threads,\n batch_size=FLAGS.batch_size,\n prediction_step_interval=FLAGS.prediction_step_interval,\n **kwargs)\n\n def _ShouldProcessInputId(self, input_id):\n if self._max_inputs > 0 and input_id >= self._max_inputs:\n return False\n if self._input_id_filter and str(input_id) not in self._input_id_filter:\n return False\n return input_id % self._output_num_shards == self._output_shard_id\n\n def _OutputFilename(self, output_dir, name):\n assert self._output_shard_id >= 0\n assert self._output_shard_id < self._output_num_shards\n return '%s-%.5d-of-%.5d' % (os.path.join(\n output_dir, name), self._output_shard_id, self._output_num_shards)\n\n def InputGenerator(self):\n \"\"\"Generator that yields the next input.\n\n Must yield in a deterministic order or raise an exception when\n self._output_num_shards > 1.\n \"\"\"\n raise NotImplementedError('Abstract method.')\n\n def RunBatch(self, output_dir, batch):\n \"\"\"Runs predictor on a single batch of data.\n\n Args:\n output_dir: the output directory.\n batch: a list of (input_id, element) pairs, where element is yielded from\n InputGenerator and input_id is a unique counter starting from 0.\n \"\"\"\n raise NotImplementedError('Abstract method.')\n\n def _PredictOneCheckpoint(self, checkpoint, output_dir):\n \"\"\"Runs predictor.\"\"\"\n tf.logging.info('Processing checkpoint %s.', checkpoint)\n self._predictor.Load(checkpoint)\n\n def LockedRunBatch(batch, batch_id):\n \"\"\"TPU inference runs the i-th batch on the i%num_cores-th core.\n\n Make sure that core is available before scheduling the next batch on it.\n\n Args:\n batch: The input to be passed to RunBatch.\n batch_id: The id of this batch, which determins which core it runs on.\n \"\"\"\n with self._locks[batch_id % len(self._locks)]:\n self.RunBatch(output_dir, batch)\n\n batch_id = 0\n batch = []\n futures = []\n # Iterate through the input and process it one batch at a time.\n for next_id, element in enumerate(self.InputGenerator()):\n if self._ShouldProcessInputId(next_id):\n batch.append((next_id, element))\n if len(batch) == self._batch_size:\n futures.append(\n self._threadpool.submit(LockedRunBatch, batch, batch_id))\n batch_id += 1\n batch = []\n # Last batch.\n if batch:\n futures.append(self._threadpool.submit(LockedRunBatch, batch, batch_id))\n # Wait for completion.\n for f in futures:\n f.result()\n\n def _PredictContinuously(self):\n \"\"\"Waits for new checkpoints and runs predictor continuously.\"\"\"\n prev_step = -1000000\n while True:\n # TODO(jonathanasdf): how to determine when training finished?\n path = tf.train.latest_checkpoint(self._checkpoint)\n step_str = re.search(r'ckpt-(\\d{8})', path).group(1)\n step = int(step_str)\n if step - prev_step >= self._prediction_step_interval:\n output_dir = os.path.join(self._output_dir, 'step_' + step_str)\n tf.gfile.MakeDirs(output_dir)\n self._PredictOneCheckpoint(path, output_dir)\n prev_step = step\n tf.logging.info('Waiting for next checkpoint...')\n time.sleep(_RETRY_SLEEP_SECONDS)\n\n @py_utils.RetryOnTransientTfError()\n def Run(self):\n \"\"\"Monitor checkpoints and runs predictor.\"\"\"\n tf.gfile.MakeDirs(self._output_dir)\n if tf.gfile.IsDirectory(self._checkpoint):\n self._PredictContinuously()\n else:\n self._PredictOneCheckpoint(self._checkpoint, self._output_dir)\n" ]
[ [ "tensorflow.gfile.IsDirectory", "tensorflow.train.latest_checkpoint", "tensorflow.logging.log_first_n", "tensorflow.gfile.Exists", "tensorflow.logging.info", "tensorflow.gfile.MakeDirs" ] ]
manuel-rdz/SGL-Retinal-Vessel-Segmentation
[ "7897d977e77aa0b5d3acb86e0aa74c6829d67415" ]
[ "sgl_labeling/data/common_chase.py" ]
[ "import random\nimport cv2\nimport numpy as np\nimport skimage.color as sc\nfrom PIL import Image, ImageDraw\nimport torch\nimport math\nimport pandas as pd\nfrom scipy.ndimage.interpolation import map_coordinates\nfrom scipy.ndimage.filters import gaussian_filter\n\ndef augment_brightness_camera_images(image):\n image1 = cv2.cvtColor(image,cv2.COLOR_RGB2HSV)\n random_bright = .25+np.random.uniform()\n #print(random_bright)\n image1[:,:,2] = image1[:,:,2]*random_bright\n image1 = cv2.cvtColor(image1,cv2.COLOR_HSV2RGB)\n return image1\n\ndef transform_image(img, vessel, ang_range,shear_range,trans_range,brightness=0):\n '''\n This function transforms images to generate new images.\n The function takes in following arguments,\n 1- Image\n 2- ang_range: Range of angles for rotation\n 3- shear_range: Range of values to apply affine transform to\n 4- trans_range: Range of values to apply translations over.\n\n A Random uniform distribution is used to generate different parameters for transformation\n\n '''\n # Rotation\n\n ang_rot = np.random.uniform(ang_range)-ang_range/2\n rows,cols,ch = img.shape \n Rot_M = cv2.getRotationMatrix2D((cols/2,rows/2),ang_rot,1)\n\n # Translation\n tr_x = trans_range*np.random.uniform()-trans_range/2\n tr_y = trans_range*np.random.uniform()-trans_range/2\n Trans_M = np.float32([[1,0,tr_x],[0,1,tr_y]])\n \n # Shear\n pts1 = np.float32([[5,5],[20,5],[5,20]])\n\n pt1 = 5+shear_range*np.random.uniform()-shear_range/2\n pt2 = 20+shear_range*np.random.uniform()-shear_range/2\n\n pts2 = np.float32([[pt1,5],[pt2,pt1],[5,pt2]])\n\n shear_M = cv2.getAffineTransform(pts1,pts2)\n \n #Geometric Transformation\n img = cv2.warpAffine(img,Rot_M,(cols,rows))\n img = cv2.warpAffine(img,Trans_M,(cols,rows))\n img = cv2.warpAffine(img,shear_M,(cols,rows))\n \n vessel = cv2.warpAffine(vessel,Rot_M,(cols,rows))\n vessel = cv2.warpAffine(vessel,Trans_M,(cols,rows))\n vessel = cv2.warpAffine(vessel,shear_M,(cols,rows))\n\n if brightness == 1:\n img = augment_brightness_camera_images(img)\n\n return img\n\ndef elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):\n \"\"\"Elastic deformation of images as described in [Simard2003]_ (with modifications).\n .. [Simard2003] Simard, Steinkraus and Platt, \"Best Practices for\n Convolutional Neural Networks applied to Visual Document Analysis\", in\n Proc. 
of the International Conference on Document Analysis and\n Recognition, 2003.\n\n Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5\n \"\"\"\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n shape = image.shape\n shape_size = shape[:2]\n \n # Random affine\n center_square = np.float32(shape_size) // 2\n square_size = min(shape_size) // 3\n pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size])\n pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)\n M = cv2.getAffineTransform(pts1, pts2)\n image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)\n\n dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha\n dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha\n dz = np.zeros_like(dx)\n\n x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))\n indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))\n\n return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)\n\n\ndef pixel_shuffle_ds(im):\n img_shape = im.shape\n im = np.array(im)\n H = img_shape[0]\n W = img_shape[1]\n C = img_shape[2] #total channels\n out = np.zeros((int(H/2), int(W/2), 4*C))\n for i in range(C):\n out_tmp = np.concatenate((np.expand_dims(im[0:H:2, 0:W:2,i], axis=2),\n np.expand_dims(im[0:H:2, 1:W:2,i], axis=2),\n np.expand_dims(im[1:H:2, 1:W:2,i], axis=2),\n np.expand_dims(im[1:H:2, 0:W:2,i], axis=2)), axis=2)\n out[:,:,i*4:i*4+4] = out_tmp\n return out\n\ndef add_AWGN(hr, level):\n w, h = hr.shape[:2]\n gauss = np.zeros((w, h, 3))\n for chn in range(3):\n gauss[:,:,chn] = np.random.normal(0, level, (w, h))\n noisy = hr + gauss\n noisy = np.clip(noisy, 0, 255)\n return noisy\n\n# For inpainting\ndef process(image, opt=2):\n luma = cv2.cvtColor(image[:,:,::-1], cv2.COLOR_BGR2YUV)[:,:,0]\n if opt==1:\n equ = cv2.equalizeHist(luma) \n else:\n clahe = cv2.createCLAHE(clipLimit = 3) \n equ= clahe.apply(luma)\n return equ \n\ndef stage_process(image, mask):\n m = mask.copy()\n m = m[:,:]\n vessel_seg = image * m\n non_vessel = image * (1 - m)\n vessel_seg = vessel_seg * 0.1\n non_vessel = cv2.GaussianBlur(non_vessel,(5,5), 0) \n non_vessel = np.expand_dims(non_vessel, 2)\n return vessel_seg * m + non_vessel * (1 - m)\n\ndef compute_dismap(mask, ind=0.99, dual=False):\n dist = cv2.distanceTransform((mask*255).astype(np.uint8), cv2.DIST_L2, 5)\n expo = np.log(ind) * dist\n dist = np.exp(expo)\n if dual:\n dist2 = cv2.distanceTransform(((1-mask)*255).astype(np.uint8), cv2.DIST_L2, 5)\n expo2 = np.log(ind) * dist2\n dist2 = np.exp(expo2)\n dist = dist * dist2 #dual mode\n\n return dist\n\n\ndef ttsep(ve_map):\n filterSize =(3, 3)\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT,\n filterSize)\n timage = cv2.erode(ve_map,kernel,iterations = 1) #erase thin vessel\n dia_timage = cv2.dilate(timage,kernel,iterations = 1) #dilate the vessel again\n dia_timage = np.expand_dims(dia_timage, 2)\n thin = ve_map * (ve_map - dia_timage) #thin vessel only\n thick = ve_map - thin #the left thick map\n return thin, thick\n\ndef get_patch(hr, ve_r, patch_size=256, deform = True, train=True, random_toggle=False):\n ve = np.zeros((hr.shape[:2]))\n ve[ve_r > 200] = 1\n ih, iw = hr.shape[:2]\n if train:\n ih, iw = hr.shape[:2]\n ip = patch_size\n ix = random.randrange(0, iw - ip + 1)\n iy = random.randrange(0, ih - ip + 1)\n #crop 
patch\n hr = hr[iy:iy + ip, ix:ix + ip, :]\n #hr = process(hr)\n ve = ve[iy:iy + ip, ix:ix + ip]\n if deform and np.random.rand() >0.5:#< 0.5: #whether to deform the inputs?\n im_merge = np.concatenate((hr, ve[...,None]), axis=2)\n im_merge_t = elastic_transform(im_merge, im_merge.shape[1] * 3, im_merge.shape[1] * 0.07, im_merge.shape[1] * 0.09)\n # The elaster augmentation after cropping may have some problems\n hr_t = im_merge_t[...,0:3]\n ve_t = im_merge_t[...,3]\n ve_t[ve_t>0.5] = 1 #thresholding\n #hr_t = np.expand_dims(hr_t, 2)\n ve_t = ve_t[..., None]\n else:\n hr_t = hr#np.expand_dims(hr, 2)\n ve_t = np.expand_dims(ve, 2)\n #med = stage_process(hr_t, ve_t)\n #ret = [hr_t, ve_t, med, ma]\n dp = compute_dismap(ve_t, 0.99, True)\n if random_toggle == True:\n toggle_reg = dp.copy()\n toggle_reg[dp>=0.99] = 1\n toggle_reg[dp<0.99] = 0 #select the toggle regions\n a,b = dp.shape\n rand_toggle_map = np.random.rand(a,b)\n rand_toggle_map[rand_toggle_map>=0.3] = 1\n rand_toggle_map[rand_toggle_map<0.3] = 0\n rand_toggle_map *= toggle_reg\n rand_toggle_map = np.expand_dims(rand_toggle_map, 2)\n ve_t[np.logical_and(rand_toggle_map==1, ve_t==1)] = 0 #toggle\n ve_t[np.logical_and(rand_toggle_map==1, ve_t==0)] = 1 #toggle\n ve_thin, ve_thick = ttsep(ve_t)\n ret = [hr_t, ve_t, dp, ve_thin, ve_thick]\n else:\n #if True:\n hr_img = np.zeros((1008, 1008, 3))\n hr_img[0:ih, 0:iw, :] = hr#process(hr)\n ve_img = np.zeros((1008, 1008))\n ve_img[0:ih, 0:iw] = ve\n #hr_img = np.expand_dims(hr_img, 2)\n ve_img = np.expand_dims(ve_img, 2)\n #med = stage_process(hr_img, ve_img)\n dp_img = compute_dismap(ve_img, 0.99, True)\n ve_thin, ve_thick = ttsep(ve_img)\n ret = [hr_img, ve_img, dp_img, ve_thin, ve_thick]\n #ret = [hr_img, ve_img, med, ma_img]\n return ret\n\ndef set_channel(*args, n_channels=3):\n def _set_channel(img):\n if img.ndim == 2:\n img = np.expand_dims(img, axis=2)\n\n c = img.shape[2]\n #if n_channels == 1 and c == 3:\n # img = np.expand_dims(sc.rgb2ycbcr(img)[:, :, 0], 2)\n #elif n_channels == 3 and c == 1:\n # img = np.concatenate([img] * n_channels, 2)\n\n return img\n\n return [_set_channel(a) for a in args]\n\ndef np2Tensor(*args, rgb_range=255, single_test=False):\n def _np2Tensor(img):\n np_transpose = np.ascontiguousarray(img.transpose((2, 0, 1)))\n if single_test:\n np_transpose = np.expand_dims(np_transpose, 0)\n tensor = torch.from_numpy(np_transpose).float()\n tensor.mul_(rgb_range / 255)\n\n return tensor\n\n return [_np2Tensor(a) for a in args]\n\ndef augment(hr,ve,hflip=True, rot=True):\n #current version does not support augmentation\n hflip = hflip and random.random() < 0.5\n vflip = rot and random.random() < 0.5\n rot90 = rot and random.random() < 0.5\n def _augment(hr, ve):\n if hflip: \n hr = hr[:, ::-1, :]\n ve = ve[:, ::-1]\n if vflip: \n hr= hr[::-1, :, :]\n ve= ve[::-1, :]\n if rot90: \n hr = hr.transpose(1, 0, 2)\n ve = ve.transpose(1, 0)\n \n return hr, ve\n\n return _augment(hr, ve)\n\ndef raw_augment(*args):\n '''\n This codes partially borrow from Megvii Paper\n augment the images in all the images inside the batch\n '''\n hflip = random.random() < 0.5\n vflip = random.random() < 0.5\n transpose = random.random() < 0.5\n def flip(bayer):\n if vflip and hflip:\n bayer = bayer[::-1, ::-1]\n bayer = bayer[1:-1, 1:-1]\n elif vflip:\n bayer = bayer[::-1]\n bayer = bayer[1:-1] \n elif hflip:\n bayer = bayer[:, ::-1]\n bayer = bayer[:, 1:-1] \n if transpose:\n bayer = np.transpose(bayer, (1, 0, 2))\n return bayer\n return [flip(a) for a in args] \n\n#generate mask\ndef 
add_mask(image):\n w, h = image.shape[:2]\n mask = brush_stroke_mask(w, h) #(w, h, 1)\n mask_t = np.tile(mask, (1,1,3))\n result = image * (1- mask)\n return result, mask, image \n \n\n\ndef brush_stroke_mask(W, H):\n \"\"\"Generate random brush and stroke mask.\n Return a mask of (1, 1, W, H)\n partially fork from Jiahui Yu's Codes\n \"\"\"\n min_num_vertex = 4\n max_num_vertex = 12\n mean_angle = 2*math.pi / 5\n angle_range = 2*math.pi / 15\n min_width = 12\n max_width = 40\n def generate_mask(W, H):\n average_radius = math.sqrt(H*H+W*W) / 8\n mask = Image.new('L', (W, H), 0)\n\n for _ in range(np.random.randint(1, 4)):\n num_vertex = np.random.randint(min_num_vertex, max_num_vertex)\n angle_min = mean_angle - np.random.uniform(0, angle_range)\n angle_max = mean_angle + np.random.uniform(0, angle_range)\n angles = []\n vertex = []\n for i in range(num_vertex):\n if i % 2 == 0:\n angles.append(2*math.pi - np.random.uniform(angle_min, angle_max))\n else:\n angles.append(np.random.uniform(angle_min, angle_max))\n\n h, w = mask.size\n vertex.append((int(np.random.randint(0, w)), int(np.random.randint(0, h))))\n for i in range(num_vertex):\n r = np.clip(\n np.random.normal(loc=average_radius, scale=average_radius//2),\n 0, 2*average_radius)\n new_x = np.clip(vertex[-1][0] + r * math.cos(angles[i]), 0, w)\n new_y = np.clip(vertex[-1][1] + r * math.sin(angles[i]), 0, h)\n vertex.append((int(new_x), int(new_y)))\n\n draw = ImageDraw.Draw(mask)\n width = int(np.random.uniform(min_width, max_width))\n draw.line(vertex, fill=1, width=width)\n for v in vertex:\n draw.ellipse((v[0] - width//2,\n v[1] - width//2,\n v[0] + width//2,\n v[1] + width//2),\n fill=1)\n\n if np.random.normal() > 0:\n mask.transpose(Image.FLIP_LEFT_RIGHT)\n if np.random.normal() > 0:\n mask.transpose(Image.FLIP_TOP_BOTTOM)\n mask = np.asarray(mask, np.float32)\n mask = np.reshape(mask, (W, H, 1))\n return mask\n\n return generate_mask(W, H)\n\n\n\n\n\n" ]
[ [ "numpy.random.rand", "numpy.tile", "numpy.exp", "numpy.concatenate", "numpy.random.normal", "numpy.zeros_like", "numpy.log", "numpy.logical_and", "numpy.arange", "numpy.transpose", "numpy.random.randint", "numpy.expand_dims", "numpy.array", "numpy.reshape", "numpy.zeros", "scipy.ndimage.interpolation.map_coordinates", "numpy.float32", "numpy.clip", "numpy.asarray", "numpy.random.RandomState", "torch.from_numpy", "numpy.random.uniform" ] ]
atn832/model-analysis
[ "a96a9665e57a12cb4d3ad1f366dc347347e3a96e" ]
[ "tensorflow_model_analysis/evaluators/aggregate_test.py" ]
[ "# Lint as: python3\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test for using the Aggregate API.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport apache_beam as beam\nfrom apache_beam.testing import util\nimport tensorflow as tf\nfrom tensorflow_model_analysis import constants\nfrom tensorflow_model_analysis.eval_saved_model import testutil\nfrom tensorflow_model_analysis.eval_saved_model.example_trainers import linear_classifier\nfrom tensorflow_model_analysis.evaluators import aggregate\nfrom tensorflow_model_analysis.evaluators import poisson_bootstrap\n\n\ndef create_test_input(predict_list, slice_list):\n results = []\n for entry in predict_list:\n for slice_key in slice_list:\n results.append((slice_key, {constants.INPUT_KEY: entry}))\n return results\n\n\nclass AggregateTest(testutil.TensorflowModelAnalysisTest):\n\n def _getEvalExportDir(self):\n return os.path.join(self._getTempDir(), 'eval_export_dir')\n\n def testAggregateOverallSlice(self):\n\n temp_eval_export_dir = self._getEvalExportDir()\n _, eval_export_dir = linear_classifier.simple_linear_classifier(\n None, temp_eval_export_dir)\n\n eval_shared_model = self.createTestEvalSharedModel(\n eval_saved_model_path=eval_export_dir)\n\n with beam.Pipeline() as pipeline:\n example1 = self._makeExample(age=3.0, language='english', label=1.0)\n example2 = self._makeExample(age=3.0, language='chinese', label=0.0)\n example3 = self._makeExample(age=4.0, language='english', label=1.0)\n example4 = self._makeExample(age=5.0, language='chinese', label=0.0)\n\n predict_result = ([\n example1.SerializeToString(),\n example2.SerializeToString(),\n example3.SerializeToString(),\n example4.SerializeToString()\n ])\n\n metrics = (\n pipeline\n | 'CreateTestInput' >> beam.Create(\n create_test_input(predict_result, [()]))\n | 'ComputePerSliceMetrics' >> aggregate.ComputePerSliceMetrics(\n eval_shared_model=eval_shared_model, desired_batch_size=3))\n\n def check_result(got):\n self.assertEqual(1, len(got), 'got: %s' % got)\n slice_key, metrics = got[0]\n self.assertEqual(slice_key, ())\n self.assertDictElementsAlmostEqual(\n metrics, {\n 'accuracy': 1.0,\n 'label/mean': 0.5,\n 'my_mean_age': 3.75,\n 'my_mean_age_times_label': 1.75,\n })\n\n util.assert_that(metrics, check_result)\n\n def testAggregateMultipleSlices(self):\n temp_eval_export_dir = self._getEvalExportDir()\n _, eval_export_dir = linear_classifier.simple_linear_classifier(\n None, temp_eval_export_dir)\n\n eval_shared_model = self.createTestEvalSharedModel(\n eval_saved_model_path=eval_export_dir)\n\n with beam.Pipeline() as pipeline:\n example1 = self._makeExample(age=3.0, language='english', label=1.0)\n example2 = self._makeExample(age=3.0, language='chinese', label=0.0)\n example3 = self._makeExample(age=4.0, language='english', label=1.0)\n example4 = self._makeExample(age=5.0, language='chinese', label=0.0)\n\n predict_result_english_slice = 
([\n example1.SerializeToString(),\n example3.SerializeToString()\n ])\n\n predict_result_chinese_slice = ([\n example2.SerializeToString(),\n example4.SerializeToString()\n ])\n\n test_input = (\n create_test_input(predict_result_english_slice, [(\n ('language', 'english'))]) +\n create_test_input(predict_result_chinese_slice, [(\n ('language', 'chinese'))]) +\n # Overall slice\n create_test_input(\n predict_result_english_slice + predict_result_chinese_slice,\n [()]))\n\n metrics = (\n pipeline\n | 'CreateTestInput' >> beam.Create(test_input)\n | 'ComputePerSliceMetrics' >> aggregate.ComputePerSliceMetrics(\n eval_shared_model=eval_shared_model, desired_batch_size=3))\n\n def check_result(got):\n self.assertEqual(3, len(got), 'got: %s' % got)\n slices = {}\n for slice_key, metrics in got:\n slices[slice_key] = metrics\n overall_slice = ()\n english_slice = (('language', 'english'))\n chinese_slice = (('language', 'chinese'))\n self.assertCountEqual(\n list(slices.keys()), [overall_slice, english_slice, chinese_slice])\n self.assertDictElementsAlmostEqual(\n slices[overall_slice], {\n 'accuracy': 1.0,\n 'label/mean': 0.5,\n 'my_mean_age': 3.75,\n 'my_mean_age_times_label': 1.75,\n })\n self.assertDictElementsAlmostEqual(\n slices[english_slice], {\n 'accuracy': 1.0,\n 'label/mean': 1.0,\n 'my_mean_age': 3.5,\n 'my_mean_age_times_label': 3.5,\n })\n self.assertDictElementsAlmostEqual(\n slices[chinese_slice], {\n 'accuracy': 1.0,\n 'label/mean': 0.0,\n 'my_mean_age': 4.0,\n 'my_mean_age_times_label': 0.0,\n })\n\n util.assert_that(metrics, check_result)\n\n def testAggregateMultipleSlicesWithSampling(self):\n temp_eval_export_dir = self._getEvalExportDir()\n _, eval_export_dir = linear_classifier.simple_linear_classifier(\n None, temp_eval_export_dir)\n\n eval_shared_model = self.createTestEvalSharedModel(\n eval_saved_model_path=eval_export_dir)\n\n with beam.Pipeline() as pipeline:\n example1 = self._makeExample(age=3.0, language='english', label=1.0)\n example2 = self._makeExample(age=3.0, language='chinese', label=0.0)\n example3 = self._makeExample(age=4.0, language='english', label=1.0)\n example4 = self._makeExample(age=5.0, language='chinese', label=0.0)\n\n predict_result_english_slice = ([\n example1.SerializeToString(),\n example3.SerializeToString()\n ])\n\n predict_result_chinese_slice = ([\n example2.SerializeToString(),\n example4.SerializeToString()\n ])\n\n test_input = (\n create_test_input(predict_result_english_slice, [(\n ('language', 'english'))]) +\n create_test_input(predict_result_chinese_slice, [(\n ('language', 'chinese'))]) +\n # Overall slice\n create_test_input(\n predict_result_english_slice + predict_result_chinese_slice,\n [()]))\n metrics = (\n pipeline\n | 'CreateTestInput' >> beam.Create(test_input)\n | 'ComputePerSliceMetrics' >>\n poisson_bootstrap.ComputeWithConfidenceIntervals(\n aggregate.ComputePerSliceMetrics,\n num_bootstrap_samples=10,\n eval_shared_model=eval_shared_model,\n desired_batch_size=3))\n\n def assert_almost_equal_to_value_with_t_distribution(\n target,\n unsampled_value,\n sample_mean,\n sample_standard_deviation,\n sample_degrees_of_freedom,\n delta=2):\n self.assertEqual(target.unsampled_value, unsampled_value)\n self.assertAlmostEqual(target.sample_mean, sample_mean, delta=delta)\n self.assertAlmostEqual(\n target.sample_standard_deviation,\n sample_standard_deviation,\n delta=delta)\n # The possion resampling could return [0, 0, ... 
], which will reduce\n # the number of samples.\n self.assertLessEqual(target.sample_degrees_of_freedom,\n sample_degrees_of_freedom)\n\n def check_overall_slice(slices):\n my_dict = slices[()]\n assert_almost_equal_to_value_with_t_distribution(\n my_dict['my_mean_age'], 3.75, 3.64, 0.34, 19)\n assert_almost_equal_to_value_with_t_distribution(\n my_dict['accuracy'], 1.0, 1.0, 0, 19)\n assert_almost_equal_to_value_with_t_distribution(\n my_dict['label/mean'], 0.5, 0.59, 0.29, 19)\n assert_almost_equal_to_value_with_t_distribution(\n my_dict['my_mean_age_times_label'], 1.75, 2.15, 1.06, 19)\n\n def check_english_slice(slices):\n my_dict = slices[(('language', 'english'))]\n assert_almost_equal_to_value_with_t_distribution(\n my_dict['my_mean_age'], 3.5, 3.18, 0.28, 19)\n assert_almost_equal_to_value_with_t_distribution(\n my_dict['accuracy'], 1.0, 1.0, 0, 19)\n assert_almost_equal_to_value_with_t_distribution(\n my_dict['label/mean'], 1.0, 1.0, 0, 19)\n assert_almost_equal_to_value_with_t_distribution(\n my_dict['my_mean_age_times_label'], 3.5, 3.18, 0.28, 19)\n\n def check_chinese_slice(slices):\n my_dict = slices[(('language', 'chinese'))]\n assert_almost_equal_to_value_with_t_distribution(\n my_dict['my_mean_age'], 4.0, 4.12, 0.83, 19)\n assert_almost_equal_to_value_with_t_distribution(\n my_dict['accuracy'], 1.0, 1.0, 0, 19)\n assert_almost_equal_to_value_with_t_distribution(\n my_dict['label/mean'], 0, 0, 0, 19)\n assert_almost_equal_to_value_with_t_distribution(\n my_dict['my_mean_age_times_label'], 0, 0, 0, 19)\n\n def check_result(got):\n self.assertEqual(3, len(got), 'got: %s' % got)\n slices = {}\n for slice_key, metrics in got:\n slices[slice_key] = metrics\n check_overall_slice(slices)\n check_english_slice(slices)\n check_chinese_slice(slices)\n\n util.assert_that(metrics, check_result)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ] ]
nomad-coe/electronic-parsers
[ "defb47be6ac22b2e48d4fb9204c85390a3c2f328" ]
[ "electronicparsers/cp2k/parser.py" ]
[ "#\n# Copyright The NOMAD Authors.\n#\n# This file is part of NOMAD.\n# See https://nomad-lab.eu for further info.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport os\nimport numpy as np\nimport logging\nimport re\nimport ase\nfrom ase import io as aseio\n\nfrom .metainfo import m_env\nfrom nomad.units import ureg\nfrom nomad.parsing.file_parser import TextParser, Quantity, FileParser, DataTextParser\nfrom nomad.datamodel.metainfo.simulation.run import (\n Run, Program)\nfrom nomad.datamodel.metainfo.simulation.method import (\n Method, DFT, XCFunctional, Functional, BasisSet, BasisSetCellDependent, BasisSetAtomCentered,\n AtomParameters, Scf, Electronic\n)\nfrom nomad.datamodel.metainfo.simulation.system import (\n System, Atoms\n)\nfrom nomad.datamodel.metainfo.simulation.calculation import (\n Calculation, Energy, EnergyEntry, Stress, StressEntry, ScfIteration, Forces,\n ForcesEntry, Thermodynamics\n)\nfrom nomad.datamodel.metainfo.workflow import (\n Workflow, GeometryOptimization, MolecularDynamics\n)\n\nfrom .metainfo.cp2k_general import x_cp2k_section_quickstep_settings, x_cp2k_section_dbcsr,\\\n x_cp2k_section_startinformation, x_cp2k_section_end_information, x_cp2k_section_program_information,\\\n x_cp2k_section_global_settings, x_cp2k_section_vdw_settings, x_cp2k_section_atomic_kinds,\\\n x_cp2k_section_atomic_kind, x_cp2k_section_kind_basis_set, x_cp2k_section_total_numbers,\\\n x_cp2k_section_maximum_angular_momentum, x_cp2k_section_quickstep_calculation,\\\n x_cp2k_section_scf_iteration, x_cp2k_section_stress_tensor, x_cp2k_section_md_settings,\\\n x_cp2k_section_md_step, x_cp2k_section_restart_information, x_cp2k_section_geometry_optimization,\\\n x_cp2k_section_geometry_optimization_step\n\n\ntry:\n import mdtraj\nexcept ImportError:\n logging.getLogger(__name__).warn('Required MDTraj module not found.')\n mdtraj = False\n\n\nunits_map = {\n 'hbar': ureg.hbar, 'hartree': ureg.hartree, 'angstrom': ureg.angstrom,\n 'au_t': ureg.hbar / ureg.hartree}\n\n\ndef resolve_unit(unit_str, parts=[]):\n unit_str = unit_str.lower().replace(' ', '')\n parts = list(parts)\n\n if unit_str in units_map:\n return units_map[unit_str]\n\n try:\n return float(unit_str)\n except Exception:\n pass\n\n if unit_str == '':\n return 1\n\n open_p = unit_str.rfind('(')\n if open_p > -1:\n n_groups = unit_str.count('(')\n if n_groups != unit_str.count(')'):\n return unit_str\n for n in range(n_groups):\n part = unit_str[open_p + 1:]\n part = part[:part.find(')')]\n parts.append(resolve_unit(part, parts))\n unit_str = unit_str.replace('(%s)' % part, '[%d]' % n)\n open_p = unit_str.rfind('(')\n return resolve_unit(unit_str, parts)\n\n vals = unit_str.split('/')\n if len(vals) > 1:\n vals = [resolve_unit(v, parts) for v in vals]\n val = vals[0]\n for v in vals[1:]:\n val /= v\n return val\n\n vals = unit_str.split('**')\n if len(vals) > 1:\n vals = [resolve_unit(v, parts) for v in vals]\n val = vals[0]\n for v in reversed(vals[1:]):\n val = val ** v\n return val\n\n vals = unit_str.split('^')\n 
if len(vals) > 1:\n vals = [resolve_unit(v, parts) for v in vals]\n val = vals[0]\n for v in reversed(vals[1:]):\n val = val ** v\n return val\n\n vals = unit_str.split('*')\n if len(vals) > 1:\n vals = [resolve_unit(v, parts) for v in vals]\n unit = 1\n for v in vals:\n unit *= v\n return unit\n\n vals = unit_str.split('-1')\n if len(vals) == 2:\n return 1 / resolve_unit(vals[0], parts)\n\n vals = re.match(r'\\[(\\d+)\\]', unit_str)\n if vals:\n return parts[int(vals.group(1))]\n\n\nclass Property:\n def __init__(self, **kwargs):\n self._data = kwargs\n\n def __getattr__(self, key):\n return self._data.get(key, None)\n\n\nclass Trajectory(Property):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n\nclass XYZTrajParser(TextParser):\n def __init__(self):\n super().__init__()\n\n def init_quantities(self):\n\n def get_trajectory(val_in):\n val = np.transpose([v.split() for v in val_in.strip().split('\\n')])\n positions = np.array(val[1:4], dtype=float).T\n return Trajectory(labels=val[0], positions=positions)\n\n self.quantities = [\n Quantity(\n 'trajectory', r'([A-Z][a-z]?[\\w\\.\\-\\s]+?)(?:\\s+\\d\\n|\\Z)',\n str_operation=get_trajectory, repeats=True),\n Quantity(\n 'energy', r'E\\s*=\\s*(\\S+)', repeats=True, dtype=float)]\n\n\nclass TrajParser(FileParser):\n def __init__(self, **kwargs):\n super().__init__()\n self._xyz_parser = XYZTrajParser()\n self.format = None\n self.units = None\n self.type = kwargs.get('type', 'positions')\n\n @property\n def trajectory(self):\n if self._file_handler is None:\n if self.format is None:\n self.format = self.mainfile.split('.')[-1].lower()\n\n result = None\n labels = []\n # ase is better as it reads also symbols\n try:\n atoms_list = [atoms for atoms in aseio.iread(self.mainfile, format=self.format)]\n result = [atoms.positions for atoms in atoms_list]\n labels = [list(atoms.symbols) for atoms in atoms_list]\n\n # custom parser\n except Exception:\n if self.format == 'xyz':\n self._xyz_parser.mainfile = self.mainfile\n result = [traj.positions for traj in self._xyz_parser.get('trajectory', [])]\n labels = [traj.labels for traj in self._xyz_parser.get('trajectory', [])]\n\n if result is None and mdtraj:\n reader = None\n if self.format in ['xyz', 'xmol', 'atomic']:\n reader = mdtraj.formats.XYZTrajectoryFile(self.mainfile)\n elif self.format == 'dcd':\n reader = mdtraj.formats.DCDTrajectoryFile(self.mainfile)\n elif self.format == 'pdb':\n reader = mdtraj.formats.PDBTrajectoryFile(self.mainfile)\n else:\n self.logger.error('Unsupported trajectory format.')\n\n if reader is not None:\n try:\n # we do not stream to simplify archive writing\n result = reader.read()\n except Exception:\n pass\n\n result = result * self.units if self.units is not None else result\n\n result = [Trajectory(**{self.type: res}) for res in result]\n\n # add labels to trajectory\n for n, labels_i in enumerate(labels):\n result[n]._data.update({'labels': labels_i})\n\n self._file_handler = result\n\n return self._file_handler\n\n\nre_float = r'[-+]?\\d+\\.?\\d*(?:[Ee][-+]\\d+)?'\n\n\nclass ForceParser(TextParser):\n def __init__(self):\n super().__init__()\n\n def init_quantities(self):\n self._quantities = [Quantity(\n 'atom_forces',\n rf'\\d+\\s*\\d+\\s*\\w+\\s*({re_float})\\s*({re_float})\\s*({re_float})', repeats=True)]\n\n\nclass XCFunctionalProperty(Property):\n def __init__(self, name, **kwargs):\n super().__init__(name=name, **kwargs)\n\n\nclass InpValue:\n def __init__(self, name, **kwargs):\n self._data = kwargs\n self._name = name\n self._dict = 
None\n\n @property\n def name(self):\n return self._name\n\n def add(self, key, val):\n if key in self._data:\n self._data[key] = self._data[key] if isinstance(self._data[key], list) else [self._data[key]]\n self._data[key].append(val)\n else:\n self._data[key] = val\n\n def to_dict(self):\n if self._dict is None:\n def extract(data):\n out = dict()\n for key, val in data.items():\n if isinstance(val, InpValue):\n val = extract(val.to_dict())\n out[key] = val\n return out\n\n self._dict = extract(self._data)\n\n return self._dict\n\n def items(self):\n for key, val in self._data.items():\n yield key, val\n\n def __getattr__(self, key):\n return self._data.get(key, None)\n\n def __repr__(self):\n return self._name\n\n\nclass InpParser(FileParser):\n def __init__(self):\n super().__init__()\n self._re_open = re.compile(r'&(\\w+)\\s*(.*)[#!]*')\n self._re_close = re.compile(r'&END')\n self._re_key_value = re.compile(r'(\\w+)\\s+(.+)[#!]*')\n self._re_variable = re.compile(r'@SET (\\w+)\\s+(.+)[#!]*')\n\n @property\n def tree(self):\n if self._file_handler is None:\n def override(name, data):\n if data[0] == 'PROJECT':\n return 'PROJECT_NAME', data[1]\n elif not data[0].isupper():\n return 'DEFAULT_KEYWORD', ' '.join(data)\n return data\n\n self._variables = dict()\n line = True\n sections = [InpValue('tree')]\n while line:\n line = self.mainfile_obj.readline()\n # comments\n strip = line.strip()\n if not strip or strip[0] in ('#', '#', '!'):\n continue\n variable = self._re_variable.search(line)\n if variable:\n self._variables['${%s}' % variable.group(1)] = variable.group(2).strip()\n continue\n close_section = self._re_close.search(line)\n if close_section:\n sections.pop(-1)\n continue\n open_section = self._re_open.search(line)\n if open_section:\n section = InpValue(open_section.group(1))\n sections[-1].add(open_section.group(1), section)\n sections.append(section)\n if open_section.group(2):\n sections[-1].add('VALUE', open_section.group(2))\n continue\n key_value = self._re_key_value.search(line)\n if key_value:\n key, val = key_value.group(1), key_value.group(2)\n val = val.strip()\n if val in self._variables:\n val = self._variables[val]\n key, val = override(sections[-1].name, [key, val])\n sections[-1].add(key, val)\n continue\n self._file_handler = sections[0]\n return self._file_handler\n\n def parse(self, key):\n if self._results is None:\n self._results = dict()\n\n source = self.tree.to_dict()\n for sub_key in key.strip('/').split('/'):\n val = source.get(sub_key, None)\n source = val\n if val is None:\n break\n self._results[key] = val\n\n\nclass CP2KOutParser(TextParser):\n def __init__(self):\n super().__init__()\n\n def init_quantities(self):\n def str_to_header(val_in):\n val = val_in.split(' ', 1)\n return [val[0].strip().replace(' ', '_').lower(), val[-1].strip()]\n\n def str_to_program(val_in):\n val = val_in.split(' ', 2)\n return ['_'.join(val[:2]).lower(), ''.join([v.strip() for v in val[2].split('\\n')])]\n\n def str_to_atomic_coordinates(val_in):\n val = [v.split() for v in val_in.split('\\n')]\n lengthunit = val[0][0].lower()\n val = np.transpose(np.array([v for v in val if len(v) == 9]))\n labels = val[2]\n positions = np.transpose(np.array(val[4:7], dtype=float)) * resolve_unit(lengthunit)\n atomic_numbers = {element: int(val[3][n]) for n, element in enumerate(val[2])}\n return Trajectory(labels=labels, positions=positions, atomic_numbers=atomic_numbers)\n\n def str_to_stress_eigenvalues(val_in):\n val = [v.split() for v in val_in.strip().split('\\n')]\n val = 
np.array([v for v in val if v], dtype=float)\n return val[0] * ureg.GPa, val[1:]\n\n def str_to_iteration(val_in):\n val = val_in.strip().split()\n return {\n 'energy_total': float(val[-2]) * ureg.hartree,\n 'energy_change': float(val[-1]) * ureg.hartree}\n\n def str_to_information(val_in):\n val = [v.split('=') for v in val_in.strip().split('\\n')]\n return {v[0].strip().lower().replace(' ', '_').replace('.', ''): v[1] for v in val if len(v) == 2}\n\n n_orbital_basis_quantities = [Quantity(\n 'basis_set_number_of_%s' % key.lower().replace(' ', '_'),\n r'Number of %s:\\s+(\\d+)' % key, dtype=int) for key in [\n 'orbital shell sets', 'orbital shells', 'primitive Cartesian functions',\n 'Cartesian basis functions', 'spherical basis functions']]\n\n energy_quantities = [Quantity(\n '%s' % key.lower().replace(' ', '_').replace('-', '_'),\n rf'%s:\\s*({re_float})' % key, dtype=float, unit='hartree', repeats=True) for key in [\n 'Hartree energy', 'Exchange-correlation energy', 'Electronic kinetic energy',\n 'Total energy']]\n # what is the difference between Total energy and ENERGY| Total\n\n scf_wavefunction_optimization_quantities = [\n Quantity(\n 'iteration',\n rf'(\\d+\\s+\\S+\\s*\\S*\\s+{re_float}\\s+{re_float}\\s+{re_float}\\s+{re_float}\\s+{re_float})\\n',\n repeats=True, convert=False, str_operation=str_to_iteration),\n # TODO add minimizer info\n Quantity(\n 'converged',\n r'SCF run converged in\\s*(\\d+) steps', dtype=int),\n # find example with cueb file\n Quantity(\n 'cube_file',\n r' The electron density is written in cube file format to the file:\\s*(.+?\\.cube)'),\n # TODO add mulliken, hirschfield\n Quantity(\n 'energy_total',\n rf'ENERGY\\| Total FORCE_EVAL \\( \\w+ \\) energy \\(a\\.u\\.\\):\\s*({re_float})',\n dtype=float, unit='hartree'),\n Quantity(\n 'atom_forces',\n rf'ATOMIC FORCES in \\[a\\.u\\.\\]\\s*.+([\\s\\S]+?)SUM', convert=False,\n str_operation=lambda x: np.array(np.transpose(\n [v.split() for v in x.strip().split('\\n')])[3:6], dtype=float).T),\n # TODO test stress cannot find example\n Quantity(\n 'stress_tensor',\n r' (?:NUMERICAL )?STRESS TENSOR \\[GPa\\]\\s+X\\s+Y\\s+Z\\s+([\\d\\.\\-\\s]+)',\n str_operation=lambda x:np.array([v.split() for v in x.strip().split('\\n')], dtype=float),\n convert=False, unit='GPa'),\n Quantity(\n 'stress_tensor_one_third_of_trace',\n rf' 1/3 Trace\\(stress tensor\\)\\s*:\\s*({re_float})', dtype=float, unit='GPa'),\n Quantity(\n 'stress_tensor_determinant',\n rf'Det\\(stress tensor\\)\\s*:\\s*({re_float})', dtype=float, unit='GPa**3'),\n Quantity(\n 'stress_eigenvalues_eigenvectors',\n r' EIGENVECTORS AND EIGENVALUES OF THE STRESS TENSOR\\s*([\\d\\.\\-\\s]+)',\n str_operation=str_to_stress_eigenvalues, convert=False)] + energy_quantities\n\n single_point_quantities = [\n Quantity(\n 'self_consistent',\n r'SCF WAVEFUNCTION OPTIMIZATION([\\s\\S]+?)\\Z', repeats=False,\n sub_parser=TextParser(quantities=scf_wavefunction_optimization_quantities)),\n # TODO add rpa, etc.\n ]\n\n geometry_optimization_quantities = [\n Quantity(\n 'method',\n r'\\*{3}\\s*((?:CONJUGATE GRADIENTS|L-BFGS|BFGS))\\s*\\*{3}', flatten=False),\n Quantity(\n 'self_consistent',\n r'SCF WAVEFUNCTION OPTIMIZATION([\\s\\S]+?)OPTIMIZ', repeats=False,\n sub_parser=TextParser(quantities=scf_wavefunction_optimization_quantities)),\n Quantity(\n 'optimization_step',\n r'(ATION STEP:\\s*\\d+[\\s\\S]+?)(?:\\-\\s+OPTIMIZ|\\Z)', repeats=True,\n sub_parser=TextParser(quantities=[\n # TODO parse atomic positions\n Quantity('step', r'ATION STEP:\\s*(\\d+)'),\n # I do not quite 
get why there can be multiple scfs in a step\n Quantity(\n 'information',\n r'Informations at step\\s*=\\s*\\d+\\s*\\-+([\\s\\S]+?)\\-{5}',\n str_operation=str_to_information),\n Quantity(\n 'self_consistent',\n r'FUNCTION OPTIMIZATION([\\s\\S]+?)(?: SCF WAVE|\\Z)', repeats=True,\n sub_parser=TextParser(quantities=scf_wavefunction_optimization_quantities))]))\n ]\n\n molecular_dynamics_quantities = [\n Quantity(\n 'initial',\n r' INITIAL\\| (.+? {2})=\\s+(.+)', str_operation=str_to_header, repeats=True),\n Quantity(\n 'self_consistent',\n r'SCF WAVEFUNCTION OPTIMIZATION([\\s\\S]+?)(?:\\*\\n *ENSEM|\\Z)', repeats=False,\n sub_parser=TextParser(quantities=scf_wavefunction_optimization_quantities)),\n Quantity(\n 'md_step',\n r'(BLE TYPE[\\s\\S]+?)(?:\\*\\n *ENSEM|\\Z)',\n repeats=True, sub_parser=TextParser(quantities=[\n Quantity(\n 'ensemble_type', r'BLE TYPE\\s*=\\s*(.+)'),\n Quantity(\n 'step_number', r'STEP NUMBER\\s*=\\s*(\\d+)', dtype=int),\n Quantity(\n 'time', rf'TIME \\[fs\\]\\s*=\\s*({re_float})', dtype=float),\n Quantity(\n 'conserved_quantity',\n rf'CONSERVED QUANTITY \\[hartree\\]\\s*=\\s*({re_float})',\n dtype=float, unit='hartree'),\n Quantity(\n 'cpu_time',\n rf'CPU TIME \\[s\\]\\s*=\\s*({re_float})\\s*(re_float)',\n dtype=float),\n Quantity(\n 'energy_drift',\n rf'ENERGY DRIFT PER ATOM \\[K\\]\\s*=\\s*({re_float})\\s*({re_float})',\n dtype=float, unit='hartree'),\n Quantity(\n 'potential_energy',\n rf'POTENTIAL ENERGY\\[hartree\\]\\s*=\\s*({re_float})\\s*({re_float})',\n dtype=float, unit='hartree'),\n Quantity(\n 'kinetic_energy',\n rf'KINETIC ENERGY\\[hartree\\]\\s*=\\s*({re_float})\\s*({re_float})',\n dtype=float, unit='hartree'),\n Quantity(\n 'temperature',\n rf'TEMPERATURE \\[K\\]\\s*=\\s*({re_float})\\s*({re_float})', dtype=float),\n Quantity(\n 'pressure',\n rf'PRESSURE \\[bar\\]\\s*=\\s*({re_float})\\s*({re_float})',\n dtype=float, unit='bar'),\n Quantity(\n 'barostat_temperature',\n rf'BAROSTAT TEMP\\[K\\]\\s*=\\s*({re_float})\\s*({re_float})', dtype=float),\n Quantity(\n 'volume',\n rf'VOLUME\\[bohr\\^3\\]\\s*=\\s*({re_float})\\s*({re_float})',\n dtype=float, unit='bohr**3'),\n Quantity(\n 'cell_length_instantaneous',\n rf'CELL LNTHS\\[bohr\\]\\s*=\\s*({re_float})\\s*({re_float})\\s*({re_float})',\n dtype=float),\n Quantity(\n 'cell_length_average',\n rf'AVE\\. CELL LNTHS\\[bohr\\]\\s*=\\s*({re_float})\\s*({re_float})\\s*({re_float})',\n dtype=float),\n Quantity(\n 'cell_angle_instantaneous',\n rf'CELL ANGLS\\[deg\\]\\s*=\\s*({re_float})\\s*({re_float})\\s*({re_float})',\n dtype=float),\n Quantity(\n 'cell_angle_average',\n rf'AVE\\. CELL ANGLS\\[deg\\]\\s*=\\s*({re_float})\\s*({re_float})\\s*({re_float})',\n dtype=float),\n Quantity(\n 'self_consistent',\n r'FUNCTION OPTIMIZATION([\\s\\S]+?)(?: SCF WAVE|\\Z)', repeats=True,\n sub_parser=TextParser(quantities=scf_wavefunction_optimization_quantities))])\n )\n ]\n\n quickstep_quantities = [\n Quantity(\n 'dft',\n r' DFT\\| (.+? {2}) +(.+)', str_operation=str_to_header, repeats=True),\n Quantity('dft_u', r'(DFT\\+U\\|)'),\n Quantity('mp2', r'(MP2\\|)'),\n Quantity('rpa', r'(RI-RPA\\|)'),\n Quantity(\n 'functional', r' FUNCTIONAL\\| (\\S+):', repeats=True),\n # TODO find example and verify\n Quantity(\n 'vdw',\n r' vdW POTENTIAL\\| +(.+? 
{2}) +(.+)', str_operation=str_to_header, repeats=True),\n Quantity(\n 'qs',\n r' QS\\| ((?:Method|Density cutoff)).*?:( {2}) +(.+)',\n str_operation=str_to_header, repeats=True),\n Quantity(\n 'atomic_kind_information',\n r' ATOMIC KIND INFORMATION([\\s\\S]+?)\\n\\n\\n',\n sub_parser=TextParser(quantities=[Quantity(\n 'atom',\n r'(ic kind: [A-Z][a-z]?[\\s\\S]+?)(?:\\d+\\. Atom|\\Z)', repeats=True,\n sub_parser=TextParser(quantities=[\n Quantity('kind_label', r'ic kind:\\s*(\\w+)'),\n Quantity('kind_number_of_atoms', r'Number of atoms:\\s*(\\d+)', dtype=int),\n Quantity('kind_basis_set_name', r'Orbital Basis Set\\s*(.+)'),\n Quantity('basis_set_norm_type', r'Norm type:\\s*(\\d+)', dtype=int)\n ] + n_orbital_basis_quantities))])),\n Quantity(\n 'total_maximum_numbers',\n r' TOTAL NUMBERS AND MAXIMUM NUMBERS([\\s\\S]+?)\\n\\n\\n',\n sub_parser=TextParser(quantities=[Quantity(\n '%s' % key.lower().replace('the ', '').replace(' ', '_').replace('-', '_'),\n r'\\- %s:\\s*(\\d+)' % key, dtype=int) for key in [\n 'Atomic kinds', 'Atoms', 'Shell sets', 'Shells', 'Primitive Cartesian functions',\n 'Cartesian basis functions', 'Spherical basis functions',\n 'Orbital basis functions', 'Local part of the GTH pseudopotential',\n 'Non-local part of the GTH pseudopotential']])),\n Quantity(\n 'atomic_coordinates',\n r' ATOMIC COORDINATES IN (angstrom[\\s\\S]+?)\\n\\n\\n',\n convert=False, str_operation=str_to_atomic_coordinates),\n Quantity(\n 'scf_parameters',\n r' SCF PARAMETERS([\\s\\S]+?)\\*{79}',\n sub_parser=TextParser(quantities=[\n Quantity('n_max_iteration', r'max_scf:\\s*(\\d+)', dtype=int),\n Quantity(\n 'threshold_energy_change', rf'eps_scf:\\s*({re_float})',\n dtype=float, unit='hartree'),\n Quantity(\n 'md',\n r' MD\\| (.+? {2}) +(.+)', str_operation=str_to_header, repeats=True\n )])),\n # TODO add mp2, rpa, gw\n Quantity(\n 'single_point',\n r'( Iteration\\s*Convergence\\s*Energy[\\s\\S]+?(?:\\-{50}\\n\\s*\\-|MD_ENERGIES))',\n sub_parser=TextParser(quantities=single_point_quantities)),\n Quantity(\n 'geometry_optimization',\n r'STARTING GEOMETRY OPTIMIZATION([\\s\\S]+?(?:GEOMETRY OPTIMIZATION COMPLETED|\\Z))',\n sub_parser=TextParser(quantities=geometry_optimization_quantities)),\n Quantity(\n 'molecular_dynamics',\n r'(MD_ENERGIES\\| Initialization proceeding[\\s\\S]+?\\-{50}\\n\\s*\\-)',\n sub_parser=TextParser(quantities=molecular_dynamics_quantities))\n ]\n\n self._quantities = [\n Quantity(\n 'dbcsr',\n r' (DBCSR)\\| (.+? {2}) +(.+)', str_operation=str_to_header, repeats=True),\n Quantity(\n 'program',\n r'\\*\\*\\s*PROGRAM ([\\s\\S]+?)(?:\\*\\*|\\n\\n|\\Z)',\n str_operation=str_to_program, repeats=True),\n Quantity(\n 'cp2k',\n r' CP2K\\| (.+? {2}) +(.+)', str_operation=str_to_header, repeats=True),\n Quantity(\n 'global',\n r' GLOBAL\\| (.+? {2}) +(.+)', str_operation=str_to_header, repeats=True),\n Quantity(\n 'restart',\n r'RESTART INFORMATION\\s*\\*+\\s*\\*+([\\s\\S]+?)\\*{79}',\n sub_parser=TextParser(quantities=[\n Quantity('filename', r'RESTART FILE NAME: (\\S+)'),\n Quantity(\n 'quantities',\n r'RESTARTED QUANTITIES:\\s*\\*\\s*([\\s\\S]+?)\\Z',\n str_operation=lambda x: [v.split('*')[1].strip() for v in x.strip().split('\\n')])])),\n Quantity(\n 'lattice_vectors',\n rf' CELL\\| Vector [abc] \\[angstrom\\]:\\s*({re_float})\\s*({re_float})\\s*({re_float})',\n repeats=True),\n # TODO add restart find example\n Quantity(\n 'quickstep',\n r'\\.\\.\\. 
make the atoms dance([\\s\\S]+?(?:\\-{79}\\s*\\-|\\Z))',\n sub_parser=TextParser(quantities=quickstep_quantities)),\n Quantity(\n 'qs_dftb',\n r' ##### ##### # ###### ####### ####### ######\\s*'\n r' # # # # # # # # # # #\\s*'\n r' # # # # # # # # # #\\s*'\n r' # # ##### # # # ##### # ######\\s*'\n r' # # # # # # # # # # #\\s*'\n r' # # # # # # # # # # #\\s*'\n r' #### # ##### # ###### # # ######\\s*'\n r'([\\s\\S]+?(?:\\-{79}\\s*\\-|\\Z))',\n sub_parser=TextParser(quantities=quickstep_quantities))\n # TODO add other calculation types\n ]\n\n\nclass CP2KParser:\n def __init__(self):\n self.out_parser = CP2KOutParser()\n self.inp_parser = InpParser()\n # use a custom xyz parser as the output of cp2k is sometimes not up to standard\n self.traj_parser = TrajParser(type='positions')\n self.velocities_parser = TrajParser(type='velocities')\n self.cell_parser = DataTextParser()\n self.energy_parser = DataTextParser()\n self.force_parser = ForceParser()\n self._method = None\n self._calculation_type = None\n\n # TODO add vdw parameter\n self._metainfo_name_map = {\n 'started_at': 'start_time', 'started_on': 'start_host',\n 'started_by': 'start_user', 'process_id': 'id', 'started_in': 'start_path',\n 'ended_at': 'end_time', 'ran_on': 'end_host', 'ran_by': 'end_user',\n 'stopped_in': 'end_path', 'version_string:': 'program_version',\n 'source_code_revision_number:': 'svn_revision',\n 'program_compiled_at': 'program_compilation_datetime',\n 'program_compiled_on': 'program_compilation_host',\n 'input_file_name': 'input_filename',\n 'basis_set_file_name': 'basis_set_filename',\n 'geminal_file_name': 'geminal_filename',\n 'potential_file_name': 'potential_filename',\n 'mm_potential_file_name': 'mm_potential_filename',\n 'coordinate_file_name': 'coordinate_filename',\n 'preferred_diagonalization_lib.': 'preferred_diagonalization_library',\n 'spin_restricted_kohn-sham_(rks)_calculation': 'spin_restriction',\n 'multiplicity': 'spin_target_multiplicity',\n 'number_of_spin_states': 'number_of_spin_channels', 'charge': 'total_charge',\n 'self-interaction_correction_(sic)': 'self_interaction_correction_method',\n 'method': 'quickstep_method', 'density_cutoff': 'planewave_cutoff',\n 'temperature': 'target_temperature', 'temperature_tolerance': 'target_temperature_tolerance',\n 'pressure': 'target_pressure', 'print_md_information_every': 'print_frequency',\n 'potential_form:': 'vdw_name', 'bj_damping:': 'bj_damping_name',\n 'cutoff_radius_[bohr]:': 'cutoff_radius', 'scaling_factor:': 'scaling_factor',\n 'exp_prefactor_for_damping:': 'damping_factor', 's6_scaling_factor:': 's6_scaling_factor',\n 'sr6_scaling_factor:': 'sr6_scaling_factor', 's8_scaling_factor:': 's6_scaling_factor',\n 'cutoff_for_cn_calculation:': 'cn_cutoff', 'optimization_method': 'method',\n 'total_energy': 'energy', 'real_energy_change': 'energy_change',\n 'decrease_in_energy': 'energy_decrease', 'conv_limit_for_step_size': 'step_size_convergence_limit',\n 'convergence_in_step_size': 'step_size_convergence',\n 'convergence_in_rms_step_size': 'rms_step_size_convergence',\n 'conv_limit_for_gradients': 'gradient_convergence_limit',\n 'conv_for_gradients': 'max_gradient_convergence',\n 'conv_in_rms_gradients': 'rms_gradient_convergence',\n 'exchange_correlation_energy': 'energy_XC',\n 'electronic_kinetic_energy': 'energy_kinetic_electronic'}\n\n self._self_interaction_map = {\n 'NO': None, 'D SIC': 'SIC_AD', 'Explicit Orbital SIC': 'SIC_EXPLICIT_ORBITALS',\n 'SPZ/MAURI SIC': 'SIC_MAURI_SPZ', 'US/MAURI SIC': 'SIC_MAURI_US'}\n 
self._optimization_method_map = {\n 'CONJUGATE GRADIENTS': 'conjugate gradient', 'BFGS': 'bfgs', 'L-BFGS': 'bfgs'}\n self._file_extension_map = {\n 'XYZ': 'xyz', 'XMOL': 'xyz', 'ATOMIC': 'xyz', 'PDB': 'pdb', 'DCD': 'dcd'}\n self._xc_functional_map = {\n 'BLYP': [XCFunctionalProperty('GGA_X_B88'), XCFunctionalProperty('GGA_C_LYP')],\n 'LDA': [XCFunctionalProperty('LDA_XC_TETER93')],\n 'PADE': [XCFunctionalProperty('LDA_XC_TETER93')],\n 'PBE': [XCFunctionalProperty('GGA_X_PBE'), XCFunctionalProperty('GGA_C_PBE')],\n 'OLYP': [XCFunctionalProperty('GGA_X_OPTX'), XCFunctionalProperty('GGA_C_LYP')],\n 'HCTH120': [XCFunctionalProperty('GGA_XC_HCTH_120')],\n 'PBE0': [XCFunctionalProperty('HYB_GGA_XC_PBEH')],\n 'B3LYP': [XCFunctionalProperty('HYB_GGA_XC_B3LYP')],\n 'TPSS': [XCFunctionalProperty('MGGA_X_TPSS'), XCFunctionalProperty('MGGA_C_TPSS')]}\n self._ensemble_map = {'NVE': 'NVE', 'NVT': 'NVT', 'NPT_F': 'NPT', 'NPT_I': 'NPT'}\n self._vdw_map = {\n \"S. Grimme, JCC 27: 1787 (2006)\": \"G06\",\n \"S. Grimme et al, JCP 132: 154104 (2010)\": \"G10\"}\n\n self._settings = None\n\n def init_parser(self):\n self.out_parser.mainfile = self.filepath\n self.inp_parser.mainfile = None\n self.traj_parser.mainfile = None\n self.velocities_parser.mainfile = None\n self.energy_parser.mainfile = None\n self.force_parser.mainfile = None\n self.out_parser.logger = self.logger\n self.inp_parser.logger = self.logger\n self.traj_parser.logger = self.logger\n self.velocities_parser.logger = self.logger\n self.energy_parser.logger = self.logger\n self.force_parser.logger = self.logger\n self._settings = None\n self._method = None\n self._calculation_type = None\n\n @property\n def settings(self):\n if self._settings is None:\n def to_dict(data, repeats=True):\n data_dict = dict()\n for key, val in data:\n name = self._metainfo_name_map.get(key, key)\n if not repeats and name in data_dict:\n continue\n data_dict.setdefault(name, [])\n data_dict[name].append(val)\n for key, val in data_dict.items():\n data_dict[key] = val[0] if len(val) == 1 else val\n return data_dict\n\n self._settings = dict()\n self._settings['dft'] = to_dict(\n self.out_parser.get(self._calculation_type, {}).get('dft', []))\n self._settings['qs'] = to_dict(\n self.out_parser.get(self._calculation_type, {}).get('qs', []))\n self._settings['vdw'] = to_dict(\n self.out_parser.get(self._calculation_type, {}).get('vdw', []))\n self._settings['dbcsr'] = to_dict(self.out_parser.get('dbcsr', []), False)\n self._settings['program'] = to_dict(self.out_parser.get('program', []))\n self._settings['cp2k'] = to_dict(self.out_parser.get('cp2k', []), False)\n self._settings['global'] = to_dict(self.out_parser.get('global', []), False)\n self._settings['md'] = to_dict(\n self.out_parser.get(self._calculation_type, {}).get('scf_parameters', {}).get('md', []))\n\n return self._settings\n\n def _normalize_filename(self, filename):\n if filename.startswith('='):\n filename = filename[1:]\n elif re.match(r'./', filename):\n filename = filename\n else:\n project_name = self.inp_parser.get('GLOBAL/PROJECT_NAME')\n if filename:\n filename = '%s-%s' % (project_name, filename)\n else:\n filename = project_name\n return filename\n\n def get_atomic_number(self, element):\n atomic_numbers = self.out_parser.get(\n self._calculation_type, {}).get('atomic_coordinates').atomic_numbers\n return atomic_numbers.get(element, 0)\n\n def get_ensemble_type(self, frame):\n if self.sampling_method != 'molecular_dynamics':\n return\n\n if frame == 0:\n return 
self.settings['md'].get('ensemble_type', '')\n else:\n calculation = self.out_parser.get(self._calculation_type, '')\n if not calculation:\n return calculation\n return calculation.molecular_dynamics.md_step[frame - 1].get('ensemble_type', '')\n\n def get_velocities(self, frame):\n if self.out_parser.get(self._calculation_type, {}).get('molecular_dynamics') is not None:\n return\n\n if self.velocities_parser.mainfile is None:\n frequency, filename = self.settings['md'].get('velocities', '0 none').split()\n frequency = int(frequency)\n if frequency == 0:\n filename = '%s-vel-1.xyz' % self.inp_parser.get('GLOBAL/PROJECT_NAME')\n frequency = 1\n\n self.velocities_parser.mainfile = os.path.join(self.maindir, filename)\n self.velocities_parser.units = resolve_unit(\n self.inp_parser.get('MOTION/PRINT/VELOCITIES/UNIT', 'bohr*au_t^-1'))\n self.velocities_parser._frequency = frequency\n\n if self.get_ensemble_type(frame).lower() == 'REFTRAJ':\n frame -= 1\n\n if frame % self.velocities_parser._frequency != 0 or frame < 0:\n return\n\n try:\n return self.velocities_parser.trajectory[frame // self.velocities_parser._frequency]\n except Exception:\n self.logger.error('Error reading velocities.')\n\n def get_trajectory(self, frame):\n trajectory = None\n\n if frame == 0:\n coord = self.inp_parser.get('FORCE_EVAL/SUBSYS/COORD/DEFAULT_KEYWORD')\n units = resolve_unit(self.inp_parser.get('FORCE_EVAL/SUBSYS/COORD/UNIT', 'angstrom'))\n if coord is None:\n coord_filename = self.inp_parser.get('FORCE_EVAL/SUBSYS/TOPOLOGY/COORD_FILE_NAME', '')\n self.traj_parser.mainfile = os.path.join(self.maindir, coord_filename.strip())\n self.traj_parser.units = units\n if self.traj_parser.trajectory:\n result = self.traj_parser.trajectory[0]\n # reset for output trajectory\n self.traj_parser.mainfile = None\n trajectory = result\n\n else:\n coord = np.transpose([c.split() for c in coord])\n positions = np.array(coord[1:4], dtype=float).T * units\n scaled = 'T' in self.inp_parser.get('FORCE_EVAL/SUBSYS/COORD/SCALED', 'False')\n if scaled:\n trajectory = Trajectory(labels=coord[0], scaled_positions=positions)\n else:\n trajectory = Trajectory(labels=coord[0], positions=positions)\n\n if trajectory is not None:\n return trajectory\n\n if self.traj_parser.mainfile is None:\n # try to get it from md\n frequency, filename = self.settings['md'].get('coordinates', '0 none').split()\n frequency = int(frequency)\n if frequency == 0:\n filename = self.inp_parser.get('MOTION/PRINT/TRAJECTORY/FILENAME', '').strip()\n filename = self._normalize_filename(filename)\n traj_format = self.inp_parser.get('MOTION/PRINT/TRAJECTORY/FORMAT', 'XYZ').strip()\n traj_format = self._file_extension_map.get(traj_format, 'xyz')\n filename = '%s-pos-1.%s' % (filename, traj_format)\n frequency = 1\n\n self.traj_parser.mainfile = os.path.join(self.maindir, filename)\n self.traj_parser.units = resolve_unit(\n self.inp_parser.get('MOTION/PRINT/TRAJECTORY/UNIT', 'angstrom'))\n self.traj_parser._frequency = frequency\n\n if self.get_ensemble_type(frame) == 'REFTRAJ':\n frame -= 1\n\n if frame % self.traj_parser._frequency != 0 or frame < 0:\n return\n\n try:\n return self.traj_parser.trajectory[frame // self.traj_parser._frequency]\n except Exception:\n self.logger.error('Error reading trajectory.')\n\n def get_lattice_vectors(self, frame):\n lattice_vectors = None\n\n if frame == 0:\n lattice_vectors = self.out_parser.get('lattice_vectors')\n if lattice_vectors is None:\n # get it from input\n cell = self.inp_parser.get('FORCE_EVAL/SUBSYS/CELL')\n # is this 
the unit for cell? how about for angles\n units = resolve_unit(\n self.inp_parser.get('FORCE_EVAL/SUBSYS/COORD/UNIT', 'angstrom'))\n if cell is None:\n return\n\n if 'A' in cell and 'B' in cell and 'C' in cell:\n lattice_vectors = np.array([\n cell.get(c).split() for c in ('A', 'B', 'C')], dtype=float) * units\n elif 'ABC' in cell:\n abc = (np.array(\n cell.get('ABC').split(), dtype=float) * units).to('angstrom').magnitude\n angles = np.array(cell.get('ALPHA_BETA_GAMMA', '90. 90. 90.').split(), dtype=float)\n lattice_vectors = ase.geometry.cellpar_to_cell(np.hstack((abc, angles))) * ureg.angstrom\n\n else:\n units = resolve_unit(\n self.inp_parser.get('FORCE_EVAL/SUBSYS/COORD/UNIT', 'angstrom'))\n lattice_vectors = np.array(lattice_vectors[:3], dtype=np.float64) * units\n\n if lattice_vectors is not None:\n return lattice_vectors\n\n if self.cell_parser.mainfile is None:\n frequency, filename = self.settings['md'].get('simulation_cell', '0 none').split()\n frequency = int(frequency)\n if frequency == 0:\n # TODO test this I cannot find a sample output cell filee\n filename = self.inp_parser.get('MOTION/PRINT/CELL/FILENAME', '').strip()\n frequency = 1\n\n if filename:\n self.cell_parser.mainfile = os.path.join(self.maindir, filename)\n self.cell_parser.units = resolve_unit(\n self.inp_parser.get('MOTION/PRINT/TRAJECTORY/UNIT', 'angstrom'))\n self.cell_parser._frequency = frequency\n else:\n if self.sampling_method == 'molecular_dynamics':\n # check that this is not an NPT\n ensemble_type = self.get_ensemble_type(frame)\n if ensemble_type[:3] == 'NPT':\n return\n return self.get_lattice_vectors(0)\n\n if self.get_ensemble_type(frame) == 'REFTRAJ':\n frame -= 1\n\n if frame % self.cell_parser._frequency != 0 or frame < 0:\n return\n\n try:\n return self.cell_parser.data[frame // self.cell_parser._frequency] * resolve_unit(self.cell_parser.units)\n except Exception:\n self.logger.error('Error reading lattice vectors.')\n\n def get_md_output(self, frame):\n if self.energy_parser.mainfile is None:\n frequency, filename = self.settings['md'].get('energies', '0, none').split()\n frequency = int(frequency)\n if frequency == 0:\n return dict()\n self.energy_parser.mainfile = os.path.join(self.maindir, filename)\n self.energy_parser._frequency = frequency\n\n if self.get_ensemble_type(frame) == 'REFTRAJ':\n frame -= 1\n\n if frame % self.energy_parser._frequency != 0 or frame < 0:\n return dict()\n\n try:\n data = self.energy_parser.data[frame // self.energy_parser._frequency]\n return dict(\n time=data[1],\n kinetic_energy_instantaneous=data[2] * ureg.hartree,\n temperature_instantaneous=data[3],\n potential_energy_instantaneous=data[4] * ureg.hartree,\n conserved_quantity=data[5] * ureg.hartree,\n cpu_time_instantaneous=data[6])\n\n except Exception:\n self.logger.error('Error reading MD energies.')\n\n def get_forces(self, frame):\n filename = self.inp_parser.get('FORCE_EVAL/PRINT/FORCES/FILENAME', '').strip()\n filename = self._normalize_filename(filename)\n filename = '%s-1_%d.xyz' % (filename, frame)\n self.force_parser.mainfile = os.path.join(self.maindir, filename)\n return self.force_parser.get('atom_forces')\n\n def get_xc_functionals(self):\n functionals = self.inp_parser.get('FORCE_EVAL/DFT/XC/XC_FUNCTIONAL/VALUE')\n if functionals is None or functionals == 'NO_SHORTCUT':\n functional_values = self.inp_parser.get('FORCE_EVAL/DFT/XC/XC_FUNCTIONAL', {})\n functionals = []\n for name, attrib in functional_values.items():\n name = name.upper()\n if name == 'VALUE':\n continue\n # get xc_func 
from mapping then apply read attributes\n # if func is not in mapping, create it\n values = self._xc_functional_map.get(name, [XCFunctionalProperty(name)])\n for n, value in enumerate(values):\n weight = attrib.get('SCALE_X', None) if n == 0 else attrib.get('SCALE_C', None)\n value._data.update({'weight': weight})\n functionals.extend(values)\n else:\n names = [functionals] if not isinstance(functionals, list) else functionals\n functionals = []\n for name in names:\n name = name.upper()\n if name not in self._xc_functional_map:\n self.logger.error('Cannot resolve xc functional')\n continue\n functionals.extend(self._xc_functional_map.get(name))\n\n return functionals\n\n def parse_scc(self, source):\n if source is None:\n return\n\n sec_scc = self.archive.run[-1].m_create(Calculation)\n\n sec_energy = sec_scc.m_create(Energy)\n if source.get('energy_total') is not None:\n sec_energy.total = EnergyEntry(value=source.get('energy_total'))\n if source.get('electronic_kinetic_energy') is not None:\n sec_energy.kinetic_electronic = EnergyEntry(value=source.get('electronic_kinetic_energy')[-1])\n if source.get('exchange_correlation_energy') is not None:\n sec_energy.xc = EnergyEntry(value=source.get('exchange_correlation_energy')[-1])\n\n if source.get('stress_tensor') is not None:\n sec_stress = sec_scc.m_create(Stress)\n sec_stress.total = StressEntry(value=source.get('stress_tensor'))\n\n # self consistency\n for iteration in source.get('iteration', []):\n sec_scf = sec_scc.m_create(ScfIteration)\n sec_scf_energy = sec_scf.m_create(Energy)\n for key, val in iteration.items():\n if val is not None:\n if key == 'energy_change':\n sec_scf_energy.change = val\n elif key.startswith('energy_'):\n sec_scf_energy.m_add_sub_section(getattr(\n Energy, key.replace('energy_', '')), EnergyEntry(value=val))\n else:\n setattr(sec_scf, key, val)\n\n atom_forces = source.get('atom_forces', self.get_forces(source._frame))\n if atom_forces is not None:\n atom_forces = np.array(atom_forces) * ureg.hartree / ureg.bohr\n sec_forces = sec_scc.m_create(Forces)\n sec_forces.total = ForcesEntry(value=atom_forces)\n\n # TODO add dos\n return sec_scc\n\n def parse_system(self, trajectory):\n\n trajectory = 0 if trajectory is None else trajectory\n\n if isinstance(trajectory, int):\n frame = trajectory\n trajectory = self.get_trajectory(frame)\n if trajectory is not None:\n trajectory._frame = frame\n\n if trajectory is None:\n return\n\n sec_system = self.archive.run[-1].m_create(System)\n sec_atoms = sec_system.m_create(Atoms)\n\n lattice_vectors = self.get_lattice_vectors(trajectory._frame)\n\n if trajectory.positions is not None:\n sec_atoms.positions = trajectory.positions\n elif trajectory.scaled_positions is not None and lattice_vectors is not None:\n sec_atoms.positions = np.dot(trajectory.scaled_positions, lattice_vectors)\n\n labels = trajectory.labels if trajectory.labels is not None else self.out_parser.get(\n self._calculation_type).get('atomic_coordinates')\n if labels is not None:\n sec_atoms.labels = labels\n\n if lattice_vectors is not None:\n sec_atoms.lattice_vectors = lattice_vectors\n periodic = self.inp_parser.get('FORCE_EVAL/SUBSYS/CELL/PERIODIC', 'xyz').lower()\n sec_atoms.periodic = [v in periodic for v in ('x', 'y', 'z')]\n\n # TODO test this I cannot find an example\n # velocities\n if self.sampling_method == 'molecular_dynamics':\n velocities = self.get_velocities(trajectory._frame)\n if velocities is not None:\n sec_atoms.velocities = velocities\n\n return sec_system\n\n def 
parse_configurations_quickstep(self):\n sec_run = self.archive.run[-1]\n quickstep = self.out_parser.get(self._calculation_type)\n\n # quickstep extension to scc quantities\n def parse_quickstep_calculation(source):\n if source is None:\n return\n sec_quickstep_calc = sec_run.m_create(x_cp2k_section_quickstep_calculation)\n for key in ['energy_total', 'atom_forces']:\n val = source.get(key)\n if val is not None:\n setattr(sec_quickstep_calc, 'x_cp2k_%s' % key, val)\n sec_quickstep_calc.x_cp2k_quickstep_converged = source.get('converged') is not None\n\n if source.get('electronic_kinetic_energy') is not None:\n sec_quickstep_calc.x_cp2k_electronic_kinetic_energy = source.get('electronic_kinetic_energy')[-1]\n\n for iteration in source.get('iteration', []):\n sec_scf = sec_quickstep_calc.m_create(x_cp2k_section_scf_iteration)\n for key, val in iteration.items():\n if val is not None:\n setattr(sec_scf, 'x_cp2k_%s' % key, val)\n\n if source.stress_tensor is not None:\n sec_stress = sec_quickstep_calc.m_create(x_cp2k_section_stress_tensor)\n if source.stress_tensor_one_third_of_trace is not None:\n val = source.stress_tensor_one_third_of_trace\n sec_stress.stress_tensor_one_third_of_trace = val.to('pascal').magnitude\n if source.stress_tensor_determinant is not None:\n sec_stress.stress_tensor_determinant = val.to('pascal**3').magnitude\n if source.stress_eigenvalues_eigenvectors is not None:\n val = source.stress_eigenvalues_eigenvectors\n sec_stress.x_cp2k_stress_tensor_eigenvalues = val[0].to('pascal').magnitude\n sec_stress.x_cp2k_stress_tensor_eigenvectors = val[1]\n\n def parse_md_step(source):\n # we put md output in scc, originally in frame sequence\n # TODO put in workflow\n md_output = self.get_md_output(source._frame)\n md_output = md_output if md_output else source\n sec_scc = sec_run.calculation[-1]\n sec_md_step = sec_scc.m_create(x_cp2k_section_md_step)\n sec_thermo = sec_scc.m_create(Thermodynamics)\n\n with_average = [\n 'cpu_time', 'energy_drift', 'potential_energy', 'kinetic_energy',\n 'temperature', 'pressure', 'barostat_temperature', 'volume']\n for key, val in md_output.items():\n if val is None:\n continue\n name = 'x_cp2k_md_%s' % key\n\n if 'energy' in key or 'conserved' in key:\n val = val.to('joule').magnitude\n elif 'pressure' in key:\n val - val.to('pascal').magnitude\n elif 'volume' in key:\n val = val.to('m**3').magnitude\n\n if key in with_average:\n setattr(sec_md_step, '%s_instantaneous' % name, val[0])\n name = '%s_average' % name\n val = val[1]\n setattr(sec_thermo, key.replace('_instantaneous', ''), val)\n setattr(sec_md_step, name, val)\n\n def parse_calculations(calculations):\n for n, calculation in enumerate(calculations):\n self_consistent = calculation.get('self_consistent', [])\n self_consistent = [self_consistent] if not isinstance(self_consistent, list) else self_consistent\n # there may be several wave function optimizations in a calculation\n for scf in self_consistent:\n parse_quickstep_calculation(scf)\n\n # write only the last one to scc\n scf = self_consistent[-1] if self_consistent else calculation\n scf._frame = n\n sec_scc = self.parse_scc(scf)\n if calculation.get('ensemble_type') is not None:\n calculation._frame = n\n parse_md_step(calculation)\n\n if n == 0:\n atomic_coord = quickstep.get('atomic_coordinates')\n if atomic_coord is not None:\n atomic_coord._frame = 0\n sec_system = self.parse_system(atomic_coord)\n else:\n sec_system = self.parse_system(n)\n\n if sec_system is not None:\n 
sec_scc.single_configuration_calculation_to_system_ref = sec_system\n\n sec_scc.single_configuration_to_calculation_method_ref = sec_run.method[-1]\n\n single_point = quickstep.get('single_point')\n if single_point is not None:\n parse_calculations([single_point])\n\n geometry_optimization = quickstep.get('geometry_optimization')\n if geometry_optimization is not None:\n # initial self consistent\n optimization_steps = [geometry_optimization]\n optimization_steps.extend(geometry_optimization.get('optimization_step', []))\n parse_calculations(optimization_steps)\n\n molecular_dynamics = quickstep.get('molecular_dynamics')\n if molecular_dynamics is not None:\n md_steps = [molecular_dynamics]\n md_steps.extend(molecular_dynamics.get('md_step', []))\n parse_calculations(md_steps)\n\n def parse_method_quickstep(self):\n sec_run = self.archive.run[-1]\n sec_method = sec_run.m_create(Method)\n\n sec_basis = sec_method.m_create(BasisSet)\n sec_basis.kind = 'wavefunction'\n sec_basis.type = 'gaussians'\n\n planewave_cutoff = self.settings.get('qs', {}).get('planewave_cutoff', None)\n if planewave_cutoff is not None:\n sec_basis_cell = sec_basis.m_create(BasisSetCellDependent)\n sec_basis_cell.planewave_cutoff = planewave_cutoff * ureg.hartree\n\n atoms = self.out_parser.get(\n self._calculation_type, {}).get('atomic_kind_information', {}).get('atom', [])\n for atom in atoms:\n basis_set = atom.get('kind_basis_set_name', None)\n if basis_set is not None:\n sec_basis_atom = sec_basis.m_create(BasisSetAtomCentered)\n sec_basis_atom.atom_number = self.get_atomic_number(atom.kind_label)\n sec_basis_atom.name = basis_set\n\n quickstep = self.out_parser.get(self._calculation_type)\n\n sec_dft = sec_method.m_create(DFT)\n # electronic structure method\n # TODO include methods\n if quickstep.get('dft') is not None:\n sec_method.electronic = Electronic(method='DFT')\n elif quickstep.get('dft_u') is not None:\n sec_method.electronic = Electronic(method='DFT+U')\n elif quickstep.get('mp2') is not None:\n sec_method.electronic = Electronic(method='MP2')\n elif quickstep.get('rpa') is not None:\n sec_method.electronic = Electronic(method='RPA')\n\n # xc functionals\n sec_xc_functional = sec_dft.m_create(XCFunctional)\n for functional in self.get_xc_functionals():\n if '_X_' in functional.name:\n sec_xc_functional.exchange.append(Functional(\n name=functional.name, weight=functional.weight))\n elif '_C_' in functional.name:\n sec_xc_functional.correlation.append(Functional(\n name=functional.name, weight=functional.weight))\n else:\n sec_xc_functional.contributions.append(Functional(\n name=functional.name, weight=functional.weight))\n\n # van der Waals settings\n # TODO test this no example, add parameter, put in main metainfo\n vdw = self.settings['vdw']\n if vdw:\n sec_vdw = sec_method.m_create(x_cp2k_section_vdw_settings)\n for key, val in vdw.items():\n if val == '':\n setattr(sec_vdw, 'x_cp2k_vdw_name', key)\n vdw_name = self._vdw_map.get(key, None)\n if vdw_name is not None:\n sec_method.van_der_Waals_method = vdw_name\n else:\n setattr(sec_vdw, 'x_cp2k_vdw_%s' % key, val)\n\n stress_method = self.inp_parser.get('FORCE_EVAL/STRESS_TENSOR')\n if stress_method is not None:\n sec_method.stress_tensor_method = stress_method.replace('_', ' ').title()\n\n sec_quickstep_settings = sec_method.m_create(x_cp2k_section_quickstep_settings)\n if self.settings['dft']:\n for key, val in self.settings['dft'].items():\n section = sec_dft\n if key == 'self_interaction_correction_method':\n val = 
self._self_interaction_map.get(val, None)\n elif key == 'spin_restriction':\n section = sec_quickstep_settings\n key = 'x_cp2k_%s' % key\n if val is None:\n continue\n setattr(section, key, val)\n if self.settings['qs']:\n for key, val in self.settings['qs'].items():\n setattr(sec_quickstep_settings, 'x_cp2k_%s' % key, val)\n\n atomic_kind_info = quickstep.get('atomic_kind_information', None)\n if atomic_kind_info is not None:\n sec_atom_kinds = sec_quickstep_settings.m_create(x_cp2k_section_atomic_kinds)\n for atom in atomic_kind_info.get('atom', []):\n # why necessary to make a separate section\n sec_atom_kind = sec_atom_kinds.m_create(x_cp2k_section_atomic_kind)\n sec_kind_basis_set = sec_atom_kind.m_create(x_cp2k_section_kind_basis_set)\n for key, val in atom.items():\n if val is None:\n continue\n if key in ['kind_label', 'kind_number_of_atoms']:\n setattr(sec_atom_kind, 'x_cp2k_%s' % key, str(val))\n else:\n setattr(sec_kind_basis_set, 'x_cp2k_%s' % key, val)\n\n sec_method_atom_kind = sec_method.m_create(AtomParameters)\n sec_method_atom_kind.label = atom.kind_label\n sec_method_atom_kind.atom_number = self.get_atomic_number(atom.kind_label)\n\n total_maximum_numbers = quickstep.get('total_maximum_numbers', None)\n if total_maximum_numbers is not None:\n sec_total = sec_quickstep_settings.m_create(x_cp2k_section_total_numbers)\n sec_maximum = sec_quickstep_settings.m_create(x_cp2k_section_maximum_angular_momentum)\n for key, val in total_maximum_numbers.items():\n if val is None:\n continue\n if key in ['orbital_basis_functions', 'local_part_of_gth_pseudopotential', 'non_local_part_of_gth_pseudopotential']:\n setattr(sec_maximum, 'x_cp2k_%s' % key, val)\n else:\n setattr(sec_total, 'x_cp2k_%s' % key, val)\n\n sec_scf = sec_method.m_create(Scf)\n scf_parameters = quickstep.get('scf_parameters', None)\n if scf_parameters is not None:\n for key, val in scf_parameters.items():\n if val is None:\n continue\n setattr(sec_scf, key, val)\n\n @property\n def sampling_method(self):\n if self._method is None:\n quickstep = self.out_parser.get(self._calculation_type, {})\n for method in ['single_point', 'geometry_optimization', 'molecular_dynamics']:\n if quickstep.get(method) is not None:\n self._method = method\n return self._method\n\n def parse_workflow(self):\n # TODO add vdW\n sec_workflow = self.archive.m_create(Workflow)\n sec_workflow.type = self.sampling_method\n\n if self.sampling_method == 'geometry_optimization':\n sec_geometry_optimization = sec_workflow.m_create(GeometryOptimization)\n optimization = self.out_parser.get(self._calculation_type).geometry_optimization\n if optimization.method is not None:\n method = self._optimization_method_map.get(optimization.method, '')\n if not method:\n self.logger.error('Cannot resolve optimization method.')\n sec_geometry_optimization.method = method\n sec_geometry_opt = sec_workflow.m_create(x_cp2k_section_geometry_optimization)\n for step in optimization.get('optimization_step', []):\n information = step.information\n if information is None:\n continue\n sec_geometry_opt_step = sec_geometry_opt.m_create(x_cp2k_section_geometry_optimization_step)\n for key, val in information.items():\n if val is None:\n continue\n\n name = self._metainfo_name_map.get(key, key)\n if name.startswith('energy') and isinstance(val, float):\n val = (val * ureg.hartree).to('joule').magnitude\n elif 'step_size' in name and isinstance(val, float):\n val = (val * ureg.bohr).to('m').magnitude\n elif 'gradient' in name and isinstance(val, float):\n val = (val * 
ureg.hartree / ureg.bohr).to('joule/m').magnitude\n elif isinstance(val, str):\n val = val.strip()\n\n setattr(sec_geometry_opt_step, 'x_cp2k_optimization_%s' % name, val)\n\n if sec_geometry_opt.x_cp2k_section_geometry_optimization_step:\n geometry_change = sec_geometry_opt_step.x_cp2k_optimization_step_size_convergence_limit\n if geometry_change is not None:\n sec_geometry_optimization.input_displacement_maximum_tolerance = geometry_change\n threshold_force = sec_geometry_opt_step.x_cp2k_optimization_gradient_convergence_limit\n if threshold_force is not None:\n sec_geometry_optimization.input_force_maximum_tolerance = threshold_force\n\n elif self.sampling_method == 'molecular_dynamics':\n sec_md = sec_workflow.m_create(MolecularDynamics)\n ensemble_type = self._ensemble_map.get(self.get_ensemble_type(0), None)\n if ensemble_type is not None:\n sec_md.ensemble_type = ensemble_type\n\n sec_md_settings = sec_workflow.m_create(x_cp2k_section_md_settings)\n for key, val in self.settings['md'].items():\n if val is None or key == 'file_type':\n continue\n if key in ['coordinates', 'simulation_cell', 'velocities', 'energies', 'dump']:\n val = val.split()\n setattr(sec_md_settings, 'x_cp2k_md_%s_print_frequency' % key, int(val[0]))\n setattr(sec_md_settings, 'x_cp2k_md_%s_filename' % key, val[1])\n elif key == 'print_frequency':\n setattr(sec_md_settings, 'x_cp2k_md_%s' % key, int(val.split()[0]))\n else:\n setattr(sec_md_settings, 'x_cp2k_md_%s' % key, val)\n\n def parse_input(self):\n # TODO include extended input\n input_filename = self.settings['cp2k'].get('input_filename', None)\n if input_filename is None:\n return\n\n definitions = dict(m_env.all_definitions_by_name)\n\n def resolve_definition(name):\n return definitions.get(name, [None])[0]\n\n def override_keyword(name):\n # override keys to be compatible with metainfo name\n # TODO change metainfo name\n if name.endswith('_VALUE'):\n return name.replace('VALUE', 'SECTION_PARAMETERS')\n elif name.endswith('KIND_RI_AUX_BASIS'):\n return name.replace('BASIS', 'BASIS_SET')\n return name\n\n def parse(name, data, section):\n if isinstance(data, InpValue):\n sec_def = resolve_definition(name)\n if sec_def is not None:\n sub_section = section.m_create(sec_def.section_cls)\n for key, val in data.items():\n sec_name = '%s_%s' % (name, key)\n parse(sec_name, val, sub_section)\n\n elif isinstance(data, list) and data:\n for val in data:\n parse(name, val, section)\n\n else:\n name = name.replace('_section', '')\n name = override_keyword(name)\n quantity_def = resolve_definition(name)\n if quantity_def is not None:\n setattr(section, name, quantity_def.type(data))\n\n self.inp_parser.mainfile = os.path.join(self.maindir, input_filename)\n if self.inp_parser.tree is None:\n return\n\n parse('x_cp2k_section_input', self.inp_parser.tree, self.archive.run[-1])\n\n def parse(self, filepath, archive, logger):\n self.filepath = os.path.abspath(filepath)\n self.archive = archive\n self.maindir = os.path.dirname(self.filepath)\n self.logger = logger if logger is not None else logging.getLogger(__name__)\n\n self.init_parser()\n\n # identify calculation type, TODO add more\n calculation_types = ['quickstep', 'qs_dftb']\n for calculation_type in calculation_types:\n if self.out_parser.get(calculation_type) is not None:\n self._calculation_type = calculation_type\n break\n\n sec_run = self.archive.m_create(Run)\n version = self.settings['cp2k']['program_version']\n host = self.settings['cp2k']['program_compilation_host']\n sec_run.program = Program(\n 
name='CP2K', version=version[0] if isinstance(version, list) else version,\n compilation_host=host[0] if isinstance(host, list) else host)\n\n if self.settings['dbcsr']:\n sec_dbcsr = sec_run.m_create(x_cp2k_section_dbcsr)\n for key, val in self.settings['dbcsr'].items():\n setattr(sec_dbcsr, 'x_cp2k_%s' % key, val)\n\n if self.settings['program']:\n sec_startinformation = sec_run.m_create(x_cp2k_section_startinformation)\n sec_endinformation = sec_run.m_create(x_cp2k_section_end_information)\n section = sec_startinformation\n for key, val in self.settings['program'].items():\n if key == 'id' and isinstance(val, list):\n sec_endinformation.x_cp2k_end_id = val[1]\n key, val = 'start_id', val[0]\n section = sec_endinformation if key.startswith('end') else sec_startinformation\n val = val[0] if isinstance(val, list) else val\n setattr(section, 'x_cp2k_%s' % key, val)\n\n if self.settings['cp2k']:\n sec_program_information = sec_run.m_create(x_cp2k_section_program_information)\n for key, val in self.settings['cp2k'].items():\n if key == 'svn_revision':\n try:\n val = int(val.strip('svn:'))\n except Exception:\n continue\n setattr(sec_program_information, 'x_cp2k_%s' % key, val)\n\n if self.settings['global']:\n sec_global_settings = sec_run.m_create(x_cp2k_section_global_settings)\n for key, val in self.settings['global'].items():\n setattr(sec_global_settings, 'x_cp2k_%s' % key, val)\n\n restart = self.out_parser.get('restart')\n if restart is not None:\n sec_restart = sec_run.m_create(x_cp2k_section_restart_information)\n sec_restart.x_cp2k_restart_file_name = restart.get('filename')\n sec_restart.x_cp2k_restarted_quantity_name = ' '.join(restart.get('quantities'))\n\n self.parse_input()\n\n if self._calculation_type in ['quickstep', 'qs_dftb']:\n self.parse_method_quickstep()\n self.parse_configurations_quickstep()\n\n self.parse_workflow()\n" ]
[ [ "numpy.hstack", "numpy.array", "numpy.dot" ] ]
ronitd/gtn_applications
[ "98d85643ef6c3ccf95fc122158dd4e8cc307d92e" ]
[ "datasets/audioset.py" ]
[ "\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\nThis source code is licensed under the MIT license found in the\nLICENSE file in the root directory of this source tree.\n\"\"\"\n\nimport itertools\nimport json\nimport os\nimport re\nimport torch\nimport torchaudio\nimport torchvision\n\n\ndef log_normalize(x):\n x.add_(1e-6).log_()\n mean = x.mean()\n std = x.std()\n return x.sub_(mean).div_(std + 1e-6)\n\n\nclass Dataset(torch.utils.data.Dataset):\n def __init__(self, data_path, preprocessor, split, splits, augmentation=None, sample_rate=16000):\n data = []\n for sp in splits[split]:\n data.extend(load_data_split(data_path, sp, preprocessor.wordsep))\n\n self.preprocessor = preprocessor\n\n # setup transforms:\n self.transforms = [\n torchaudio.transforms.MelSpectrogram(\n sample_rate=sample_rate,\n n_fft=sample_rate * 25 // 1000,\n n_mels=preprocessor.num_features,\n hop_length=sample_rate * 10 // 1000,\n ),\n torchvision.transforms.Lambda(log_normalize),\n ]\n if augmentation is not None:\n self.transforms.extend(augmentation)\n self.transforms = torchvision.transforms.Compose(self.transforms)\n\n # Load each audio file:\n audio = [example[\"audio\"] for example in data]\n text = [example[\"text\"] for example in data]\n duration = [example[\"duration\"] for example in data]\n self.dataset = list(zip(audio, text, duration))\n\n def sample_sizes(self):\n \"\"\"\n Returns a list of tuples containing the input size\n (time, 1) and the output length for each sample.\n \"\"\"\n return [((duration, 1), len(text)) for _, text, duration in self.dataset]\n\n def __getitem__(self, index):\n audio_file, text, _ = self.dataset[index]\n audio = torchaudio.load(audio_file)\n inputs = self.transforms(audio[0])\n outputs = self.preprocessor.to_index(text)\n return inputs, outputs\n\n def __len__(self):\n return len(self.dataset)\n\n\nclass Preprocessor:\n \"\"\"\n A preprocessor for an audio dataset.\n Args:\n data_path (str) : Path to the top level data directory.\n num_features (int) : Number of audio features in transform.\n tokens_path (str) (optional) : The path to the list of model output\n tokens. If not provided the token set is built dynamically from\n the graphemes of the tokenized text. NB: This argument does not\n affect the tokenization of the text, only the number of output\n classes.\n lexicon_path (str) (optional) : A mapping of words to tokens. If\n provided the preprocessor will split the text into words and\n map them to the corresponding token. 
If not provided the text\n will be tokenized at the grapheme level.\n \"\"\"\n\n def __init__(\n self,\n data_path,\n num_features,\n splits,\n tokens_path=None,\n lexicon_path=None,\n use_words=False,\n prepend_wordsep=False,\n ):\n if use_words:\n raise ValueError(\"use_words not supported for audio dataset\")\n self.wordsep = \"▁\"\n self._prepend_wordsep = prepend_wordsep\n self.num_features = num_features\n\n data = []\n for sp in splits[\"train\"]:\n data.extend(load_data_split(data_path, sp, self.wordsep))\n\n # Load the set of graphemes:\n graphemes = set()\n for ex in data:\n graphemes.update(ex[\"text\"])\n self.graphemes = sorted(graphemes)\n\n # Build the token-to-index and index-to-token maps:\n if tokens_path is not None:\n with open(tokens_path, \"r\") as fid:\n self.tokens = [l.strip() for l in fid]\n else:\n # Default to use graphemes if no tokens are provided\n self.tokens = self.graphemes\n\n if lexicon_path is not None:\n with open(lexicon_path, \"r\") as fid:\n lexicon = (l.strip().split() for l in fid)\n lexicon = {l[0]: l[1:] for l in lexicon}\n self.lexicon = lexicon\n else:\n self.lexicon = None\n\n self.graphemes_to_index = {t: i for i, t in enumerate(self.graphemes)}\n self.tokens_to_index = {t: i for i, t in enumerate(self.tokens)}\n\n @property\n def num_tokens(self):\n return len(self.tokens)\n\n def to_index(self, line):\n tok_to_idx = self.graphemes_to_index\n if self.lexicon is not None:\n if len(line) > 0:\n # If the word is not found in the lexicon, fall back to letters.\n line = [\n t\n for w in line.split(self.wordsep)\n for t in self.lexicon.get(w, self.wordsep + w)\n ]\n tok_to_idx = self.tokens_to_index\n # In some cases we require the target to start with self.wordsep, for\n # example when learning word piece decompositions.\n if self._prepend_wordsep:\n line = itertools.chain([self.wordsep], line)\n return torch.LongTensor([tok_to_idx[t] for t in line])\n\n def to_text(self, indices):\n # Roughly the inverse of `to_index`\n encoding = self.graphemes\n if self.lexicon is not None:\n encoding = self.tokens\n return self._post_process(encoding[i] for i in indices)\n\n def tokens_to_text(self, indices):\n return self._post_process(self.tokens[i] for i in indices)\n\n def _post_process(self, indices):\n # ignore preceding and trailling spaces\n return \"\".join(indices).strip(self.wordsep)\n\n\ndef load_data_split(data_path, split, wordsep):\n json_file = os.path.join(data_path, f\"{split}.json\")\n with open(json_file, \"r\") as fid:\n examples = [json.loads(l) for l in fid]\n for ex in examples:\n text = ex[\"text\"]\n # swap word sep from | to self.wordsep\n text = re.sub(r\"\\s\", wordsep, text).strip(wordsep)\n ex[\"text\"] = text\n return examples\n" ]
[ [ "torch.LongTensor" ] ]
varunshiva/t20-mining
[ "3d1b44e3cccd77d7231c215735668b53f7799cd3" ]
[ "src/data/test/test_make_dataset.py" ]
[ "#!/usr/bin/env python\n\nfrom src.data import make_dataset\nfrom pathlib import Path\nimport pandas as pd\n\ndef test_cricsheet_to_dataframe():\n \"\"\"\n docstring\n \"\"\"\n \n test_path = Path(__file__).parent.resolve()\n test_file = test_path / 'data' / 'test.csv'\n make_dataset.cricsheet_to_dataframe(test_file, test_path)\n \n out_file = test_path / 'test.csv'\n \n assert out_file.exists() == True\n df = pd.read_csv(out_file)\n \n assert len(df.columns) == 31\n assert len(df['match_id']) != 0\n\nif __name__ == '__main__':\n test_cricsheet_to_dataframe()\n" ]
[ [ "pandas.read_csv" ] ]
Larofeticus/balsam
[ "6d646ebe87a21bca5934bbd0b5fdc68a1baea09b" ]
[ "balsam/core/models.py" ]
[ "from collections import defaultdict, Counter\nfrom itertools import combinations\nimport os\nimport json\nimport logging\nimport re\nimport numpy as np\nimport socket\nimport sys\nimport threading\nfrom datetime import datetime, timedelta\nfrom django.utils import timezone\nimport uuid\nfrom getpass import getuser\nfrom pprint import pformat\n\nfrom balsam import setup\nsetup()\n\nfrom django.core.exceptions import ValidationError,ObjectDoesNotExist\nfrom django.db.utils import OperationalError\nfrom django.conf import settings\nfrom django.db import models, transaction\nfrom django.db.models import Value as V\nfrom django.db.models import Q\nfrom django.db import connection\nfrom django.db.models.functions import Concat\nfrom django.contrib.postgres.fields import JSONField\n\nlogger = logging.getLogger(__name__)\n\nclass InvalidStateError(ValidationError): pass\nclass InvalidParentsError(ValidationError): pass\nclass NoApplication(Exception): pass\n\nTIME_FMT = '%m-%d-%Y %H:%M:%S.%f'\n\nSTATES = '''\nCREATED\nAWAITING_PARENTS\nREADY\n\nSTAGED_IN\nPREPROCESSED\n\nRUNNING\nRUN_DONE\n\nPOSTPROCESSED\nJOB_FINISHED\n\nRUN_TIMEOUT\nRUN_ERROR\nRESTART_READY\n\nFAILED\nUSER_KILLED\n'''.split()\n\nACTIVE_STATES = '''\nRUNNING\n'''.split()\n\nPROCESSABLE_STATES = '''\nCREATED\nAWAITING_PARENTS\nREADY\nSTAGED_IN\nRUN_DONE\nPOSTPROCESSED\nRUN_TIMEOUT\nRUN_ERROR\n'''.split()\n\nRUNNABLE_STATES = '''\nPREPROCESSED\nRESTART_READY\n'''.split()\n\nEND_STATES = '''\nJOB_FINISHED\nFAILED\nUSER_KILLED\n'''.split()\n \nSTATE_TIME_PATTERN = re.compile(r'''\n^ # start of line\n\\[ # opening square bracket\n(\\d+-\\d+-\\d\\d\\d\\d # date MM-DD-YYYY\n\\s+ # one or more space\n\\d+:\\d+:\\d+\\.\\d+) # time HH:MM:SS.MICROSEC\n\\s+ # one or more space\n(\\w+) # state\n\\s* # 0 or more space\n\\] # closing square bracket\n''', re.VERBOSE | re.MULTILINE)\n\n_app_cache = {}\n\ndef process_job_times(qs=None):\n '''Returns {state : [elapsed_seconds_for_each_job_to_reach_state]}\n Useful for tracking job performance/throughput'''\n\n if qs is None: qs = BalsamJob.objects\n data = qs.values_list('state_history', flat=True)\n data = '\\n'.join(data)\n matches = STATE_TIME_PATTERN.finditer(data)\n result = ( m.groups() for m in matches )\n result = ( (state, datetime.strptime(time_str, TIME_FMT))\n for (time_str, state) in result )\n time_data = defaultdict(list)\n for state, time in result:\n time_data[state].append(time)\n return time_data\n\ndef utilization_report(time_data=None):\n if time_data is None:\n qs = BalsamJob.objects\n time_data = process_job_times(qs=qs)\n start_times = time_data.get('RUNNING', [])\n end_times = []\n for state in ['RUN_DONE', 'RUN_ERROR', 'RUN_TIMEOUT']:\n end_times.extend(time_data.get(state, []))\n\n startCounts = Counter(start_times)\n endCounts = Counter(end_times)\n for t in endCounts: endCounts[t] *= -1\n merged = sorted(list(startCounts.items()) + list(endCounts.items()),\n key = lambda x: x[0])\n counts = np.fromiter((x[1] for x in merged), dtype=np.int)\n\n times = [x[0] for x in merged]\n running = np.cumsum(counts)\n return (times, running)\n\ndef throughput_report(time_data=None):\n if time_data is None:\n qs = BalsamJob.objects\n time_data = process_job_times(qs=qs)\n done_times = time_data.get('RUN_DONE', [])\n doneCounts = sorted(list(Counter(done_times).items()),key=lambda x:x[0])\n times = [x[0] for x in doneCounts]\n counts = np.cumsum(np.fromiter((x[1] for x in doneCounts), dtype=np.int))\n return (times, counts)\n\ndef error_report(time_data=None):\n if time_data is 
None:\n qs = BalsamJob.objects\n time_data = process_job_times(qs=qs)\n err_times = time_data.get('RUN_ERROR', [])\n if not err_times: return\n time0 = min(err_times)\n err_seconds = np.array([(t-time0).total_seconds() for t in err_times])\n hmin, hmax = 0, max(err_seconds)\n bins = np.arange(hmin, hmax+60, 60)\n times = [time0 + timedelta(seconds=s) for s in bins]\n hist, _ = np.histogram(err_seconds, bins=bins, density=False)\n assert len(times) == len(hist) + 1\n return times, hist\n\ndef assert_disjoint():\n groups = [ACTIVE_STATES, PROCESSABLE_STATES, RUNNABLE_STATES, END_STATES]\n joined = [state for g in groups for state in g]\n assert len(joined) == len(set(joined)) == len(STATES)\n assert set(joined) == set(STATES) \n for g1,g2 in combinations(groups, 2):\n s1,s2 = set(g1), set(g2)\n assert s1.intersection(s2) == set()\nassert_disjoint()\n\ndef validate_state(value):\n if value not in STATES:\n raise InvalidStateError(f\"{value} is not a valid state in balsam.models\")\n\ndef get_time_string():\n return timezone.now().strftime(TIME_FMT)\n\ndef from_time_string(s):\n return datetime.strptime(s, TIME_FMT)\n\ndef history_line(state='CREATED', message=''):\n return f\"\\n[{get_time_string()} {state}] \".rjust(46) + message\n\ndef safe_select(queryset):\n qs = queryset.order_by('job_id').select_for_update()\n pks = None\n while pks is None:\n try:\n pks = list(qs.values_list('job_id', flat=True))\n except OperationalError as e:\n logger.error(f'select for update error: {e}\\nRetrying...')\n time.sleep(0.2)\n return BalsamJob.objects.filter(pk__in=pks)\n\n\nclass QueuedLaunch(models.Model):\n\n ADVISORY_LOCK_ID = hash(getuser())\n scheduler_id = models.IntegerField(default=0, db_index=True)\n project = models.TextField(default=settings.DEFAULT_PROJECT)\n queue = models.TextField(default='')\n nodes = models.IntegerField(default=0)\n wall_minutes = models.IntegerField(default=0)\n job_mode = models.TextField(default='')\n wf_filter = models.TextField(default='')\n command = models.TextField(default='')\n state = models.TextField(default='pending-submission')\n prescheduled_only = models.BooleanField(default=True) # if disabled, all BalsamJobs eligible to run\n from_balsam = models.BooleanField(default=True) # if disabled, all BalsamJobs eligible to run\n\n @classmethod\n def acquire_advisory(self):\n with connection.cursor() as cursor:\n command = f\"SELECT pg_try_advisory_lock({self.ADVISORY_LOCK_ID})\"\n cursor.execute(command)\n row = cursor.fetchone()\n row = ' '.join(map(str, row)).strip().lower()\n if 'true' in row:\n return True\n else:\n return False\n\n def __repr__(self):\n dat = {k:v for k,v in self.__dict__.items() if k not in ['_state']}\n return f'''Qlaunch {pformat(dat, indent=4)}'''\n\n def __str__(self):\n return repr(self)\n\n @classmethod\n @transaction.atomic\n def refresh_from_scheduler(cls):\n from balsam.service.schedulers import scheduler\n saved_jobs = list(cls.objects.all())\n saved_job_ids = [j.scheduler_id for j in saved_jobs]\n stats = scheduler.status_dict()\n for job_id, job in stats.items():\n if job_id not in saved_job_ids:\n j = cls(scheduler_id=job_id,\n project=job['project'],\n queue=job['queue'],\n nodes=job['nodes'],\n wall_minutes=job['wall_time_min'],\n state=job['state'],\n command=job['command'],\n from_balsam=False)\n j.save()\n logger.info(f'Detected new job: {j}')\n else:\n saved_job = saved_jobs[saved_job_ids.index(job_id)]\n if job['state'] != saved_job.state: \n logger.info(f'Updating batch job {job_id}: state {job[\"state\"]}')\n 
saved_job.state = job['state']\n saved_job.project = job['project']\n saved_job.queue = job['queue']\n saved_job.nodes = job['nodes']\n saved_job.wall_minutes = job['wall_time_min']\n saved_job.command = job['command']\n saved_job.save()\n delete_ids = [id for id in saved_job_ids if id not in stats.keys()]\n cls.objects.filter(scheduler_id__in=delete_ids).delete()\n if delete_ids:\n logger.info(f'Deleting Jobs {delete_ids} no longer in scheduler')\n\nclass JobSource(models.Manager):\n\n TICK_PERIOD = timedelta(minutes=1)\n EXPIRATION_PERIOD = timedelta(minutes=3)\n\n def __init__(self, workflow=None):\n super().__init__()\n self.workflow = workflow\n self._lock_str = None\n self._pid = None\n self.qLaunch = None\n self._checked_qLaunch = False\n\n def check_qLaunch(self):\n from balsam.service.schedulers import JobEnv\n sched_id = JobEnv.current_scheduler_id\n if sched_id is not None:\n try:\n self.qLaunch = QueuedLaunch.objects.get(scheduler_id=sched_id)\n except ObjectDoesNotExist:\n self.qLaunch = None\n if self.qLaunch is not None:\n if not (self.qLaunch.prescheduled_only and self.qLaunch.from_balsam):\n self.qLaunch = None\n if self.qLaunch is not None:\n logger.info(f'Filtering BalsamJobs scheduled for QueuedLaunch {self.qLaunch.pk} (Batch Job: {self.qLaunch.scheduler_id})')\n else:\n logger.info(f'Job source filtering for un-scheduled BalsamJobs')\n self._checked_qLaunch = True\n\n @property\n def lock_str(self):\n pid = os.getpid()\n if pid != self._pid:\n self._lock_str = f\"{socket.gethostname()}:{pid}\"\n self._pid = pid\n return self._lock_str\n\n @property\n def lockQuery(self):\n return Q(lock='') | Q(lock=self.lock_str)\n\n def get_queryset(self):\n if not self._checked_qLaunch: self.check_qLaunch()\n\n queryset = super().get_queryset()\n queryset = queryset.filter(self.lockQuery)\n if self.qLaunch is not None:\n queryset = queryset.filter(queued_launch_id=self.qLaunch.pk)\n else:\n queryset = queryset.filter(queued_launch__isnull=True)\n if self.workflow:\n queryset = queryset.filter(workflow__contains=self.workflow)\n return queryset\n\n def by_states(self, states):\n if isinstance(states, str):\n states = [states]\n elif isinstance(states, dict):\n states = states.keys()\n return self.get_queryset().filter(state__in=states)\n\n def get_runnable(self, *, max_nodes, remaining_minutes=None, mpi_only=False,\n serial_only=False, order_by=None):\n if mpi_only and serial_only:\n raise ValueError(\"arguments mpi_only and serial_only are mutually exclusive\")\n\n if max_nodes < 1:\n raise ValueError(\"Must be positive number of nodes\")\n\n if serial_only:\n assert max_nodes == 1\n\n runnable = self.by_states(RUNNABLE_STATES)\n runnable = runnable.filter(num_nodes__lte=max_nodes)\n\n if remaining_minutes is not None:\n try: remaining_minutes = int(remaining_minutes)\n except: remaining_minutes = None\n else: runnable = runnable.filter(wall_time_minutes__lte=remaining_minutes)\n\n if serial_only:\n runnable = runnable.filter(num_nodes=1, ranks_per_node=1)\n elif mpi_only:\n mpiquery = Q(num_nodes__gt=1) | Q(ranks_per_node__gt=1)\n runnable = runnable.filter(mpiquery)\n if order_by is not None:\n if isinstance(order_by, str):\n order_by = (order_by,)\n runnable = runnable.order_by(*order_by)\n return runnable\n\n @transaction.atomic\n def acquire(self, pk_list):\n '''input can be actual list of PKs or a queryset'''\n new_lock = self.lock_str\n to_lock = BalsamJob.objects.filter(pk__in=pk_list)\n to_lock = to_lock.select_for_update(skip_locked=True).filter(lock='')\n acquired_pks = 
list(to_lock.values_list('job_id', flat=True))\n BalsamJob.objects.filter(pk__in=acquired_pks).update(lock=new_lock,\n tick=timezone.now())\n return acquired_pks\n\n def start_tick(self):\n t = threading.Timer(self.TICK_PERIOD.total_seconds(), self.start_tick)\n t.daemon = True\n t.start()\n self._tick()\n\n def _tick(self):\n now = timezone.now()\n with transaction.atomic():\n my_locked = safe_select(BalsamJob.objects.filter(lock=self.lock_str))\n num_updated = my_locked.update(tick=now)\n logger.debug(f'Ticked lock on {num_updated} of my BalsamJobs')\n connection.close()\n\n def release(self, pk_list):\n with transaction.atomic():\n to_unlock = safe_select(BalsamJob.objects.filter(pk__in=pk_list))\n num_unlocked = to_unlock.update(lock='')\n logger.debug(f'Released lock on {num_unlocked} of my BalsamJobs')\n\n @transaction.atomic\n def release_all_owned(self):\n alljobs = safe_select(BalsamJob.objects.filter(lock=self.lock_str))\n alljobs.update(lock='')\n \n def clear_stale_locks(self):\n objects = self.model.objects\n total_count = objects.count()\n locked_count = objects.exclude(lock='').count()\n logger.debug(f'{locked_count} out of {total_count} jobs are locked')\n\n all_jobs = objects.all()\n expired_time = timezone.now() - self.EXPIRATION_PERIOD\n with transaction.atomic():\n expired_jobs = safe_select(all_jobs.exclude(lock='').filter(tick__lte=expired_time))\n revert_count = expired_jobs.filter(state='RUNNING').update(state='RESTART_READY')\n expired_count = expired_jobs.update(lock='')\n if expired_count:\n logger.info(f'Cleared stale lock on {expired_count} jobs')\n if revert_count: logger.info(f'Reverted {revert_count} RUNNING jobs to RESTART_READY')\n elif locked_count:\n logger.debug(f'No stale locks (older than {self.EXPIRATION_PERIOD.total_seconds()} seconds)')\n\nclass BalsamJob(models.Model):\n ''' A DB representation of a Balsam Job '''\n\n objects = models.Manager()\n source = JobSource()\n\n job_id = models.UUIDField(\n primary_key=True,\n default=uuid.uuid4,\n editable=False)\n\n workflow = models.TextField(\n 'Workflow Name',\n help_text='Name of the workflow to which this job belongs',\n default='')\n name = models.TextField(\n 'Job Name',\n help_text='A name for the job given by the user.',\n default='')\n description = models.TextField(\n 'Job Description',\n help_text='A description of the job.',\n default='')\n lock = models.TextField(\n 'Process Lock',\n help_text='{hostname}:{PID} set by process that currently owns the job',\n default='',\n db_index=True\n )\n tick = models.DateTimeField(auto_now_add=True)\n\n parents = models.TextField(\n 'IDs of the parent jobs which must complete prior to the start of this job.',\n default='[]')\n\n input_files = models.TextField(\n 'Input File Patterns',\n help_text=\"Space-delimited filename patterns that will be searched in the parents'\"\\\n \"working directories. Every matching file will be made available in this\"\\\n \"job's working directory (symlinks for local Balsam jobs, file transfer for\"\\\n \"remote Balsam jobs). Default: all files from parent jobs are made available.\",\n default='*')\n stage_in_url = models.TextField(\n 'External stage in files or folders', help_text=\"A list of URLs for external data to be staged in prior to job processing. Job dataflow from parents to children is NOT handled here; see `input_files` field instead.\",\n default='')\n stage_out_files = models.TextField(\n 'External stage out files or folders',\n help_text=\"A string of filename patterns. 
Matches will be transferred to the stage_out_url. Default: no files are staged out\",\n default='')\n stage_out_url = models.TextField(\n 'Stage Out URL',\n help_text='The URLs to which designated stage out files are sent.',\n default='')\n\n wall_time_minutes = models.IntegerField(\n 'Job Wall Time in Minutes',\n help_text='The number of minutes the job is expected to take',\n default=1)\n num_nodes = models.IntegerField(\n 'Number of Compute Nodes',\n help_text='The number of compute nodes requested for this job.',\n default=1,\n db_index=True)\n coschedule_num_nodes = models.IntegerField(\n 'Number of additional compute nodes to reserve alongside this job',\n help_text='''Used by Balsam service only. If a pilot job runs on one or a\n few nodes, but requires additional worker nodes alongside it,\n use this field to specify the number of additional nodes that will be\n reserved by the service for this job.''',\n default=0)\n ranks_per_node = models.IntegerField(\n 'Number of ranks per node',\n help_text='The number of MPI ranks per node to schedule for this job.',\n default=1)\n cpu_affinity = models.TextField(\n 'Cray CPU Affinity (\"depth\" or \"none\")',\n default=\"none\")\n threads_per_rank = models.IntegerField(\n 'Number of threads per MPI rank',\n help_text='The number of OpenMP threads per MPI rank (if applicable)',\n default=1)\n threads_per_core = models.IntegerField(\n 'Number of hyperthreads per physical core (if applicable)',\n help_text='Number of hyperthreads per physical core.',\n default=1)\n node_packing_count = models.IntegerField(\n 'For serial (non-MPI) jobs only. How many to run concurrently on a node.',\n help_text='Setting this field at 2 means two serial jobs will run at a '\n 'time on a node. This field is ignored for MPI jobs.',\n default=1)\n environ_vars = models.TextField(\n 'Environment variables specific to this job',\n help_text=\"Colon-separated list of envs like VAR1=value1:VAR2=value2\",\n default='')\n \n application = models.TextField(\n 'Application to Run',\n help_text='The application to run; located in Applications database',\n default='')\n args = models.TextField(\n 'Command-line args to the application exe',\n help_text='Command line arguments used by the Balsam job runner',\n default='')\n user_workdir = models.TextField(\n 'Override the Balsam-generated workdir, point to existing location',\n default=''\n )\n\n\n wait_for_parents = models.BooleanField(\n 'If True, do not process this job until parents are FINISHED',\n default=True)\n post_error_handler = models.BooleanField(\n 'Let postprocesser try to handle RUN_ERROR',\n help_text='If true, the postprocessor will be invoked for RUN_ERROR jobs'\n ' and it is up to the script to handle error and update job state.',\n default=False)\n post_timeout_handler = models.BooleanField(\n 'Let postprocesser try to handle RUN_TIMEOUT',\n help_text='If true, the postprocessor will be invoked for RUN_TIMEOUT jobs'\n ' and it is up to the script to handle timeout and update job state.',\n default=False)\n auto_timeout_retry = models.BooleanField(\n 'Automatically restart jobs that have timed out',\n help_text=\"If True and post_timeout_handler is False, then jobs will \"\n \"simply be marked RESTART_READY upon timing out.\",\n default=True)\n\n state = models.TextField(\n 'Job State',\n help_text='The current state of the job.',\n default='CREATED',\n validators=[validate_state],\n db_index=True)\n state_history = models.TextField(\n 'Job State History',\n help_text=\"Chronological record of the job's 
states\",\n default=history_line)\n\n queued_launch = models.ForeignKey(\n 'QueuedLaunch',\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n )\n data = JSONField('User Data', help_text=\"JSON encoded data store for user-defined data\", default=dict)\n\n @staticmethod\n def from_dict(d):\n job = BalsamJob()\n SERIAL_FIELDS = [f for f in job.__dict__ if f not in\n '_state force_insert force_update using update_fields'.split()\n ]\n\n if type(d['job_id']) is str:\n d['job_id'] = uuid.UUID(d['job_id'])\n else:\n assert d['job_id'] is None\n d['job_id'] = job.job_id\n\n for field in SERIAL_FIELDS:\n job.__dict__[field] = d[field]\n\n assert type(job.job_id) == uuid.UUID\n return job\n\n\n def __repr__(self):\n result = f'BalsamJob {self.pk}\\n'\n result += '----------------------------------------------\\n'\n result += '\\n'.join( (k+':').ljust(32) + str(v) \n for k,v in self.__dict__.items() \n if k not in ['state_history', 'job_id', '_state', 'tick'])\n\n try: result += '\\n' + ' *** Executed command:'.ljust(32) + self.app_cmd\n except NoApplication: result += '\\n' + ' *** Executed command:'.ljust(32) + f'NO APPLICATION MATCHING {self.application}'\n except ApplicationDefinition.DoesNotExist: result += '\\n' + ' *** Executed command:'.ljust(32) + f'NO APPLICATION MATCHING {self.application}'\n\n result += '\\n' + ' *** Working directory:'.ljust(32) + self.working_directory +'\\n'\n return result\n\n def __str__(self):\n return self.__repr__()\n\n def get_parents_by_id(self):\n return json.loads(self.parents)\n\n def get_parents(self):\n parent_ids = self.get_parents_by_id()\n return BalsamJob.objects.filter(job_id__in=parent_ids)\n\n @property\n def num_ranks(self):\n return self.num_nodes * self.ranks_per_node\n\n @property\n def cute_id(self):\n if self.name:\n return f\"[{self.name} | { str(self.pk)[:8] }]\"\n else:\n return f\"[{ str(self.pk)[:8] }]\"\n \n @property\n def app_cmd(self):\n app = self.get_application()\n line = f\"{app.executable} {self.args}\"\n return ' '.join(os.path.expanduser(w) for w in line.split())\n\n @property\n def envscript(self):\n app = self.get_application()\n _envscript = app.envscript\n if _envscript and os.path.isfile(_envscript):\n return _envscript\n else:\n return None\n\n\n def get_children(self):\n return BalsamJob.objects.filter(parents__icontains=str(self.pk))\n\n def get_children_by_id(self):\n children = self.get_children()\n return [c.pk for c in children]\n\n def get_child_by_name(self, name):\n children = self.get_children().filter(name=name)\n if children.count() == 0:\n raise ValueError(f\"No child named {name}\")\n elif children.count() > 1:\n raise ValueError(f\"More than one child named {name}\")\n else:\n return children.first()\n\n def set_parents(self, parents):\n try:\n parents_list = list(parents)\n except:\n raise InvalidParentsError(\"Cannot convert input to list\")\n for i, parent in enumerate(parents_list):\n pk = parent.pk if isinstance(parent,BalsamJob) else parent\n if not BalsamJob.objects.filter(pk=pk).exists():\n raise InvalidParentsError(f\"Job PK {pk} is not in the BalsamJob DB\")\n parents_list[i] = str(pk)\n self.parents = json.dumps(parents_list)\n self.save(update_fields=['parents'])\n\n def get_application(self):\n if not self.application: \n raise NoApplication\n elif self.application in _app_cache:\n return _app_cache[self.application]\n else:\n app = ApplicationDefinition.objects.get(name=self.application)\n _app_cache[self.application] = app\n return app\n\n @property\n def preprocess(self):\n try:\n app 
= self.get_application()\n return app.preprocess\n except NoApplication:\n return ''\n \n @property\n def postprocess(self):\n try:\n app = self.get_application()\n return app.postprocess\n except NoApplication:\n return ''\n\n @staticmethod\n def parse_envstring(s):\n result = {}\n entries = s.split(':')\n entries = [e.split('=') for e in entries]\n return {variable:'='.join(values) for (variable,*values) in entries}\n\n def get_envs(self, *, timeout=False, error=False):\n envs = os.environ.copy()\n \n if self.environ_vars:\n job_vars = self.parse_envstring(self.environ_vars)\n envs.update(job_vars)\n \n balsam_envs = dict(\n BALSAM_JOB_ID=str(self.pk),\n BALSAM_PARENT_IDS=str(self.parents),\n )\n\n if self.threads_per_rank > 1:\n balsam_envs['OMP_NUM_THREADS'] = str(self.threads_per_rank)\n\n if timeout: balsam_envs['BALSAM_JOB_TIMEOUT']=\"TRUE\"\n if error: balsam_envs['BALSAM_JOB_ERROR']=\"TRUE\"\n envs.update(balsam_envs)\n return envs\n\n @classmethod\n def batch_update_state(cls, pk_list, new_state, message=''):\n try:\n exists = pk_list.exists()\n except AttributeError:\n exists = bool(pk_list)\n if not exists: return\n\n if new_state not in STATES:\n raise InvalidStateError(f\"{new_state} is not a job state in balsam.models\")\n \n msg = history_line(new_state, message)\n\n with transaction.atomic():\n update_jobs = cls.objects.filter(job_id__in=pk_list).exclude(state='USER_KILLED')\n update_jobs = safe_select(update_jobs)\n update_jobs.update(state=new_state,\n state_history=Concat('state_history', V(msg))\n )\n\n def update_state(self, new_state, message=''):\n if new_state not in STATES:\n raise InvalidStateError(f\"{new_state} is not a job state in balsam.models\")\n msg = history_line(new_state, message)\n self.state = new_state\n self.state_history += msg\n self.save(update_fields=['state', 'state_history'])\n\n def get_recent_state_str(self):\n return self.state_history.split(\"\\n\")[-1].strip()\n\n def read_file_in_workdir(self, fname):\n work_dir = self.working_directory\n path = os.path.join(work_dir, fname)\n if not os.path.exists(path):\n raise ValueError(f\"{fname} not found in working directory of {self.cute_id}\")\n else:\n return open(path).read()\n\n def get_state_times(self):\n matches = STATE_TIME_PATTERN.findall(self.state_history)\n return {state: datetime.strptime(timestr, TIME_FMT)\n for timestr, state in matches\n }\n\n @property\n def runtime_seconds(self):\n times = self.get_state_times()\n t0 = times.get('RUNNING', None) \n t1 = times.get('RUN_DONE', None) \n if t0 and t1:\n return (t1-t0).total_seconds()\n else:\n return None\n\n @property\n def working_directory(self):\n if self.user_workdir and os.path.isdir(self.user_workdir):\n return self.user_workdir\n top = settings.BALSAM_WORK_DIRECTORY\n if self.workflow:\n top = os.path.join(top, self.workflow)\n name = self.name.strip().replace(' ', '_')\n name += '_' + str(self.pk)[:8]\n path = os.path.join(top, name)\n return path\n\n def to_dict(self):\n SERIAL_FIELDS = [f for f in self.__dict__ if f not in ['_state']]\n d = {field : self.__dict__[field] for field in SERIAL_FIELDS}\n return d\n\n def serialize(self, **kwargs):\n d = self.to_dict()\n d.update(kwargs)\n if type(self.job_id) == uuid.UUID:\n d['job_id'] = str(self.job_id)\n else:\n assert self.job_id == d['job_id'] == None\n\n serial_data = json.dumps(d)\n return serial_data\n\n @classmethod\n def deserialize(cls, serial_data):\n if type(serial_data) is bytes:\n serial_data = serial_data.decode('utf-8')\n if type(serial_data) is str:\n 
serial_data = json.loads(serial_data)\n job = BalsamJob.from_dict(serial_data)\n return job\n\nclass ApplicationDefinition(models.Model):\n ''' application definition, each DB entry is a task that can be run\n on the local resource. '''\n name = models.TextField(\n 'Application Name',\n help_text='The name of an application that can be run locally.',\n default='')\n description = models.TextField(\n 'Application Description',\n help_text='A description of the application.',\n default='')\n executable = models.TextField(\n 'Executable',\n help_text='The executable path to run this application on the local system.',\n default='')\n preprocess = models.TextField(\n 'Preprocessing Script',\n help_text='A script that is run in a job working directory prior to submitting the job to the queue.',\n default='')\n envscript = models.TextField(\n 'Environment Setup Script',\n help_text='A script that is sourced immediately prior to the job launch command.',\n default='')\n postprocess = models.TextField(\n 'Postprocessing Script',\n help_text='A script that is run in a job working directory after the job has completed.',\n default='')\n\n def __repr__(self):\n result = f'Application {self.pk}:\\n'\n result += '-----------------------\\n'\n result += '\\n'.join( (k+':').ljust(32) + str(v) \n for k,v in self.__dict__.items() \n if k not in ['_state', 'id'])\n return result\n \n def __str__(self):\n return self.__repr__()\n\n @property\n def cute_id(self):\n return f\"[{self.name} | { str(self.pk)[:8] }]\"\n\n" ]
[ [ "numpy.histogram", "numpy.fromiter", "numpy.arange", "numpy.cumsum" ] ]
MichaelStevan/xmenu-keras-retinanet
[ "e2b843dcb7f40e96926d0b02d1036add47e69421" ]
[ "setup.py" ]
[ "import setuptools\nfrom setuptools.extension import Extension\nfrom distutils.command.build_ext import build_ext as DistUtilsBuildExt\n\n\nclass BuildExtension(setuptools.Command):\n description = DistUtilsBuildExt.description\n user_options = DistUtilsBuildExt.user_options\n boolean_options = DistUtilsBuildExt.boolean_options\n help_options = DistUtilsBuildExt.help_options\n\n def __init__(self, *args, **kwargs):\n from setuptools.command.build_ext import build_ext as SetupToolsBuildExt\n\n # Bypass __setatrr__ to avoid infinite recursion.\n self.__dict__['_command'] = SetupToolsBuildExt(*args, **kwargs)\n\n def __getattr__(self, name):\n return getattr(self._command, name)\n\n def __setattr__(self, name, value):\n setattr(self._command, name, value)\n\n def initialize_options(self, *args, **kwargs):\n return self._command.initialize_options(*args, **kwargs)\n\n def finalize_options(self, *args, **kwargs):\n ret = self._command.finalize_options(*args, **kwargs)\n import numpy\n self.include_dirs.append(numpy.get_include())\n return ret\n\n def run(self, *args, **kwargs):\n return self._command.run(*args, **kwargs)\n\n\nextensions = [\n Extension(\n 'xmenu_keras_retinanet.utils.compute_overlap',\n ['xmenu_keras_retinanet/utils/compute_overlap.pyx']\n ),\n]\n\n\nsetuptools.setup(\n name = 'xmenu-keras-retinanet',\n version = 'v1.0.0',\n description = 'Modified anchor size Keras implementation of RetinaNet object detection.',\n url = 'https://github.com/MichaelStevan/xmenu_keras_retinanet',\n download_url = 'https://github.com/MichaelStevan/xmenu-keras-retinanet/archive/v1.0.0-alpha.tar.gz',\n author = 'Michael Stevan',\n author_email = '[email protected]',\n maintainer = 'Michael Stevan',\n maintainer_email = '[email protected]',\n cmdclass = {'build_ext': BuildExtension},\n packages = setuptools.find_packages(),\n install_requires = ['keras', 'keras-resnet==0.1.0', 'six', 'scipy', 'cython', 'Pillow', 'opencv-python', 'progressbar2'],\n entry_points = {\n 'console_scripts': [\n 'retinanet-train=xmenu_keras_retinanet.bin.train:main',\n 'retinanet-evaluate=xmenu_keras_retinanet.bin.evaluate:main',\n 'retinanet-debug=xmenu_keras_retinanet.bin.debug:main',\n 'retinanet-convert-model=xmenu_keras_retinanet.bin.convert_model:main',\n ],\n },\n ext_modules = extensions,\n setup_requires = [\"cython>=0.28\", \"numpy>=1.14.0\"]\n)\n" ]
[ [ "numpy.get_include" ] ]
franzigeiger/brain-score-1
[ "6749e55544e4e60cee7b9a7b4bc3f60f66dcbce2" ]
[ "tests/test_benchmarks/test_trials.py" ]
[ "import numpy as np\n\nfrom brainio_base.assemblies import NeuroidAssembly\nfrom brainio_base.stimuli import StimulusSet\nfrom brainscore.benchmarks.trials import repeat_trials, average_trials\n\n\ndef _dummy_stimulus_set():\n stimulus_set = StimulusSet([\n {'image_id': 'a'},\n {'image_id': 'b'},\n {'image_id': 'c'},\n ])\n stimulus_set.image_paths = {\n 'a': 'a.png',\n 'b': 'b.png',\n 'c': 'c.png',\n }\n stimulus_set.identifier = 'dummy'\n return stimulus_set\n\n\ndef test_integer_repeat():\n stimulus_set = _dummy_stimulus_set()\n repeat_stimulus_set = repeat_trials(stimulus_set, number_of_trials=5)\n assert len(repeat_stimulus_set) == len(stimulus_set) * 5\n original_image_paths = [stimulus_set.get_image(image_id) for image_id in stimulus_set['image_id']]\n repeat_image_paths = [repeat_stimulus_set.get_image(image_id) for image_id in repeat_stimulus_set['image_id']]\n assert set(repeat_image_paths) == set(original_image_paths)\n assert all(len(group) == 5 and set(group['repetition']) == {0, 1, 2, 3, 4}\n for name, group in repeat_stimulus_set.groupby('image_id'))\n assert repeat_stimulus_set.identifier == 'dummy-5trials'\n\n\ndef test_average_neural_trials():\n assembly = NeuroidAssembly([[1, 2, 3],\n [2, 3, 4],\n [3, 4, 5],\n [4, 5, 6],\n [5, 6, 7],\n [6, 7, 8],\n [7, 8, 9],\n [8, 9, 10]],\n coords={'image_id': ('presentation', ['a', 'a', 'b', 'b', 'c', 'c', 'd', 'd']),\n 'repetition': ('presentation', [0, 1, 0, 1, 0, 1, 0, 1]),\n 'presentation_dummy': ('presentation', ['x'] * 8),\n 'neuroid_id': ('neuroid', [0, 1, 2]),\n 'region': ('neuroid', ['IT', 'IT', 'IT'])},\n dims=['presentation', 'neuroid'])\n averaged_assembly = average_trials(assembly)\n assert len(averaged_assembly['neuroid']) == 3, \"messed up neuroids\"\n assert len(averaged_assembly['presentation']) == 4\n assert set(averaged_assembly['image_id'].values) == {'a', 'b', 'c', 'd'}\n np.testing.assert_array_equal(averaged_assembly['neuroid_id'].values, assembly['neuroid_id'].values)\n np.testing.assert_array_equal(averaged_assembly.sel(image_id='a').values, [[1.5, 2.5, 3.5]])\n np.testing.assert_array_equal(averaged_assembly.sel(image_id='b').values, [[3.5, 4.5, 5.5]])\n np.testing.assert_array_equal(averaged_assembly.sel(image_id='c').values, [[5.5, 6.5, 7.5]])\n np.testing.assert_array_equal(averaged_assembly.sel(image_id='d').values, [[7.5, 8.5, 9.5]])\n\n\ndef test_average_label_trials():\n assembly = NeuroidAssembly([['a'],\n ['a'],\n ['a'],\n ['b'],\n ['b'],\n ['a'],\n ],\n coords={'image_id': ('presentation', ['a', 'a', 'a', 'b', 'b', 'b']),\n 'repetition': ('presentation', [0, 1, 2, 0, 1, 2]),\n 'presentation_dummy': ('presentation', ['x'] * 6),\n 'choice': ('choice', ['dummy'])},\n dims=['presentation', 'choice'])\n averaged_assembly = average_trials(assembly)\n assert len(averaged_assembly['choice']) == 1, \"messed up dimension\"\n assert len(averaged_assembly['presentation']) == 2\n assert set(averaged_assembly['image_id'].values) == {'a', 'b'}\n np.testing.assert_array_equal(averaged_assembly.sel(image_id='a').values, [['a']])\n np.testing.assert_array_equal(averaged_assembly.sel(image_id='b').values, [['b']])\n" ]
[ [ "numpy.testing.assert_array_equal" ] ]
wangjksjtu/autoassist-exp
[ "7c4599fb250c2041ab007965b083750875dd6ac9" ]
[ "machine_translation/fairseq/modules/sinusoidal_positional_embedding_new.py" ]
[ "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.onnx.operators\n\nfrom fairseq import utils\n\n\nclass SinusoidalPositionalEmbedding(nn.Module):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\n Padding symbols are ignored.\n \"\"\"\n\n def __init__(self, embedding_dim, padding_idx, init_size=1024):\n super().__init__()\n self.embedding_dim = embedding_dim\n self.padding_idx = padding_idx\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n init_size,\n embedding_dim,\n padding_idx,\n )\n self.onnx_trace = False\n self.register_buffer('_float_tensor', torch.FloatTensor(1))\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n @staticmethod\n def get_embedding(num_embeddings, embedding_dim, padding_idx=None):\n \"\"\"Build sinusoidal embeddings.\n This matches the implementation in tensor2tensor, but differs slightly\n from the description in Section 3.5 of \"Attention Is All You Need\".\n \"\"\"\n half_dim = embedding_dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)\n emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)\n if embedding_dim % 2 == 1:\n # zero pad\n emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)\n if padding_idx is not None:\n emb[padding_idx, :] = 0\n return emb\n\n def forward(self, input, incremental_state=None, timestep=None):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n bsz, seq_len = torch.onnx.operators.shape_as_tensor(input)\n max_pos = self.padding_idx + 1 + seq_len\n if self.weights is None or max_pos > self.weights.size(0):\n # recompute/expand embeddings if needed\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n max_pos,\n self.embedding_dim,\n self.padding_idx,\n )\n self.weights = self.weights.type_as(self._float_tensor)\n\n if incremental_state is not None:\n # positions is the same for every token when decoding a single step\n pos = (timestep.int() + 1).long() if timestep is not None else seq_len\n if self.onnx_trace:\n return self.weights[self.padding_idx + pos, :].unsqueeze(1).repeat(bsz, 1, 1)\n return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)\n\n positions = utils.make_positions(input, self.padding_idx, onnx_trace=self.onnx_trace)\n if self.onnx_trace:\n flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))\n embedding_shape = torch.cat((bsz.view(1), seq_len.view(1), torch.LongTensor([-1])))\n embeddings = torch.onnx.operators.reshape_from_tensor_shape(flat_embeddings, embedding_shape)\n return embeddings\n return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()\n\n def max_positions(self):\n \"\"\"Maximum number of supported positions.\"\"\"\n return int(1e5) # an arbitrary large number\n" ]
[ [ "torch.zeros", "torch.cos", "torch.onnx.operators.reshape_from_tensor_shape", "torch.arange", "torch.sin", "torch.FloatTensor", "torch.onnx.operators.shape_as_tensor", "torch.LongTensor" ] ]
TheBonheurs/neural-processes
[ "5834bc65f406456e53c363ade1cb0f2a5f23a033" ]
[ "training.py" ]
[ "import torch\nfrom random import randint\nfrom neural_process import NeuralProcessImg\nfrom torch.distributions.kl import kl_divergence\nfrom utils import (context_target_split, batch_context_target_mask,\n img_mask_to_np_input)\n\n\nclass NeuralProcessTrainer:\n \"\"\"\n Class to handle training of Neural Processes for functions and images.\n\n Parameters\n ----------\n device : torch.device\n\n neural_process : neural_process.NeuralProcess or NeuralProcessImg instance\n\n optimizer : one of torch.optim optimizers\n\n num_context_range : tuple of ints\n Number of context points will be sampled uniformly in the range given\n by num_context_range.\n\n num_extra_target_range : tuple of ints\n Number of extra target points (as we always include context points in\n target points, i.e. context points are a subset of target points) will\n be sampled uniformly in the range given by num_extra_target_range.\n\n print_freq : int\n Frequency with which to print loss information during training.\n \"\"\"\n def __init__(self, device, neural_process, optimizer, num_context_range,\n num_extra_target_range, print_freq=100):\n self.device = device\n self.neural_process = neural_process\n self.optimizer = optimizer\n self.num_context_range = num_context_range\n self.num_extra_target_range = num_extra_target_range\n self.print_freq = print_freq\n\n # Check if neural process is for images\n self.is_img = isinstance(self.neural_process, NeuralProcessImg)\n self.steps = 0\n self.epoch_loss_history = []\n self.batches = 16\n\n def train(self, data_loader, epochs):\n \"\"\"\n Trains Neural Process.\n\n Parameters\n ----------\n data_loader : torch.utils.DataLoader instance\n\n epochs : int\n Number of epochs to train for.\n \"\"\"\n for epoch in range(epochs):\n epoch_loss = 0.\n self.neural_process.hidden = self.neural_process.gru.init_hidden(self.batches)\n for i, data in enumerate(data_loader):\n self.optimizer.zero_grad()\n\n # Sample number of context and target points\n num_context = randint(*self.num_context_range)\n num_extra_target = randint(*self.num_extra_target_range)\n\n # Create context and target points and apply neural process\n if self.is_img:\n img, _ = data # data is a tuple (img, label)\n batch_size = img.size(0)\n context_mask, target_mask = \\\n batch_context_target_mask(self.neural_process.img_size,\n num_context, num_extra_target,\n batch_size)\n\n img = img.to(self.device)\n context_mask = context_mask.to(self.device)\n target_mask = target_mask.to(self.device)\n\n p_y_pred, q_target, q_context = \\\n self.neural_process(img, context_mask, target_mask)\n\n # Calculate y_target as this will be required for loss\n _, y_target = img_mask_to_np_input(img, target_mask)\n else:\n x, y = data\n x_context, y_context, x_target, y_target = \\\n context_target_split(x, y, num_context, num_extra_target)\n p_y_pred, q_target, q_context = \\\n self.neural_process(x_context, y_context, x_target, y_target)\n\n loss = self._loss(p_y_pred, y_target, q_target, q_context)\n loss.backward()\n self.optimizer.step()\n\n epoch_loss += loss.item()\n\n self.steps += 1\n\n if self.steps % self.print_freq == 0:\n print(\"iteration {}, loss {:.3f}\".format(self.steps, loss.item()))\n\n print(\"Epoch: {}, Avg_loss: {}\".format(epoch, epoch_loss / len(data_loader)))\n self.epoch_loss_history.append(epoch_loss / len(data_loader))\n\n def _loss(self, p_y_pred, y_target, q_target, q_context):\n \"\"\"\n Computes Neural Process loss.\n\n Parameters\n ----------\n p_y_pred : one of torch.distributions.Distribution\n 
Distribution over y output by Neural Process.\n\n y_target : torch.Tensor\n Shape (batch_size, num_target, y_dim)\n\n q_target : one of torch.distributions.Distribution\n Latent distribution for target points.\n\n q_context : one of torch.distributions.Distribution\n Latent distribution for context points.\n \"\"\"\n # Log likelihood has shape (batch_size, num_target, y_dim). Take mean\n # over batch and sum over number of targets and dimensions of y\n log_likelihood = p_y_pred.log_prob(y_target).mean(dim=0).sum()\n # KL has shape (batch_size, r_dim). Take mean over batch and sum over\n # r_dim (since r_dim is dimension of normal distribution)\n kl = kl_divergence(q_target, q_context).mean(dim=0).sum()\n return -log_likelihood + kl\n" ]
[ [ "torch.distributions.kl.kl_divergence" ] ]
reinforcementdriving/nuscenes-devkit
[ "00b8f9082d84b32fb7dcf2356d39b6c7af83e1a1" ]
[ "python-sdk/nuscenes/eval/lidarseg/evaluate.py" ]
[ "import argparse\nimport json\nimport os\nfrom typing import Dict\n\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom nuscenes import NuScenes\nfrom nuscenes.eval.lidarseg.utils import LidarsegClassMapper, ConfusionMatrix, get_samples_in_eval_set\n\n\nclass LidarSegEval:\n \"\"\"\n This is the official nuScenes-lidarseg evaluation code.\n Results are written to the provided output_dir.\n\n nuScenes-lidarseg uses the following metrics:\n - Mean Intersection-over-Union (mIOU): We use the well-known IOU metric, which is defined as TP / (TP + FP + FN).\n The IOU score is calculated separately for each class, and then the mean is\n computed across classes. Note that in the challenge, index 0 is ignored in\n the calculation.\n - Frequency-weighted IOU (FWIOU): Instead of taking the mean of the IOUs across all the classes, each IOU is\n weighted by the point-level frequency of its class. Note that in the challenge,\n index 0 is ignored in the calculation. FWIOU is not used for the challenge.\n\n We assume that:\n - For each pointcloud, the prediction for every point is present in a .bin file, in the same order as that of the\n points stored in the corresponding .bin file.\n - The naming convention of the .bin files containing the predictions for a single point cloud is:\n <lidar_sample_data_token>_lidarseg.bin\n - The predictions are between 1 and 16 (inclusive); 0 is the index of the ignored class.\n\n Please see https://www.nuscenes.org/lidar-segmentation for more details.\n \"\"\"\n def __init__(self,\n nusc: NuScenes,\n results_folder: str,\n eval_set: str,\n verbose: bool = False):\n \"\"\"\n Initialize a LidarSegEval object.\n :param nusc: A NuScenes object.\n :param results_folder: Path to the folder.\n :param eval_set: The dataset split to evaluate on, e.g. 
train, val or test.\n :param verbose: Whether to print messages during the evaluation.\n \"\"\"\n # Check there are ground truth annotations.\n assert len(nusc.lidarseg) > 0, 'Error: No ground truth annotations found in {}.'.format(nusc.version)\n\n # Check results folder exists.\n self.results_folder = results_folder\n self.results_bin_folder = os.path.join(results_folder, 'lidarseg', eval_set)\n assert os.path.exists(self.results_bin_folder), \\\n 'Error: The folder containing the .bin files ({}) does not exist.'.format(self.results_bin_folder)\n\n self.nusc = nusc\n self.results_folder = results_folder\n self.eval_set = eval_set\n self.verbose = verbose\n\n self.mapper = LidarsegClassMapper(self.nusc)\n self.ignore_idx = self.mapper.ignore_class['index']\n self.id2name = {idx: name for name, idx in self.mapper.coarse_name_2_coarse_idx_mapping.items()}\n self.num_classes = len(self.mapper.coarse_name_2_coarse_idx_mapping)\n\n if self.verbose:\n print('There are {} classes.'.format(self.num_classes))\n\n self.global_cm = ConfusionMatrix(self.num_classes, self.ignore_idx)\n\n self.sample_tokens = get_samples_in_eval_set(self.nusc, self.eval_set)\n if self.verbose:\n print('There are {} samples.'.format(len(self.sample_tokens)))\n\n def evaluate(self) -> Dict:\n \"\"\"\n Performs the actual evaluation.\n :return: A dictionary containing the evaluated metrics.\n \"\"\"\n for sample_token in tqdm(self.sample_tokens, disable=not self.verbose):\n sample = self.nusc.get('sample', sample_token)\n\n # Get the sample data token of the point cloud.\n sd_token = sample['data']['LIDAR_TOP']\n\n # Load the ground truth labels for the point cloud.\n lidarseg_label_filename = os.path.join(self.nusc.dataroot,\n self.nusc.get('lidarseg', sd_token)['filename'])\n lidarseg_label = self.load_bin_file(lidarseg_label_filename)\n\n lidarseg_label = self.mapper.convert_label(lidarseg_label)\n\n # Load the predictions for the point cloud.\n lidarseg_pred_filename = os.path.join(self.results_folder, 'lidarseg',\n self.eval_set, sd_token + '_lidarseg.bin')\n lidarseg_pred = self.load_bin_file(lidarseg_pred_filename)\n\n # Get the confusion matrix between the ground truth and predictions.\n # Update the confusion matrix for the sample data into the confusion matrix for the eval set.\n self.global_cm.update(lidarseg_label, lidarseg_pred)\n\n iou_per_class = self.global_cm.get_per_class_iou()\n miou = self.global_cm.get_mean_iou()\n freqweighted_iou = self.global_cm.get_freqweighted_iou()\n\n # Put everything nicely into a dict.\n results = {'iou_per_class': {self.id2name[i]: class_iou for i, class_iou in enumerate(iou_per_class)},\n 'miou': miou,\n 'freq_weighted_iou': freqweighted_iou}\n\n # Print the results if desired.\n if self.verbose:\n print(\"======\\nnuScenes-lidarseg evaluation for {}\".format(self.eval_set))\n print(json.dumps(results, indent=4, sort_keys=False))\n print(\"======\")\n\n return results\n\n @staticmethod\n def load_bin_file(bin_path: str) -> np.ndarray:\n \"\"\"\n Loads a .bin file containing the labels.\n :param bin_path: Path to the .bin file.\n :return: An array containing the labels.\n \"\"\"\n assert os.path.exists(bin_path), 'Error: Unable to find {}.'.format(bin_path)\n bin_content = np.fromfile(bin_path, dtype=np.uint8)\n assert len(bin_content) > 0, 'Error: {} is empty.'.format(bin_path)\n\n return bin_content\n\n\nif __name__ == '__main__':\n # Settings.\n parser = argparse.ArgumentParser(description='Evaluate nuScenes-lidarseg results.')\n parser.add_argument('--result_path', 
type=str,\n help='The path to the results folder.')\n parser.add_argument('--eval_set', type=str, default='val',\n help='Which dataset split to evaluate on, train, val or test.')\n parser.add_argument('--dataroot', type=str, default='/data/sets/nuscenes',\n help='Default nuScenes data directory.')\n parser.add_argument('--version', type=str, default='v1.0-trainval',\n help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.')\n parser.add_argument('--verbose', type=bool, default=False,\n help='Whether to print to stdout.')\n args = parser.parse_args()\n\n result_path_ = args.result_path\n eval_set_ = args.eval_set\n dataroot_ = args.dataroot\n version_ = args.version\n verbose_ = args.verbose\n\n nusc_ = NuScenes(version=version_, dataroot=dataroot_, verbose=verbose_)\n\n evaluator = LidarSegEval(nusc_, result_path_, eval_set=eval_set_, verbose=verbose_)\n evaluator.evaluate()\n" ]
[ [ "numpy.fromfile" ] ]
Linlin15963/msds501
[ "0bcfa7f59a4e9b2d71db2c5973eb04c1ae60e72f" ]
[ "save_np.py" ]
[ "import sys\nimport os\nimport numpy as np\n\nscript_path = sys.argv[1]\ndirname = os.path.dirname(script_path)\ndirname = \".\" if dirname.strip() == \"\" else dirname\n\nret_vocab_path = \"{dir}/{f}\".format(dir = dirname, f = \"glove.42B.300d.vocab.txt\")\nret_vectr_path = \"{dir}/{f}\".format(dir = dirname, f = \"glove.42B.300d.npy\")\n\nvocab = []\nmylistofvectors = []\nwith open(script_path, 'r', encoding='latin1') as f:\n for line in f.readlines():\n arr = line.strip().split(' ')\n if len(arr) != 301:\n print(arr)\n word, v = arr[0], arr[-300:]\n vocab.append(word)\n mylistofvectors.append(np.array(v, dtype=np.float32))\n\nwith open(ret_vectr_path, \"wb\") as f:\n np.save(f, np.array(mylistofvectors))\n\nwith open(ret_vocab_path, 'w') as f:\n for word in vocab:\n f.write(\"{v}\\n\".format(v=word))" ]
[ [ "numpy.array" ] ]
B0Qi/hualubei2020-callingsmoking
[ "73d1049d95554b5d669afa93132a0fce37461ff4" ]
[ "train.py" ]
[ "# from __future__ import print_function\n\nimport os,argparse\nimport random\nimport gc\nimport numpy as np\nimport cv2\nfrom PIL import Image\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.utils.data.dataset import Dataset\nimport torchvision.models as models\n\nfrom sklearn.model_selection import KFold\n\n\nfrom libs.mAP import getValmAP\nfrom libs.tools import *\nfrom libs.model import NetClassify, NetMultilabel\nfrom libs.data import getDataLoader\nfrom libs.mixup import mixup_data, mixup_criterion\n\nfrom torch.autograd import Variable\nfrom config import cfg\n\nimport platform\n\nfrom libs.scheduler import GradualWarmupScheduler\n\nimport glob\n\n#from adabelief_pytorch import AdaBelief\n\nfrom libs.ranger import Ranger \nfrom libs.focal_loss import FocalLoss \n\n\ndef trainClassify(\n model, \n device, \n train_loader, \n optimizer, \n epoch, \n total_epoch,\n criterion,\n use_distill,\n label_smooth\n ):\n model.train()\n correct = 0\n count = 0\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n\n output = model(data).double()\n\n #all_linear2_params = torch.cat([x.view(-1) for x in model.model_feature._fc.parameters()])\n #l2_regularization = 0.0003 * torch.norm(all_linear2_params, 2)\n\n loss = criterion(output, target)# + l2_regularization.item()\n loss.backward() #计算梯度\n\n clip_gradient(optimizer)\n\n optimizer.step() #更新参数\n optimizer.zero_grad()#把梯度置零\n\n ### train acc\n pred_score = nn.Softmax(dim=1)(output)\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n if use_distill or label_smooth>0:\n target = target.max(1, keepdim=True)[1] \n correct += pred.eq(target.view_as(pred)).sum().item()\n count += len(data)\n\n train_acc = correct / count\n #print(train_acc)\n if batch_idx % 10 == 0:\n print('\\r',\n '{}/{} [{}/{} ({:.0f}%)] loss:{:.3f} acc: {:.3f} '.format(\n epoch+1, total_epoch,batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item(),train_acc), \n end=\"\",flush=True)\n\n\n#val_loss: 1.0412, val_acc: 66.67%, val_mAP: 0.6167\ndef valClassify( model, device, val_loader, criterion, use_distill, label_smooth):\n model.eval()\n val_loss = 0\n correct = 0\n\n with torch.no_grad():\n pres = []\n labels = []\n for data, target in val_loader:\n data, target = data.to(device), target.to(device)\n #print(target.shape)\n if use_distill:\n output = model(data).double()\n else:\n output = model(data)\n\n\n val_loss += criterion(output, target).item() # sum up batch loss\n\n #print(output.shape)\n pred_score = nn.Softmax(dim=1)(output)\n #print(pred_score.shape)\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n if use_distill or label_smooth>0:\n target = target.max(1, keepdim=True)[1] \n correct += pred.eq(target.view_as(pred)).sum().item()\n\n\n batch_pred_score = pred_score.data.cpu().numpy().tolist()\n batch_label_score = target.data.cpu().numpy().tolist()\n pres.extend(batch_pred_score)\n labels.extend(batch_label_score)\n\n pres = np.array(pres)\n labels = np.array(labels)\n #print(pres.shape, labels.shape)\n\n\n mAP = getValmAP(pres, labels)\n\n val_loss /= len(val_loader.dataset)\n val_acc = correct / len(val_loader.dataset)\n print(' ------------------------------ val_loss: {:.4f}, val_acc: {:.2f}%, val_mAP: {:.4f}'.format(\n val_loss, 100. 
* val_acc, mAP))\n\n return val_loss, mAP\n\n\n\n\n\ndef main(cfg):\n \n\n print(cfg)\n print(\"=================================\")\n\n\n model_name = cfg['model_name']\n img_size = cfg['img_size']\n class_number = cfg['class_number']\n save_dir = cfg['save_dir']\n random_seed = cfg['random_seed']\n train_path = cfg['train_path']\n GPU_ID = cfg['GPU_ID']\n\n fold_num = cfg['k_flod']\n batch_size = cfg['batch_size']\n epochs = cfg['epochs']\n learning_rate = cfg['learning_rate']\n early_stop_patient = cfg['early_stop_patient']\n save_start_epoch = cfg['save_start_epoch']\n use_warmup = cfg['use_warmup']\n schedu = cfg['schedu']\n optims = cfg['optims']\n weight_decay = cfg['weight_decay']\n use_distill = cfg['use_distill']\n label_smooth = cfg['label_smooth']\n model_path = cfg['model_path']\n start_fold = cfg['start_fold']\n test_path = cfg['test_path']\n \n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = GPU_ID\n seed_reproducer(random_seed)\n\n ############################################################\n \n\n # log_interval = 10\n #use_cuda = True\n device = torch.device(\"cuda\")#cuda\n\n if platform.system() == \"Windows\":\n kwargs = {'num_workers': 0, 'pin_memory': True}\n else:\n kwargs = {'num_workers': 4, 'pin_memory': True}\n\n train_names = getAllName(train_path)\n val_names = getAllName(test_path)\n \n print(\"total imgs: \", len(train_names))\n\n if not use_distill:\n train_names = [x for x in train_names if \"aug\" not in x]\n print(\"remove aug: \", len(train_names))\n\n #print(train_names[:3])\n train_names.sort(key = lambda x:os.path.basename(x))\n #print(train_names[:3])\n\n train_names = np.array(train_names)\n val_names = np.array(val_names)\n \n random.shuffle(train_names)\n random.shuffle(val_names)\n \n\n \n # folds = KFold(n_splits=fold_num, shuffle=False)#, random_state=random_seed\n # for fold_i, (train_index, val_index) in enumerate(folds.split(train_names)):\n # print(\"Fold: \", fold_i+1,'/',fold_num)\n # if fold_i<start_fold:\n # continue\n\n\n # train_data = train_names[train_index]\n # val_data = train_names[val_index]\n # print(val_data[-3:])\n # b\n train_data = train_names\n val_data = val_names\n input_data = [train_data, val_data]\n \n\n\n if not use_distill and label_smooth==0:\n criterion = torch.nn.CrossEntropyLoss().cuda()\n #criterion = FocalLoss().cuda()\n train_loader, val_loader = getDataLoader(\"trainClassify\", input_data,model_name, img_size, batch_size, kwargs)\n else:\n\n criterion = CrossEntropyLossOneHot().cuda()\n #kwargs['use_distill'] = use_distill\n #print(kwargs)\n train_loader, val_loader = getDataLoader(\"trainClassifyOnehot\", input_data,model_name, img_size, batch_size, kwargs)\n\n\n model = NetClassify(model_name, class_number).to(device)\n if model_path is not None:\n model.load_state_dict(torch.load(model_path))\n print(\"---------------------- load model!!!\")\n # print(model)\n # b\n \n\n if optims=='adam':\n optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optims=='SGD':\n optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)\n elif optims=='AdaBelief':\n optimizer = AdaBelief(model.parameters(), lr=learning_rate, eps=1e-12, betas=(0.9,0.999))\n elif optims=='Ranger':\n optimizer = Ranger(model.parameters(), lr=learning_rate)\n \n\n\n if schedu=='default':\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1, patience=5)\n elif schedu=='step1':\n scheduler = optim.lr_scheduler.StepLR(optimizer, 
step_size=4, gamma=0.8, last_epoch=-1)\n elif schedu=='step2':\n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=8, gamma=0.5, last_epoch=-1)\n elif schedu=='step3':\n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.5, last_epoch=-1)\n elif schedu=='SGDR1': \n scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer,\n T_0=10, \n T_mult=2)\n elif schedu=='SGDR2': \n scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer,\n T_0=5, \n T_mult=2)\n\n # elif schedu=='CVPR': \n # scheduler = WarmRe mizer, T_max=10, T_mult=1, eta_min=1e-5)\n \n if use_warmup:\n scheduler_warmup = GradualWarmupScheduler(optimizer, \n multiplier=1, total_epoch=1, after_scheduler=scheduler)\n\n\n early_stop_value = 0\n early_stop_dist = 0\n\n for epoch in range(epochs):\n \n if schedu=='step3':\n if epoch==10:\n img_size=416\n batch_size = 4\n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.5, last_epoch=-1)\n if not use_distill:\n criterion = torch.nn.CrossEntropyLoss().cuda()\n train_loader, val_loader = getDataLoader(\"trainClassify\", input_data,model_name, img_size, batch_size, kwargs)\n else:\n criterion = CrossEntropyLossOneHot().cuda()\n train_loader, val_loader = getDataLoader(\"trainClassifyOnehot\", input_data,model_name, img_size, batch_size, kwargs)\n\n\n elif epoch==15:\n img_size=600\n batch_size = 3\n scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer,\n T_0=10, \n T_mult=2)\n if not use_distill:\n criterion = torch.nn.CrossEntropyLoss().cuda()\n train_loader, val_loader = getDataLoader(\"trainClassify\", input_data,model_name, img_size, batch_size, kwargs)\n else:\n criterion = CrossEntropyLossOneHot().cuda()\n train_loader, val_loader = getDataLoader(\"trainClassifyOnehot\", input_data,model_name, img_size, batch_size, kwargs)\n\n\n \n\n trainClassify(model, device, train_loader, optimizer, epoch, epochs, criterion, use_distill, label_smooth)\n print(\" LR:\", optimizer.param_groups[0][\"lr\"], end=\"\")\n\n t = time.time()\n val_loss, mAP = valClassify(model, device, val_loader, criterion, use_distill, label_smooth)\n print(\"val time: \", time.time() - t)\n\n if use_warmup:\n scheduler_warmup.step(epoch)\n else:\n if schedu=='default':\n scheduler.step(mAP)\n else:\n scheduler.step()\n\n #print(\"---\")\n #print(mAP, early_stop_value, early_stop_dist)\n if mAP>early_stop_value:\n early_stop_value = mAP\n early_stop_dist = 0\n if epoch>=save_start_epoch:\n hitory_path = glob.glob('./save/%s-%d_*k-%d_%s.pth' % (model_name,img_size,0,GPU_ID))\n if len(hitory_path)!=0:\n if os.path.exists(hitory_path[0]):\n os.remove(hitory_path[0])\n torch.save(model.state_dict(), './save/%s-%d_%d_%.4f_k-%d_%s.pth' % (model_name,img_size,epoch,mAP,0,GPU_ID))\n \n early_stop_dist+=1\n if early_stop_dist>early_stop_patient:\n print(\"------\")\n print(cfg)\n print(\"------\")\n print(\"===== Early Stop with patient %d , best is Epoch - %d :%f\" % (early_stop_patient,epoch-early_stop_patient,early_stop_value))\n break\n if epoch+1==epochs:\n print(\"===== Finish trainging , best is Epoch - %d :%f\" % (epoch-early_stop_dist,early_stop_value))\n break\n\n \n del model\n gc.collect()\n torch.cuda.empty_cache()\n\n\n #if not use_distill:\n # break\n #break\n\n\nif __name__ == '__main__':\n main(cfg)" ]
[ [ "torch.device", "numpy.array", "torch.optim.lr_scheduler.StepLR", "torch.nn.Softmax", "torch.no_grad", "torch.optim.lr_scheduler.CosineAnnealingWarmRestarts", "torch.cuda.empty_cache", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.load", "torch.nn.CrossEntropyLoss" ] ]
zhengjxu/mlflow
[ "004ef1f091881ac6379182055763a82607a4a669" ]
[ "mlflow/pytorch/__init__.py" ]
[ "\"\"\"\nThe ``mlflow.pytorch`` module provides an API for logging and loading PyTorch models. This module\nexports PyTorch models with the following flavors:\n\nPyTorch (native) format\n This is the main flavor that can be loaded back into PyTorch.\n:py:mod:`mlflow.pyfunc`\n Produced for use by generic pyfunc-based deployment tools and batch inference.\n\"\"\"\nimport importlib\nimport logging\nimport os\nimport yaml\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nfrom packaging.version import Version\nimport posixpath\n\nimport mlflow\nimport shutil\nimport mlflow.pyfunc.utils as pyfunc_utils\nfrom mlflow import pyfunc\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.models import Model, ModelSignature\nfrom mlflow.models.model import MLMODEL_FILE_NAME\nfrom mlflow.models.utils import ModelInputExample, _save_example\nfrom mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST\nfrom mlflow.pytorch import pickle_module as mlflow_pytorch_pickle_module\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.utils.annotations import experimental\nfrom mlflow.utils.environment import (\n _mlflow_conda_env,\n _validate_env_arguments,\n _process_pip_requirements,\n _process_conda_env,\n _CONDA_ENV_FILE_NAME,\n _REQUIREMENTS_FILE_NAME,\n _CONSTRAINTS_FILE_NAME,\n)\nfrom mlflow.utils.requirements_utils import _get_pinned_requirement\nfrom mlflow.utils.docstring_utils import format_docstring, LOG_MODEL_PARAM_DOCS\nfrom mlflow.utils.file_utils import _copy_file_or_tree, TempDir, write_to\nfrom mlflow.utils.model_utils import _get_flavor_configuration\nfrom mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS\nfrom mlflow.utils.autologging_utils import autologging_integration, safe_patch\n\nFLAVOR_NAME = \"pytorch\"\n\n_SERIALIZED_TORCH_MODEL_FILE_NAME = \"model.pth\"\n_TORCH_STATE_DICT_FILE_NAME = \"state_dict.pth\"\n_PICKLE_MODULE_INFO_FILE_NAME = \"pickle_module_info.txt\"\n_EXTRA_FILES_KEY = \"extra_files\"\n_REQUIREMENTS_FILE_KEY = \"requirements_file\"\n\n_logger = logging.getLogger(__name__)\n\n\ndef get_default_pip_requirements():\n \"\"\"\n :return: A list of default pip requirements for MLflow Models produced by this flavor.\n Calls to :func:`save_model()` and :func:`log_model()` produce a pip environment\n that, at minimum, contains these requirements.\n \"\"\"\n return list(\n map(\n _get_pinned_requirement,\n [\n \"torch\",\n \"torchvision\",\n # We include CloudPickle in the default environment because\n # it's required by the default pickle module used by `save_model()`\n # and `log_model()`: `mlflow.pytorch.pickle_module`.\n \"cloudpickle\",\n ],\n )\n )\n\n\ndef get_default_conda_env():\n \"\"\"\n :return: The default Conda environment as a dictionary for MLflow Models produced by calls to\n :func:`save_model()` and :func:`log_model()`.\n\n .. code-block:: python\n :caption: Example\n\n import mlflow.pytorch\n\n # Log PyTorch model\n with mlflow.start_run() as run:\n mlflow.pytorch.log_model(model, \"model\")\n\n # Fetch the associated conda environment\n env = mlflow.pytorch.get_default_conda_env()\n print(\"conda env: {}\".format(env))\n\n .. 
code-block:: text\n :caption: Output\n\n conda env {'name': 'mlflow-env',\n 'channels': ['conda-forge'],\n 'dependencies': ['python=3.7.5',\n {'pip': ['torch==1.5.1',\n 'torchvision==0.6.1',\n 'mlflow',\n 'cloudpickle==1.6.0']}]}\n \"\"\"\n return _mlflow_conda_env(additional_pip_deps=get_default_pip_requirements())\n\n\n@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=\"torch\"))\ndef log_model(\n pytorch_model,\n artifact_path,\n conda_env=None,\n code_paths=None,\n pickle_module=None,\n registered_model_name=None,\n signature: ModelSignature = None,\n input_example: ModelInputExample = None,\n await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,\n requirements_file=None,\n extra_files=None,\n pip_requirements=None,\n extra_pip_requirements=None,\n **kwargs\n):\n \"\"\"\n Log a PyTorch model as an MLflow artifact for the current run.\n\n :param pytorch_model: PyTorch model to be saved. Can be either an eager model (subclass of\n ``torch.nn.Module``) or scripted model prepared via ``torch.jit.script``\n or ``torch.jit.trace``.\n\n The model accept a single ``torch.FloatTensor`` as\n input and produce a single output tensor.\n\n If saving an eager model, any code dependencies of the\n model's class, including the class definition itself, should be\n included in one of the following locations:\n\n - The package(s) listed in the model's Conda environment, specified\n by the ``conda_env`` parameter.\n - One or more of the files specified by the ``code_paths`` parameter.\n\n :param artifact_path: Run-relative artifact path.\n :param conda_env: Path to a Conda environment file. If provided, this decsribes the environment\n this model should be run in. At minimum, it should specify the dependencies\n contained in :func:`get_default_conda_env()`. If ``None``, the default\n :func:`get_default_conda_env()` environment is added to the model. The\n following is an *example* dictionary representation of a Conda environment::\n\n {\n 'name': 'mlflow-env',\n 'channels': ['defaults'],\n 'dependencies': [\n 'python=3.7.0',\n 'pytorch=0.4.1',\n 'torchvision=0.2.1'\n ]\n }\n\n :param code_paths: A list of local filesystem paths to Python file dependencies (or directories\n containing file dependencies). These files are *prepended* to the system\n path when the model is loaded.\n :param pickle_module: The module that PyTorch should use to serialize (\"pickle\") the specified\n ``pytorch_model``. This is passed as the ``pickle_module`` parameter\n to ``torch.save()``. By default, this module is also used to\n deserialize (\"unpickle\") the PyTorch model at load time.\n :param registered_model_name: (Experimental) If given, create a model version under\n ``registered_model_name``, also creating a registered model if one\n with the given name does not exist.\n\n :param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`\n describes model input and output :py:class:`Schema <mlflow.types.Schema>`.\n The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`\n from datasets with valid model input (e.g. the training dataset with target\n column omitted) and valid model output (e.g. model predictions generated on\n the training dataset), for example:\n\n .. code-block:: python\n\n from mlflow.models.signature import infer_signature\n train = df.drop_column(\"target_label\")\n predictions = ... 
# compute model predictions\n signature = infer_signature(train, predictions)\n :param input_example: (Experimental) Input example provides one or several instances of valid\n model input. The example can be used as a hint of what data to feed the\n model. The given example can be a Pandas DataFrame where the given\n example will be serialized to json using the Pandas split-oriented\n format, or a numpy array where the example will be serialized to json\n by converting it to a list. Bytes are base64-encoded.\n\n :param await_registration_for: Number of seconds to wait for the model version to finish\n being created and is in ``READY`` status. By default, the function\n waits for five minutes. Specify 0 or None to skip waiting.\n\n :param requirements_file:\n\n .. warning::\n\n ``requirements_file`` has been deprecated. Please use ``pip_requirements`` instead.\n\n A string containing the path to requirements file. Remote URIs are resolved to absolute\n filesystem paths. For example, consider the following ``requirements_file`` string:\n\n .. code-block:: python\n\n requirements_file = \"s3://my-bucket/path/to/my_file\"\n\n In this case, the ``\"my_file\"`` requirements file is downloaded from S3. If ``None``,\n no requirements file is added to the model.\n\n :param extra_files: A list containing the paths to corresponding extra files. Remote URIs\n are resolved to absolute filesystem paths.\n For example, consider the following ``extra_files`` list -\n\n extra_files = [\"s3://my-bucket/path/to/my_file1\",\n \"s3://my-bucket/path/to/my_file2\"]\n\n In this case, the ``\"my_file1 & my_file2\"`` extra file is downloaded from S3.\n\n If ``None``, no extra files are added to the model.\n :param pip_requirements: {{ pip_requirements }}\n :param extra_pip_requirements: {{ extra_pip_requirements }}\n :param kwargs: kwargs to pass to ``torch.save`` method.\n\n .. 
code-block:: python\n :caption: Example\n\n import numpy as np\n import torch\n import mlflow.pytorch\n\n class LinearNNModel(torch.nn.Module):\n def __init__(self):\n super(LinearNNModel, self).__init__()\n self.linear = torch.nn.Linear(1, 1) # One in and one out\n\n def forward(self, x):\n y_pred = self.linear(x)\n return y_pred\n\n def gen_data():\n # Example linear model modified to use y = 2x\n # from https://github.com/hunkim/PyTorchZeroToAll\n # X training data, y labels\n X = torch.arange(1.0, 25.0).view(-1, 1)\n y = torch.from_numpy(np.array([x * 2 for x in X])).view(-1, 1)\n return X, y\n\n # Define model, loss, and optimizer\n model = LinearNNModel()\n criterion = torch.nn.MSELoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.001)\n\n # Training loop\n epochs = 250\n X, y = gen_data()\n for epoch in range(epochs):\n # Forward pass: Compute predicted y by passing X to the model\n y_pred = model(X)\n\n # Compute the loss\n loss = criterion(y_pred, y)\n\n # Zero gradients, perform a backward pass, and update the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Log the model\n with mlflow.start_run() as run:\n mlflow.pytorch.log_model(model, \"model\")\n\n # convert to scripted model and log the model\n scripted_pytorch_model = torch.jit.script(model)\n mlflow.pytorch.log_model(scripted_pytorch_model, \"scripted_model\")\n\n # Fetch the logged model artifacts\n print(\"run_id: {}\".format(run.info.run_id))\n for artifact_path in [\"model/data\", \"scripted_model/data\"]:\n artifacts = [f.path for f in MlflowClient().list_artifacts(run.info.run_id,\n artifact_path)]\n print(\"artifacts: {}\".format(artifacts))\n\n .. code-block:: text\n :caption: Output\n\n run_id: 1a1ec9e413ce48e9abf9aec20efd6f71\n artifacts: ['model/data/model.pth',\n 'model/data/pickle_module_info.txt']\n artifacts: ['scripted_model/data/model.pth',\n 'scripted_model/data/pickle_module_info.txt']\n\n .. figure:: ../_static/images/pytorch_logged_models.png\n\n PyTorch logged models\n \"\"\"\n pickle_module = pickle_module or mlflow_pytorch_pickle_module\n Model.log(\n artifact_path=artifact_path,\n flavor=mlflow.pytorch,\n pytorch_model=pytorch_model,\n conda_env=conda_env,\n code_paths=code_paths,\n pickle_module=pickle_module,\n registered_model_name=registered_model_name,\n signature=signature,\n input_example=input_example,\n await_registration_for=await_registration_for,\n requirements_file=requirements_file,\n extra_files=extra_files,\n pip_requirements=pip_requirements,\n extra_pip_requirements=extra_pip_requirements,\n **kwargs,\n )\n\n\n@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=\"torch\"))\ndef save_model(\n pytorch_model,\n path,\n conda_env=None,\n mlflow_model=None,\n code_paths=None,\n pickle_module=None,\n signature: ModelSignature = None,\n input_example: ModelInputExample = None,\n requirements_file=None,\n extra_files=None,\n pip_requirements=None,\n extra_pip_requirements=None,\n **kwargs\n):\n \"\"\"\n Save a PyTorch model to a path on the local file system.\n\n :param pytorch_model: PyTorch model to be saved. 
Can be either an eager model (subclass of\n ``torch.nn.Module``) or scripted model prepared via ``torch.jit.script``\n or ``torch.jit.trace``.\n\n The model accept a single ``torch.FloatTensor`` as\n input and produce a single output tensor.\n\n If saving an eager model, any code dependencies of the\n model's class, including the class definition itself, should be\n included in one of the following locations:\n\n - The package(s) listed in the model's Conda environment, specified\n by the ``conda_env`` parameter.\n - One or more of the files specified by the ``code_paths`` parameter.\n\n :param path: Local path where the model is to be saved.\n :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n Conda environment yaml file. If provided, this decsribes the environment\n this model should be run in. At minimum, it should specify the dependencies\n contained in :func:`get_default_conda_env()`. If ``None``, the default\n :func:`get_default_conda_env()` environment is added to the model. The\n following is an *example* dictionary representation of a Conda environment::\n\n {\n 'name': 'mlflow-env',\n 'channels': ['defaults'],\n 'dependencies': [\n 'python=3.7.0',\n 'pytorch=0.4.1',\n 'torchvision=0.2.1'\n ]\n }\n\n :param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.\n :param code_paths: A list of local filesystem paths to Python file dependencies (or directories\n containing file dependencies). These files are *prepended* to the system\n path when the model is loaded.\n :param pickle_module: The module that PyTorch should use to serialize (\"pickle\") the specified\n ``pytorch_model``. This is passed as the ``pickle_module`` parameter\n to ``torch.save()``. By default, this module is also used to\n deserialize (\"unpickle\") the PyTorch model at load time.\n\n :param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`\n describes model input and output :py:class:`Schema <mlflow.types.Schema>`.\n The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`\n from datasets with valid model input (e.g. the training dataset with target\n column omitted) and valid model output (e.g. model predictions generated on\n the training dataset), for example:\n\n .. code-block:: python\n\n from mlflow.models.signature import infer_signature\n train = df.drop_column(\"target_label\")\n predictions = ... # compute model predictions\n signature = infer_signature(train, predictions)\n :param input_example: (Experimental) Input example provides one or several instances of valid\n model input. The example can be used as a hint of what data to feed the\n model. The given example can be a Pandas DataFrame where the given\n example will be serialized to json using the Pandas split-oriented\n format, or a numpy array where the example will be serialized to json\n by converting it to a list. Bytes are base64-encoded.\n\n :param requirements_file:\n\n .. warning::\n\n ``requirements_file`` has been deprecated. Please use ``pip_requirements`` instead.\n\n A string containing the path to requirements file. Remote URIs are resolved to absolute\n filesystem paths. For example, consider the following ``requirements_file`` string:\n\n .. code-block:: python\n\n requirements_file = \"s3://my-bucket/path/to/my_file\"\n\n In this case, the ``\"my_file\"`` requirements file is downloaded from S3. 
If ``None``,\n no requirements file is added to the model.\n\n :param extra_files: A list containing the paths to corresponding extra files. Remote URIs\n are resolved to absolute filesystem paths.\n For example, consider the following ``extra_files`` list -\n\n extra_files = [\"s3://my-bucket/path/to/my_file1\",\n \"s3://my-bucket/path/to/my_file2\"]\n\n In this case, the ``\"my_file1 & my_file2\"`` extra file is downloaded from S3.\n\n If ``None``, no extra files are added to the model.\n :param pip_requirements: {{ pip_requirements }}\n :param extra_pip_requirements: {{ extra_pip_requirements }}\n :param kwargs: kwargs to pass to ``torch.save`` method.\n\n .. code-block:: python\n :caption: Example\n\n import os\n\n import torch\n import mlflow.pytorch\n\n # Class defined here\n class LinearNNModel(torch.nn.Module):\n ...\n\n # Initialize our model, criterion and optimizer\n ...\n\n # Training loop\n ...\n\n # Save PyTorch models to current working directory\n with mlflow.start_run() as run:\n mlflow.pytorch.save_model(model, \"model\")\n\n # Convert to a scripted model and save it\n scripted_pytorch_model = torch.jit.script(model)\n mlflow.pytorch.save_model(scripted_pytorch_model, \"scripted_model\")\n\n # Load each saved model for inference\n for model_path in [\"model\", \"scripted_model\"]:\n model_uri = \"{}/{}\".format(os.getcwd(), model_path)\n loaded_model = mlflow.pytorch.load_model(model_uri)\n print(\"Loaded {}:\".format(model_path))\n for x in [6.0, 8.0, 12.0, 30.0]:\n X = torch.Tensor([[x]])\n y_pred = loaded_model(X)\n print(\"predict X: {}, y_pred: {:.2f}\".format(x, y_pred.data.item()))\n print(\"--\")\n\n .. code-block:: text\n :caption: Output\n\n Loaded model:\n predict X: 6.0, y_pred: 11.90\n predict X: 8.0, y_pred: 15.92\n predict X: 12.0, y_pred: 23.96\n predict X: 30.0, y_pred: 60.13\n --\n Loaded scripted_model:\n predict X: 6.0, y_pred: 11.90\n predict X: 8.0, y_pred: 15.92\n predict X: 12.0, y_pred: 23.96\n predict X: 30.0, y_pred: 60.13\n \"\"\"\n import torch\n\n _validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)\n\n pickle_module = pickle_module or mlflow_pytorch_pickle_module\n\n if not isinstance(pytorch_model, torch.nn.Module):\n raise TypeError(\"Argument 'pytorch_model' should be a torch.nn.Module\")\n if code_paths is not None:\n if not isinstance(code_paths, list):\n raise TypeError(\"Argument code_paths should be a list, not {}\".format(type(code_paths)))\n path = os.path.abspath(path)\n if os.path.exists(path):\n raise RuntimeError(\"Path '{}' already exists\".format(path))\n\n if mlflow_model is None:\n mlflow_model = Model()\n\n os.makedirs(path)\n if signature is not None:\n mlflow_model.signature = signature\n if input_example is not None:\n _save_example(mlflow_model, input_example, path)\n\n model_data_subpath = \"data\"\n model_data_path = os.path.join(path, model_data_subpath)\n os.makedirs(model_data_path)\n # Persist the pickle module name as a file in the model's `data` directory. 
This is necessary\n # because the `data` directory is the only available parameter to `_load_pyfunc`, and it\n # does not contain the MLmodel configuration; therefore, it is not sufficient to place\n # the module name in the MLmodel\n #\n # TODO: Stop persisting this information to the filesystem once we have a mechanism for\n # supplying the MLmodel configuration to `mlflow.pytorch._load_pyfunc`\n pickle_module_path = os.path.join(model_data_path, _PICKLE_MODULE_INFO_FILE_NAME)\n with open(pickle_module_path, \"w\") as f:\n f.write(pickle_module.__name__)\n # Save pytorch model\n model_path = os.path.join(model_data_path, _SERIALIZED_TORCH_MODEL_FILE_NAME)\n if isinstance(pytorch_model, torch.jit.ScriptModule):\n torch.jit.ScriptModule.save(pytorch_model, model_path)\n else:\n torch.save(pytorch_model, model_path, pickle_module=pickle_module, **kwargs)\n\n torchserve_artifacts_config = {}\n\n if extra_files:\n torchserve_artifacts_config[_EXTRA_FILES_KEY] = []\n if not isinstance(extra_files, list):\n raise TypeError(\"Extra files argument should be a list\")\n\n with TempDir() as tmp_extra_files_dir:\n for extra_file in extra_files:\n _download_artifact_from_uri(\n artifact_uri=extra_file, output_path=tmp_extra_files_dir.path()\n )\n rel_path = posixpath.join(_EXTRA_FILES_KEY, os.path.basename(extra_file),)\n torchserve_artifacts_config[_EXTRA_FILES_KEY].append({\"path\": rel_path})\n shutil.move(\n tmp_extra_files_dir.path(), posixpath.join(path, _EXTRA_FILES_KEY),\n )\n\n conda_env, pip_requirements, pip_constraints = (\n _process_pip_requirements(\n get_default_pip_requirements(), pip_requirements, extra_pip_requirements,\n )\n if conda_env is None\n else _process_conda_env(conda_env)\n )\n\n with open(os.path.join(path, _CONDA_ENV_FILE_NAME), \"w\") as f:\n yaml.safe_dump(conda_env, stream=f, default_flow_style=False)\n\n # Save `constraints.txt` if necessary\n if pip_constraints:\n write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), \"\\n\".join(pip_constraints))\n\n if requirements_file:\n\n warnings.warn(\n \"`requirements_file` has been deprecated. 
Please use `pip_requirements` instead.\",\n FutureWarning,\n stacklevel=2,\n )\n\n if not isinstance(requirements_file, str):\n raise TypeError(\"Path to requirements file should be a string\")\n\n with TempDir() as tmp_requirements_dir:\n _download_artifact_from_uri(\n artifact_uri=requirements_file, output_path=tmp_requirements_dir.path()\n )\n rel_path = os.path.basename(requirements_file)\n torchserve_artifacts_config[_REQUIREMENTS_FILE_KEY] = {\"path\": rel_path}\n shutil.move(tmp_requirements_dir.path(rel_path), path)\n else:\n # Save `requirements.txt`\n write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), \"\\n\".join(pip_requirements))\n\n if code_paths is not None:\n code_dir_subpath = \"code\"\n for code_path in code_paths:\n _copy_file_or_tree(src=code_path, dst=path, dst_dir=code_dir_subpath)\n else:\n code_dir_subpath = None\n\n mlflow_model.add_flavor(\n FLAVOR_NAME,\n model_data=model_data_subpath,\n pytorch_version=str(torch.__version__),\n **torchserve_artifacts_config,\n )\n pyfunc.add_to_model(\n mlflow_model,\n loader_module=\"mlflow.pytorch\",\n data=model_data_subpath,\n pickle_module_name=pickle_module.__name__,\n code=code_dir_subpath,\n env=_CONDA_ENV_FILE_NAME,\n )\n mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))\n\n\ndef _load_model(path, **kwargs):\n \"\"\"\n :param path: The path to a serialized PyTorch model.\n :param kwargs: Additional kwargs to pass to the PyTorch ``torch.load`` function.\n \"\"\"\n import torch\n\n if os.path.isdir(path):\n # `path` is a directory containing a serialized PyTorch model and a text file containing\n # information about the pickle module that should be used by PyTorch to load it\n model_path = os.path.join(path, \"model.pth\")\n pickle_module_path = os.path.join(path, _PICKLE_MODULE_INFO_FILE_NAME)\n with open(pickle_module_path, \"r\") as f:\n pickle_module_name = f.read()\n if \"pickle_module\" in kwargs and kwargs[\"pickle_module\"].__name__ != pickle_module_name:\n _logger.warning(\n \"Attempting to load the PyTorch model with a pickle module, '%s', that does not\"\n \" match the pickle module that was used to save the model: '%s'.\",\n kwargs[\"pickle_module\"].__name__,\n pickle_module_name,\n )\n else:\n try:\n kwargs[\"pickle_module\"] = importlib.import_module(pickle_module_name)\n except ImportError as exc:\n raise MlflowException(\n message=(\n \"Failed to import the pickle module that was used to save the PyTorch\"\n \" model. 
Pickle module name: `{pickle_module_name}`\".format(\n pickle_module_name=pickle_module_name\n )\n ),\n error_code=RESOURCE_DOES_NOT_EXIST,\n ) from exc\n\n else:\n model_path = path\n\n if Version(torch.__version__) >= Version(\"1.5.0\"):\n return torch.load(model_path, **kwargs)\n else:\n try:\n # load the model as an eager model.\n return torch.load(model_path, **kwargs)\n except Exception:\n # If fails, assume the model as a scripted model\n return torch.jit.load(model_path)\n\n\ndef load_model(model_uri, **kwargs):\n \"\"\"\n Load a PyTorch model from a local file or a run.\n\n :param model_uri: The location, in URI format, of the MLflow model, for example:\n\n - ``/Users/me/path/to/local/model``\n - ``relative/path/to/local/model``\n - ``s3://my_bucket/path/to/model``\n - ``runs:/<mlflow_run_id>/run-relative/path/to/model``\n - ``models:/<model_name>/<model_version>``\n - ``models:/<model_name>/<stage>``\n\n For more information about supported URI schemes, see\n `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#\n artifact-locations>`_.\n\n :param kwargs: kwargs to pass to ``torch.load`` method.\n :return: A PyTorch model.\n\n .. code-block:: python\n :caption: Example\n\n import torch\n import mlflow.pytorch\n\n # Class defined here\n class LinearNNModel(torch.nn.Module):\n ...\n\n # Initialize our model, criterion and optimizer\n ...\n\n # Training loop\n ...\n\n # Log the model\n with mlflow.start_run() as run:\n mlflow.pytorch.log_model(model, \"model\")\n\n # Inference after loading the logged model\n model_uri = \"runs:/{}/model\".format(run.info.run_id)\n loaded_model = mlflow.pytorch.load_model(model_uri)\n for x in [4.0, 6.0, 30.0]:\n X = torch.Tensor([[x]])\n y_pred = loaded_model(X)\n print(\"predict X: {}, y_pred: {:.2f}\".format(x, y_pred.data.item()))\n\n .. code-block:: text\n :caption: Output\n\n predict X: 4.0, y_pred: 7.57\n predict X: 6.0, y_pred: 11.64\n predict X: 30.0, y_pred: 60.48\n \"\"\"\n import torch\n\n local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)\n try:\n pyfunc_conf = _get_flavor_configuration(\n model_path=local_model_path, flavor_name=pyfunc.FLAVOR_NAME\n )\n except MlflowException:\n pyfunc_conf = {}\n code_subpath = pyfunc_conf.get(pyfunc.CODE)\n if code_subpath is not None:\n pyfunc_utils._add_code_to_system_path(\n code_path=os.path.join(local_model_path, code_subpath)\n )\n\n pytorch_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)\n if torch.__version__ != pytorch_conf[\"pytorch_version\"]:\n _logger.warning(\n \"Stored model version '%s' does not match installed PyTorch version '%s'\",\n pytorch_conf[\"pytorch_version\"],\n torch.__version__,\n )\n torch_model_artifacts_path = os.path.join(local_model_path, pytorch_conf[\"model_data\"])\n return _load_model(path=torch_model_artifacts_path, **kwargs)\n\n\ndef _load_pyfunc(path, **kwargs):\n \"\"\"\n Load PyFunc implementation. 
Called by ``pyfunc.load_pyfunc``.\n\n :param path: Local filesystem path to the MLflow Model with the ``pytorch`` flavor.\n \"\"\"\n return _PyTorchWrapper(_load_model(path, **kwargs))\n\n\nclass _PyTorchWrapper(object):\n \"\"\"\n Wrapper class that creates a predict function such that\n predict(data: pd.DataFrame) -> model's output as pd.DataFrame (pandas DataFrame)\n \"\"\"\n\n def __init__(self, pytorch_model):\n self.pytorch_model = pytorch_model\n\n def predict(self, data, device=\"cpu\"):\n import torch\n\n if isinstance(data, pd.DataFrame):\n inp_data = data.values.astype(np.float32)\n elif isinstance(data, np.ndarray):\n inp_data = data\n elif isinstance(data, (list, dict)):\n raise TypeError(\n \"The PyTorch flavor does not support List or Dict input types. \"\n \"Please use a pandas.DataFrame or a numpy.ndarray\"\n )\n else:\n raise TypeError(\"Input data should be pandas.DataFrame or numpy.ndarray\")\n\n self.pytorch_model.to(device)\n self.pytorch_model.eval()\n with torch.no_grad():\n input_tensor = torch.from_numpy(inp_data).to(device)\n preds = self.pytorch_model(input_tensor)\n if not isinstance(preds, torch.Tensor):\n raise TypeError(\n \"Expected PyTorch model to output a single output tensor, \"\n \"but got output of type '{}'\".format(type(preds))\n )\n if isinstance(data, pd.DataFrame):\n predicted = pd.DataFrame(preds.numpy())\n predicted.index = data.index\n else:\n predicted = preds.numpy()\n return predicted\n\n\n@experimental\ndef log_state_dict(state_dict, artifact_path, **kwargs):\n \"\"\"\n Log a state_dict as an MLflow artifact for the current run.\n\n .. warning::\n This function just logs a state_dict as an artifact and doesn't generate\n an :ref:`MLflow Model <models>`.\n\n :param state_dict: state_dict to be saved.\n :param artifact_path: Run-relative artifact path.\n :param kwargs: kwargs to pass to ``torch.save``.\n\n .. code-block:: python\n :caption: Example\n\n # Log a model as a state_dict\n with mlflow.start_run():\n state_dict = model.state_dict()\n mlflow.pytorch.log_state_dict(state_dict, artifact_path=\"model\")\n\n # Log a checkpoint as a state_dict\n with mlflow.start_run():\n state_dict = {\n \"model\": model.state_dict(),\n \"optimizer\": optimizer.state_dict(),\n \"epoch\": epoch,\n \"loss\": loss,\n }\n mlflow.pytorch.log_state_dict(state_dict, artifact_path=\"checkpoint\")\n \"\"\"\n\n with TempDir() as tmp:\n local_path = tmp.path()\n save_state_dict(state_dict=state_dict, path=local_path, **kwargs)\n mlflow.log_artifacts(local_path, artifact_path)\n\n\n@experimental\ndef save_state_dict(state_dict, path, **kwargs):\n \"\"\"\n Save a state_dict to a path on the local file system\n\n :param state_dict: state_dict to be saved.\n :param path: Local path where the state_dict is to be saved.\n :param kwargs: kwargs to pass to ``torch.save``.\n \"\"\"\n import torch\n\n # The object type check here aims to prevent a scenario where a user accidentally passees\n # a model instead of a state_dict and `torch.save` (which accepts both model and state_dict)\n # successfully completes, leaving the user unaware of the mistake.\n if not isinstance(state_dict, dict):\n raise TypeError(\n \"Invalid object type for `state_dict`: {}. 
Must be an instance of `dict`\".format(\n type(state_dict)\n )\n )\n\n os.makedirs(path, exist_ok=True)\n state_dict_path = os.path.join(path, _TORCH_STATE_DICT_FILE_NAME)\n torch.save(state_dict, state_dict_path, **kwargs)\n\n\n@experimental\ndef load_state_dict(state_dict_uri, **kwargs):\n \"\"\"\n Load a state_dict from a local file or a run.\n\n :param state_dict_uri: The location, in URI format, of the state_dict, for example:\n\n - ``/Users/me/path/to/local/state_dict``\n - ``relative/path/to/local/state_dict``\n - ``s3://my_bucket/path/to/state_dict``\n - ``runs:/<mlflow_run_id>/run-relative/path/to/state_dict``\n\n For more information about supported URI schemes, see\n `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#\n artifact-locations>`_.\n\n :param kwargs: kwargs to pass to ``torch.load``.\n :return: A state_dict\n\n .. code-block:: python\n :caption: Example\n\n with mlflow.start_run():\n artifact_path = \"model\"\n mlflow.pytorch.log_state_dict(model.state_dict(), artifact_path)\n state_dict_uri = mlflow.get_artifact_uri(artifact_path)\n\n state_dict = mlflow.pytorch.load_state_dict(state_dict_uri)\n \"\"\"\n import torch\n\n local_path = _download_artifact_from_uri(artifact_uri=state_dict_uri)\n state_dict_path = os.path.join(local_path, _TORCH_STATE_DICT_FILE_NAME)\n return torch.load(state_dict_path, **kwargs)\n\n\n@experimental\n@autologging_integration(FLAVOR_NAME)\ndef autolog(\n log_every_n_epoch=1,\n log_models=True,\n disable=False,\n exclusive=False,\n disable_for_unsupported_versions=False,\n silent=False,\n): # pylint: disable=unused-argument\n \"\"\"\n Enables (or disables) and configures autologging from `PyTorch Lightning\n <https://pytorch-lightning.readthedocs.io/en/latest>`_ to MLflow.\n\n Autologging is performed when you call the `fit` method of\n `pytorch_lightning.Trainer() \\\n <https://pytorch-lightning.readthedocs.io/en/latest/trainer.html#>`_.\n\n Explore the complete `PyTorch MNIST \\\n <https://github.com/mlflow/mlflow/tree/master/examples/pytorch/MNIST>`_ for\n an expansive example with implementation of additional lightening steps.\n\n **Note**: Autologging is only supported for PyTorch Lightning models,\n i.e., models that subclass\n `pytorch_lightning.LightningModule \\\n <https://pytorch-lightning.readthedocs.io/en/latest/lightning_module.html>`_.\n In particular, autologging support for vanilla PyTorch models that only subclass\n `torch.nn.Module <https://pytorch.org/docs/stable/generated/torch.nn.Module.html>`_\n is not yet available.\n\n :param log_every_n_epoch: If specified, logs metrics once every `n` epochs. By default, metrics\n are logged after every epoch.\n :param log_models: If ``True``, trained models are logged as MLflow model artifacts.\n If ``False``, trained models are not logged.\n :param disable: If ``True``, disables the PyTorch Lightning autologging integration.\n If ``False``, enables the PyTorch Lightning autologging integration.\n :param exclusive: If ``True``, autologged content is not logged to user-created fluent runs.\n If ``False``, autologged content is logged to the active fluent run,\n which may be user-created.\n :param disable_for_unsupported_versions: If ``True``, disable autologging for versions of\n pytorch and pytorch-lightning that have not been tested against this version\n of the MLflow client or are incompatible.\n :param silent: If ``True``, suppress all event logs and warnings from MLflow during PyTorch\n Lightning autologging. 
If ``False``, show all events and warnings during\n PyTorch Lightning autologging.\n\n .. code-block:: python\n :caption: Example\n\n import os\n\n import pytorch_lightning as pl\n import torch\n from torch.nn import functional as F\n from torch.utils.data import DataLoader\n from torchvision import transforms\n from torchvision.datasets import MNIST\n from pytorch_lightning.metrics.functional import accuracy\n\n import mlflow.pytorch\n from mlflow.tracking import MlflowClient\n\n # For brevity, here is the simplest most minimal example with just a training\n # loop step, (no validation, no testing). It illustrates how you can use MLflow\n # to auto log parameters, metrics, and models.\n\n class MNISTModel(pl.LightningModule):\n def __init__(self):\n super(MNISTModel, self).__init__()\n self.l1 = torch.nn.Linear(28 * 28, 10)\n\n def forward(self, x):\n return torch.relu(self.l1(x.view(x.size(0), -1)))\n\n def training_step(self, batch, batch_nb):\n x, y = batch\n loss = F.cross_entropy(self(x), y)\n acc = accuracy(loss, y)\n\n # Use the current of PyTorch logger\n self.log(\"train_loss\", loss, on_epoch=True)\n self.log(\"acc\", acc, on_epoch=True)\n return loss\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=0.02)\n\n def print_auto_logged_info(r):\n\n tags = {k: v for k, v in r.data.tags.items() if not k.startswith(\"mlflow.\")}\n artifacts = [f.path for f in MlflowClient().list_artifacts(r.info.run_id, \"model\")]\n print(\"run_id: {}\".format(r.info.run_id))\n print(\"artifacts: {}\".format(artifacts))\n print(\"params: {}\".format(r.data.params))\n print(\"metrics: {}\".format(r.data.metrics))\n print(\"tags: {}\".format(tags))\n\n # Initialize our model\n mnist_model = MNISTModel()\n\n # Initialize DataLoader from MNIST Dataset\n train_ds = MNIST(os.getcwd(), train=True,\n download=True, transform=transforms.ToTensor())\n train_loader = DataLoader(train_ds, batch_size=32)\n\n # Initialize a trainer\n trainer = pl.Trainer(max_epochs=20, progress_bar_refresh_rate=20)\n\n # Auto log all MLflow entities\n mlflow.pytorch.autolog()\n\n # Train the model\n with mlflow.start_run() as run:\n trainer.fit(mnist_model, train_loader)\n\n # fetch the auto logged parameters and metrics\n print_auto_logged_info(mlflow.get_run(run_id=run.info.run_id))\n\n .. code-block:: text\n :caption: Output\n\n run_id: 42caa17b60cb489c8083900fb52506a7\n artifacts: ['model/MLmodel', 'model/conda.yaml', 'model/data']\n params: {'betas': '(0.9, 0.999)',\n 'weight_decay': '0',\n 'epochs': '20',\n 'eps': '1e-08',\n 'lr': '0.02',\n 'optimizer_name': 'Adam', '\n amsgrad': 'False'}\n metrics: {'acc_step': 0.0,\n 'train_loss_epoch': 1.0917967557907104,\n 'train_loss_step': 1.0794280767440796,\n 'train_loss': 1.0794280767440796,\n 'acc_epoch': 0.0033333334140479565,\n 'acc': 0.0}\n tags: {'Mode': 'training'}\n\n .. figure:: ../_static/images/pytorch_lightening_autolog.png\n\n PyTorch autologged MLflow entities\n \"\"\"\n import pytorch_lightning as pl\n from mlflow.pytorch._pytorch_autolog import _create_patch_fit\n\n fit = _create_patch_fit(log_every_n_epoch=log_every_n_epoch, log_models=log_models)\n safe_patch(FLAVOR_NAME, pl.Trainer, \"fit\", fit, manage_run=True)\n" ]
[ [ "torch.jit.ScriptModule.save", "torch.save", "torch.no_grad", "torch.from_numpy", "torch.jit.load", "torch.load" ] ]
samsartor/score_sde
[ "d25c8d092a68d643c796d771c55f80075aa041d1" ]
[ "models/ddpm.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: skip-file\n\"\"\"DDPM model.\n\nThis code is the pytorch equivalent of:\nhttps://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/models/unet.py\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport functools\nimport numpy as np\n\nfrom . import utils, layers, normalization\n\nRefineBlock = layers.RefineBlock\nResidualBlock = layers.ResidualBlock\nResnetBlockDDPM = layers.ResnetBlockDDPM\nUpsample = layers.Upsample\nDownsample = layers.Downsample\nconv3x3 = layers.ddpm_conv3x3\nget_act = layers.get_act\nget_normalization = normalization.get_normalization\ndefault_initializer = layers.default_init\n\n\[email protected]_model(name='ddpm')\nclass DDPM(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.act = act = get_act(config)\n self.register_buffer('sigmas', torch.tensor(utils.get_sigmas(config)))\n\n self.nf = nf = config.model.nf\n ch_mult = config.model.ch_mult\n self.num_res_blocks = num_res_blocks = config.model.num_res_blocks\n self.attn_resolutions = attn_resolutions = config.model.attn_resolutions\n dropout = config.model.dropout\n resamp_with_conv = config.model.resamp_with_conv\n self.num_resolutions = num_resolutions = len(ch_mult)\n channels, image_size, self.centered = utils.diffusion_domain(config)\n self.all_resolutions = all_resolutions = [image_size // (2 ** i) for i in range(num_resolutions)]\n\n AttnBlock = functools.partial(layers.AttnBlock)\n self.conditional = conditional = config.model.conditional\n ResnetBlock = functools.partial(ResnetBlockDDPM, act=act, temb_dim=4 * nf, dropout=dropout)\n if conditional:\n # Condition on noise levels.\n modules = [nn.Linear(nf, nf * 4)]\n modules[0].weight.data = default_initializer()(modules[0].weight.data.shape)\n nn.init.zeros_(modules[0].bias)\n modules.append(nn.Linear(nf * 4, nf * 4))\n modules[1].weight.data = default_initializer()(modules[1].weight.data.shape)\n nn.init.zeros_(modules[1].bias)\n else:\n modules = []\n\n # Downsampling block\n modules.append(conv3x3(channels, nf))\n hs_c = [nf]\n in_ch = nf\n for i_level in range(num_resolutions):\n # Residual blocks for this resolution\n for i_block in range(num_res_blocks):\n out_ch = nf * ch_mult[i_level]\n modules.append(ResnetBlock(in_ch=in_ch, out_ch=out_ch))\n in_ch = out_ch\n if all_resolutions[i_level] in attn_resolutions:\n modules.append(AttnBlock(channels=in_ch))\n hs_c.append(in_ch)\n if i_level != num_resolutions - 1:\n modules.append(Downsample(channels=in_ch, with_conv=resamp_with_conv))\n hs_c.append(in_ch)\n\n in_ch = hs_c[-1]\n modules.append(ResnetBlock(in_ch=in_ch))\n modules.append(AttnBlock(channels=in_ch))\n modules.append(ResnetBlock(in_ch=in_ch))\n\n # Upsampling block\n for i_level in reversed(range(num_resolutions)):\n for i_block in range(num_res_blocks + 1):\n out_ch = nf * ch_mult[i_level]\n modules.append(ResnetBlock(in_ch=in_ch + hs_c.pop(), out_ch=out_ch))\n in_ch = out_ch\n if 
all_resolutions[i_level] in attn_resolutions:\n modules.append(AttnBlock(channels=in_ch))\n if i_level != 0:\n modules.append(Upsample(channels=in_ch, with_conv=resamp_with_conv))\n\n assert not hs_c\n modules.append(nn.GroupNorm(num_channels=in_ch, num_groups=32, eps=1e-6))\n modules.append(conv3x3(in_ch, channels, init_scale=0.))\n self.all_modules = nn.ModuleList(modules)\n\n self.scale_by_sigma = config.model.scale_by_sigma\n\n def forward(self, x, labels):\n modules = self.all_modules\n m_idx = 0\n if self.conditional:\n # timestep/scale embedding\n timesteps = labels\n temb = layers.get_timestep_embedding(timesteps, self.nf)\n temb = modules[m_idx](temb)\n m_idx += 1\n temb = modules[m_idx](self.act(temb))\n m_idx += 1\n else:\n temb = None\n\n if self.centered:\n # Input is in [-1, 1]\n h = x\n else:\n # Input is in [0, 1]\n h = 2 * x - 1.\n\n # Downsampling block\n hs = [modules[m_idx](h)]\n m_idx += 1\n for i_level in range(self.num_resolutions):\n # Residual blocks for this resolution\n for i_block in range(self.num_res_blocks):\n h = modules[m_idx](hs[-1], temb)\n m_idx += 1\n if h.shape[-1] in self.attn_resolutions:\n h = modules[m_idx](h)\n m_idx += 1\n hs.append(h)\n if i_level != self.num_resolutions - 1:\n hs.append(modules[m_idx](hs[-1]))\n m_idx += 1\n\n h = hs[-1]\n h = modules[m_idx](h, temb)\n m_idx += 1\n h = modules[m_idx](h)\n m_idx += 1\n h = modules[m_idx](h, temb)\n m_idx += 1\n\n # Upsampling block\n for i_level in reversed(range(self.num_resolutions)):\n for i_block in range(self.num_res_blocks + 1):\n h = modules[m_idx](torch.cat([h, hs.pop()], dim=1), temb)\n m_idx += 1\n if h.shape[-1] in self.attn_resolutions:\n h = modules[m_idx](h)\n m_idx += 1\n if i_level != 0:\n h = modules[m_idx](h)\n m_idx += 1\n\n assert not hs\n h = self.act(modules[m_idx](h))\n m_idx += 1\n h = modules[m_idx](h)\n m_idx += 1\n assert m_idx == len(modules)\n\n if self.scale_by_sigma:\n # Divide the output by sigmas. Useful for training with the NCSN loss.\n # The DDPM loss scales the network output by sigma in the loss function,\n # so no need of doing it here.\n used_sigmas = self.sigmas[labels, None, None, None]\n h = h / used_sigmas\n\n return h\n" ]
[ [ "torch.nn.Linear", "torch.nn.init.zeros_", "torch.nn.GroupNorm", "torch.nn.ModuleList" ] ]
Eddy-zheng/ImageDT
[ "78c9e671526422f28bd564cad9879ef95f12b454" ]
[ "imagedt/tools/metrics/roc_curve.py" ]
[ "# coding: utf-8\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport imagedt\n\n\ndef roc(test_file, show_fig=False):\n lines = imagedt.file.readlines(test_file)\n\n # line: Image_name,True_class_name,Label_id,Predict_id,Confidence,Match\n try:\n int(lines[0].split(',')[-1])\n start_line_num = 0\n except ValueError:\n start_line_num = 1\n\n results = np.array([[line.split(',')[-2], line.split(',')[-1]] for line in lines[start_line_num:]])\n prob_arr = np.array(results[:, 0], dtype=float)\n match_arr = np.array(results[:, 1], dtype=int)\n\n # imagedt.tools.set_pdb()\n save_lines, thresvalue = [], []\n tpr, fnr = [], []\n for index, thres in enumerate(np.arange(0,1.001,0.001)):\n tp, fn, fp, tn = 0, 0, 0, 0\n for res_ind, pre_prob in enumerate(prob_arr):\n if float(prob_arr[res_ind]) >= thres:\n if int(match_arr[res_ind]):\n tp += 1\n else:\n fp += 1\n else:\n if int(match_arr[res_ind]):\n fn += 1\n else:\n tn += 1\n\n y = float(tp) / np.maximum(float(tp + fn), np.finfo(np.float64).eps)\n x = float(fp) / np.maximum(float(fp + tn), np.finfo(np.float64).eps)\n thresvalue.append(thres)\n tpr.append(y)\n fnr.append(x)\n save_lines.append([str(x), str(y), str(thres)])\n\n save_dir = os.path.dirname(test_file)\n\n\n imagedt.file.write_csv(save_lines, os.path.join(save_dir, 'thres-fpr-tpr.csv'))\n fig=plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xscale('log')\n plt.grid()\n plt.xlabel('FPR')\n plt.ylabel('TPR/THRES')\n plt.title('ROC-THRES')\n ax.plot(fnr, tpr, color='red')\n ax.plot(fnr, thresvalue, color='green')\n plt.legend(('roc', 'fpr-thres'),loc='best')\n\n plt.savefig(os.path.join(save_dir, 'roc.png'), dpi=300)\n print (\"ROC curve figure save: {0}\".format(save_dir))\n if show_fig:\n plt.show()\n" ]
[ [ "numpy.array", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "numpy.finfo", "numpy.arange", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
chicolucio/estatisticas-oiq-2019
[ "017da73f3bef6a6f40d7dd1214621cad7c14be2e" ]
[ "resultados/input_data.py" ]
[ "# coding: utf-8\n\nimport pandas as pd\nimport numpy as np\n\n\ndef input_data(file_path):\n \"\"\"Gera um Pandas DF a partir de um arquivo csv.\n\n Parameters\n ----------\n file_path : type string\n Caminho do arquivo.\n\n Returns\n -------\n type\n Pandas DataFrame. NaN substituído por 0\n\n \"\"\"\n df = pd.read_csv(file_path)\n return df.fillna(0)\n\n\ndef no_absents(df):\n \"\"\"Elimina alunos faltosos. No caso, considerou-se que faltaram os alunos\n que estavam com todas as notas zeradas (coluna 2 em diante). Funciona pois\n nenhum aluno presente zerou a prova. Caso isso algum dia aconteça, vai\n falhar. Talvez adotar um sistema de coluna de flag para indicar presença.\n\n Parameters\n ----------\n df : type Pandas DataFrame.\n DataFrame que será tratado.\n\n Returns\n -------\n type\n DataFrame sem alunos faltosos.\n\n \"\"\"\n columns = df.columns[2:]\n index_values = df[(df[columns] == 0).all(axis=1)].index\n return df.drop(index_values).reset_index(drop=True)\n\n\ndef grades(df):\n \"\"\"Calcula a pontuação da prova considerando objetivas e discursivas.\n\n Parameters\n ----------\n df : type Pandas DataFrame\n DataFrame de início.\n\n Returns\n -------\n type Pandas DataFrame\n DataFrame com as colunas de pontuação.\n\n \"\"\"\n df['Pontos - Objetiva'] = df.iloc[:, 2] * 5\n df['Pontos - Discursiva'] = df.iloc[:, 3] + df.iloc[:, 4]\n df['Pontuação final'] = df['Pontos - Objetiva'] + df['Pontos - Discursiva']\n return df\n\n\ndef awards(df, head=10):\n \"\"\"Ordena DataFrame de acordo com a nota final. Desempate na discursiva.\n\n Parameters\n ----------\n df : type Pandas DataFrame\n DataFrame de início.\n head : type integer\n Número de linhas que será exibido (padrão é 10 pois costuma-se premiar\n os 10 primeiros - 3 medalhas e 7 menções honrosas).\n\n Returns\n -------\n type Pandas DataFrame\n DataFrame de saída.\n\n \"\"\"\n df = df.sort_values(['Pontuação final', 'Pontos - Discursiva'],\n ascending=False).head(head).reset_index(drop=True)\n df.index = df.index + 1 # para facilitar, numerando a partir de 1\n return df\n\n\ndef bins(df):\n \"\"\"Segrega os dados de notas de 10 em 10 pontos para construção de gráficos.\n\n Parameters\n ----------\n df : type Pandas DataFrame\n DataFrame de início.\n\n Returns\n -------\n type Pandas DataFrame\n DataFrame final.\n\n \"\"\"\n df_bins = pd.DataFrame(df['ALUNO'].rename('Contagem').groupby(pd.cut(\n df['Pontuação final'].rename('Intervalos'), np.arange(0, 101, 10), right=False)).count())\n\n df_bins['Contagem /%'] = round(100 * df_bins['Contagem'] /\n df_bins['Contagem'].sum(), 2)\n df_bins['Contagem cumulativa'] = df_bins['Contagem'].cumsum()\n df_bins['Contagem /% cumulativa'] = df_bins['Contagem /%'].cumsum()\n\n return df_bins\n\n\ndef latex(df):\n \"\"\"Converte DataFrame em tabela para LaTeX.\n\n Parameters\n ----------\n df : Pandas DataFrame\n DataFrame de início.\n\n Returns\n -------\n type None\n Comando print para apresentar tabela em LaTeX que pode ser copiada.\n\n \"\"\"\n return print(df.to_latex())\n\n\ndef pivot_tables(df, values, index, column, margins_name='Total'):\n \"\"\"Gera tabela dinâmica Pandas de acordo com os dados passados\"\"\"\n pivot_df = pd.pivot_table(df, values=values, index=index, columns=column,\n aggfunc=pd.Series.nunique, margins=True,\n margins_name=margins_name)\n return pivot_df.fillna(0)\n\n\ndef pivot_data(df, column):\n \"\"\"Extrai dados de uma dada coluna (contagem de 0) de uma pivot table,\n excluindo a linha de total\"\"\"\n rows = df.shape[0]\n return df.iloc[:rows - 1, 
column]\n\n\ndef pivot_index(df):\n \"\"\"Extrai os indexadores de uma pivot table, exlcuindo a linha de total\"\"\"\n rows = df.shape[0]\n return df.index[:rows - 1]\n\n\ndef pivot_total(df):\n \"\"\"Extrai os totais por coluna de uma pivot table. Retorna valores e labels\"\"\"\n rows = df.shape[0]\n columns = df.shape[1]\n values = df.iloc[rows - 1, :columns - 1]\n labels = df.columns[:columns - 1]\n return values, labels\n\n\ndef stats_table(df):\n \"\"\"Gera tabela com resumo dos dados estatíticos\"\"\"\n df.loc['IQR'] = df.loc['75%'] - df.loc['25%']\n df = df.T.drop(['Acertos parte A'])\n df = df.drop(['count'], axis=1)\n df = df.reindex(['Pontos - Objetiva',\n 'Q17',\n 'Q18',\n 'Pontos - Discursiva',\n 'Pontuação final'])\n df['mean'] = round(df['mean'], 2)\n df['std'] = round(df['std'], 2)\n return df\n\n\ndef semester(df_row):\n \"\"\"Retorna o perído do aluno de acordo com o código da turma\"\"\"\n number = df_row[1]\n return '{0}º período'.format(number)\n\n\ndef semester_shift(df):\n \"\"\"Retorna DF com o turno e o período do aluno de acordo com o código da turma\"\"\"\n df[['cod', 'num']] = df['TURMA'].str.split(\n '(\\d+)', expand=True).drop(2, axis=1)\n\n df['Período'] = df.loc[:, 'num'].apply(semester)\n\n df['Turno'] = np.where(df['num'].str[0] == '1', 'Manhã',\n np.where(df['num'].str[0] == '2', 'Tarde', 'Noite'))\n\n return df\n" ]
[ [ "numpy.where", "pandas.read_csv", "pandas.pivot_table", "numpy.arange" ] ]
leejw51/BumblebeeNet
[ "fd9bedbfaad9bb94bcb4f1dc44eec8125bb9c690" ]
[ "Test/DrawGraph2.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nx = np.arange(0,6, 0.1)\ny1 = np.sin(x)\ny2 = np.cos(x)\n\nplt.plot(x,y1, label=\"sin\")\nplt.plot(x,y2,linestyle=\"--\", label=\"cos\")\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nplt.title('sin & cos')\nplt.legend()\nplt.show()\n" ]
[ [ "numpy.sin", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "numpy.arange", "numpy.cos", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
johan-kallstrom/bayesian-learning-course
[ "4d9a41a540821bd7f6791919471659c5b7643f4b" ]
[ "experiments/run_experiment_02.py" ]
[ "# Run experiment 2\n\nfrom bayesian_learning.bandits import NonStationaryBernoulliBandit\nfrom bayesian_learning.thompson_sampling import ThompsonSampling, SlidingWindowThompsonSampling\nfrom bayesian_learning.bayes_ucb import BayesUcb, SlidingWindowBayesUcb\nfrom bayesian_learning.ucb import Ucb\nfrom bayesian_learning.q_learning import EpsilonGreedySampling\n\nimport numpy as np\n\nfrom matplotlib import pyplot as plt\n\nimport pickle\n\n# Experiment settings\nnp.random.seed(2020)\nn_runs = 100\nn_draws = 10000\nbandit_probs = np.array([0.8, 0.5, 0.2])\nbernoulli_bandit = NonStationaryBernoulliBandit(probs=bandit_probs, total_draws=n_draws)\n\n# Priors\nuninformed_priors = np.array([[1.0, 1.0],\n [1.0, 1.0],\n [1.0, 1.0]])\n\n# Players\nplayers = []\n\nthompson_sampler = ThompsonSampling(priors=uninformed_priors)\nplayers.append([thompson_sampler,['Thompson Sampling']])\n\nsliding_window_thompson_sampler = SlidingWindowThompsonSampling(priors=uninformed_priors)\nplayers.append([sliding_window_thompson_sampler,['Sliding Window Thompson Sampling']])\n\nbayes_ucb = BayesUcb(priors=uninformed_priors)\nplayers.append([bayes_ucb,['Bayes UCB']])\n\nsliding_window_bayes_ucb = SlidingWindowBayesUcb(priors=uninformed_priors)\nplayers.append([sliding_window_bayes_ucb,['Sliding Window Bayes UCB']])\n\nucb = Ucb(n_arms=bandit_probs.shape[0])\nplayers.append([ucb,['UCB']])\n\nepsilon_greedy_sampling = EpsilonGreedySampling(n_arms=bandit_probs.shape[0])\nplayers.append([epsilon_greedy_sampling,['Epsilon Greedy']])\n\n# Run the experiment\nn_players = len(players)\ncumulative_reward = np.zeros(shape=(1, n_players, n_runs))\ncumulative_reward_history = np.zeros(shape=(n_draws, n_players, n_runs))\ncumulative_regret = np.zeros(shape=(1, n_players, n_runs))\ncumulative_regret_history = np.zeros(shape=(n_draws, n_players, n_runs))\nposterior_estimates = np.zeros(shape=(9,3,2,n_runs))\nfor run in range(n_runs):\n print(\"Run: \", run+1)\n for player in players:\n player[0].reset()\n post_idx = 0\n for draw in range(n_draws):\n bernoulli_bandit._update_probs(draw)\n for i, player in enumerate(players):\n # Draw and learn\n arm = player[0].select_arm()\n reward, expected_regret = bernoulli_bandit.draw(arm)\n player[0].learn(arm, reward)\n\n # Calculate metrics\n cumulative_reward[:,i,run] += reward\n cumulative_reward_history[draw, i, run] = cumulative_reward[:,i,run]\n cumulative_regret[:,i,run] += expected_regret\n cumulative_regret_history[draw, i, run] = cumulative_regret[:,i,run]\n\nmean_regret = np.mean(cumulative_regret_history,axis=2)\npickle.dump(mean_regret, open(\"./experiments/experiment_2_mean_regret_long.p\", \"wb\"))\nfor i in range(n_players): \n plt.plot(mean_regret[:,i], label=players[i][1][0])\n\nplt.xlabel('draw')\nplt.ylabel('cumulative regret')\nplt.title('Comparison of Bayesian and Frequentist Algorithms')\nplt.legend()\nplt.savefig(\"Experiment2_Regret_long.pdf\", bbox_inches='tight')\n# plt.show()\n\n\n# Run experiment to see regret as function of window sizee\nn_players = 2\nn_runs = 100\nwindow_lengths = [10, 50, 100, 250, 500, 1000, 1500, 2000, 2500, 3000]\n\nregrets = np.zeros(shape=(n_players,len(window_lengths)))\nfor w_idx, window_length in enumerate(window_lengths):\n cumulative_regret = np.zeros(shape=(n_players, n_runs))\n for run in range(n_runs):\n print(\"Run: \", run+1)\n players = []\n\n sliding_window_thompson_sampler = SlidingWindowThompsonSampling(priors=uninformed_priors, window_length=window_length)\n players.append([sliding_window_thompson_sampler,['Thompson 
Sampling']])\n\n sliding_window_bayes_ucb = SlidingWindowBayesUcb(priors=uninformed_priors, window_length=window_length)\n players.append([sliding_window_bayes_ucb,['Bayes UCB']])\n post_idx = 0\n for draw in range(n_draws):\n bernoulli_bandit._update_probs(draw)\n for i, player in enumerate(players):\n # Draw and learn\n arm = player[0].select_arm()\n reward, expected_regret = bernoulli_bandit.draw(arm)\n player[0].learn(arm, reward)\n\n # Calculate metrics\n cumulative_regret[i,run] += expected_regret\n mean_regret = np.mean(cumulative_regret,axis=1)\n for j in range(n_players): \n regrets[j,w_idx] = mean_regret[j]\n\npickle.dump(regrets, open(\"./experiments/experiment_2_regretes_by_window_long.p\", \"wb\"))\n\nplt.clf()\nfor i in range(n_players): \n plt.plot(window_lengths, regrets[i,:], label=players[i][1][0])\n\nplt.xlabel('window length')\nplt.ylabel('cumulative regret')\nplt.title('Effect of Window Length on Regret')\nplt.legend()\nplt.savefig(\"Experiment2_Regret_by_Window_long.pdf\", bbox_inches='tight')\nplt.show()" ]
[ [ "numpy.array", "numpy.zeros", "numpy.random.seed", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.plot", "numpy.mean", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.clf" ] ]
China-chenzhibo/GuessByHistory-Model
[ "5e520bd9b8c7415a05c09741a2d6796c0e5f470e" ]
[ "GBH_testCase.py" ]
[ "# test\r\nimport check_day\r\nimport datetime\r\nfrom dateutil.relativedelta import relativedelta\r\nimport pandas as pd\r\nimport os\r\nimport baostock as bs\r\nimport time\r\n\r\ntest_group = [\r\n datetime.date(2021, 11, 22), # 周一/交易日\r\n datetime.date(2021, 11, 21), # 周末\r\n datetime.date(2021, 10, 8), # 国庆后一天\r\n datetime.date(2021, 10, 1), # 国庆后一天\r\n datetime.date(2021, 9, 30), # 国庆前一天\r\n\r\n]\r\n\r\n# 参考标的可取 sh.000001 | sz.399376 | sh.600030\r\n\r\n\"\"\"\r\nInitialization Phase: 处理输入的数据|下载数据|打标签\r\n\"\"\"\r\ndef download_data():\r\n global targetCode\r\n getMarketIndex(targetCode, start_date)\r\n return 0\r\n\r\n\r\ndef getMarketIndex(targetCode, b_date):\r\n output_Folder = os.getcwd() + \"\\\\outputIndex\\\\nowaday_data\\\\\"\r\n filePath = output_Folder + targetCode + \".csv\"\r\n if os.path.exists(filePath) and bool(datetime.date.today().strftime('%Y-%m-%d')==time.strftime('%Y-%m-%d',time.localtime(os.stat(filePath).st_mtime))):\r\n pass\r\n else:\r\n bs.login() # 登陆系统,显示登陆返回信息\r\n rs = bs.query_history_k_data_plus(targetCode, \"date,code,open,high,low,close,preclose,volume,amount,pctChg\",\r\n start_date=b_date, frequency=\"d\")\r\n # 打印结果集\r\n data_list = []\r\n while (rs.error_code == '0') & rs.next():\r\n # 获取一条记录,将记录合并在一起\r\n data_list.append(rs.get_row_data())\r\n result = pd.DataFrame(data_list, columns=rs.fields)\r\n\r\n trade_date = datetime.datetime.strptime(start_date, '%Y-%m-%d').date()\r\n while not check_day.is_tradeDay(trade_date)[0]: # 消除新股上市的影响 默认2个月恢复正常走势\r\n trade_date =trade_date + datetime.timedelta(days=1)\r\n if result['date'][0]!=trade_date.strftime('%Y-%m-%d'):\r\n result = result[40:]\r\n result = result.drop(index=(result.loc[(result['volume'] == '0')].index)) # 消除停牌情况\r\n result.reset_index(drop=True, inplace=True)\r\n\r\n # 结果集输出到csv文件\r\n result.to_csv(output_Folder + targetCode + \".csv\", index=False)\r\n bs.logout() # 登出系统\r\n\r\n\r\ndef tag_df():\r\n global targetCode\r\n output_Folder = os.getcwd() + \"\\\\outputIndex\\\\nowaday_data\\\\\"\r\n df = pd.read_csv(output_Folder + targetCode + \".csv\")\r\n for i in range(len(df)):\r\n open = float(df['open'][i])\r\n close = float(df['close'][i])\r\n preclose = float(df['preclose'][i])\r\n df.loc[i, 'label'] = describeState(open, close, preclose)[0]\r\n df.loc[i, 'UpOrDown'] = describeState(open, close, preclose)[1]\r\n return df\r\n\r\n\r\n# 由于刚好是相等的情况极少发生,因此默认拿券商所能提供的最低费率万二,只要在这幅度内默认为“平”\r\ndef describeState(open, close, preclose):\r\n fee = 0.0003\r\n if open > (preclose * (1 + fee)):\r\n label = 'H'\r\n elif open < (preclose * (1 - fee)):\r\n label = 'L'\r\n else:\r\n label = 'S'\r\n\r\n if close > (open * (1 + fee)):\r\n label = label + 'H'\r\n elif close < (open * (1 - fee)):\r\n label = label + 'L'\r\n else:\r\n label = label + 'S'\r\n\r\n if close > (preclose * (1 + fee)):\r\n UoD = 'Up'\r\n elif close < (preclose * (1 - fee)):\r\n UoD = 'Down'\r\n else:\r\n UoD = 'Smooth'\r\n\r\n return label, UoD\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nGetProbability Phase: 获得涨跌、走势形态的概率结果\r\n\"\"\"\r\ndef isNearHoliday(input_date): # 此函数查询是否在节假日附近会返回不是 或者 是+holiday label\r\n flag = 0\r\n for i in range(3):\r\n i = i + 1\r\n for _ in range(2):\r\n i = i * (-1)\r\n Linput_date = input_date + datetime.timedelta(days=i) # 同时向左右(其中左侧权重大)找节假日\r\n if flag * i <= 0:\r\n if check_day.is_tradeDay(Linput_date)[0]:\r\n if flag == 0:\r\n flag = i\r\n else:\r\n return False\r\n else:\r\n if check_day.is_tradeDay(Linput_date)[1] != 'Weekend':\r\n holiday_label = check_day.is_tradeDay(Linput_date)[1]\r\n if i > 0: # 
判断节假日处于input_date相邻的左或右\r\n LR = 'Right'\r\n else:\r\n LR = 'Left'\r\n return True, holiday_label, LR\r\n return False\r\n\r\n\r\ndef calculate(cal_df):\r\n dict_trendP = {'HH': 0, 'HS': 0, 'HL': 0, 'LH': 0, 'LS': 0, 'LL': 0, 'SH': 0, 'SL': 0, 'SS': 0}\r\n dict_updownP = {'Up': [], 'Down': [], 'Smooth': []}\r\n Up_cout, Down_cout, Smooth_cout = [0, 0], [0, 0], [0, 0]\r\n len_df = len(cal_df)\r\n for i in range(len_df):\r\n if cal_df.loc[i]['UpOrDown'] == 'Up':\r\n Up_cout[0] = Up_cout[0] + 1\r\n Up_cout[1] = Up_cout[1] + cal_df.loc[i]['pctChg']\r\n elif cal_df.loc[i]['UpOrDown'] == 'Down':\r\n Down_cout[0] = Down_cout[0] + 1\r\n Down_cout[1] = Down_cout[1] + cal_df.loc[i]['pctChg']\r\n else:\r\n Smooth_cout[0] = Smooth_cout[0] + 1\r\n Smooth_cout[1] = Smooth_cout[1] + cal_df.loc[i]['pctChg']\r\n\r\n dict_trendP[cal_df.loc[i]['label']] = dict_trendP[cal_df.loc[i]['label']] + 1\r\n\r\n if Up_cout[0] == 0:\r\n dict_updownP['Up'] = [0, 0]\r\n else:\r\n dict_updownP['Up'] = [Up_cout[0] / len_df, Up_cout[1] / Up_cout[0]]\r\n if Down_cout[0] == 0:\r\n dict_updownP['Down'] = [0, 0]\r\n else:\r\n dict_updownP['Down'] = [Down_cout[0] / len_df, Down_cout[1] / Down_cout[0]]\r\n if Smooth_cout[0] == 0:\r\n dict_updownP['Smooth'] = [0, 0]\r\n else:\r\n dict_updownP['Smooth'] = [Smooth_cout[0] / len_df, Smooth_cout[1] / Smooth_cout[0]]\r\n for key in dict_trendP.keys():\r\n dict_trendP[key] = dict_trendP[key] / len_df\r\n\r\n return dict_updownP, dict_trendP\r\n\r\n\r\ndef locLastHoliday(input_date, holiday_label, LR): # 前提条件是已知inputdate是isNearHoliday\r\n lastyear_date = input_date + relativedelta(years=-1)\r\n lastyear_label = check_day.is_tradeDay(lastyear_date)[1]\r\n if LR == 'Left': # 说明input_date是节后的第一天\r\n if lastyear_label == holiday_label: # 减一年刚好处于节假日中\r\n ly_date = lastyear_date\r\n while 1:\r\n ly_date = ly_date + datetime.timedelta(days=1) # 向后移动到非节假日\r\n if check_day.is_tradeDay(ly_date)[0]:\r\n return ly_date\r\n else:\r\n num = 0\r\n while bool(num < 60):\r\n num = num + 1\r\n for _ in range(2):\r\n num = num * (-1)\r\n ly_date = lastyear_date + datetime.timedelta(days=num) # 先减一天,再加一天\r\n if check_day.is_tradeDay(ly_date)[1] == holiday_label:\r\n while not (check_day.is_tradeDay(ly_date)[0]):\r\n ly_date = ly_date + datetime.timedelta(days=1)\r\n return ly_date\r\n return None\r\n\r\n\r\n else: # 说明input_date是节前的最后一天\r\n if lastyear_label == holiday_label: # 减一年刚好处于节假日中\r\n ly_date = lastyear_date\r\n while 1:\r\n ly_date = ly_date + datetime.timedelta(days=-1) # 向前移动到非节假日\r\n if check_day.is_tradeDay(ly_date)[0]:\r\n return ly_date\r\n else:\r\n num = 0\r\n while bool(num < 60): # 以前的节日有些并不放假,如端午、清明、中秋,排除此影响\r\n num = num + 1\r\n for _ in range(2):\r\n num = num * (-1)\r\n ly_date = lastyear_date + datetime.timedelta(days=num) # 先减一天,再加一天\r\n if check_day.is_tradeDay(ly_date)[1] == holiday_label:\r\n while not (check_day.is_tradeDay(ly_date)[0]):\r\n ly_date = ly_date + datetime.timedelta(days=-1)\r\n return ly_date\r\n return None\r\n\r\n\r\ndef getProbability_nearHolidaty(df, input_date, holiday_label, LR, Flag): # Flag表示打不打印被统计的日期\r\n candidate_group = []\r\n ly_date = input_date\r\n while bool((ly_date + relativedelta(years=-1)) >= datetime.datetime.strptime(df['date'][0], '%Y-%m-%d').date()):\r\n ly_date = locLastHoliday(ly_date, holiday_label, LR)\r\n if ly_date == None:\r\n break\r\n else:\r\n ly_date_str = ly_date.strftime('%Y-%m-%d')\r\n candidate_group.append(ly_date_str)\r\n if Flag == 1:\r\n print(\"||被统计的日期||\") # 打印被统计的日期群\r\n print(candidate_group)\r\n print(\"\")\r\n\r\n 
column_name = ['date', 'code', 'pctChg', 'label', 'UpOrDown']\r\n empty = pd.DataFrame(columns=column_name)\r\n for i in candidate_group:\r\n empty = pd.concat([empty, df.loc[df['date'] == i][column_name]], ignore_index=True)\r\n upDownProbability, trendProbability = calculate(empty)\r\n return upDownProbability, trendProbability\r\n\r\n\r\ndef locLastDay_normalDay(input_date):\r\n lastyear_date = input_date + relativedelta(years=-1)\r\n while not check_day.is_tradeDay(lastyear_date)[0] or isNearHoliday(lastyear_date):\r\n lastyear_date = lastyear_date + datetime.timedelta(days=1)\r\n LastDayGroup = [lastyear_date.strftime('%Y-%m-%d')]\r\n Ldate = lastyear_date + datetime.timedelta(days=-1)\r\n Rdate = lastyear_date + datetime.timedelta(days=1)\r\n if not isNearHoliday(Ldate):\r\n for _ in range(2):\r\n if bool(upperlimit_date <= Ldate): # 避免回溯超上限\r\n while not check_day.is_tradeDay(Ldate)[0]:\r\n Ldate = Ldate + datetime.timedelta(days=-1)\r\n LastDayGroup.append(Ldate.strftime('%Y-%m-%d'))\r\n Ldate = Ldate + datetime.timedelta(days=-1)\r\n if isNearHoliday(Ldate + datetime.timedelta(days=-1)):\r\n break\r\n\r\n if not isNearHoliday(Rdate):\r\n for _ in range(2):\r\n while not check_day.is_tradeDay(Rdate)[0]:\r\n Rdate = Rdate + datetime.timedelta(days=1)\r\n LastDayGroup.append(Rdate.strftime('%Y-%m-%d'))\r\n Rdate = Rdate + datetime.timedelta(days=1)\r\n if isNearHoliday(Rdate + datetime.timedelta(days=1)):\r\n break\r\n\r\n return LastDayGroup\r\n\r\n\r\ndef getProbability_normalDay(df, input_date, Flag): # Flag表示打不打印被统计的日期\r\n ly_date = input_date\r\n candidate_group = []\r\n while bool((ly_date + relativedelta(years=-1)) >= datetime.datetime.strptime(df['date'][0], '%Y-%m-%d').date()):\r\n candidate_group.extend(locLastDay_normalDay(ly_date))\r\n ly_date = ly_date + relativedelta(years=-1)\r\n if Flag == 1:\r\n print(\"||被统计的日期||\")\r\n print(candidate_group)\r\n print(sorted(candidate_group, reverse=True))\r\n print(\"\")\r\n column_name = ['date', 'code', 'pctChg', 'label', 'UpOrDown']\r\n empty = pd.DataFrame(columns=column_name)\r\n for i in candidate_group:\r\n empty = pd.concat([empty, df.loc[df['date'] == i][column_name]], ignore_index=True)\r\n upDownProbability, trendProbability = calculate(empty)\r\n return upDownProbability, trendProbability\r\n\r\n\r\n\r\n\"\"\"\r\nProcessOutput Phase: 将概率结果打印出来\r\n\"\"\"\r\ndef output_fx(df, input_date):\r\n # H:high L:Low S:Smooth\r\n dict_describeState = {'HH': '高开高走', 'HS': '高开平走', 'HL': '高开低走', 'LH': '低开高走', 'LS': '低开平走', 'LL': '低开低走',\r\n 'SH': '平开高走', 'SL': '平开低走', 'SS': '平开平走'}\r\n dict_LR = {'Left': '后', 'Right': '前'}\r\n dict_holiday = {\"New Year's Day\": '元旦', 'Spring Festival': '春节', 'Tomb-sweeping Day': '清明节', 'Labour Day': '劳动节',\r\n 'Dragon Boat Festival': '端午节', 'National Day': '国庆节', 'Mid-autumn Festival': '中秋节', 'Weekend': '周末'}\r\n\r\n isNearHoliday_output = isNearHoliday(input_date)\r\n if isNearHoliday_output: # 判断是否处于节假日附近\r\n holiday_label = isNearHoliday_output[1]\r\n LR = isNearHoliday_output[2]\r\n print(\"!所查询的日期\", input_date, \"处于\" + dict_holiday[holiday_label] + \"的\" + dict_LR[LR] + \"一天\")\r\n dict_upD, dict_trend = getProbability_nearHolidaty(df, input_date, holiday_label, LR, Flag=True) # Flag=1表示打印被统计的日期\r\n print('||统计概率||')\r\n print(input_date, \"上涨的概率是\", round(dict_upD['Up'][0] * 100, 2), \"% ,历史平均上涨幅度为\", round(dict_upD['Up'][1], 2),\r\n \"% ;下跌的概率是\", round(dict_upD['Down'][0] * 100, 2), \"% ,历史平均下跌幅度为\", round(dict_upD['Down'][1], 2),\r\n \"% ;平走的概率是\", round(dict_upD['Smooth'][0] * 100, 2), 
\"% ,历史平均平走涨跌幅度为\", round(dict_upD['Smooth'][1], 4),\r\n \"%\")\r\n sorted_trend = sorted(dict_trend.items(), key=lambda item: item[1], reverse=True)\r\n strPrint = '形态上,该天'\r\n for composition in sorted_trend:\r\n strPrint = strPrint + dict_describeState[composition[0]] + '的概率是' + str(\r\n round(composition[1] * 100, 2)) + '%; '\r\n print(strPrint)\r\n\r\n\r\n else: # 输入的日期不是特殊的日期\r\n dict_upD, dict_trend = getProbability_normalDay(df, input_date, Flag=True) # Flag=1表示打印被统计的日期\r\n print('||统计概率||')\r\n print(input_date, \"上涨的概率是\", round(dict_upD['Up'][0] * 100, 2), \"% ,历史平均上涨幅度为\", round(dict_upD['Up'][1], 2),\r\n \"% ;下跌的概率是\", round(dict_upD['Down'][0] * 100, 2), \"% ,历史平均下跌幅度为\", round(dict_upD['Down'][1], 2),\r\n \"% ;平走的概率是\", round(dict_upD['Smooth'][0] * 100, 2), \"% ,历史平均平走涨跌幅度为\", round(dict_upD['Smooth'][1], 4),\r\n \"%\")\r\n sorted_trend = sorted(dict_trend.items(), key=lambda item: item[1], reverse=True)\r\n strPrint = '形态上,该天'\r\n for composition in sorted_trend:\r\n strPrint = strPrint + dict_describeState[composition[0]] + '的概率是' + str(\r\n round(composition[1] * 100, 2)) + '%; '\r\n print(strPrint)\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nVerifying Phase: 将概率结果打印出来\r\n\"\"\"\r\ndef verify_fx(df, VOtemp_date): # 取2021年作为验证\r\n correct_trend, wrong_trend, correct_ud, wrong_ud = 0, 0, 0, 0\r\n if check_day.is_tradeDay(VOtemp_date)[0]:\r\n isNearHoliday_output = isNearHoliday(VOtemp_date)\r\n if isNearHoliday_output: # 判断是否处于节假日附近\r\n holiday_label = isNearHoliday_output[1]\r\n LR = isNearHoliday_output[2]\r\n dict_upD, dict_trend = getProbability_nearHolidaty(df, VOtemp_date, holiday_label, LR , Flag=False) # Flag=0表示不打印被统计的日期\r\n else: # 输入的日期不是特殊的日期\r\n dict_upD, dict_trend = getProbability_normalDay(df, VOtemp_date, Flag=False) # Flag=0表示不打印被统计的日期\r\n\r\n for key1, value1 in dict_upD.items():\r\n if value1 == max(dict_upD['Up'], dict_upD['Down'], dict_upD['Smooth']):\r\n new_ud = key1\r\n\r\n for key2, value2 in dict_trend.items():\r\n if value2 == max(dict_trend['HH'], dict_trend['HS'], dict_trend['HL'], dict_trend['LH'], dict_trend['LS'],\r\n dict_trend['LL'], dict_trend['SH'], dict_trend['SL'], dict_trend['SS']):\r\n new_trend = key2\r\n\r\n if df.loc[df['date'] == VOtemp_date.strftime('%Y-%m-%d')]['label'].to_string()[-2:] == new_trend[-2:]:\r\n correct_trend = correct_trend + 1\r\n else:\r\n wrong_trend = wrong_trend + 1\r\n if df.loc[df['date'] == VOtemp_date.strftime('%Y-%m-%d')]['UpOrDown'].to_string()[-2:] == new_ud[-2:]:\r\n correct_ud = correct_ud + 1\r\n else:\r\n wrong_ud = wrong_ud + 1\r\n return [correct_ud, wrong_ud, correct_trend, wrong_trend]\r\n\r\n\r\ndef verify_output(df):\r\n print(\"【请稍等,正在回测...】\")\r\n VOend_date = datetime.date.today() + datetime.timedelta(days=-1)\r\n VOstart_date = VOend_date + relativedelta(years=-1)\r\n VOtemp_date = VOstart_date\r\n P = [0, 0, 0, 0] # [correct_trend,wrong_trend,correct_ud,wrong_ud]\r\n while not bool(VOtemp_date > VOend_date):\r\n temp = verify_fx(df, VOtemp_date)\r\n for j in range(len(temp)):\r\n P[j] = P[j] + temp[j]\r\n VOtemp_date = VOtemp_date + datetime.timedelta(days=1)\r\n print(\"||该标的回测一年的结果||\")\r\n print('回测', VOstart_date, '到', VOend_date, '涨跌正确率为', round(P[0] * 100 / (P[0] + P[1]), 2), \"%\")\r\n print('回测', VOstart_date, '到', VOend_date, '走势形态正确率为', round(P[2] * 100 / (P[2] + P[3]), 2), \"%\")\r\n\r\n\r\nif __name__ == '__main__':\r\n global targetCode\r\n start_date = '2005-01-01' # 默认从2005年开始统计\r\n targetCode = 'sh.600000'\r\n download_data()\r\n dict_holiday = {\"New Year's Day\": '元旦', 'Spring 
Festival': '春节', 'Tomb-sweeping Day': '清明节', 'Labour Day': '劳动节',\r\n 'Dragon Boat Festival': '端午节', 'National Day': '国庆节', 'Mid-autumn Festival': '中秋节', 'Weekend': '周末'}\r\n df = tag_df()\r\n for input_date in test_group:\r\n print(\"\")\r\n print(\"正在查询日期\", input_date, \"...\")\r\n search_date = input_date\r\n if check_day.is_tradeDay(search_date)[0]:\r\n upperlimit_date = datetime.datetime.strptime(df['date'][0], '%Y-%m-%d').date()\r\n output_fx(df, input_date)\r\n verify_output(df)\r\n else:\r\n print(\"!你所查询的日期是\" + dict_holiday[check_day.is_tradeDay(search_date)[1]] + \",股市不开盘。\")\r\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv", "pandas.concat" ] ]
TalonCB/ReChorus
[ "e5ba2bb853f1a554954be747c3a5fb11c96c1c95" ]
[ "src/models/general/BPRMF.py" ]
[ "# -*- coding: UTF-8 -*-\n# @Author : Chenyang Wang\n# @Email : [email protected]\n\n\"\"\" BPRMF\nReference:\n \"Bayesian personalized ranking from implicit feedback\"\n Rendle et al., UAI'2009.\nCMD example:\n python main.py --model_name BPRMF --emb_size 64 --lr 1e-3 --l2 1e-6 --dataset 'Grocery_and_Gourmet_Food'\n\"\"\"\n\nimport torch.nn as nn\n\nfrom models.BaseModel import GeneralModel\n\n\nclass BPRMF(GeneralModel):\n extra_log_args = ['emb_size']\n\n @staticmethod\n def parse_model_args(parser):\n parser.add_argument('--emb_size', type=int, default=64,\n help='Size of embedding vectors.')\n return GeneralModel.parse_model_args(parser)\n\n def __init__(self, args, corpus):\n self.emb_size = args.emb_size\n super().__init__(args, corpus)\n\n def _define_params(self):\n self.u_embeddings = nn.Embedding(self.user_num, self.emb_size)\n self.i_embeddings = nn.Embedding(self.item_num, self.emb_size)\n\n def forward(self, feed_dict):\n self.check_list = []\n u_ids = feed_dict['user_id'] # [batch_size]\n i_ids = feed_dict['item_id'] # [batch_size, -1]\n\n cf_u_vectors = self.u_embeddings(u_ids)\n cf_i_vectors = self.i_embeddings(i_ids)\n\n prediction = (cf_u_vectors[:, None, :] * cf_i_vectors).sum(dim=-1) # [batch_size, -1]\n return {'prediction': prediction.view(feed_dict['batch_size'], -1)}\n" ]
[ [ "torch.nn.Embedding" ] ]
david-zwicker/py-utils
[ "dae98cb3125f8160b59a1721e01640f344c9dc64" ]
[ "utils/data_structures/cache.py" ]
[ "'''\nCreated on Sep 11, 2014\n\n@author: David Zwicker <[email protected]>\n\nThis module contains functions that can be used to manage cache structures\n'''\n\nfrom __future__ import division\n\nimport collections\nfrom hashlib import sha1\nimport functools\nimport logging\nimport numbers\nimport os\nimport sys\n\ntry:\n from collections.abc import MutableMapping\nexcept ImportError: # python 2 fallback\n from collections import MutableMapping\n\nimport six\nimport numpy as np\n\n\n\ndef _hash_iter(it):\n \"\"\" get hash of an iterable but turning it into a tuple first \"\"\"\n return hash(tuple(it))\n\n\n\ndef hash_mutable(obj):\n \"\"\" return hash also for (nested) mutable objects. This function might be a\n bit slow, since it iterates over all containers and hashes objects\n recursively. \"\"\"\n # deal with some special classes\n if isinstance(obj, (list, tuple)):\n return _hash_iter(hash_mutable(v) for v in obj)\n\n if isinstance(obj, (set, frozenset)):\n return hash(frozenset(hash_mutable(v) for v in obj))\n\n if isinstance(obj, (dict, MutableMapping, collections.OrderedDict,\n collections.defaultdict, collections.Counter)):\n data = [(k, hash_mutable(v)) for k, v in sorted(six.iteritems(obj))]\n return _hash_iter(data)\n\n if isinstance(obj, np.ndarray):\n return hash(obj.tobytes())\n\n try:\n # try using the internal hash function\n return hash(obj)\n except TypeError:\n try:\n # try hashing the data buffer\n return hash(sha1(obj))\n except (ValueError, TypeError):\n # otherwise, hash the internal dict\n return hash_mutable(obj.__dict__)\n\n\n\ndef hash_readable(obj):\n \"\"\" return human readable hash also for (nested) mutable objects. This\n function returns a json-like representation of the object. The function\n might be a bit slow, since it iterates over all containers and hashes\n objects recursively. Note that this hash function tries to return the same\n value for equivalent objects, but it does not ensure that the objects can\n be reconstructed from this data. \"\"\"\n if isinstance(obj, numbers.Number):\n return str(obj)\n\n if isinstance(obj, (six.string_types, six.text_type)):\n return '\"' + str(obj).replace('\\\\', '\\\\\\\\').replace('\"', '\\\"') + '\"'\n\n if isinstance(obj, (list, tuple)):\n return '[' + ', '.join(hash_readable(v) for v in obj) + ']'\n\n if isinstance(obj, (set, frozenset)):\n return '{' + ', '.join(hash_readable(v) for v in sorted(obj)) + '}'\n\n if isinstance(obj, (dict, MutableMapping, collections.OrderedDict,\n collections.defaultdict, collections.Counter)):\n return '{' + ', '.join(hash_readable(k) + ': ' + hash_readable(v)\n for k, v in sorted(six.iteritems(obj))) + '}'\n\n if isinstance(obj, np.ndarray):\n return repr(obj)\n\n # otherwise, assume it's a generic object\n try:\n if hasattr(obj, '__getstate__'):\n data = obj.__getstate__()\n else:\n data = obj.__dict__\n\n except AttributeError:\n # strange object without a dictionary attached to it\n return repr(obj)\n\n else:\n # turn arguments into something readable\n args = ', '.join(str(k) + '=' + hash_readable(v)\n for k, v in sorted(six.iteritems(data))\n if not k.startswith('_'))\n\n return '{name}({args})'.format(name=obj.__class__.__name__, args=args)\n\n\n\ndef make_serializer(method):\n \"\"\" returns a function that serialize data with the given method. Note that\n some of the methods destroy information and cannot be reverted. 
\"\"\"\n if callable(method):\n return method\n\n if method is None:\n return lambda s: s\n\n if method == 'hash':\n return hash\n\n if method == 'hash_mutable':\n return hash_mutable\n\n if method == 'hash_readable':\n return hash_readable\n\n if method == 'json':\n import json\n return lambda s: json.dumps(s, sort_keys=True).encode('utf-8')\n\n if method == 'pickle':\n try:\n import cPickle as pickle\n except ImportError:\n import pickle\n return lambda s: pickle.dumps(s, protocol=pickle.HIGHEST_PROTOCOL)\n\n if method == 'yaml':\n import yaml\n return lambda s: yaml.dump(s).encode('utf-8')\n\n raise ValueError('Unknown serialization method `%s`' % method)\n\n\n\ndef make_unserializer(method):\n \"\"\" returns a function that unserialize data with the given method \"\"\"\n if callable(method):\n return method\n\n if method is None:\n return lambda s: s\n\n if method == 'json':\n import json\n return lambda s: json.loads(s.decode('utf-8'))\n\n if method == 'pickle':\n try:\n import cPickle as pickle\n except ImportError:\n import pickle\n\n if sys.version_info[0] > 2:\n return lambda s: pickle.loads(s)\n else:\n # python 2 sometimes needs an explicit conversion to string\n return lambda s: pickle.loads(str(s))\n\n if method == 'yaml':\n import yaml\n return yaml.full_load\n\n if method == 'yaml_unsafe':\n import yaml\n return yaml.unsafe_load\n\n raise ValueError('Unknown serialization method `%s`' % method)\n\n\n\nclass DictFiniteCapacity(collections.OrderedDict):\n \"\"\" cache with a limited number of items \"\"\"\n\n default_capacity = 100\n\n def __init__(self, *args, **kwargs):\n self.capacity = kwargs.pop('capacity', self.default_capacity)\n super(DictFiniteCapacity, self).__init__(*args, **kwargs)\n\n\n def check_length(self):\n \"\"\" ensures that the dictionary does not grow beyond its capacity \"\"\"\n while len(self) > self.capacity:\n self.popitem(last=False)\n\n\n def __setitem__(self, key, value):\n super(DictFiniteCapacity, self).__setitem__(key, value)\n self.check_length()\n\n\n def update(self, values):\n super(DictFiniteCapacity, self).update(values)\n self.check_length()\n\n\n\nclass PersistentDict(MutableMapping):\n \"\"\" a key value database which is stored on the disk\n keys and values must be strings.\n \"\"\"\n\n def __init__(self, filename):\n # open the sqlite table\n self.filename = filename\n self.open()\n\n\n def open(self):\n \"\"\" opens the database assuming that it is not open \"\"\"\n # lazy import\n import sqlite3\n try:\n self._con = sqlite3.connect(self.filename)\n except sqlite3.OperationalError as e:\n msg = str(e) + ' (path: ' + self.filename + ')'\n six.reraise(type(e), type(e)(msg), sys.exc_info()[2])\n\n self._con.text_factory = bytes # make sure that we mainly handle bytes\n\n # make sure that the cache table exists\n with self._con:\n self._con.execute(\"CREATE table IF NOT EXISTS cache (\"\n \"key BLOB PRIMARY KEY, \"\n \"value BLOB, \"\n \"time TIMESTAMP DEFAULT CURRENT_TIMESTAMP\"\n \");\")\n\n\n def clear(self):\n \"\"\" closes and opens the database \"\"\"\n self._con.close()\n os.remove(self.filename)\n self.open()\n\n\n def __del__(self):\n if hasattr(self, '_con') and self._con:\n self._con.close()\n\n\n def __len__(self):\n return self._con.execute(\"SELECT Count(*) FROM cache\").fetchone()[0]\n\n\n def __getitem__(self, key):\n if not isinstance(key, six.binary_type):\n raise TypeError('Key must be bytes, but was %r' % key)\n res = self._con.execute(\"SELECT value FROM cache WHERE key=? 
\"\n \"LIMIT 1\", (key,)).fetchone()\n if res:\n return res[0]\n else:\n raise KeyError(key)\n\n\n def __setitem__(self, key, value):\n if not (isinstance(key, six.binary_type) and\n isinstance(value, six.binary_type)):\n raise TypeError('Keys and values must be bytes')\n with self._con:\n self._con.execute(\"INSERT OR REPLACE INTO cache (key, value) \"\n \"VALUES (?, ?)\", (key, value))\n\n\n def __delitem__(self, key):\n if not isinstance(key, six.binary_type):\n raise TypeError('Key must be bytes, but was %r' % key)\n with self._con:\n self._con.execute(\"DELETE FROM cache WHERE key=?\", (key,))\n\n\n def __contains__(self, key):\n if not isinstance(key, six.binary_type):\n raise TypeError('Key must be bytes, but was %r' % key)\n return self._con.execute(\"SELECT EXISTS(SELECT 1 FROM cache \"\n \"WHERE key=? LIMIT 1);\", (key,)).fetchone()[0]\n\n\n def __iter__(self):\n for row in self._con.execute(\"SELECT key FROM cache\").fetchall():\n yield row[0]\n\n\n\nclass SerializedDict(MutableMapping):\n \"\"\" a key value database which is stored on the disk\n This class provides hooks for converting arbitrary keys and values to\n strings, which are then stored in the database.\n \"\"\"\n\n def __init__(self, key_serialization='pickle',\n value_serialization='pickle', storage_dict=None):\n \"\"\" provides a dictionary whose keys and values are serialized\n transparently. The serialization methods are determined by\n `key_serialization` and `value_serialization`.\n\n `storage_dict` can be used to chose a different dictionary for the\n underlying storage mechanism, e.g., storage_dict = PersistentDict()\n \"\"\"\n # initialize the dictionary that actually stores the data\n if storage_dict is None:\n self._data = {}\n else:\n self._data = storage_dict\n\n # define the methods that serialize and unserialize the data\n self.serialize_key = make_serializer(key_serialization)\n self.unserialize_key = make_unserializer(key_serialization)\n self.serialize_value = make_serializer(value_serialization)\n self.unserialize_value = make_unserializer(value_serialization)\n\n\n def __len__(self):\n return len(self._data)\n\n\n def __getitem__(self, key):\n # convert key to its string representation\n key_s = self.serialize_key(key)\n # fetch the value\n value = self._data[key_s]\n # convert the value to its object representation\n return self.unserialize_value(value)\n\n\n def __setitem__(self, key, value):\n # convert key and value to their string representations\n key_s = self.serialize_key(key)\n value_s = self.serialize_value(value)\n # add the item to the dictionary\n self._data[key_s] = value_s\n\n\n def __delitem__(self, key):\n # convert key to its string representation\n key_s = self.serialize_key(key)\n # delete the item from the dictionary\n del self._data[key_s]\n\n\n def __contains__(self, key):\n # convert key to its string representation\n key_s = self.serialize_key(key)\n # check whether this items exists in the dictionary\n return key_s in self._data\n\n\n def __iter__(self):\n # iterate dictionary\n for key_s in self._data.__iter__():\n # convert the value to its object representation\n yield self.unserialize_key(key_s)\n\n\n\nclass _class_cache(object):\n \"\"\" class handling the caching of results of methods and properties \"\"\"\n\n def __init__(self, factory=None, extra_args=None, ignore_args=None,\n hash_function='hash_readable', doc=None, name=None):\n \"\"\" decorator that caches calls in a dictionary attached to the\n instances. 
This can be used with most classes\n\n class Foo(object):\n\n @cached_property()\n def property(self):\n return \"Cached property\"\n\n @cached_method()\n def method(self):\n return \"Cached method\"\n\n\n foo = Foo()\n foo.property\n foo.method()\n\n # The first call to a cached method creates the attribute\n # `foo._cache_methods`, which is a dictionary containing the\n # cache for each method.\n\n The cache can be cleared by setting foo._cache_methods = {} if the cache\n factory is a simple dict, i.e, if `factory` == None.\n Alternatively, each cached method has a `clear_cache_of_obj` method,\n which clears the cache of this particular method. In the example above\n we could thus call `foo.bar.clear_cache_of_obj(foo)` to clear the cache.\n Note that the object instance has to be passed as a parameter, since the\n method `bar` is defined on the class, not the instance, i.e., we could\n also call Foo.bar.clear_cache_of_obj(foo). To clear the cache from\n within a method, one can thus call\n self.method_name.clear_cache_of_obj(self)\n where `method_name` is the name of the method whose cache is cleared\n\n For convenience there is also the class decorator\n `add_clear_cache_method` that adds a method `clear_cache` that can be\n used to clear the caches of all methods of the class and its subclasses\n\n Additionally, `extra_args` can specify a list of properties that are\n added to the cache key. They are then treated as if they are supplied as\n arguments to the method. This is important to include when the result of\n a method depends not only on method arguments but also on instance\n properties. Conversely, the keyword arguments listed in `ignore_args`\n are ignored in the cache key.\n\n This class also plays together with user-supplied storage backends by\n defining a cache factory. The cache factory should return a dict-like\n object that handles the cache for the given method.\n\n class Foo(object):\n\n def get_cache(self, name):\n # `name` is the name of the method to cache\n return DictFiniteCapacity()\n\n @cached_method(factory='get_cache')\n def foo(self):\n return \"Cached\"\n \"\"\"\n self.extra_args = extra_args\n self.hash_function = hash_function\n self.name = name\n\n # setup the ignored arguments\n if ignore_args is not None:\n if isinstance(ignore_args, six.string_types):\n ignore_args = [ignore_args]\n self.ignore_args = set(ignore_args)\n else:\n self.ignore_args = None\n\n # check whether the decorator has been applied correctly\n if callable(factory):\n class_name = self.__class__.__name__\n raise ValueError('Missing function call. 
Call this decorator as '\n '@{0}() instead of @{0}'.format(class_name))\n\n else:\n self.factory = factory\n\n\n def _get_clear_cache_method(self):\n \"\"\" return a method that can be attached to classes to clear the cache\n of the wrapped method \"\"\"\n\n def clear_cache(obj):\n \"\"\" clears the cache associated with this method \"\"\"\n try:\n # try getting an initialized cache\n cache = obj._cache_methods[self.name]\n\n except (AttributeError, KeyError):\n # the cache was not initialized\n if self.factory is None:\n # the cache would be a dictionary, but it is not yet\n # initialized => we don't need to clear anything\n return\n # initialize the cache, since it might open a persistent\n # database, which needs to be cleared\n cache = getattr(obj, self.factory)(self.name)\n\n # clear the cache\n cache.clear()\n\n return clear_cache\n\n\n def _get_wrapped_function(self, func):\n \"\"\" return the wrapped method, which implements the cache \"\"\"\n\n if self.name is None:\n self.name = func.__name__\n\n # create the function to serialize the keys\n hash_key = make_serializer(self.hash_function)\n\n @functools.wraps(func)\n def wrapper(obj, *args, **kwargs):\n # try accessing the cache\n try:\n cache = obj._cache_methods[self.name]\n except (AttributeError, KeyError) as err:\n # the cache was not initialized\n wrapper._logger.debug('Initialize the cache `%s` using hash '\n '`%s`', self.name, self.hash_function)\n if isinstance(err, AttributeError):\n # the cache dictionary is not even present\n obj._cache_methods = {}\n # create cache using the right factory method\n if self.factory is None:\n cache = {}\n else:\n cache = getattr(obj, self.factory)(self.name)\n # store the cache in the dictionary\n obj._cache_methods[self.name] = cache\n\n # determine the key that encodes the current arguments\n if self.ignore_args:\n kwargs_key = {k: v for k, v in six.iteritems(kwargs)\n if k not in self.ignore_args}\n func_args = [args, kwargs_key]\n else:\n func_args = [args, kwargs]\n\n if self.extra_args:\n for extra_arg in self.extra_args:\n func_args.append(getattr(obj, extra_arg))\n\n cache_key = hash_key(tuple(func_args))\n\n try:\n # try loading the results from the cache\n result = cache[cache_key]\n except KeyError:\n # if this failed, compute and store the results\n wrapper._logger.debug('Cache missed for `%s`. Compute result '\n 'for method `%s`',\n tuple(func_args), self.name)\n result = func(obj, *args, **kwargs)\n cache[cache_key] = result\n return result\n\n # initialize the logger\n wrapper._logger = logging.getLogger(__name__)\n\n return wrapper\n\n\n\nclass cached_property(_class_cache):\n \"\"\"Decorator to use a function as a cached property.\n\n The function is only called the first time and each successive call returns\n the cached result of the first call.\n\n class Foo(object):\n\n @cached_property\n def foo(self):\n return \"Cached\"\n\n The data is stored in a dictionary named `_cache_methods` attached to the\n instance of each object. The cache can thus be cleared by setting\n self._cache_methods = {}. 
The cache of specific property can be cleared\n using\n self._cache_methods[property_name] = {}\n where `property_name` is the name of the property\n\n Adapted from <http://wiki.python.org/moin/PythonDecoratorLibrary>.\n \"\"\"\n\n def __call__(self, method):\n \"\"\" apply the cache decorator to the property \"\"\"\n # save name, e.g., to be able to delete cache later\n self._cache_name = self.name\n self.clear_cache_of_obj = self._get_clear_cache_method()\n self.func = self._get_wrapped_function(method)\n\n self.__doc__ = self.func.__doc__\n self.__name__ = self.func.__name__\n self.__module__ = self.func.__module__\n return self\n\n\n def __get__(self, obj, owner):\n \"\"\" call the method to obtain the result for this property \"\"\"\n# if obj is None:\n# return self\n\n return self.func(obj)\n\n\n\nclass cached_method(_class_cache):\n \"\"\" class handling the caching of results of methods \"\"\"\n\n def __call__(self, method):\n \"\"\" apply the cache decorator to the method \"\"\"\n\n wrapper = self._get_wrapped_function(method)\n\n # save name, e.g., to be able to delete cache later\n wrapper._cache_name = self.name\n wrapper.clear_cache_of_obj = self._get_clear_cache_method()\n\n return wrapper\n\n\n\ndef add_clear_cache_method(cls):\n \"\"\" a class decorator that adds a clear_cache method to the class \"\"\"\n # gather the methods that need to be cleared\n methods_with_cache = []\n for method_name in dir(cls):\n if method_name.startswith('__'):\n continue\n\n method = getattr(cls, method_name)\n if hasattr(method, 'clear_cache_of_obj'):\n methods_with_cache.append(method)\n\n # add the actual method for clearing the cache\n def clear_cache(self):\n \"\"\" clears the cache of all methods \"\"\"\n for method in methods_with_cache:\n method.clear_cache_of_obj(self)\n cls.clear_cache = clear_cache\n\n return cls\n\n\n\nclass CachedArray(object):\n \"\"\"\n class that provides an array of given shape when called. If the shape is\n consistent with the last call, a stored copy will be returned. Otherwise a\n new array will be constructed.\n \"\"\"\n\n def __init__(self, value=None):\n self._data = np.empty(0)\n self.value = value\n\n def __call__(self, shape):\n if self._data.shape == shape:\n if self.value is not None:\n self._data.fill(self.value)\n else:\n if self.value is None:\n self._data = np.empty(shape)\n elif self.value == 0:\n self._data = np.zeros(shape)\n else:\n self._data = np.full(shape, self.value, np.double)\n return self._data\n" ]
[ [ "numpy.full", "numpy.empty", "numpy.zeros" ] ]
MiloszGrabski/deepchem
[ "1af203724db0ecebedf87967d1009fd8973da2d6" ]
[ "deepchem/models/tests/test_molgan_layers.py" ]
[ "import unittest\n\nfrom tensorflow import keras\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras import activations\nfrom deepchem.models.layers import MolGANConvolutionLayer, MolGANMultiConvolutionLayer, MolGANAggregationLayer, MolGANEncoderLayer\n\n\nclass test_molgan_layers(unittest.TestCase):\n \"\"\"\n Unit testing for MolGAN basic layers\n \"\"\"\n\n def test_graph_convolution_layer(self):\n vertices = 9\n nodes = 5\n edges = 5\n units = 128\n\n layer = MolGANConvolutionLayer(units=units, edges=edges)\n adjacency_tensor = Input(shape=(vertices, vertices, edges))\n node_tensor = Input(shape=(vertices, nodes))\n output = layer([adjacency_tensor, node_tensor])\n model = keras.Model(\n inputs=[adjacency_tensor, node_tensor], outputs=[output])\n\n assert model.output_shape == [((None, vertices, vertices, edges),\n (None, vertices, nodes), (None, vertices,\n units))]\n assert layer.units == units\n assert layer.activation == activations.tanh\n assert layer.edges == 5\n assert layer.dropout_rate == 0.0\n\n def test_aggregation_layer(self):\n vertices = 9\n units = 128\n\n layer = MolGANAggregationLayer(units=units)\n hidden_tensor = Input(shape=(vertices, units))\n output = layer(hidden_tensor)\n model = keras.Model(inputs=[hidden_tensor], outputs=[output])\n\n assert model.output_shape == (None, units)\n assert layer.units == units\n assert layer.activation == activations.tanh\n assert layer.dropout_rate == 0.0\n\n def test_multigraph_convolution_layer(self):\n vertices = 9\n nodes = 5\n edges = 5\n first_convolution_unit = 128\n second_convolution_unit = 64\n units = [first_convolution_unit, second_convolution_unit]\n\n layer = MolGANMultiConvolutionLayer(units=units, edges=edges)\n adjacency_tensor = Input(shape=(vertices, vertices, edges))\n node_tensor = Input(shape=(vertices, nodes))\n hidden_tensor = layer([adjacency_tensor, node_tensor])\n model = keras.Model(\n inputs=[adjacency_tensor, node_tensor], outputs=[hidden_tensor])\n\n assert model.output_shape == (None, vertices, second_convolution_unit)\n assert layer.units == units\n assert layer.activation == activations.tanh\n assert layer.edges == 5\n assert layer.dropout_rate == 0.0\n\n def test_graph_encoder_layer(self):\n vertices = 9\n nodes = 5\n edges = 5\n first_convolution_unit = 128\n second_convolution_unit = 64\n aggregation_unit = 128\n units = [(first_convolution_unit, second_convolution_unit),\n aggregation_unit]\n\n layer = MolGANEncoderLayer(units=units, edges=edges)\n adjacency_tensor = Input(shape=(vertices, vertices, edges))\n node_tensor = Input(shape=(vertices, nodes))\n output = layer([adjacency_tensor, node_tensor])\n model = keras.Model(\n inputs=[adjacency_tensor, node_tensor], outputs=[output])\n\n assert model.output_shape == (None, aggregation_unit)\n assert layer.graph_convolution_units == (first_convolution_unit,\n second_convolution_unit)\n assert layer.auxiliary_units == aggregation_unit\n assert layer.activation == activations.tanh\n assert layer.edges == 5\n assert layer.dropout_rate == 0.0\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "tensorflow.keras.Model", "tensorflow.keras.layers.Input" ] ]
abdisa1Fekadu/basic-of-self-driving-car
[ "b14e798d8523d04ada37a6328bcc3b41b48c6f8f" ]
[ "Autopilot_V2/AutopilotApp_V2.py" ]
[ "import numpy as np\nimport cv2\nfrom keras.models import load_model\n\nmodel = load_model('Autopilot.h5')\n\ndef keras_predict(model, image):\n processed = keras_process_image(image)\n steering_angle = float(model.predict(processed, batch_size=1))\n steering_angle = steering_angle * 60\n return steering_angle\n\n\ndef keras_process_image(img):\n image_x = 100\n image_y = 100\n img = cv2.resize(img, (image_x, image_y))\n img = np.array(img, dtype=np.float32)\n img = np.reshape(img, (-1, image_x, image_y, 1))\n return img\n\n\nsteer = cv2.imread('steering_wheel_image.jpg', 0)\nrows, cols = steer.shape\nsmoothed_angle = 0\n\ncap = cv2.VideoCapture('run.mp4')\nwhile (cap.isOpened()):\n ret, frame = cap.read()\n gray = cv2.resize((cv2.cvtColor(frame, cv2.COLOR_RGB2HSV))[:, :, 1], (100, 100))\n steering_angle = keras_predict(model, gray)\n print(steering_angle)\n cv2.imshow('frame', cv2.resize(frame, (600, 400), interpolation=cv2.INTER_AREA))\n smoothed_angle += 0.2 * pow(abs((steering_angle - smoothed_angle)), 2.0 / 3.0) * (\n steering_angle - smoothed_angle) / abs(\n steering_angle - smoothed_angle)\n M = cv2.getRotationMatrix2D((cols / 2, rows / 2), -smoothed_angle, 1)\n dst = cv2.warpAffine(steer, M, (cols, rows))\n cv2.imshow(\"steering wheel\", dst)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.array", "numpy.reshape" ] ]
louis-she/ignite
[ "d05a8939139e056e5c5daf842c81af0ab5b0caaf" ]
[ "examples/gan/dcgan.py" ]
[ "import argparse\nimport os\nimport random\nimport warnings\nfrom pathlib import Path\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as data\n\nfrom ignite.contrib.handlers import ProgressBar\nfrom ignite.engine import Engine, Events\nfrom ignite.handlers import ModelCheckpoint, Timer\nfrom ignite.metrics import RunningAverage\n\ntry:\n import torchvision.datasets as dset\n import torchvision.transforms as transforms\n import torchvision.utils as vutils\n\nexcept ImportError:\n raise ImportError(\n \"Please install torchvision to run this example, for example \"\n \"via conda by running 'conda install -c pytorch torchvision'. \"\n )\n\n\nPRINT_FREQ = 100\nFAKE_IMG_FNAME = \"fake_sample_epoch_{:04d}.png\"\nREAL_IMG_FNAME = \"real_sample_epoch_{:04d}.png\"\nLOGS_FNAME = \"logs.tsv\"\nPLOT_FNAME = \"plot.svg\"\nSAMPLES_FNAME = \"samples.svg\"\nCKPT_PREFIX = \"networks\"\n\n\nclass Net(nn.Module):\n \"\"\"A base class for both generator and the discriminator.\n Provides a common weight initialization scheme.\n\n \"\"\"\n\n def weights_init(self):\n for m in self.modules():\n classname = m.__class__.__name__\n\n if \"Conv\" in classname:\n m.weight.data.normal_(0.0, 0.02)\n\n elif \"BatchNorm\" in classname:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\n def forward(self, x):\n return x\n\n\nclass Generator(Net):\n \"\"\"Generator network.\n\n Args:\n nf (int): Number of filters in the second-to-last deconv layer\n \"\"\"\n\n def __init__(self, z_dim, nf, nc):\n super(Generator, self).__init__()\n\n self.net = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d(in_channels=z_dim, out_channels=nf * 8, kernel_size=4, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(nf * 8),\n nn.ReLU(inplace=True),\n # state size. (nf*8) x 4 x 4\n nn.ConvTranspose2d(in_channels=nf * 8, out_channels=nf * 4, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(nf * 4),\n nn.ReLU(inplace=True),\n # state size. (nf*4) x 8 x 8\n nn.ConvTranspose2d(in_channels=nf * 4, out_channels=nf * 2, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(nf * 2),\n nn.ReLU(inplace=True),\n # state size. (nf*2) x 16 x 16\n nn.ConvTranspose2d(in_channels=nf * 2, out_channels=nf, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(nf),\n nn.ReLU(inplace=True),\n # state size. (nf) x 32 x 32\n nn.ConvTranspose2d(in_channels=nf, out_channels=nc, kernel_size=4, stride=2, padding=1, bias=False),\n nn.Tanh()\n # state size. (nc) x 64 x 64\n )\n\n self.weights_init()\n\n def forward(self, x):\n return self.net(x)\n\n\nclass Discriminator(Net):\n \"\"\"Discriminator network.\n\n Args:\n nf (int): Number of filters in the first conv layer.\n \"\"\"\n\n def __init__(self, nc, nf):\n super(Discriminator, self).__init__()\n\n self.net = nn.Sequential(\n # input is (nc) x 64 x 64\n nn.Conv2d(in_channels=nc, out_channels=nf, kernel_size=4, stride=2, padding=1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nf) x 32 x 32\n nn.Conv2d(in_channels=nf, out_channels=nf * 2, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(nf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nf*2) x 16 x 16\n nn.Conv2d(in_channels=nf * 2, out_channels=nf * 4, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(nf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. 
(nf*4) x 8 x 8\n nn.Conv2d(in_channels=nf * 4, out_channels=nf * 8, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(nf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nf*8) x 4 x 4\n nn.Conv2d(in_channels=nf * 8, out_channels=1, kernel_size=4, stride=1, padding=0, bias=False),\n nn.Sigmoid(),\n )\n\n self.weights_init()\n\n def forward(self, x):\n output = self.net(x)\n return output.view(-1, 1).squeeze(1)\n\n\ndef check_manual_seed(seed):\n \"\"\"If manual seed is not specified, choose a random one and communicate it to the user.\"\"\"\n\n seed = seed or random.randint(1, 10000)\n random.seed(seed)\n torch.manual_seed(seed)\n\n print(f\"Using manual seed: {seed}\")\n\n\ndef check_dataset(dataset, dataroot):\n \"\"\"\n\n Args:\n dataset (str): Name of the dataset to use. See CLI help for details\n dataroot (str): root directory where the dataset will be stored.\n\n Returns:\n dataset (data.Dataset): torchvision Dataset object\n\n \"\"\"\n resize = transforms.Resize(64)\n crop = transforms.CenterCrop(64)\n to_tensor = transforms.ToTensor()\n normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n\n if dataset in {\"imagenet\", \"folder\", \"lfw\"}:\n dataset = dset.ImageFolder(root=dataroot, transform=transforms.Compose([resize, crop, to_tensor, normalize]))\n nc = 3\n\n elif dataset == \"lsun\":\n dataset = dset.LSUN(\n root=dataroot, classes=[\"bedroom_train\"], transform=transforms.Compose([resize, crop, to_tensor, normalize])\n )\n nc = 3\n\n elif dataset == \"cifar10\":\n dataset = dset.CIFAR10(\n root=dataroot, download=True, transform=transforms.Compose([resize, to_tensor, normalize])\n )\n nc = 3\n\n elif dataset == \"mnist\":\n dataset = dset.MNIST(root=dataroot, download=True, transform=transforms.Compose([resize, to_tensor, normalize]))\n nc = 1\n\n elif dataset == \"fake\":\n dataset = dset.FakeData(size=256, image_size=(3, 64, 64), transform=to_tensor)\n nc = 3\n\n else:\n raise RuntimeError(f\"Invalid dataset name: {dataset}\")\n\n return dataset, nc\n\n\ndef main(\n dataset,\n dataroot,\n z_dim,\n g_filters,\n d_filters,\n batch_size,\n epochs,\n learning_rate,\n beta_1,\n saved_G,\n saved_D,\n seed,\n n_workers,\n device,\n alpha,\n output_dir,\n):\n\n # seed\n check_manual_seed(seed)\n\n # data\n dataset, num_channels = check_dataset(dataset, dataroot)\n loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=n_workers, drop_last=True)\n\n # netowrks\n netG = Generator(z_dim, g_filters, num_channels).to(device)\n netD = Discriminator(num_channels, d_filters).to(device)\n\n # criterion\n bce = nn.BCELoss()\n\n # optimizers\n optimizerG = optim.Adam(netG.parameters(), lr=learning_rate, betas=(beta_1, 0.999))\n optimizerD = optim.Adam(netD.parameters(), lr=learning_rate, betas=(beta_1, 0.999))\n\n # load pre-trained models\n if saved_G:\n netG.load_state_dict(torch.load(saved_G))\n\n if saved_D:\n netD.load_state_dict(torch.load(saved_D))\n\n # misc\n real_labels = torch.ones(batch_size, device=device)\n fake_labels = torch.zeros(batch_size, device=device)\n fixed_noise = torch.randn(batch_size, z_dim, 1, 1, device=device)\n\n def get_noise():\n return torch.randn(batch_size, z_dim, 1, 1, device=device)\n\n # The main function, processing a batch of examples\n def step(engine, batch):\n\n # unpack the batch. It comes from a dataset, so we have <images, labels> pairs. 
Discard labels.\n real, _ = batch\n real = real.to(device)\n\n # -----------------------------------------------------------\n # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))\n netD.zero_grad()\n\n # train with real\n output = netD(real)\n errD_real = bce(output, real_labels)\n D_x = output.mean().item()\n\n errD_real.backward()\n\n # get fake image from generator\n noise = get_noise()\n fake = netG(noise)\n\n # train with fake\n output = netD(fake.detach())\n errD_fake = bce(output, fake_labels)\n D_G_z1 = output.mean().item()\n\n errD_fake.backward()\n\n # gradient update\n errD = errD_real + errD_fake\n optimizerD.step()\n\n # -----------------------------------------------------------\n # (2) Update G network: maximize log(D(G(z)))\n netG.zero_grad()\n\n # Update generator. We want to make a step that will make it more likely that discriminator outputs \"real\"\n output = netD(fake)\n errG = bce(output, real_labels)\n D_G_z2 = output.mean().item()\n\n errG.backward()\n\n # gradient update\n optimizerG.step()\n\n return {\"errD\": errD.item(), \"errG\": errG.item(), \"D_x\": D_x, \"D_G_z1\": D_G_z1, \"D_G_z2\": D_G_z2}\n\n # ignite objects\n trainer = Engine(step)\n checkpoint_handler = ModelCheckpoint(output_dir, CKPT_PREFIX, n_saved=10, require_empty=False)\n timer = Timer(average=True)\n\n # attach running average metrics\n monitoring_metrics = [\"errD\", \"errG\", \"D_x\", \"D_G_z1\", \"D_G_z2\"]\n RunningAverage(alpha=alpha, output_transform=lambda x: x[\"errD\"]).attach(trainer, \"errD\")\n RunningAverage(alpha=alpha, output_transform=lambda x: x[\"errG\"]).attach(trainer, \"errG\")\n RunningAverage(alpha=alpha, output_transform=lambda x: x[\"D_x\"]).attach(trainer, \"D_x\")\n RunningAverage(alpha=alpha, output_transform=lambda x: x[\"D_G_z1\"]).attach(trainer, \"D_G_z1\")\n RunningAverage(alpha=alpha, output_transform=lambda x: x[\"D_G_z2\"]).attach(trainer, \"D_G_z2\")\n\n # attach progress bar\n pbar = ProgressBar()\n pbar.attach(trainer, metric_names=monitoring_metrics)\n\n @trainer.on(Events.ITERATION_COMPLETED(every=PRINT_FREQ))\n def print_logs(engine):\n fname = output_dir / LOGS_FNAME\n columns = [\"iteration\"] + list(engine.state.metrics.keys())\n values = [str(engine.state.iteration)] + [str(round(value, 5)) for value in engine.state.metrics.values()]\n\n with open(fname, \"a\") as f:\n if f.tell() == 0:\n print(\"\\t\".join(columns), file=f)\n print(\"\\t\".join(values), file=f)\n message = f\"[{engine.state.epoch}/{epochs}][{engine.state.iteration % len(loader)}/{len(loader)}]\"\n for name, value in zip(columns, values):\n message += f\" | {name}: {value}\"\n\n pbar.log_message(message)\n\n # adding handlers using `trainer.on` decorator API\n @trainer.on(Events.EPOCH_COMPLETED)\n def save_fake_example(engine):\n fake = netG(fixed_noise)\n path = output_dir / FAKE_IMG_FNAME.format(engine.state.epoch)\n vutils.save_image(fake.detach(), path, normalize=True)\n\n # adding handlers using `trainer.on` decorator API\n @trainer.on(Events.EPOCH_COMPLETED)\n def save_real_example(engine):\n img, y = engine.state.batch\n path = output_dir / REAL_IMG_FNAME.format(engine.state.epoch)\n vutils.save_image(img, path, normalize=True)\n\n # adding handlers using `trainer.add_event_handler` method API\n trainer.add_event_handler(\n event_name=Events.EPOCH_COMPLETED, handler=checkpoint_handler, to_save={\"netG\": netG, \"netD\": netD}\n )\n\n # automatically adding handlers via a special `attach` method of `Timer` handler\n timer.attach(\n trainer,\n 
start=Events.EPOCH_STARTED,\n resume=Events.ITERATION_STARTED,\n pause=Events.ITERATION_COMPLETED,\n step=Events.ITERATION_COMPLETED,\n )\n\n # adding handlers using `trainer.on` decorator API\n @trainer.on(Events.EPOCH_COMPLETED)\n def print_times(engine):\n pbar.log_message(f\"Epoch {engine.state.epoch} done. Time per batch: {timer.value():.3f}[s]\")\n timer.reset()\n\n # adding handlers using `trainer.on` decorator API\n @trainer.on(Events.EPOCH_COMPLETED)\n def create_plots(engine):\n try:\n import matplotlib as mpl\n\n mpl.use(\"agg\")\n\n import matplotlib.pyplot as plt\n import pandas as pd\n\n except ImportError:\n warnings.warn(\"Loss plots will not be generated -- pandas or matplotlib not found\")\n\n else:\n df = pd.read_csv(output_dir / LOGS_FNAME, delimiter=\"\\t\", index_col=\"iteration\")\n _ = df.plot(subplots=True, figsize=(20, 20))\n _ = plt.xlabel(\"Iteration number\")\n fig = plt.gcf()\n path = output_dir / PLOT_FNAME\n\n fig.savefig(path)\n\n # adding handlers using `trainer.on` decorator API\n @trainer.on(Events.EXCEPTION_RAISED)\n def handle_exception(engine, e):\n if isinstance(e, KeyboardInterrupt) and (engine.state.iteration > 1):\n engine.terminate()\n warnings.warn(\"KeyboardInterrupt caught. Exiting gracefully.\")\n\n create_plots(engine)\n checkpoint_handler(engine, {\"netG_exception\": netG, \"netD_exception\": netD})\n\n else:\n raise e\n\n # Setup is done. Now let's run the training\n trainer.run(loader, epochs)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--dataset\",\n required=True,\n choices={\"cifar10\", \"lsun\", \"imagenet\", \"folder\", \"lfw\", \"fake\", \"mnist\"},\n help=\"Type of the dataset to be used.\",\n )\n\n parser.add_argument(\"--dataroot\", required=True, help=\"path to dataset\")\n\n parser.add_argument(\"--workers\", type=int, default=2, help=\"number of data loading workers\")\n\n parser.add_argument(\"--batch-size\", type=int, default=64, help=\"input batch size\")\n\n parser.add_argument(\"--z-dim\", type=int, default=100, help=\"size of the latent z vector\")\n\n parser.add_argument(\n \"--g-filters\", type=int, default=64, help=\"Number of filters in the second-to-last generator deconv layer\"\n )\n\n parser.add_argument(\"--d-filters\", type=int, default=64, help=\"Number of filters in first discriminator conv layer\")\n\n parser.add_argument(\"--epochs\", type=int, default=25, help=\"number of epochs to train for\")\n\n parser.add_argument(\"--lr\", type=float, default=0.0002, help=\"learning rate\")\n\n parser.add_argument(\"--beta-1\", type=float, default=0.5, help=\"beta_1 for adam\")\n\n parser.add_argument(\"--no-cuda\", action=\"store_true\", help=\"disables cuda\")\n\n parser.add_argument(\"--saved-G\", default=\"\", help=\"path to pickled generator (to continue training)\")\n\n parser.add_argument(\"--saved-D\", default=\"\", help=\"path to pickled discriminator (to continue training)\")\n\n parser.add_argument(\"--output-dir\", default=\".\", help=\"directory to output images and model checkpoints\")\n\n parser.add_argument(\"--seed\", type=int, help=\"manual seed\")\n\n parser.add_argument(\"--alpha\", type=float, default=0.98, help=\"smoothing constant for exponential moving averages\")\n\n args = parser.parse_args()\n dev = \"cpu\" if (not torch.cuda.is_available() or args.no_cuda) else \"cuda:0\"\n\n args.output_dir = Path(args.output_dir)\n try:\n args.output_dir.mkdir(parents=True)\n except FileExistsError:\n if (not args.output_dir.is_dir()) or 
(len(os.listdir(args.output_dir)) > 0):\n raise FileExistsError(\"Please provide a path to a non-existing or empty directory.\")\n\n main(\n dataset=args.dataset,\n dataroot=args.dataroot,\n z_dim=args.z_dim,\n g_filters=args.g_filters,\n d_filters=args.d_filters,\n batch_size=args.batch_size,\n epochs=args.epochs,\n learning_rate=args.lr,\n beta_1=args.beta_1,\n saved_D=args.saved_D,\n saved_G=args.saved_G,\n seed=args.seed,\n device=dev,\n n_workers=args.workers,\n alpha=args.alpha,\n output_dir=args.output_dir,\n )\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.LeakyReLU", "torch.ones", "torch.cuda.is_available", "matplotlib.pyplot.gcf", "torch.load", "pandas.read_csv", "torch.nn.ConvTranspose2d", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.nn.BCELoss", "torch.zeros", "matplotlib.use", "torch.nn.Tanh", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.Sigmoid", "matplotlib.pyplot.xlabel", "torch.randn" ] ]
AndreiDavydov/Poisson_Denoiser
[ "a0b8f3dce8282b8e50d44cacb7bdc4fc6d4abc22" ]
[ "wavelet_transform.py" ]
[ "import pywt\nimport numpy as np\n\n\nclass WaveletTransform():\n '''\n Class for using the particular wavelet transformation. After doing first transformation, it will save shapes of the image. \n So, to work with the images of different size you should make another instance of this class.\n '''\n def __init__(self, wt_filt='haar', wt_scale=1, size=256):\n super(WaveletTransform, self).__init__()\n\n self.wt_filt = wt_filt\n self.wt_scale = wt_scale\n \n\n self.size = size # specified just to be able to compute the CG. (must be 256)\n self.shapes = []\n self.approx_coefs = None # here the coefs deleted by mask will be saved\n\n \n def wt_shapes_acqusition(self, coefs):\n '''\n The function creates a list of shapes of coeff-arrays at each scale.\n '''\n for i, coef_block in enumerate(coefs):\n if i == 0: # LL block\n self.shapes.append(coef_block.shape)\n \n else: # triplet for wavelet coefs.\n self.shapes += [(coef_block[0].shape, coef_block[1].shape, coef_block[2].shape)] \n\n\n def W(self, img):\n '''\n Direct discrete 2-D Wavelet transformation.\n Returns the list of multiscale coefficients.\n '''\n mult_coefs = pywt.wavedec2(img, self.wt_filt, level=self.wt_scale)\n \n if len(self.shapes) == 0: \n self.wt_shapes_acqusition(mult_coefs)\n \n return mult_coefs\n \n\n def W_inv(self, coefs):\n '''\n Inverse Discrete Wavelet transformation.\n Input types can be: np.array (img-like) | list of coefs | vector.\n If input is img-like - the dimensions must be equal and divided by 2.\n '''\n \n mult_coefs = coefs.copy()\n\n if isinstance(mult_coefs, np.ndarray): # img-like or vector case\n mult_coefs = self.as_coefs(mult_coefs)\n \n reconstructed = pywt.waverec2(mult_coefs, self.wt_filt)\n return reconstructed \n\n \n def as_coefs(self, coefs):\n '''\n Tranform coefficients from img-like or vector-like input to the list of coefs.\n '''\n\n mult_coefs = coefs.copy()\n \n\n assert(isinstance(mult_coefs, np.ndarray))\n coefs = []\n\n if len(mult_coefs.shape) == 1: # vector-like case\n\n for i, block_shapes in enumerate(self.shapes):\n if i == 0:\n len_coefs = block_shapes[0] * block_shapes[1]\n LL_coefs = mult_coefs[:len_coefs].reshape(block_shapes[0], block_shapes[1])\n coefs.append(LL_coefs)\n\n mult_coefs = mult_coefs[len_coefs:]\n\n else:\n coefs_per_block = []\n for block_shape in block_shapes:\n len_coefs = block_shape[0] * block_shape[1]\n wt_coefs = mult_coefs[:len_coefs].reshape(block_shape[0], block_shape[1])\n coefs_per_block.append(wt_coefs)\n\n mult_coefs = mult_coefs[len_coefs:]\n\n coefs.append(tuple(coefs_per_block))\n\n return coefs\n\n\n else: # img-like case\n\n reversed_shapes = self.shapes[::-1]\n \n for block_shapes in reversed_shapes:\n block_shape = block_shapes[0]\n\n if isinstance(block_shape, tuple):\n block_shape = block_shape[0] # each wavelet block has same shape.\n\n HVD = (mult_coefs[ :block_shape, block_shape: ], \n mult_coefs[ block_shape:, :block_shape ], \n mult_coefs[ block_shape:, block_shape: ])\n\n coefs.append(HVD)\n mult_coefs = mult_coefs[:block_shape, :block_shape]\n\n else:\n coefs.append(mult_coefs)\n\n coefs = coefs[::-1]\n\n return coefs\n\n\n def as_vector(self, coefs):\n '''\n The input is either img-like object or a list of coefs.\n '''\n\n mult_coefs = coefs.copy()\n\n if isinstance(mult_coefs, np.ndarray): # must be an img-like case\n mult_coefs = self.as_coefs(mult_coefs)\n\n vector = mult_coefs[0].flatten()\n mult_coefs = mult_coefs[1:] # only wavelet blocks remained.\n\n for block in mult_coefs:\n for i in range(3):\n vector = 
np.concatenate((vector, block[i].flatten()))\n\n return vector\n\n \n def as_image(self, coefs):\n '''\n The input is a list of wavelet coefs with triplets for detailed coefs. \n If it is a vector it will be transformed to the list.\n \n Returns image-like object (if possible).\n '''\n \n mult_coefs = coefs.copy()\n\n if isinstance(mult_coefs, np.ndarray): # vector-like case\n if len(mult_coefs.shape) == 1:\n mult_coefs = self.as_coefs(mult_coefs)\n \n try:\n block = mult_coefs[0]\n for i in range(1, len(mult_coefs)):\n (cH, cV, cD) = mult_coefs[i]\n block = np.block([[block, cH],[cV, cD]])\n\n except ValueError:\n print ('ValueError: Dimensions mismatch. Such WT cannot be viewed as a 2D image.')\n \n else:\n return block \n \n\n def masked_coefs(self, coefs):\n '''\n Computes the masked wavelet transform given full list of wavelet coefficients. \n (as if the binary mask was applied to the coefs for leaving only scaling (wavelet coefficients)).\n '''\n\n masked_mult_coefs = coefs.copy()\n\n if not isinstance(masked_mult_coefs, list):\n masked_mult_coefs = self.as_coefs(masked_mult_coefs) # a hope that the input is vector-like or img-like.\n\n self.approx_coefs = masked_mult_coefs[0].copy() # save deleted coefs.\n masked_mult_coefs[0] = np.zeros_like(masked_mult_coefs[0])\n\n return masked_mult_coefs\n\n\n def Wdir(self, img):\n '''\n Computes the masked WT (mask is applied only on scaling coefs). \n Returns the vectorized format of coefs.\n '''\n return self.as_vector(self.masked_coefs(self.W(img)))\n\n def Wconj(self, vec_coefs):\n '''\n Computes the Conjugate to \"Wdir\" operator. By default, vec_coefs is given in a vectorized format.\n '''\n return self.W_inv(self.masked_coefs(self.as_coefs(vec_coefs)))\n\n\n def IsConjugateRight(self, eps=1e-5):\n '''\n Simple check for whether the Wavelet Conjugate Tranformation implemented here is correct.\n As direct WT the function \"Wdirect\" is used, as conjugate WT the \"Wconj\" is used.\n '''\n np.random.seed(5)\n\n x = np.random.rand(2048,2048)\n\n W_x = self.Wdir(x)\n y = np.random.randn(len(W_x))\n\n left = (W_x * y).sum()\n\n Wconj_y = self.Wconj(y)\n right = (Wconj_y * x).sum()\n\n print(np.abs(left - right) < eps)\n" ]
[ [ "numpy.zeros_like", "numpy.random.rand", "numpy.random.seed", "numpy.block", "numpy.abs" ] ]
jmnyman/vq-vae-2-pytorch
[ "48502ebf9120677f3b493d33261177951eebce99" ]
[ "vqvae.py" ]
[ "import torch\r\nfrom torch import nn\r\nfrom torch.nn import functional as F\r\n\r\n\r\n# Copyright 2018 The Sonnet Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\r\n\r\n# Borrowed from https://github.com/deepmind/sonnet and ported it to PyTorch\r\n\r\n\r\nclass Quantize(nn.Module):\r\n def __init__(self, dim, n_embed, decay=0.99, eps=1e-5):\r\n super().__init__()\r\n\r\n self.dim = dim\r\n self.n_embed = n_embed\r\n self.decay = decay\r\n self.eps = eps\r\n\r\n embed = torch.randn(dim, n_embed)\r\n self.register_buffer('embed', embed)\r\n self.register_buffer('cluster_size', torch.zeros(n_embed))\r\n self.register_buffer('embed_avg', embed.clone())\r\n\r\n def forward(self, input):\r\n flatten = input.reshape(-1, self.dim)\r\n dist = (\r\n flatten.pow(2).sum(1, keepdim=True)\r\n - 2 * flatten @ self.embed\r\n + self.embed.pow(2).sum(0, keepdim=True)\r\n )\r\n _, embed_ind = (-dist).max(1)\r\n embed_onehot = F.one_hot(embed_ind, self.n_embed).type(flatten.dtype)\r\n embed_ind = embed_ind.view(*input.shape[:-1])\r\n quantize = self.embed_code(embed_ind)\r\n\r\n if self.training:\r\n self.cluster_size.data.mul_(self.decay).add_(\r\n 1 - self.decay, embed_onehot.sum(0)\r\n )\r\n embed_sum = flatten.transpose(0, 1) @ embed_onehot\r\n self.embed_avg.data.mul_(self.decay).add_(1 - self.decay, embed_sum)\r\n n = self.cluster_size.sum()\r\n cluster_size = (\r\n (self.cluster_size + self.eps) / (n + self.n_embed * self.eps) * n\r\n )\r\n embed_normalized = self.embed_avg / cluster_size.unsqueeze(0)\r\n self.embed.data.copy_(embed_normalized)\r\n\r\n diff = (quantize.detach() - input).pow(2).mean()\r\n quantize = input + (quantize - input).detach()\r\n\r\n return quantize, diff, embed_ind\r\n\r\n def embed_code(self, embed_id):\r\n return F.embedding(embed_id, self.embed.transpose(0, 1))\r\n\r\n\r\nclass ResBlock(nn.Module):\r\n def __init__(self, in_channel, channel):\r\n super().__init__()\r\n\r\n self.conv = nn.Sequential(\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(in_channel, channel, 3, padding=1),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(channel, in_channel, 1),\r\n )\r\n\r\n def forward(self, input):\r\n out = self.conv(input)\r\n out += input\r\n\r\n return out\r\n\r\n\r\nclass Encoder(nn.Module):\r\n def __init__(self, in_channel, channel, n_res_block, n_res_channel, stride, n_additional_downsample_layers):\r\n super().__init__()\r\n\r\n if stride == 4:\r\n blocks = [\r\n nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(channel // 2, channel, 4, stride=2, padding=1),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(channel, channel, 3, padding=1),\r\n ]\r\n\r\n elif stride == 2:\r\n blocks = [\r\n nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(channel // 2, channel, 3, padding=1),\r\n ]\r\n\r\n # ADDITIONAL DOWNSAMPLING\r\n for i in 
range(n_additional_downsample_layers):\r\n blocks.append(nn.Conv2d(channel, channel, kernel_size=4, stride=2, padding=1))\r\n\r\n for i in range(n_res_block):\r\n blocks.append(ResBlock(channel, n_res_channel))\r\n\r\n blocks.append(nn.ReLU(inplace=True))\r\n\r\n self.blocks = nn.Sequential(*blocks)\r\n\r\n def forward(self, input):\r\n return self.blocks(input)\r\n\r\n\r\nclass Decoder(nn.Module):\r\n def __init__(\r\n self, in_channel, out_channel, channel, n_res_block, n_res_channel, stride, n_additional_upsample_layers):\r\n super().__init__()\r\n\r\n blocks = [nn.Conv2d(in_channel, channel, 3, padding=1)]\r\n\r\n for i in range(n_res_block):\r\n blocks.append(ResBlock(channel, n_res_channel))\r\n\r\n blocks.append(nn.ReLU(inplace=True))\r\n\r\n # additional upsampling to match additional downsampling\r\n for i in range(n_additional_upsample_layers):\r\n blocks.append(nn.ConvTranspose2d(channel, channel, kernel_size=4, stride=2, padding=1))\r\n\r\n if stride == 4:\r\n blocks.extend(\r\n [\r\n nn.ConvTranspose2d(channel, channel // 2, 4, stride=2, padding=1),\r\n nn.ReLU(inplace=True),\r\n nn.ConvTranspose2d(\r\n channel // 2, out_channel, 4, stride=2, padding=1\r\n ),\r\n ]\r\n )\r\n\r\n elif stride == 2:\r\n blocks.append(\r\n nn.ConvTranspose2d(channel, out_channel, 4, stride=2, padding=1)\r\n )\r\n\r\n self.blocks = nn.Sequential(*blocks)\r\n\r\n def forward(self, input):\r\n return self.blocks(input)\r\n\r\n\r\nclass VQVAE(nn.Module):\r\n def __init__(\r\n self,\r\n in_channel=3,\r\n channel=128,\r\n n_res_block=2,\r\n n_res_channel=32,\r\n embed_dim=64,\r\n n_embed=512,\r\n decay=0.99,\r\n n_additional_downsample_layers=3,\r\n n_additional_upsample_layers=3,\r\n num_classes=2,\r\n input_size=512,\r\n ):\r\n super().__init__()\r\n self.input_size = input_size\r\n self.embed_dim = embed_dim\r\n\r\n self.enc_b = Encoder(in_channel, channel, n_res_block, n_res_channel, 4, n_additional_downsample_layers)\r\n self.enc_t = Encoder(channel, channel, n_res_block, n_res_channel, 2, 0)\r\n self.quantize_conv_t = nn.Conv2d(channel, embed_dim, 1)\r\n self.quantize_t = Quantize(embed_dim, n_embed)\r\n self.dec_t = Decoder(\r\n embed_dim, embed_dim, channel, n_res_block, n_res_channel, 2, n_additional_upsample_layers=0\r\n )\r\n self.quantize_conv_b = nn.Conv2d(embed_dim + channel, embed_dim, 1)\r\n self.quantize_b = Quantize(embed_dim, n_embed)\r\n self.upsample_t = nn.ConvTranspose2d(\r\n embed_dim, embed_dim, 4, stride=2, padding=1\r\n )\r\n self.dec = Decoder(\r\n embed_dim + embed_dim,\r\n in_channel,\r\n channel,\r\n n_res_block,\r\n n_res_channel,\r\n 4,\r\n n_additional_upsample_layers\r\n )\r\n\r\n self.dropout = nn.Dropout()\r\n\r\n self.downsample_top_size = int(self.input_size / 2**(3+n_additional_downsample_layers))\r\n self.unrolled_top_size = int(self.embed_dim * self.downsample_top_size**2)\r\n\r\n # CLASSIFIER [disabled by default with a scale of 0]\r\n self.num_classes = num_classes\r\n self.classifier_fc = nn.Linear(self.embed_dim, self.num_classes) # if not unrolling!\r\n # self.classifier_fc = nn.Linear(self.unrolled_top_size, self.num_classes)\r\n self.cross_ent = nn.CrossEntropyLoss(reduction='sum')\r\n\r\n self.mse_loss = nn.MSELoss()\r\n\r\n def forward(self, input, labels):\r\n # TODO don't return decoding; instead recreate decoding when asked via model.decode_code` instead\r\n quant_t, quant_b, diff, id_t, id_b, enc_t, enc_b, classifier_loss = self.encode(input, labels)\r\n dec = self.decode(quant_t, quant_b)\r\n recon_loss = self.mse_loss(dec, input)\r\n\r\n #return 
dec, diff, enc_t, enc_b, classifier_loss, recon_loss\r\n #return diff, enc_t, enc_b, classifier_loss, recon_loss\r\n #return diff, classifier_loss, recon_loss # try only returning losses to reduce gathered memory?\r\n \r\n return diff, classifier_loss, recon_loss, id_t, id_b\r\n\r\n\r\n def encode(self, input, labels):\r\n batch_size = input.shape[0]\r\n\r\n enc_b = self.enc_b(input)\r\n enc_t = self.enc_t(enc_b)\r\n\r\n pre_quant_t = self.quantize_conv_t(enc_t).permute(0, 2, 3, 1) # pre-actual quantization\r\n # repeat labels to match enc_t's latent field dimensions [pointwise classification]\r\n repeat_labels = labels.view(batch_size,1,1).repeat(1, self.downsample_top_size, self.downsample_top_size)\r\n \r\n # New: run classifier on pre-quantized top level encoding\r\n# classifier_logits = self.classifier_fc(self.dropout(pre_quant_t.contiguous().view(batch_size,-1)))\r\n classifier_logits = self.classifier_fc(self.dropout(pre_quant_t)) # if not unrolling!\r\n\r\n # need unsqueeze(0) for dataparallel formatting\r\n# classifier_loss = self.cross_ent(classifier_logits, labels.long()).unsqueeze(0)\r\n # reshape again for crossent\r\n classifier_loss = self.cross_ent(classifier_logits.permute(0,3,1,2), repeat_labels.long()).unsqueeze(0) \r\n \r\n quant_t, diff_t, id_t = self.quantize_t(pre_quant_t)\r\n quant_t = quant_t.permute(0, 3, 1, 2)\r\n diff_t = diff_t.unsqueeze(0)\r\n\r\n dec_t = self.dec_t(quant_t)\r\n enc_b = torch.cat([dec_t, enc_b], 1)\r\n\r\n pre_quant_b = self.quantize_conv_b(enc_b).permute(0, 2, 3, 1)\r\n quant_b, diff_b, id_b = self.quantize_b(pre_quant_b) # I renamed to avoid confusion; not sure performance hit\r\n quant_b = quant_b.permute(0, 3, 1, 2)\r\n diff_b = diff_b.unsqueeze(0)\r\n\r\n return quant_t, quant_b, diff_t + diff_b, id_t, id_b, enc_t, enc_b, classifier_loss\r\n\r\n def decode(self, quant_t, quant_b):\r\n upsample_t = self.upsample_t(quant_t)\r\n quant = torch.cat([upsample_t, quant_b], 1)\r\n dec = self.dec(quant)\r\n\r\n return dec\r\n\r\n def decode_code(self, code_t, code_b):\r\n quant_t = self.quantize_t.embed_code(code_t)\r\n quant_t = quant_t.permute(0, 3, 1, 2)\r\n quant_b = self.quantize_b.embed_code(code_b)\r\n quant_b = quant_b.permute(0, 3, 1, 2)\r\n\r\n dec = self.decode(quant_t, quant_b)\r\n\r\n return dec\r\n\r\n\r\nclass ThreeLevelVQVAE(nn.Module):\r\n def __init__(\r\n self,\r\n in_channel=3,\r\n channel=128,\r\n n_res_block=2,\r\n n_res_channel=32,\r\n embed_dim=64,\r\n n_embed=512,\r\n decay=0.99,\r\n n_additional_downsample_layers=3,\r\n n_additional_upsample_layers=3,\r\n num_classes=2,\r\n input_size=512,\r\n ):\r\n super().__init__()\r\n self.input_size = input_size\r\n self.embed_dim = embed_dim\r\n\r\n # TODO add middle latent layer encoder, conv2d, quantize, decoder, (more?)\r\n # TODO upsample/downsample\r\n self.enc_b = Encoder(in_channel, channel, n_res_block, n_res_channel, 4, n_additional_downsample_layers)\r\n self.enc_t = Encoder(channel, channel, n_res_block, n_res_channel, 2, 0)\r\n self.quantize_conv_t = nn.Conv2d(channel, embed_dim, 1)\r\n self.quantize_t = Quantize(embed_dim, n_embed)\r\n self.dec_t = Decoder(\r\n embed_dim, embed_dim, channel, n_res_block, n_res_channel, 2, n_additional_upsample_layers=0\r\n )\r\n self.quantize_conv_b = nn.Conv2d(embed_dim + channel, embed_dim, 1)\r\n self.quantize_b = Quantize(embed_dim, n_embed)\r\n self.upsample_t = nn.ConvTranspose2d(\r\n embed_dim, embed_dim, 4, stride=2, padding=1\r\n )\r\n self.dec = Decoder(\r\n embed_dim + embed_dim,\r\n in_channel,\r\n channel,\r\n 
n_res_block,\r\n n_res_channel,\r\n 4,\r\n n_additional_upsample_layers\r\n )\r\n\r\n self.dropout = nn.Dropout()\r\n\r\n self.downsample_top_size = int(self.input_size / 2 ** (3 + n_additional_downsample_layers))\r\n self.unrolled_top_size = int(self.embed_dim * self.downsample_top_size ** 2)\r\n\r\n # CLASSIFIER [disabled by default with a scale of 0]\r\n self.num_classes = num_classes\r\n self.classifier_fc = nn.Linear(self.embed_dim, self.num_classes) # if not unrolling!\r\n # self.classifier_fc = nn.Linear(self.unrolled_top_size, self.num_classes)\r\n self.cross_ent = nn.CrossEntropyLoss(reduction='sum')\r\n\r\n def forward(self, input, labels):\r\n # TODO match base model\r\n quant_t, quant_b, diff, _, _, enc_t, enc_b, classifier_loss = self.encode(input, labels)\r\n dec = self.decode(quant_t, quant_b)\r\n\r\n return dec, diff, enc_t, enc_b, classifier_loss\r\n\r\n def encode(self, input, labels):\r\n batch_size = input.shape[0]\r\n\r\n # obtain first representations for each latent layer (each will be added to via concatenation)\r\n enc_b = self.enc_b(input)\r\n enc_m = self.enc_m(enc_b)\r\n enc_t = self.enc_t(enc_m)\r\n\r\n # transform, quantize, and decode top latent layer; run classifier on prequantized representation\r\n pre_quant_t = self.quantize_conv_t(enc_t).permute(0, 2, 3, 1) # pre-actual quantization\r\n # repeat labels to match enc_t's latent field dimensions [pointwise classification]\r\n repeat_labels = labels.view(batch_size, 1, 1).repeat(1, self.downsample_top_size, self.downsample_top_size)\r\n # run classifier on pre-quantized top level encoding\r\n classifier_logits = self.classifier_fc(self.dropout(pre_quant_t)) # if not unrolling!\r\n # reshape again for crossent\r\n classifier_loss = self.cross_ent(classifier_logits.permute(0, 3, 1, 2), repeat_labels.long()).unsqueeze(0)\r\n\r\n # quantize top layer and decode\r\n quant_t, diff_t, id_t = self.quantize_t(pre_quant_t)\r\n quant_t = quant_t.permute(0, 3, 1, 2)\r\n diff_t = diff_t.unsqueeze(0)\r\n dec_t = self.dec_t(quant_t)\r\n\r\n # combine decoded top layer with first representation of middle latent layer\r\n enc_m = torch.cat([dec_t, enc_m], 1)\r\n\r\n # transform, quantize, and decode middle latent field\r\n pre_quant_m = self.quantize_conv_m(enc_m).permute(0, 2, 3, 1) # pre-actual quantization\r\n quant_m, diff_m, id_m = self.quantize_m(pre_quant_m)\r\n quant_m = quant_m.permute(0, 3, 1, 2)\r\n diff_m = diff_m.unsqueeze(0)\r\n\r\n # combine decoded middle layer with first representation of bottom latent layer\r\n enc_b = torch.cat([dec_m, enc_b], 1)\r\n\r\n pre_quant_b = self.quantize_conv_b(enc_b).permute(0, 2, 3, 1)\r\n quant_b, diff_b, id_b = self.quantize_b(pre_quant_b) # I renamed to avoid confusion; not sure performance hit\r\n quant_b = quant_b.permute(0, 3, 1, 2)\r\n diff_b = diff_b.unsqueeze(0)\r\n\r\n # TODO clean up output format -- dictionary?\r\n return quant_t, quant_b, diff_t + diff_b, id_t, id_b, enc_t, enc_b, classifier_loss\r\n\r\n def decode(self, quant_t, quant_m, quant_b):\r\n \"\"\"\r\n General process here is to take the highest level representation (embedding form of quantized category assignment)\r\n and then to upsample this representation to match the spatial resolution of the next \"lower\" layer\r\n then combine these representations via concat\r\n\r\n I'm not 100% sure on the distinction between upsample and decoder operations under the source\r\n implementation's use....\r\n\r\n Maybe it's just concat[upsample_t, upsample_m, quant_b], where both upsamplings map to the same dim 
as bottom layer\r\n ( see below )\r\n\r\n :param quant_t:\r\n :param quant_m:\r\n :param quant_b:\r\n :return:\r\n \"\"\"\r\n # upsample_t = self.upsample_t(quant_t)\r\n # quant_tm = torch.cat([upsample_t, quant_m], 1)\r\n # dec_m = self.dec_m(quant_tm)\r\n # upsample_m = upsample_m(dec_m) # Not sure about this\r\n # quant_mb = torch.cat([upsample_m, quant_b], 1)\r\n # dec = self.dec(quant_mb)\r\n\r\n upsample_t = self.upsample_t(quant_t)\r\n upsample_m = self.upsample_m(quant_m)\r\n combined_quant = torch.cat([upsample_t, upsample_m, quant_t], 1)\r\n dec = self.dec(combined_quant)\r\n\r\n return dec\r\n\r\n def decode_code(self, code_t, code_b):\r\n quant_t = self.quantize_t.embed_code(code_t)\r\n quant_t = quant_t.permute(0, 3, 1, 2)\r\n quant_b = self.quantize_b.embed_code(code_b)\r\n quant_b = quant_b.permute(0, 3, 1, 2)\r\n\r\n dec = self.decode(quant_t, quant_b)\r\n\r\n return dec\r\n" ]
[ [ "torch.nn.Linear", "torch.zeros", "torch.nn.Dropout", "torch.cat", "torch.nn.functional.one_hot", "torch.nn.MSELoss", "torch.nn.Sequential", "torch.nn.CrossEntropyLoss", "torch.nn.ConvTranspose2d", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.randn" ] ]