Columns: repo_name (string, lengths 6-130), hexsha (list), file_path (list), code (list), apis (list)
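Each record below pairs one repository and commit with one source file and the library APIs used in it. As a minimal sketch of how such records could be consumed, the snippet below assumes the data is stored as JSON Lines with exactly these field names; the file path "data.jsonl" is hypothetical and only the rendered preview is shown here, not the real storage format.

import json

# Hypothetical path; adjust to wherever the dump actually lives.
with open("data.jsonl", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        repo = record["repo_name"]      # e.g. "jinzhao3611/Political_Stance_Prediction"
        sha = record["hexsha"][0]       # commit the file was taken from
        path = record["file_path"][0]   # path of the file inside the repo
        source = record["code"][0]      # full file contents as one string
        apis = record["apis"][0]        # fully qualified library calls found in the file
        print(repo, sha[:8], path, len(source), apis)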
jinzhao3611/Political_Stance_Prediction
[ "b2314363e00a41836c5ae747ec29933601976736" ]
[ "train_ML.py" ]
[ "import argparse\nimport pickle\nfrom math import log\nfrom typing import Dict, List\nfrom collections import Counter\n\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.svm import LinearSVC\n\nfrom reader.data import load_corpus, Corpus, Sentence\nfrom reader.scoring import score_corpus\n\n\ndef train_svm(train, test):\n params_header = [\"c\", \"class_weight\", \"feature_func\"]\n c_values = [1.0, 0.5, 0.1, 0.05, 0.01, 0.005, 0.001]\n class_weight_values = [None, \"balanced\"]\n feature_funcs = [binary_bow, count_bow, log_count_bow]\n\n first_row = True\n for c_value in c_values:\n for class_weight in class_weight_values:\n for feature_func in feature_funcs:\n row = [str(c_value), str(class_weight), feature_func.__name__]\n model = MLModel(\n LinearSVC(C=c_value, class_weight=class_weight, max_iter=10000),\n feature_func,\n )\n model.train(train)\n\n pred_corpus = model.predict(test)\n score = score_corpus(test, pred_corpus)\n if first_row:\n print(\",\".join(params_header + score.header()))\n first_row = False\n print(\",\".join(row + score.row()))\n\n\ndef train_logistic_regression(train, test):\n params_header = [\"c\", \"solver\", \"feature_func\"]\n\n c_values = [0.05]\n solvers = [\"saga\"]\n feature_funcs = [binary_bow]\n\n first_row = True\n for c_value in c_values:\n for solver in solvers:\n for feature_func in feature_funcs:\n row = [str(c_value), solver, feature_func.__name__]\n model = MLModel(\n LogisticRegression(\n C=c_value,\n solver=solver,\n multi_class=\"multinomial\",\n penalty=\"l2\",\n max_iter=10000,\n ),\n feature_func,\n )\n model.train(train)\n\n pred_corpus = model.predict(test)\n score = score_corpus(test, pred_corpus)\n if first_row:\n print(\",\".join(params_header + score.header()))\n first_row = False\n print(\",\".join(row + score.row()))\n\n\ndef train_MLP(train, test):\n params_header = [\"hiddens\", \"lr\", \"feature_func\"]\n lrs = [0.01, 0.005]\n feature_funcs = [binary_bow, count_bow, log_count_bow]\n\n first_row = True\n for lr in lrs:\n for feature_func in feature_funcs:\n row = [str(lr), feature_func.__name__]\n model = MLModel(\n MLPClassifier(\n hidden_layer_sizes=(100,),\n solver=\"adam\",\n batch_size=32,\n learning_rate=\"adaptive\",\n learning_rate_init=lr,\n ),\n feature_func,\n )\n model.train(train)\n\n pred_corpus = model.predict(test)\n score = score_corpus(test, pred_corpus)\n if first_row:\n print(\",\".join(params_header + score.header()))\n first_row = False\n print(\",\".join(row + score.row()))\n\n\ndef train_test(\n train_path: str, test_path: str, bias_only: bool, model_fn, dict_mapping\n) -> None:\n train_corpus = load_corpus(train_path, bias_only, dict_mapping)\n test_corpus = load_corpus(test_path, bias_only, dict_mapping)\n model_fn(train_corpus, test_corpus)\n\n\ndef get_feature_names(instance: Sentence, use_bigrams: bool = True, use_trigrams: bool = False) -> List[str]:\n unigrams = [token.text.lower() for token in instance.tokens]\n if use_bigrams:\n bigrams = [\"_\".join(pair) for pair in zip(unigrams[:-1], unigrams[1:])]\n unigrams.extend(bigrams)\n if use_trigrams:\n trigrams = [\"_\".join(pair) for pair in zip(unigrams[:-2], unigrams[1:-1], unigrams[2:])]\n unigrams.extend(trigrams)\n return unigrams\n\n\ndef binary_bow(instance: Sentence) -> Dict[str, float]:\n features = get_feature_names(instance)\n return {feat: 1.0 for feat in features}\n\n\ndef count_bow(instance: Sentence) -> Dict[str, 
float]:\n features = get_feature_names(instance)\n return {\n token: float(count)\n for token, count in Counter(\n features\n ).items()\n }\n\n\ndef log_count_bow(instance: Sentence) -> Dict[str, float]:\n return {token: log(count) for token, count in count_bow(instance).items()}\n\n\nclass MLModel:\n def __init__(self, model, feature_function,) -> None:\n self.feature_func = feature_function\n self.vectorizer = DictVectorizer()\n self.model = model\n\n def train(self, corpus: Corpus) -> None:\n features = self.vectorizer.fit_transform(\n self.feature_func(instance) for instance in corpus\n )\n labels = tuple(corpus.stances)\n self.model.fit(features, labels)\n\n def predict(self, corpus: Corpus) -> Corpus:\n features = self.vectorizer.transform(\n self.feature_func(instance) for instance in corpus\n )\n preds = self.model.predict(features)\n return Corpus(\n tuple(\n instance.copy_with_stance(pred) for instance, pred in zip(corpus, preds)\n )\n )\n\n\ndef main() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"train_path\")\n parser.add_argument(\"test_path\")\n parser.add_argument(\"model\")\n parser.add_argument(\"-b\", \"--bias\", action=\"store_true\")\n parser.add_argument(\"-m\", \"--use_moral\", action=\"store_true\")\n models = {\"logre\": train_logistic_regression, \"mlp\": train_MLP, \"svm\": train_svm}\n args = parser.parse_args()\n if args.use_moral:\n with open(\"data/additional_resources/moral_dict.pkl\", \"rb\") as f:\n moral_dict_mapping = pickle.load(f)\n else:\n moral_dict_mapping = None\n train_test(args.train_path, args.test_path, args.bias, models[args.model], moral_dict_mapping)\n\n\nif __name__ == \"__main__\":\n from nltk.metrics import ConfusionMatrix\n gold = \"a a a c c d\".split()\n pred = \"a a c c d d\".split()\n cm = ConfusionMatrix(gold, pred)\n print(cm['c', 'a'])\n" ]
[ [ "sklearn.linear_model.LogisticRegression", "sklearn.neural_network.MLPClassifier", "sklearn.feature_extraction.DictVectorizer", "sklearn.svm.LinearSVC" ] ]
ZLkanyo009/mindspore
[ "0a6ed86bb443ed233504fa7eee931a24637d50bb" ]
[ "tests/st/control/inner/test_120_if_after_while_in_if.py" ]
[ "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nfrom mindspore.common import dtype as mstype\nfrom mindspore import nn\nfrom mindspore import Tensor\nfrom mindspore.ops import composite as C\nfrom mindspore import context\n\ncontext.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target=\"Ascend\")\n\n\nclass ForwardNet(nn.Cell):\n def __init__(self, max_cycles=10):\n super(ForwardNet, self).__init__()\n self.max_cycles = max_cycles\n self.zero = Tensor(np.array(0), mstype.int32)\n self.i = Tensor(np.array(0), mstype.int32)\n\n def construct(self, x, y):\n out = self.zero\n i = self.i\n if x > y:\n while i < self.max_cycles:\n out = x * y + out\n i = i + 1\n if out > 20:\n out = out - 20\n return out\n\n\nclass BackwardNet(nn.Cell):\n def __init__(self, net):\n super(BackwardNet, self).__init__(auto_prefix=False)\n self.forward_net = net\n self.grad = C.GradOperation()\n\n def construct(self, *inputs):\n grads = self.grad(self.forward_net)(*inputs)\n return grads\n\n\ndef test_forward():\n x = Tensor(np.array(1), mstype.int32)\n y = Tensor(np.array(3), mstype.int32)\n forward_net = ForwardNet(max_cycles=3)\n out = forward_net(x, y)\n print(\"forward out:\", out)\n\n\ndef test_backward():\n x = Tensor(np.array(1), mstype.int32)\n y = Tensor(np.array(3), mstype.int32)\n forward_net = ForwardNet(max_cycles=3)\n backward_net = BackwardNet(forward_net)\n grads = backward_net(x, y)\n print(\"grads:\", grads)\n" ]
[ [ "numpy.array" ] ]
ericyinyzy/MTN_trajectory
[ "2c6e2cb07f89a118094257d6bea4e024d5ceda54" ]
[ "PIE/transformer/sublayer_connection.py" ]
[ "# -*- coding: utf-8 -*-\n# date: 2018-11-30 15:17\nimport torch.nn as nn\n\nfrom .layer_norm import LayerNorm\n\n\nclass SublayerConnection(nn.Module):\n \"\"\"\n A residual connection followed by a layer norm.\n Note for code simplicity the norm is first as opposed to last.\n \"\"\"\n\n def __init__(self, size, dropout):\n super(SublayerConnection, self).__init__()\n self.norm = LayerNorm(size)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, sublayer,st=1,sublayer1=None,sublayer2=None):\n \"\"\"\n Apply residual connection to any sublayer with the same size.\n \"\"\"\n if st==0:\n return x + self.dropout(sublayer(x))\n if sublayer1==None:\n return self.self_attn(x,sublayer)\n else:\n return self.src_attn(x,sublayer,sublayer1,sublayer2)\n def self_attn(self,x,sublayer):\n return x + self.dropout(sublayer(self.norm(x)))\n def src_attn(self,x,sublayer,sublayer1,sublayer2):\n x1=sublayer(self.norm(x))\n x2=sublayer1(self.norm(x))\n x3=sublayer2(self.norm(x))\n return x + self.dropout(x1)+self.dropout(x2)+self.dropout(x3)\n" ]
[ [ "torch.nn.Dropout" ] ]
mhamedouadghiri/examples
[ "853107847c1dd761592f0bc19d18fe2b0e26c051" ]
[ "tensorflow_examples/lite/model_maker/core/task/object_detector.py" ]
[ "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"APIs to train an object detection model.\"\"\"\n\nimport os\nimport tempfile\nfrom typing import Dict, Optional, Tuple, TypeVar\n\nimport tensorflow as tf\nfrom tensorflow_examples.lite.model_maker.core import compat\nfrom tensorflow_examples.lite.model_maker.core.data_util import object_detector_dataloader\nfrom tensorflow_examples.lite.model_maker.core.export_format import ExportFormat\nfrom tensorflow_examples.lite.model_maker.core.export_format import QuantizationType\nfrom tensorflow_examples.lite.model_maker.core.task import configs\nfrom tensorflow_examples.lite.model_maker.core.task import custom_model\nfrom tensorflow_examples.lite.model_maker.core.task import model_spec as ms\nfrom tensorflow_examples.lite.model_maker.core.task.metadata_writers.object_detector import metadata_writer_for_object_detector as metadata_writer\nfrom tensorflow_examples.lite.model_maker.core.task.model_spec import object_detector_spec\n\nfrom tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import label_util\n\nT = TypeVar('T', bound='ObjectDetector')\n\n\ndef create(train_data: object_detector_dataloader.DataLoader,\n model_spec: object_detector_spec.EfficientDetModelSpec,\n validation_data: Optional[\n object_detector_dataloader.DataLoader] = None,\n epochs: Optional[object_detector_dataloader.DataLoader] = None,\n batch_size: Optional[int] = None,\n train_whole_model: bool = False,\n do_train: bool = True) -> T:\n \"\"\"Loads data and train the model for object detection.\n\n Args:\n train_data: Training data.\n model_spec: Specification for the model.\n validation_data: Validation data. If None, skips validation process.\n epochs: Number of epochs for training.\n batch_size: Batch size for training.\n train_whole_model: Boolean, False by default. If true, train the whole\n model. Otherwise, only train the layers that are not match\n `model_spec.config.var_freeze_expr`.\n do_train: Whether to run training.\n\n Returns:\n ObjectDetector\n \"\"\"\n model_spec = ms.get(model_spec)\n model_spec.config.num_epochs = epochs\n model_spec.config.batch_size = batch_size\n if train_whole_model:\n model_spec.config.var_freeze_expr = None\n if compat.get_tf_behavior() not in model_spec.compat_tf_versions:\n raise ValueError('Incompatible versions. 
Expect {}, but got {}.'.format(\n model_spec.compat_tf_versions, compat.get_tf_behavior()))\n\n object_detector = ObjectDetector(model_spec, train_data.label_map, train_data)\n\n if do_train:\n tf.compat.v1.logging.info('Retraining the models...')\n object_detector.train(train_data, validation_data, epochs, batch_size)\n else:\n object_detector.create_model()\n\n return object_detector\n\n\ndef _get_model_info(\n model_spec: object_detector_spec.EfficientDetModelSpec,\n quantization_type: Optional[QuantizationType] = None,\n quantization_config: Optional[configs.QuantizationConfig] = None,\n) -> metadata_writer.ModelSpecificInfo:\n \"\"\"Gets the specific info for the object detection model.\"\"\"\n\n # Gets image_min/image_max for float/quantized model.\n image_min = -1\n image_max = 1\n if quantization_config:\n if quantization_config.inference_input_type == tf.uint8:\n image_min = 0\n image_max = 255\n elif quantization_config.inference_input_type == tf.int8:\n image_min = -128\n image_max = 127\n elif quantization_type == QuantizationType.INT8:\n image_min = 0\n image_max = 255\n\n def _get_list(v):\n if isinstance(v, list) or isinstance(v, tuple):\n return v\n else:\n return [v]\n\n return metadata_writer.ModelSpecificInfo(\n name=model_spec.model_name,\n version='v1',\n image_width=model_spec.config.image_size[1],\n image_height=model_spec.config.image_size[0],\n image_min=image_min,\n image_max=image_max,\n mean=_get_list(model_spec.config.mean_rgb),\n std=_get_list(model_spec.config.stddev_rgb))\n\n\nclass ObjectDetector(custom_model.CustomModel):\n \"\"\"ObjectDetector class for inference and exporting to tflite.\"\"\"\n\n ALLOWED_EXPORT_FORMAT = (ExportFormat.TFLITE, ExportFormat.SAVED_MODEL,\n ExportFormat.LABEL)\n\n def __init__(\n self,\n model_spec: object_detector_spec.EfficientDetModelSpec,\n label_map: Dict[int, str],\n representative_data: Optional[\n object_detector_dataloader.DataLoader] = None\n ) -> None:\n \"\"\"Initializes the ObjectDetector class.\n\n Args:\n model_spec: Specification for the model.\n label_map: Dict, map label integer ids to string label names such as {1:\n 'person', 2: 'notperson'}. 0 is the reserved key for `background` and\n doesn't need to be included in `label_map`. Label names can't be\n duplicated.\n representative_data: Representative dataset for full integer\n quantization. Used when converting the keras model to the TFLite model\n and `quantization_type=INT8`.\n \"\"\"\n super().__init__(model_spec, shuffle=None)\n if model_spec.config.label_map and model_spec.config.label_map != label_map:\n tf.compat.v1.logging.warn(\n 'Label map is not the same as the previous label_map in model_spec.')\n model_spec.config.label_map = label_map\n # TODO(yuqili): num_classes = 1 have some issues during training. 
Thus we\n # make minimum num_classes=2 for now.\n model_spec.config.num_classes = max(2, max(label_map.keys()))\n self.representative_data = representative_data\n\n def create_model(self) -> tf.keras.Model:\n self.model = self.model_spec.create_model()\n return self.model\n\n def _get_dataset_and_steps(\n self,\n data: object_detector_dataloader.DataLoader,\n batch_size: int,\n is_training: bool,\n ) -> Tuple[Optional[tf.data.Dataset], int, Optional[str]]:\n \"\"\"Gets dataset, steps and annotations json file.\"\"\"\n if not data:\n return None, 0, None\n # TODO(b/171449557): Put this into DataLoader.\n dataset = data.gen_dataset(\n self.model_spec, batch_size, is_training=is_training)\n steps = len(data) // batch_size\n return dataset, steps, data.annotations_json_file\n\n def train(self,\n train_data: object_detector_dataloader.DataLoader,\n validation_data: Optional[\n object_detector_dataloader.DataLoader] = None,\n epochs: Optional[int] = None,\n batch_size: Optional[int] = None) -> tf.keras.Model:\n \"\"\"Feeds the training data for training.\"\"\"\n if not self.model_spec.config.drop_remainder:\n raise ValueError('Must set `drop_remainder=True` during training. '\n 'Otherwise it will fail.')\n\n batch_size = batch_size if batch_size else self.model_spec.batch_size\n # TODO(b/171449557): Upstream this to the parent class.\n if len(train_data) < batch_size:\n raise ValueError('The size of the train_data (%d) couldn\\'t be smaller '\n 'than batch_size (%d). To solve this problem, set '\n 'the batch_size smaller or increase the size of the '\n 'train_data.' % (len(train_data), batch_size))\n if validation_data and len(validation_data) < batch_size:\n tf.compat.v1.logging.warn(\n 'The size of the validation_data (%d) is smaller than batch_size '\n '(%d). Ignore the validation_data.' %\n (len(validation_data), batch_size))\n validation_data = None\n\n with self.model_spec.ds_strategy.scope():\n self.create_model()\n train_ds, steps_per_epoch, _ = self._get_dataset_and_steps(\n train_data, batch_size, is_training=True)\n validation_ds, validation_steps, val_json_file = self._get_dataset_and_steps(\n validation_data, batch_size, is_training=False)\n return self.model_spec.train(self.model, train_ds, steps_per_epoch,\n validation_ds, validation_steps, epochs,\n batch_size, val_json_file)\n\n def evaluate(self,\n data: object_detector_dataloader.DataLoader,\n batch_size: Optional[int] = None) -> Dict[str, float]:\n \"\"\"Evaluates the model.\"\"\"\n batch_size = batch_size if batch_size else self.model_spec.batch_size\n # Not to drop the smaller batch to evaluate the whole dataset.\n self.model_spec.config.drop_remainder = False\n ds = data.gen_dataset(self.model_spec, batch_size, is_training=False)\n steps = (len(data) + batch_size - 1) // batch_size\n # TODO(b/171449557): Upstream this to the parent class.\n if steps <= 0:\n raise ValueError('The size of the validation_data (%d) couldn\\'t be '\n 'smaller than batch_size (%d). To solve this problem, '\n 'set the batch_size smaller or increase the size of the '\n 'validation_data.' 
% (len(data), batch_size))\n\n eval_metrics = self.model_spec.evaluate(self.model, ds, steps,\n data.annotations_json_file)\n # Set back drop_remainder=True since it must be True during training.\n # Otherwise it will fail.\n self.model_spec.config.drop_remainder = True\n return eval_metrics\n\n def evaluate_tflite(\n self, tflite_filepath: str,\n data: object_detector_dataloader.DataLoader) -> Dict[str, float]:\n \"\"\"Evaluate the TFLite model.\"\"\"\n ds = data.gen_dataset(self.model_spec, batch_size=1, is_training=False)\n return self.model_spec.evaluate_tflite(tflite_filepath, ds, len(data),\n data.annotations_json_file)\n\n def _export_saved_model(self, saved_model_dir: str) -> None:\n \"\"\"Saves the model to Tensorflow SavedModel.\"\"\"\n self.model_spec.export_saved_model(saved_model_dir)\n\n def _export_tflite(\n self,\n tflite_filepath: str,\n quantization_type: QuantizationType = QuantizationType.INT8,\n representative_data: Optional[\n object_detector_dataloader.DataLoader] = None,\n quantization_config: Optional[configs.QuantizationConfig] = None,\n with_metadata: bool = True,\n export_metadata_json_file: bool = False) -> None:\n \"\"\"Converts the retrained model to tflite format and saves it.\n\n Args:\n tflite_filepath: File path to save tflite model.\n quantization_type: Enum, type of post-training quantization. Accepted\n values are `INT8`, `FP16`, `FP32`, `DYNAMIC`. `FP16` means float16\n quantization with 2x smaller, optimized for GPU. `INT8` means full\n integer quantization with 4x smaller, 3x+ speedup, optimized for Edge\n TPU. 'DYNAMIC' means dynamic range quantization with\t4x smaller, 2x-3x\n speedup. `FP32` mean exporting float model without quantization. Please\n refer to\n https://www.tensorflow.org/lite/performance/post_training_quantization\n for more detailed about different techniques for post-training\n quantization.\n representative_data: Representative dataset for full integer\n quantization. Used when `quantization_type=INT8`.\n quantization_config: Configuration for post-training quantization.\n with_metadata: Whether the output tflite model contains metadata.\n export_metadata_json_file: Whether to export metadata in json file. 
If\n True, export the metadata in the same directory as tflite model.Used\n only if `with_metadata` is True.\n \"\"\"\n if quantization_type and quantization_config:\n raise ValueError('At most one of the paramaters `quantization_type` and '\n '`quantization_config` can be set.')\n if representative_data is None:\n representative_data = self.representative_data\n if quantization_type == QuantizationType.INT8 and \\\n representative_data is None:\n raise ValueError('`representative_data` must be set when '\n '`quantization_type=QuantizationType.INT8.')\n\n ds, _, _ = self._get_dataset_and_steps(\n representative_data, batch_size=1, is_training=False)\n\n self.model_spec.export_tflite(tflite_filepath, quantization_type, ds,\n quantization_config)\n\n if with_metadata:\n with tempfile.TemporaryDirectory() as temp_dir:\n tf.compat.v1.logging.info(\n 'Label file is inside the TFLite model with metadata.')\n label_filepath = os.path.join(temp_dir, 'labelmap.txt')\n self._export_labels(label_filepath)\n model_info = _get_model_info(self.model_spec, quantization_type,\n quantization_config)\n export_dir = os.path.dirname(tflite_filepath)\n populator = metadata_writer.MetadataPopulatorForObjectDetector(\n tflite_filepath, export_dir, model_info, label_filepath)\n populator.populate(export_metadata_json_file)\n\n def _export_labels(self, label_filepath: str) -> None:\n \"\"\"Export labels to label_filepath.\"\"\"\n tf.compat.v1.logging.info('Saving labels in %s.', label_filepath)\n num_classes = self.model_spec.config.num_classes\n label_map = label_util.get_label_map(self.model_spec.config.label_map)\n with tf.io.gfile.GFile(label_filepath, 'w') as f:\n # Ignores label_map[0] that's the background. The labels in the label file\n # for TFLite metadata should start from the actual labels without the\n # background.\n for i in range(num_classes):\n label = label_map[i + 1] if i + 1 in label_map else '???'\n f.write(label + '\\n')\n" ]
[ [ "tensorflow.compat.v1.logging.warn", "tensorflow.compat.v1.logging.info", "tensorflow.io.gfile.GFile" ] ]
Ivy286/cluster_basedfps
[ "7fc216537f570436f008ea567c137d03ba2b6d81" ]
[ "third_party_package/RDKit_2015_03_1/rdkit/Chem/BuildFragmentCatalog.py" ]
[ "# $Id$\n#\n# Copyright (C) 2003-2008 Greg Landrum and Rational Discovery LLC\n#\n# @@ All Rights Reserved @@\n# This file is part of the RDKit.\n# The contents are covered by the terms of the BSD license\n# which is included in the file license.txt, found at the root\n# of the RDKit source tree.\n#\n\"\"\" command line utility for working with FragmentCatalogs (CASE-type analysis)\n\n**Usage**\n\n BuildFragmentCatalog [optional args] <filename>\n\n filename, the name of a delimited text file containing InData, is required\n for some modes of operation (see below)\n\n**Command Line Arguments**\n\n - -n *maxNumMols*: specify the maximum number of molecules to be processed\n\n - -b: build the catalog and OnBitLists\n *requires InData*\n\n - -s: score compounds\n *requires InData and a Catalog, can use OnBitLists*\n\n - -g: calculate info gains\n *requires Scores*\n\n - -d: show details about high-ranking fragments\n *requires a Catalog and Gains*\n\n - --catalog=*filename*: filename with the pickled catalog.\n If -b is provided, this file will be overwritten.\n\n - --onbits=*filename*: filename to hold the pickled OnBitLists.\n If -b is provided, this file will be overwritten\n \n - --scores=*filename*: filename to hold the text score data.\n If -s is provided, this file will be overwritten\n\n - --gains=*filename*: filename to hold the text gains data.\n If -g is provided, this file will be overwritten\n\n - --details=*filename*: filename to hold the text details data.\n If -d is provided, this file will be overwritten.\n\n - --minPath=2: specify the minimum length for a path\n\n - --maxPath=6: specify the maximum length for a path\n\n - --smiCol=1: specify which column in the input data file contains\n SMILES\n\n - --actCol=-1: specify which column in the input data file contains\n activities\n\n - --nActs=2: specify the number of possible activity values\n\n - --nBits=-1: specify the maximum number of bits to show details for\n\n\"\"\"\nfrom __future__ import print_function\nimport sys,os\nfrom rdkit.six.moves import cPickle #@UnresolvedImport #pylint: disable=F0401\nfrom rdkit.six import next\nfrom rdkit import Chem\nfrom rdkit import RDConfig\nfrom rdkit.Chem import FragmentCatalog\nfrom rdkit.Dbase.DbConnection import DbConnect\nimport numpy\nfrom rdkit.ML import InfoTheory\nimport types\n\n_cvsVersion=\"$Revision$\"\nidx1 = _cvsVersion.find(':')+1\nidx2 = _cvsVersion.rfind('$')\n__VERSION_STRING=\"%s\"%(_cvsVersion[idx1:idx2])\n\ndef message(msg,dest=sys.stdout):\n dest.write(msg)\n\ndef BuildCatalog(suppl,maxPts=-1,groupFileName=None,\n minPath=2,maxPath=6,reportFreq=10):\n \"\"\" builds a fragment catalog from a set of molecules in a delimited text block\n\n **Arguments**\n\n - suppl: a mol supplier\n\n - maxPts: (optional) if provided, this will set an upper bound on the\n number of points to be considered\n\n - groupFileName: (optional) name of the file containing functional group\n information\n\n - minPath, maxPath: (optional) names of the minimum and maximum path lengths\n to be considered\n\n - reportFreq: (optional) how often to display status information \n\n **Returns**\n\n a FragmentCatalog\n \n \"\"\"\n if groupFileName is None:\n groupFileName = os.path.join(RDConfig.RDDataDir,\"FunctionalGroups.txt\")\n\n fpParams = FragmentCatalog.FragCatParams(minPath,maxPath,groupFileName)\n catalog = FragmentCatalog.FragCatalog(fpParams)\n fgen = FragmentCatalog.FragCatGenerator()\n if maxPts >0:\n nPts = maxPts\n else:\n if hasattr(suppl,'__len__'):\n nPts = len(suppl)\n else:\n 
nPts = -1\n for i,mol in enumerate(suppl):\n if i == nPts:\n break\n if i and not i%reportFreq:\n if nPts>-1:\n message('Done %d of %d, %d paths\\n'%(i,nPts,catalog.GetFPLength()))\n else:\n message('Done %d, %d paths\\n'%(i,catalog.GetFPLength()))\n fgen.AddFragsFromMol(mol,catalog)\n return catalog\n\ndef ScoreMolecules(suppl,catalog,maxPts=-1,actName='',acts=None,\n nActs=2,reportFreq=10):\n \"\"\" scores the compounds in a supplier using a catalog\n\n **Arguments**\n\n - suppl: a mol supplier\n\n - catalog: the FragmentCatalog\n\n - maxPts: (optional) the maximum number of molecules to be\n considered\n\n - actName: (optional) the name of the molecule's activity property.\n If this is not provided, the molecule's last property will be used.\n\n - acts: (optional) a sequence of activity values (integers).\n If not provided, the activities will be read from the molecules.\n\n - nActs: (optional) number of possible activity values\n\n - reportFreq: (optional) how often to display status information \n\n **Returns**\n\n a 2-tuple:\n\n 1) the results table (a 3D array of ints nBits x 2 x nActs)\n\n 2) a list containing the on bit lists for each molecule\n\n \"\"\"\n nBits = catalog.GetFPLength()\n resTbl = numpy.zeros((nBits,2,nActs),numpy.int)\n obls = []\n\n if not actName and not acts:\n actName = suppl[0].GetPropNames()[-1]\n\n \n fpgen = FragmentCatalog.FragFPGenerator()\n suppl.reset()\n i = 1\n for mol in suppl:\n if i and not i%reportFreq:\n message('Done %d.\\n'%(i))\n if mol:\n if not acts:\n act = int(mol.GetProp(actName))\n else:\n act = acts[i-1]\n fp = fpgen.GetFPForMol(mol,catalog)\n obls.append([x for x in fp.GetOnBits()])\n for j in range(nBits):\n resTbl[j,0,act] += 1\n for id in obls[i-1]:\n resTbl[id-1,0,act] -= 1\n resTbl[id-1,1,act] += 1\n else:\n obls.append([])\n i+=1 \n return resTbl,obls\n\ndef ScoreFromLists(bitLists,suppl,catalog,maxPts=-1,actName='',acts=None,\n nActs=2,reportFreq=10):\n \"\"\" similar to _ScoreMolecules()_, but uses pre-calculated bit lists\n for the molecules (this speeds things up a lot)\n\n \n **Arguments**\n\n - bitLists: sequence of on bit sequences for the input molecules\n\n - suppl: the input supplier (we read activities from here)\n\n - catalog: the FragmentCatalog\n\n - maxPts: (optional) the maximum number of molecules to be\n considered\n\n - actName: (optional) the name of the molecule's activity property.\n If this is not provided, the molecule's last property will be used.\n\n - nActs: (optional) number of possible activity values\n\n - reportFreq: (optional) how often to display status information \n\n **Returns**\n\n the results table (a 3D array of ints nBits x 2 x nActs)\n\n \"\"\"\n nBits = catalog.GetFPLength()\n if maxPts >0:\n nPts = maxPts\n else:\n nPts = len(bitLists)\n resTbl = numpy.zeros((nBits,2,nActs),numpy.int)\n if not actName and not acts:\n actName = suppl[0].GetPropNames()[-1]\n suppl.reset()\n for i in range(1,nPts+1):\n mol = next(suppl)\n if not acts:\n act = int(mol.GetProp(actName))\n else:\n act = acts[i-1]\n if i and not i%reportFreq:\n message('Done %d of %d\\n'%(i,nPts))\n ids = set()\n for id in bitLists[i-1]:\n ids.add(id-1)\n for j in range(nBits):\n resTbl[j,0,act] += 1\n for id in ids:\n resTbl[id,0,act] -= 1\n resTbl[id,1,act] += 1\n return resTbl\n\ndef CalcGains(suppl,catalog,topN=-1,actName='',acts=None,\n nActs=2,reportFreq=10,biasList=None,collectFps=0):\n \"\"\" calculates info gains by constructing fingerprints\n *DOC*\n\n Returns a 2-tuple:\n 1) gains matrix\n 2) list of fingerprints\n 
\n \"\"\"\n nBits = catalog.GetFPLength()\n if topN < 0:\n topN = nBits\n if not actName and not acts:\n actName = suppl[0].GetPropNames()[-1]\n\n gains = [0]*nBits\n if hasattr(suppl,'__len__'):\n nMols = len(suppl)\n else:\n nMols = -1\n fpgen = FragmentCatalog.FragFPGenerator()\n #ranker = InfoTheory.InfoBitRanker(nBits,nActs,InfoTheory.InfoType.ENTROPY)\n if biasList:\n ranker = InfoTheory.InfoBitRanker(nBits,nActs,InfoTheory.InfoType.BIASENTROPY)\n ranker.SetBiasList(biasList)\n else:\n ranker = InfoTheory.InfoBitRanker(nBits,nActs,InfoTheory.InfoType.ENTROPY)\n i = 0\n fps = []\n for mol in suppl:\n if not acts:\n try:\n act = int(mol.GetProp(actName))\n except KeyError:\n message('ERROR: Molecule has no property: %s\\n'%(actName))\n message('\\tAvailable properties are: %s\\n'%(str(mol.GetPropNames())))\n raise KeyError(actName)\n else:\n act = acts[i]\n if i and not i%reportFreq:\n if nMols>0:\n message('Done %d of %d.\\n'%(i,nMols))\n else:\n message('Done %d.\\n'%(i))\n fp = fpgen.GetFPForMol(mol,catalog)\n ranker.AccumulateVotes(fp,act)\n i+=1;\n if collectFps:\n fps.append(fp)\n gains = ranker.GetTopN(topN)\n return gains,fps\n\ndef CalcGainsFromFps(suppl,fps,topN=-1,actName='',acts=None,\n nActs=2,reportFreq=10,biasList=None):\n \"\"\" calculates info gains from a set of fingerprints\n\n *DOC*\n \n \"\"\"\n nBits = len(fps[0])\n if topN < 0:\n topN = nBits\n if not actName and not acts:\n actName = suppl[0].GetPropNames()[-1]\n\n gains = [0]*nBits\n if hasattr(suppl,'__len__'):\n nMols = len(suppl)\n else:\n nMols = -1\n if biasList:\n ranker = InfoTheory.InfoBitRanker(nBits,nActs,InfoTheory.InfoType.BIASENTROPY)\n ranker.SetBiasList(biasList)\n else:\n ranker = InfoTheory.InfoBitRanker(nBits,nActs,InfoTheory.InfoType.ENTROPY)\n for i,mol in enumerate(suppl):\n if not acts:\n try:\n act = int(mol.GetProp(actName))\n except KeyError:\n message('ERROR: Molecule has no property: %s\\n'%(actName))\n message('\\tAvailable properties are: %s\\n'%(str(mol.GetPropNames())))\n raise KeyError(actName)\n else:\n act = acts[i]\n if i and not i%reportFreq:\n if nMols>0:\n message('Done %d of %d.\\n'%(i,nMols))\n else:\n message('Done %d.\\n'%(i))\n fp = fps[i]\n ranker.AccumulateVotes(fp,act)\n gains = ranker.GetTopN(topN)\n return gains\n\ndef OutputGainsData(outF,gains,cat,nActs=2):\n actHeaders = ['Act-%d'%(x) for x in range(nActs)]\n if cat:\n outF.write('id,Description,Gain,%s\\n'%(','.join(actHeaders)))\n else:\n outF.write('id,Gain,%s\\n'%(','.join(actHeaders)))\n for entry in gains:\n id = int(entry[0])\n outL = [str(id)]\n if cat:\n descr = cat.GetBitDescription(id)\n outL.append(descr)\n outL.append('%.6f'%entry[1]) \n outL += ['%d'%x for x in entry[2:]]\n outF.write(','.join(outL))\n outF.write('\\n')\n \n\ndef ProcessGainsData(inF,delim=',',idCol=0,gainCol=1):\n \"\"\" reads a list of ids and info gains out of an input file\n\n \"\"\"\n res = []\n inL = inF.readline()\n for line in inF.xreadlines():\n splitL = line.strip().split(delim)\n res.append((splitL[idCol],float(splitL[gainCol])))\n return res \n \n\ndef ShowDetails(catalog,gains,nToDo=-1,outF=sys.stdout,idCol=0,gainCol=1,\n outDelim=','):\n \"\"\"\n gains should be a sequence of sequences. The idCol entry of each\n sub-sequence should be a catalog ID. 
_ProcessGainsData()_ provides\n suitable input.\n\n \"\"\"\n if nToDo < 0:\n nToDo = len(gains)\n for i in range(nToDo):\n id = int(gains[i][idCol])\n gain = float(gains[i][gainCol])\n descr = catalog.GetFragDescription(id)\n if descr:\n outF.write('%s\\n'%(outDelim.join((str(id),descr,str(gain)))))\n\ndef SupplierFromDetails(details):\n from rdkit.VLib.NodeLib.DbMolSupply import DbMolSupplyNode\n from rdkit.VLib.NodeLib.SmilesSupply import SmilesSupplyNode\n\n if details.dbName:\n conn = DbConnect(details.dbName,details.tableName)\n suppl = DbMolSupplyNode(conn.GetData())\n else:\n suppl = SmilesSupplyNode(details.inFileName,delim=details.delim,\n nameColumn=details.nameCol,\n smilesColumn=details.smiCol,\n titleLine=details.hasTitle)\n if type(details.actCol)==types.IntType:\n suppl.reset()\n m = next(suppl)\n actName = m.GetPropNames()[details.actCol]\n details.actCol = actName\n if type(details.nameCol)==types.IntType:\n suppl.reset()\n m = next(suppl)\n nameName = m.GetPropNames()[details.nameCol]\n details.nameCol = nameName\n suppl.reset()\n if type(details.actCol)==types.IntType:\n suppl.reset()\n m = next(suppl)\n actName = m.GetPropNames()[details.actCol]\n details.actCol = actName\n if type(details.nameCol)==types.IntType:\n suppl.reset()\n m = next(suppl)\n nameName = m.GetPropNames()[details.nameCol]\n details.nameCol = nameName\n suppl.reset()\n return suppl\n\n\ndef Usage():\n print(\"This is BuildFragmentCatalog version %s\"%(__VERSION_STRING))\n print('usage error')\n #print(__doc__)\n sys.exit(-1)\n\nclass RunDetails(object):\n numMols=-1\n doBuild=0\n doSigs=0\n doScore=0\n doGains=0\n doDetails=0\n catalogName=None\n onBitsName=None\n scoresName=None\n gainsName=None\n dbName=''\n tableName=None\n detailsName=None\n inFileName=None\n fpName=None\n minPath=2\n maxPath=6\n smiCol=1\n actCol=-1\n nameCol=-1\n hasTitle=1\n nActs = 2\n nBits=-1\n delim=','\n biasList=None\n topN=-1\n \ndef ParseArgs(details):\n import getopt\n try:\n args,extras = getopt.getopt(sys.argv[1:],'n:d:cst',\n ['catalog=','onbits=',\n 'scoresFile=','gainsFile=','detailsFile=','fpFile=',\n 'minPath=','maxPath=','smiCol=','actCol=','nameCol=','nActs=',\n 'nBits=','biasList=','topN=',\n 'build','sigs','gains','details','score','noTitle'])\n except:\n sys.stderr.write('Error parsing command line:\\n')\n import traceback\n traceback.print_exc()\n Usage()\n for arg,val in args:\n if arg=='-n':\n details.numMols=int(val)\n elif arg=='-c':\n details.delim=','\n elif arg=='-s':\n details.delim=' '\n elif arg=='-t':\n details.delim='\\t'\n elif arg=='-d':\n details.dbName=val\n elif arg=='--build':\n details.doBuild=1\n elif arg=='--score':\n details.doScore=1\n elif arg=='--gains':\n details.doGains=1\n elif arg=='--sigs':\n details.doSigs=1\n elif arg=='-details':\n details.doDetails=1\n elif arg=='--catalog':\n details.catalogName=val\n elif arg=='--onbits':\n details.onBitsName=val\n elif arg=='--scoresFile':\n details.scoresName=val\n elif arg=='--gainsFile':\n details.gainsName=val\n elif arg=='--detailsFile':\n details.detailsName=val\n elif arg=='--fpFile':\n details.fpName=val\n elif arg=='--minPath':\n details.minPath=int(val)\n elif arg=='--maxPath':\n details.maxPath=int(val)\n elif arg=='--smiCol':\n try:\n details.smiCol=int(val)\n except ValueError:\n details.smiCol=val\n elif arg=='--actCol':\n try:\n details.actCol=int(val)\n except ValueError:\n details.actCol=val\n elif arg=='--nameCol':\n try:\n details.nameCol=int(val)\n except ValueError:\n details.nameCol=val\n elif arg=='--nActs':\n 
details.nActs=int(val)\n elif arg=='--nBits':\n details.nBits=int(val)\n elif arg=='--noTitle':\n details.hasTitle=0\n elif arg=='--biasList':\n details.biasList=tuple(eval(val))\n elif arg=='--topN':\n details.topN=int(val)\n elif arg=='-h':\n Usage()\n sys.exit(0)\n else:\n Usage()\n if len(extras):\n if details.dbName:\n details.tableName=extras[0]\n else:\n details.inFileName = extras[0]\n else:\n Usage()\nif __name__=='__main__':\n import time\n details = RunDetails()\n ParseArgs(details)\n from io import StringIO\n suppl = SupplierFromDetails(details)\n\n cat = None\n obls = None\n if details.doBuild:\n if not suppl:\n message(\"We require inData to generate a catalog\\n\")\n sys.exit(-2)\n message(\"Building catalog\\n\")\n t1 = time.time()\n cat = BuildCatalog(suppl,maxPts=details.numMols,\n minPath=details.minPath,maxPath=details.maxPath)\n t2 = time.time()\n message(\"\\tThat took %.2f seconds.\\n\"%(t2-t1))\n if details.catalogName:\n message(\"Dumping catalog data\\n\")\n cPickle.dump(cat,open(details.catalogName,'wb+'))\n elif details.catalogName:\n message(\"Loading catalog\\n\")\n cat = cPickle.load(open(details.catalogName,'rb'))\n if details.onBitsName:\n try:\n obls = cPickle.load(open(details.onBitsName,'rb'))\n except:\n obls = None\n else:\n if len(obls)<(inD.count('\\n')-1):\n obls = None\n scores = None\n if details.doScore:\n if not suppl:\n message(\"We require inData to score molecules\\n\")\n sys.exit(-2)\n if not cat:\n message(\"We require a catalog to score molecules\\n\")\n sys.exit(-2)\n message(\"Scoring compounds\\n\")\n if not obls or len(obls)<details.numMols:\n scores,obls = ScoreMolecules(suppl,cat,maxPts=details.numMols,\n actName=details.actCol,\n nActs=details.nActs)\n if details.scoresName:\n cPickle.dump(scores,open(details.scoresName,'wb+'))\n if details.onBitsName:\n cPickle.dump(obls,open(details.onBitsName,'wb+'))\n else:\n scores = ScoreFromLists(obls,suppl,cat,maxPts=details.numMols,\n actName=details.actCol,\n nActs=details.nActs)\n elif details.scoresName:\n scores = cPickle.load(open(details.scoresName,'rb'))\n \n if details.fpName and os.path.exists(details.fpName) and not details.doSigs:\n message(\"Reading fingerprints from file.\\n\")\n fps = cPickle.load(open(details.fpName,'rb'))\n else:\n fps = []\n gains = None\n if details.doGains:\n if not suppl:\n message(\"We require inData to calculate gains\\n\")\n sys.exit(-2)\n if not (cat or fps):\n message(\"We require either a catalog or fingerprints to calculate gains\\n\")\n sys.exit(-2)\n message(\"Calculating Gains\\n\")\n t1 = time.time()\n if details.fpName:\n collectFps=1\n else:\n collectFps=0\n if not fps:\n gains,fps = CalcGains(suppl,cat,topN=details.topN,actName=details.actCol,\n nActs=details.nActs,biasList=details.biasList,\n collectFps=collectFps)\n if details.fpName:\n message(\"Writing fingerprint file.\\n\")\n tmpF=open(details.fpName,'wb+')\n cPickle.dump(fps,tmpF,1)\n tmpF.close()\n else:\n gains = CalcGainsFromFps(suppl,fps,topN=details.topN,actName=details.actCol,\n nActs=details.nActs,biasList=details.biasList)\n t2=time.time()\n message(\"\\tThat took %.2f seconds.\\n\"%(t2-t1))\n if details.gainsName:\n outF = open(details.gainsName,'w+')\n OutputGainsData(outF,gains,cat,nActs=details.nActs)\n else:\n if details.gainsName:\n inF = open(details.gainsName,'r')\n gains = ProcessGainsData(inF)\n\n \n if details.doDetails:\n if not cat:\n message(\"We require a catalog to get details\\n\")\n sys.exit(-2)\n if not gains:\n message(\"We require gains data to get 
details\\n\")\n sys.exit(-2)\n io = StringIO()\n io.write('id,SMILES,gain\\n')\n ShowDetails(cat,gains,nToDo=details.nBits,outF=io)\n if details.detailsName:\n open(details.detailsName,'w+').write(io.getvalue())\n else:\n sys.stderr.write(io.getvalue())\n \n" ]
[ [ "numpy.zeros" ] ]
divshacker/qiskit-nature
[ "08f6dcec5e4ac8c08f5b84e764ee78cc3d12facb" ]
[ "qiskit_nature/circuit/library/initial_states/vscf.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020, 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Initial state for vibrational modes.\"\"\"\n\nfrom typing import List, Optional\n\nimport logging\n\nimport numpy as np\n\nfrom qiskit import QuantumRegister, QuantumCircuit\nfrom qiskit.opflow import PauliSumOp\nfrom qiskit_nature.mappers.second_quantization import DirectMapper\nfrom qiskit_nature.converters.second_quantization import QubitConverter\nfrom qiskit_nature.operators.second_quantization import VibrationalOp\n\nlogger = logging.getLogger(__name__)\n\n\nclass VSCF(QuantumCircuit):\n r\"\"\"Initial state for vibrational modes.\n\n Creates an occupation number vector as defined in [1].\n As example, for 2 modes with 4 modals per mode it creates: :math:`|1000 1000\\rangle`.\n\n References:\n\n [1] Ollitrault Pauline J., Chemical science 11 (2020): 6842-6855.\n \"\"\"\n\n def __init__(\n self,\n num_modals: List[int],\n qubit_converter: Optional[QubitConverter] = None,\n ) -> None:\n \"\"\"\n Args:\n num_modals: Is a list defining the number of modals per mode. E.g. for a 3 modes system\n with 4 modals per mode num_modals = [4,4,4]\n qubit_converter: a QubitConverter instance. This argument is currently being ignored\n because only a single use-case is supported at the time of release:\n that of the :class:`DirectMapper`. However, for future-compatibility of\n this functions signature, the argument has already been inserted.\n \"\"\"\n # get the bitstring encoding initial state\n bitstr = vscf_bitstring(num_modals)\n\n # encode the bitstring in a `VibrationalOp`\n label = [\"+\" if bit else \"I\" for bit in bitstr]\n bitstr_op = VibrationalOp(\"\".join(label), num_modes=len(num_modals), num_modals=num_modals)\n\n # map the `VibrationalOp` to a qubit operator\n if qubit_converter is not None:\n logger.warning(\n \"The only supported `QubitConverter` is one with a `DirectMapper` as the mapper \"\n \"instance. However you specified %s as an input, which will be ignored until more \"\n \"variants will be supported.\",\n str(qubit_converter),\n )\n qubit_converter = QubitConverter(DirectMapper())\n qubit_op: PauliSumOp = qubit_converter.convert_match(bitstr_op)\n\n # construct the circuit\n qr = QuantumRegister(qubit_op.num_qubits, \"q\")\n super().__init__(qr, name=\"VSCF\")\n\n # add gates in the right positions\n for i, bit in enumerate(qubit_op.primitive.table.X[0]):\n if bit:\n self.x(i)\n\n\ndef vscf_bitstring(num_modals: List[int]) -> List[bool]:\n \"\"\"Compute the bitstring representing the VSCF initial state based on the modals per mode.\n\n Args:\n num_modals: Is a list defining the number of modals per mode. E.g. for a 3 modes system\n with 4 modals per mode num_modals = [4,4,4].\n\n Returns:\n The bitstring representing the state of the VSCF state as array of bools.\n \"\"\"\n num_qubits = sum(num_modals)\n bitstr = np.zeros(num_qubits, bool)\n count = 0\n for modal in num_modals:\n bitstr[count] = True\n count += modal\n\n return bitstr.tolist()\n" ]
[ [ "numpy.zeros" ] ]
pr-shukla/maddpg-keras
[ "8e3d1501f78ac2b78ee2c7053dc9299862386c17" ]
[ "noise.py" ]
[ "import numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.keras import layers\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nfrom tensorflow.keras.models import load_model\r\n\r\nclass OUActionNoise:\r\n def __init__(self, mean, std_deviation, theta=0.15, dt=0.4, x_initial=None):\r\n self.theta = theta\r\n self.mean = mean\r\n self.std_dev = std_deviation\r\n self.dt = dt\r\n self.x_initial = x_initial\r\n self.reset()\r\n\r\n def __call__(self):\r\n # Formula taken from https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process.\r\n x = (\r\n self.x_prev\r\n + self.theta * (self.mean - self.x_prev) * self.dt\r\n + self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape)\r\n )\r\n # Store x into x_prev\r\n # Makes next noise dependent on current one\r\n self.x_prev = x\r\n return x\r\n\r\n def reset(self):\r\n if self.x_initial is not None:\r\n self.x_prev = self.x_initial\r\n else:\r\n self.x_prev = np.zeros_like(self.mean)\r\n " ]
[ [ "numpy.random.normal", "numpy.zeros_like", "numpy.sqrt" ] ]
shayokdutta/nelpy_modified
[ "8f3bd505beed570bfe917ed0a7f1d8c13f31b69a" ]
[ "nelpy/filtering.py" ]
[ "#encoding : utf-8\n\"\"\"This module implements filtering functionailty for core nelpy objects\n\"\"\"\n\n# NOTE: I found a really good website + implementation of doing out-of-core\n# chunked signal filtering in Python that was scalable and efficient,\n# but I have since lost the url (I mad a note, but can't find the note).\n# Frustrating as that is, here are some other pages to check out:\n#\n# http://codereview.stackexchange.com/questions/88885/efficiently-filter-a-large-100gb-csv-file-v3\n#\n# https://www.airpair.com/python/posts/top-mistakes-python-big-data-analytics (see cythonmagic!)\n#\n# https://github.com/kghose/neurapy/blob/master/neurapy/signal/continuous.py (FFB!)\n#\n# https://pypi.python.org/pypi/out-of-core-fft/1.0\n#\n# http://matthewrocklin.com/blog/work/2015/02/17/Towards-OOC-Bag\n\n__all__ = ['butter_bandpass_filter',\n 'butter_lowpass_filtfilt',]\n\nimport numpy as np\nimport warnings\n\nfrom scipy.signal import butter, lfilter, filtfilt, firwin\nfrom math import log10, ceil\n\nfrom .core import AnalogSignalArray\n\ndef butter_bandpass(lowcut, highcut, fs, order=5):\n \"\"\"Returns a bandpass butterworth filter.\"\"\"\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n b, a = butter(order, [low, high], btype='band')\n return b, a\n\ndef butter_bandpass_filter(data, *, lowcut, highcut, fs, order=5):\n \"\"\"Band filter data using a butterworth filter.\"\"\"\n b, a = butter_bandpass(lowcut, highcut, fs, order=order)\n y = lfilter(b, a, data)\n return y\n\ndef butter_lowpass(cutoff, fs, order=5):\n \"\"\"Returns a lowpass butterworth filter.\"\"\"\n nyq = 0.5 * fs\n normal_cutoff = cutoff / nyq\n b, a = butter(order, normal_cutoff, btype='low', analog=False)\n return b, a\n\ndef butter_lowpass_filtfilt(data, *, cutoff, fs, order=5):\n \"\"\"Lowpass filter data using a zero-phase filt-filt butterworth\n filter.\n\n Performs zero-phase digital filtering by processing the input data\n in both the forward and reverse directions.\n \"\"\"\n b, a = butter_lowpass(cutoff, fs, order=order)\n y = filtfilt(b, a, data, padlen=150)\n return y\n\ndef bandpass_filter(data, lowcut=None, highcut=None, *, numtaps=None,\n fs=None):\n \"\"\"Band filter data using a zero phase FIR filter (filtfilt).\n\n Parameters\n ----------\n data : AnalogSignalArray, ndarray, or list\n lowcut : float, optional (default 1 Hz)\n Lower cut-off frequency\n highcut : float, optional (default 600 Hz)\n Upper cut-off frequency\n numtaps : int, optional (default 25)\n Number of filter taps\n fs : float, optional if AnalogSignalArray is passed\n Sampling frequency (Hz)\n\n Returns\n -------\n filtered : same type as data\n \"\"\"\n\n if numtaps is None:\n numtaps = 25\n if lowcut is None:\n lowcut = 1\n if highcut is None:\n highcut = 600\n\n if isinstance(data, (np.ndarray, list)):\n if fs is None:\n raise ValueError(\"sampling frequency must be specified!\")\n # Generate filter for detection\n b = firwin(numtaps=numtaps,\n cutoff=[lowcut/(fs/2), highcut/(fs/2)],\n pass_zero=False)\n # Filter raw data to get ripple data\n ripple_data = filtfilt(b, 1, data)\n return ripple_data\n elif isinstance(data, AnalogSignalArray):\n if fs is None:\n fs = data.fs\n warnings.warn(\"no sampling frequency provided,\"\n \" using fs={} Hz from AnalogSignalArray\".format(fs))\n # Generate filter for detection\n b = firwin(numtaps=numtaps,\n cutoff=[lowcut/(fs/2), highcut/(fs/2)],\n pass_zero=False)\n # Filter raw data to get ripple data\n ripple_data = filtfilt(b,1,data.ydata)\n # Return a copy of the 
AnalogSignalArray with the filtered data\n filtered_analogsignalarray = data.copy()\n filtered_analogsignalarray._ydata = ripple_data\n return filtered_analogsignalarray\n else:\n raise TypeError(\n \"Unknown data type {} to filter.\".format(str(type(data))))\n\ndef ripple_band_filter(data, lowcut=None, highcut=None, *, numtaps=None,\n fs=None, verbose=False):\n \"\"\"Filter data to the ripple band (default 150--250 Hz).\n\n Parameters\n ----------\n data : AnalogSignalArray, ndarray, or list\n lowcut : float, optional (default 150 Hz)\n Lower cut-off frequency\n highcut : float, optional (default 250 Hz)\n Upper cut-off frequency\n numtaps : int, optional (default automatically determined)\n Number of filter taps\n fs : float, optional if AnalogSignalArray is passed\n Sampling frequency (Hz)\n\n Returns\n -------\n filtered : same type as data\n \"\"\"\n if numtaps is None:\n if isinstance(data, (np.ndarray, list)):\n if fs is None:\n raise ValueError(\"sampling frequency must be specified!\")\n elif isinstance(data, AnalogSignalArray):\n if fs is None:\n fs = data.fs\n numtaps = approx_number_of_taps(fs=fs,\n delta_f=20,\n delta1=10e-2,\n delta2=10e-2)\n if verbose:\n print(\"Filtering with {} taps.\".format(numtaps))\n if lowcut is None:\n lowcut = 150\n if highcut is None:\n highcut = 250\n return bandpass_filter(data,\n lowcut=lowcut,\n highcut=highcut,\n numtaps=numtaps,\n fs=fs)\n\ndef approx_number_of_taps(fs, delta_f, delta1=None, delta2=None, verbose=False):\n \"\"\"Docstring goes here.\n http://dsp.stackexchange.com/questions/31066/how-many-taps-does-an-fir-filter-need\n\n Parameters\n ----------\n fs : float\n Sampling frequency (Hz)\n delta_f : float\n transition width; difference between end of pass band and start\n of stop band, in Hz\n delta1 : float, optional (default is 1% ==> 0.01 ==> 10e-3)\n ripple in passband\n delta2 : float, optional (default is -30 dB ==> 10e-3)\n suppression in the stopband\n\n Returns\n -------\n numtaps : int\n number of FIR filter taps\n \"\"\"\n if delta1 is None:\n delta1 = 10e-3\n if delta2 is None:\n delta2 = 10e-3\n\n numtaps = ceil(2*log10(1/(10*delta1*delta2))*fs/delta_f/3)\n if verbose:\n print(\"Number of filter taps: {}\".format(numtaps))\n return numtaps\n\ndef delta_band_filter(data, lowcut=None, highcut=None, *, numtaps=None,\n fs=None, verbose=False):\n \"\"\"Filter data to the rodent delta band (default 1--4 Hz).\n\n Parameters\n ----------\n data : AnalogSignalArray, ndarray, or list\n lowcut : float, optional (default 1 Hz)\n Lower cut-off frequency\n highcut : float, optional (default 4 Hz)\n Upper cut-off frequency\n numtaps : int, optional (default determined automatically)\n Number of filter taps\n fs : float, optional if AnalogSignalArray is passed\n Sampling frequency (Hz)\n\n Returns\n -------\n filtered : same type as data\n \"\"\"\n\n if numtaps is None:\n if isinstance(data, (np.ndarray, list)):\n if fs is None:\n raise ValueError(\"sampling frequency must be specified!\")\n elif isinstance(data, AnalogSignalArray):\n if fs is None:\n fs = data.fs\n numtaps = approx_number_of_taps(fs=fs,\n delta_f=1,\n delta1=10e-3,\n delta2=10e-3)\n if verbose:\n print(\"Filtering with {} taps.\".format(numtaps))\n\n if lowcut is None:\n lowcut = 1\n if highcut is None:\n highcut = 4\n return bandpass_filter(data,\n lowcut=lowcut,\n highcut=highcut,\n numtaps=numtaps,\n fs=fs)\n\ndef theta_band_filter(data, lowcut=None, highcut=None, *, numtaps=None,\n fs=None, verbose=False):\n \"\"\"Filter data to the rodent theta band (default 
6--12 Hz).\n\n Parameters\n ----------\n data : AnalogSignalArray, ndarray, or list\n lowcut : float, optional (default 6 Hz)\n Lower cut-off frequency\n highcut : float, optional (default 12 Hz)\n Upper cut-off frequency\n numtaps : int, optional (default determined automatically)\n Number of filter taps\n fs : float, optional if AnalogSignalArray is passed\n Sampling frequency (Hz)\n\n Returns\n -------\n filtered : same type as data\n \"\"\"\n\n if numtaps is None:\n if isinstance(data, (np.ndarray, list)):\n if fs is None:\n raise ValueError(\"sampling frequency must be specified!\")\n elif isinstance(data, AnalogSignalArray):\n if fs is None:\n fs = data.fs\n numtaps = approx_number_of_taps(fs=fs,\n delta_f=1,\n delta1=10e-3,\n delta2=10e-3)\n if verbose:\n print(\"Filtering with {} taps.\".format(numtaps))\n\n if lowcut is None:\n lowcut = 6\n if highcut is None:\n highcut = 12\n return bandpass_filter(data,\n lowcut=lowcut,\n highcut=highcut,\n numtaps=numtaps,\n fs=fs)\n\ndef gamma_band_filter(data, lowcut=None, highcut=None, *, numtaps=None,\n fs=None, verbose=False):\n \"\"\"Filter data to the rodent gamma band (default 32--100 Hz).\n\n Parameters\n ----------\n data : AnalogSignalArray, ndarray, or list\n lowcut : float, optional (default 32 Hz)\n Lower cut-off frequency\n highcut : float, optional (default 100 Hz)\n Upper cut-off frequency\n numtaps : int, optional (default determined automatically)\n Number of filter taps\n fs : float, optional if AnalogSignalArray is passed\n Sampling frequency (Hz)\n\n Returns\n -------\n filtered : same type as data\n \"\"\"\n\n if numtaps is None:\n if isinstance(data, (np.ndarray, list)):\n if fs is None:\n raise ValueError(\"sampling frequency must be specified!\")\n elif isinstance(data, AnalogSignalArray):\n if fs is None:\n fs = data.fs\n numtaps = approx_number_of_taps(fs=fs,\n delta_f=1,\n delta1=10e-3,\n delta2=10e-3)\n if verbose:\n print(\"Filtering with {} taps.\".format(numtaps))\n\n if lowcut is None:\n lowcut = 32\n if highcut is None:\n highcut = 100\n return bandpass_filter(data,\n lowcut=lowcut,\n highcut=highcut,\n numtaps=numtaps,\n fs=fs)\n\ndef filter_lfp(data, band=None, *, lowcut=None, highcut=None,\n numtaps=None, fs=None, verbose=False):\n \"\"\"Filter data with a zero phase FIR filtfilt filter.\n\n This is a convenience wrapper function for\n ripple_band_filter()\n theta_band_filter()\n delta_band_filter()\n ...\n\n Parameters\n ----------\n data : AnalogSignalArray, ndarray, or list\n band : string, optional\n One of ['ripple', 'theta', 'delta', ...]\n Defaults to 'ripple'.\n lowcut : float, optional (default 6 Hz)\n Lower cut-off frequency\n highcut : float, optional (default 12 Hz)\n Upper cut-off frequency\n numtaps : int, optional (default determined automatically)\n Number of filter taps\n fs : float, optional if AnalogSignalArray is passed\n Sampling frequency (Hz)\n verbose : bool, optional\n\n Returns\n -------\n filtered : same type as data.\n \"\"\"\n supported_bands = ['ripple', 'delta', 'theta', 'gamma']\n\n\n # Delta wave – (0.1 – 3 Hz)\n # Theta wave – (4 – 7 Hz)\n # Alpha wave – (8 – 15 Hz)\n # Mu wave – (7.5 – 12.5 Hz)\n # SMR wave – (12.5 – 15.5 Hz)\n # Beta wave – (16 – 31 Hz)\n # Gamma wave – (32 – 100 Hz)\n\n # slow gamma : 10–50 Hz\n # hippocampal theta : 6–10 Hz\n # motionless but alert theta : 6–7 Hz\n # cat & rabbit theta: 4-6 Hz meow! 
hop!\n\n if band is None:\n band = 'ripple'\n else:\n band = band.strip().lower()\n\n if band not in supported_bands:\n raise NotImplementedError(\"filter_lfp not supported or not yet implemented for band '{}'\".format(str(band)))\n\n kwargs = {'data' : data,\n 'lowcut' : lowcut,\n 'highcut' : highcut,\n 'numtaps' : numtaps,\n 'fs' : fs,\n 'verbose' : verbose}\n\n if band == 'ripple':\n return ripple_band_filter(**kwargs)\n if band == 'theta':\n return theta_band_filter(**kwargs)\n if band == 'delta':\n return delta_band_filter(**kwargs)\n if band == 'gamma':\n return gamma_band_filter(**kwargs)\n\n return 0\n\n########################################################################\n# uncurated below this line!\n########################################################################\n\n# taken from https://github.com/kghose/neurapy/blob/master/neurapy/signal/continuous.py\n\n\"\"\"Some methods for dealing with continuous data. We assume that the original data is in files and that they are\nannoyingly large. So all the methods here work on buffered input, using memory maps.\n\"\"\"\nimport pylab\nfrom scipy.signal import filtfilt, iirdesign\n\n#Some useful presets for loading continuous data dumped from the Neuralynx system\nlynxlfp = {\n 'fmt': 'i',\n 'fs' : 32556,\n 'fl' : 5,\n 'fh' : 100,\n 'gpass' : 0.1,\n 'gstop' : 15,\n 'buffer_len' : 100000,\n 'overlap_len': 100,\n 'max_len': -1\n}\n\nlynxspike = {\n 'fmt': 'i',\n 'fs' : 32556,\n 'fl' : 500,\n 'fh' : 8000,\n 'gpass' : 0.1,\n 'gstop' : 15,\n 'buffer_len' : 100000,\n 'overlap_len': 100,\n 'max_len': -1\n}\n\"\"\"Use these presets as follows\nfrom neurapy.utility import continuous as cc\ny,b,a = cc.butterfilt('chan_000.raw', 'test.raw', **cc.lynxlfp)\"\"\"\n\n\ndef butterfilt(finname, foutname, fmt, fs, fl=5.0, fh=100.0, gpass=1.0, gstop=30.0, ftype='butter', buffer_len=100000, overlap_len=100, max_len=-1):\n \"\"\"Given sampling frequency, low and high pass frequencies design a butterworth filter and filter our data with it.\"\"\"\n fso2 = fs/2.0\n wp = [fl/fso2, fh/fso2]\n ws = [0.8*fl/fso2,1.4*fh/fso2]\n import pdb; pdb.set_trace()\n b, a = iirdesign(wp, ws, gpass=gpass, gstop=gstop, ftype=ftype, output='ba')\n y = filtfiltlong(finname, foutname, fmt, b, a, buffer_len, overlap_len, max_len)\n return y, b, a\n\ndef filtfiltlong(finname, foutname, fmt, b, a, buffer_len=100000, overlap_len=100, max_len=-1):\n \"\"\"Use memmap and chunking to filter continuous data.\n Inputs:\n finname -\n foutname -\n fmt - data format eg 'i'\n b,a - filter coefficients\n buffer_len - how much data to process at a time\n overlap_len - how much data do we add to the end of each chunk to smooth out filter transients\n max_len - how many samples to process. If set to -1, processes the whole file\n Outputs:\n y - The memmapped array pointing to the written file\n Notes on algorithm:\n 1. The arrays are memmapped, so we let pylab (numpy) take care of handling large arrays\n 2. The filtering is done in chunks:\n Chunking details:\n |<------- b1 ------->||<------- b2 ------->|\n -----[------*--------------{-----*------]--------------*------}----------\n |<-------------- c1 -------------->|\n |<-------------- c2 -------------->|\n From the array of data we cut out contiguous buffers (b1,b2,...) and to each buffer we add some extra overlap to\n make chunks (c1,c2). 
The overlap helps to remove the transients from the filtering which would otherwise appear at\n each buffer boundary.\n \"\"\"\n x = pylab.memmap(finname, dtype=fmt, mode='r')\n if max_len == -1:\n max_len = x.size\n y = pylab.memmap(foutname, dtype=fmt, mode='w+', shape=max_len)\n\n for buff_st_idx in range(0, max_len, buffer_len):\n chk_st_idx = max(0, buff_st_idx - overlap_len)\n buff_nd_idx = min(max_len, buff_st_idx + buffer_len)\n chk_nd_idx = min(x.size, buff_nd_idx + overlap_len)\n rel_st_idx = buff_st_idx - chk_st_idx\n rel_nd_idx = buff_nd_idx - chk_st_idx\n this_y_chk = filtfilt(b, a, x[chk_st_idx:chk_nd_idx])\n y[buff_st_idx:buff_nd_idx] = this_y_chk[rel_st_idx:rel_nd_idx]\n\n return y" ]
[ [ "scipy.signal.firwin", "scipy.signal.iirdesign", "scipy.signal.butter", "scipy.signal.filtfilt", "scipy.signal.lfilter" ] ]
bperez7/moments_models
[ "d83e67b5d85f611ebf8dc10bc0d7569c962a37c2" ]
[ "model_zoo/twod_models/group_mobilenet.py" ]
[ "import torch\nfrom torch import nn\nfrom functools import partial\nimport torch.utils.model_zoo as model_zoo\n#from .utils import load_state_dict_from_url\nfrom .temporal_modeling import temporal_modeling_module\nfrom model_zoo.inflate_from_2d_model import convert_rgb_model_to_group\nfrom inspect import signature\n\n\n__all__ = ['GroupMobileNetV2', 'group_mobilenet_v2']\n\n\nmodel_urls = {\n 'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',\n}\n\n\ndef _make_divisible(v, divisor, min_value=None):\n \"\"\"\n This function is taken from the original tf repo.\n It ensures that all layers have a channel number that is divisible by 8\n It can be seen here:\n https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n :param v:\n :param divisor:\n :param min_value:\n :return:\n \"\"\"\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\n\nclass ConvBNReLU(nn.Sequential):\n def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):\n padding = (kernel_size - 1) // 2\n super(ConvBNReLU, self).__init__(\n nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),\n nn.BatchNorm2d(out_planes),\n nn.ReLU6(inplace=True)\n )\n\n\nclass InvertedResidualGroup(nn.Module):\n def __init__(self, inp, oup, num_frames, stride, expand_ratio, temporal_module = None):\n super(InvertedResidualGroup, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n hidden_dim = int(round(inp * expand_ratio))\n self.use_res_connect = self.stride == 1 and inp == oup\n\n layers = []\n if expand_ratio != 1:\n # pw\n layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1, groups=num_frames))\n layers.extend([\n # dw\n ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False, groups=num_frames),\n nn.BatchNorm2d(oup),\n ])\n self.conv = nn.Sequential(*layers)\n\n self.tam = temporal_module(duration=num_frames, channels=inp) \\\n if temporal_module is not None else None\n\n\n def forward(self, x):\n if self.use_res_connect:\n identity = x\n if self.tam:\n x = self.tam(x)\n x = self.conv(x)\n return identity + x\n # return x + self.conv(x)\n else:\n if self.tam:\n x = self.tam(x)\n return self.conv(x)\n\n\nclass GroupMobileNetV2(nn.Module):\n def __init__(self,\n num_frames,\n num_classes=1000,\n width_mult=1.0,\n inverted_residual_setting=None,\n round_nearest=8,\n block=None, \n dropout=0.5,\n without_t_stride=False, \n temporal_module=None,\n pooling_method='max'\n ):\n \"\"\"\n MobileNet V2 main class\n Args:\n num_frames (int): Number of frames\n num_classes (int): Number of classes\n width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount\n inverted_residual_setting: Network structure\n round_nearest (int): Round the number of channels in each layer to be a multiple of this number\n Set to 1 to turn off rounding\n block: Module specifying inverted residual building block for mobilenet\n \"\"\"\n super(GroupMobileNetV2, self).__init__()\n\n if block is None:\n block = InvertedResidualGroup\n input_channel = 32\n last_channel = 1280\n\n if inverted_residual_setting is None:\n inverted_residual_setting = [\n # t, c, n, s\n [1, 16, 1, 1],\n [6, 24, 2, 2],\n [6, 32, 3, 2],\n [6, 64, 4, 2],\n [6, 96, 3, 
1],\n [6, 160, 3, 2],\n [6, 320, 1, 1],\n ]\n\n # only check the first element, assuming user knows t,c,n,s are required\n if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:\n raise ValueError(\"inverted_residual_setting should be non-empty \"\n \"or a 4-element list, got {}\".format(inverted_residual_setting))\n\n self.orig_num_frames = num_frames\n self.num_frames = num_frames\n self.temporal_module = temporal_module\n self.width_mult = width_mult\n self.without_t_stride = without_t_stride\n self.pooling_method = pooling_method.lower()\n\n # building first layer\n input_channel = _make_divisible(input_channel * width_mult, round_nearest)\n self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)\n features = [ConvBNReLU(3*num_frames, input_channel*num_frames, stride=2, groups=num_frames)]\n # building inverted residual blocks\n for t, c, n, s in inverted_residual_setting:\n output_channel = _make_divisible(c * width_mult, round_nearest)\n for i in range(n):\n stride = s if i == 0 else 1\n features.append(block(input_channel*num_frames, output_channel*num_frames, num_frames, stride, expand_ratio=t, temporal_module=temporal_module))\n input_channel = output_channel\n # building last several layers\n features.append(ConvBNReLU(input_channel*num_frames, self.last_channel*num_frames, kernel_size=1, groups=num_frames))\n # make it nn.Sequential\n self.features = nn.Sequential(*features)\n\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.dropout = nn.Dropout(dropout)\n self.fc = nn.Conv1d(self.last_channel*num_frames, num_classes*num_frames, kernel_size=1, groups=num_frames, bias=True)\n # self.fc = nn.Linear(self.last_channel, num_classes)\n\n # building classifier\n #self.action_classifier = nn.Sequential(\n # nn.Dropout(dropout),\n # nn.Linear(self.last_channel, num_classes),\n #)\n\n # weight initialization\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.Linear) or isinstance(m, nn.Conv1d):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.zeros_(m.bias)\n\n def _forward_impl(self, x):\n batch_size, c_t, h, w = x.shape\n # This exists since TorchScript doesn't support inheritance, so the superclass method\n # (this one) needs to have a name other than `forward` that can be accessed in a subclass\n x = self.features(x)\n # Cannot use \"squeeze\" as batch-size can be 1 => must use reshape with x.shape[0]\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.dropout(x)\n\n # group classifiers\n x = x.view(batch_size, -1, 1)\n x = self.fc(x)\n\n n_t, ct, _ = x.shape\n out = x.view(n_t, self.orig_num_frames, -1)\n\n '''\n x = x.view(batch_size * self.orig_num_frames, -1)\n x = self.fc(x)\n\n n_t, c = x.shape\n out = x.view(batch_size, -1, c)\n '''\n \n # average the prediction from all frames\n out = torch.mean(out, dim=1)\n\n #x = nn.functional.adaptive_avg_pool2d(x, 1).reshape(x.shape[0], -1)\n #x = self.classifier(x)\n return out\n\n def forward(self, x):\n return self._forward_impl(x)\n\n def mean(self, modality='rgb'):\n return [0.485, 0.456, 0.406] if modality == 'rgb' else [0.5]\n\n def std(self, modality='rgb'):\n return [0.229, 0.224, 0.225] if modality == 'rgb' else [np.mean([0.229, 0.224, 0.225])]\n\n @property\n def network_name(self):\n name = ''\n if self.temporal_module is not None:\n 
param = signature(self.temporal_module).parameters\n temporal_module = str(param['name']).split(\"=\")[-1][1:-1]\n blending_frames = str(param['blending_frames']).split(\"=\")[-1]\n blending_method = str(param['blending_method']).split(\"=\")[-1][1:-1]\n dw_conv = True if str(param['dw_conv']).split(\"=\")[-1] == 'True' else False\n name += \"{}-b{}-{}{}-\".format(temporal_module, blending_frames,\n blending_method,\n \"\" if dw_conv else \"-allc\")\n name += 'group-mobilenetV2-{}'.format(int(self.width_mult*100))\n print (name)\n if not self.without_t_stride:\n name += \"-ts-{}\".format(self.pooling_method)\n\n return name\n\ndef group_mobilenet_v2(width_mult, num_classes, without_t_stride, groups, temporal_module_name,\n dw_conv, blending_frames, blending_method, dropout, pooling_method, input_channels, imagenet_pretrained=True, **kwargs):\n\n temporal_module = partial(temporal_modeling_module, name=temporal_module_name,\n dw_conv=dw_conv,\n blending_frames=blending_frames,\n blending_method=blending_method) if temporal_module_name is not None \\\n else None\n\n model = GroupMobileNetV2(num_frames=groups, \n num_classes=num_classes,\n width_mult=width_mult,\n inverted_residual_setting = None,\n round_nearest = 8,\n block = None,\n dropout = dropout,\n without_t_stride = without_t_stride,\n temporal_module=temporal_module,\n pooling_method = pooling_method)\n\n# for key, value in model.state_dict().items():\n# if key == 'features.1.conv.0.0.weight':\n# print (key, value.shape)\n\n if imagenet_pretrained:\n #state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'], map_location='cpu')\n state_dict = model_zoo.load_url(model_urls['mobilenet_v2'], map_location='cpu')\n# for key, value in state_dict.items():\n# if key == 'features.1.conv.0.0.weight':\n# print (key, value.shape)\n# state_dict.pop('fc.weight', None)\n# state_dict.pop('fc.bias', None)\n new_state_dict = convert_rgb_model_to_group(state_dict, model.state_dict(), groups)\n model.load_state_dict(new_state_dict, strict=False)\n \n\n # for name, param in model.named_parameters():\n # print (name, param.data.shape)\n #for key, value in state_dict.items():\n # print (key, value)\n return model\n\n'''\ndef mobilenet_v2(pretrained=False, progress=True, **kwargs):\n \"\"\"\n Constructs a MobileNetV2 architecture from\n `\"MobileNetV2: Inverted Residuals and Linear Bottlenecks\" <https://arxiv.org/abs/1801.04381>`_.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n model = MobileNetV2(**kwargs)\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],\n progress=progress)\n model.load_state_dict(state_dict)\n return model\n'''\n" ]
[ [ "torch.nn.Dropout", "torch.nn.Conv1d", "torch.nn.Sequential", "torch.nn.BatchNorm2d", "torch.nn.init.kaiming_normal_", "torch.utils.model_zoo.load_url", "torch.nn.init.ones_", "torch.nn.ReLU6", "torch.nn.Conv2d", "torch.nn.init.normal_", "torch.nn.AdaptiveAvgPool2d", "torch.nn.init.zeros_", "torch.mean" ] ]
shirou/marketstore_value_exporter
[ "8c67c584562d8c4cf9d5b2d94ab51a8953e82b64" ]
[ "main.py" ]
[ "#!/usr/bin/env python3\n\nimport argparse\nimport datetime\nimport logging\nimport os\nimport signal\nimport sys\nimport time\n\nimport pandas as pd\nimport pymarketstore as pymkts\nfrom prometheus_client import Gauge, start_http_server\nimport trading_calendars as tc\n\nlogging.basicConfig(\n level=logging.ERROR,\n format='{\"level\": \"%(levelname)s\", \"time\": \"%(asctime)s\", \"msg\": \"%(message)s\"}',\n)\nlogger = logging.getLogger(__name__)\n\n\nERROR_VALUE_OF_LATENCY = 9999 # return this value if can not find in the lookback time\n\n\ndef is_symbol_does_not_exist_error(e: Exception) -> bool:\n msgs = [\n \"Symbol not in catalog\",\n \"AttributeGroup not in catalog\",\n \"Timeframe not in catalog\",\n ]\n return any([msg in str(e) for msg in msgs])\n\n\ndef get_value(client, query: str, column: str, start_dt: datetime, end_dt: datetime):\n symbol, timeframe, attribute = query.split(\"/\")\n try:\n params = pymkts.Params(\n symbol, timeframe, attribute, limit=1, start=start_dt, end=end_dt\n )\n df = client.query(params).first().df()\n if df is None or df.empty: # there are no result\n return (0, ERROR_VALUE_OF_LATENCY)\n value = df.tail(1).get(column)\n if value is None:\n logger.error(\"column %s does not exists\", column)\n return (0, 0)\n latency = end_dt - df.index[-1]\n return (value, latency.total_seconds())\n except ConnectionError as e:\n logger.error(\"connection error\")\n except Exception as e:\n if is_symbol_does_not_exist_error(e):\n logger.error(\"symbol does not exists: %s\", query)\n else:\n logger.error(e)\n\n return (0, 0)\n\n\ndef run(args: argparse.Namespace):\n gauges_value = {}\n gauges_latency = {}\n gauge_avg = Gauge(f\"{args.prefix}_avg_latency\", \"avg latency\")\n for query in args.queries:\n # USDJPY/1Sec/TICK -> usdjpy_1sec_tick\n key = query.replace(\"/\", \"_\").replace(\"-\", \"_\").lower()\n gauges_value[query] = Gauge(\n f\"{args.prefix}_{key}_value\", \"value of {}\".format(query)\n )\n gauges_latency[query] = Gauge(\n f\"{args.prefix}_{key}_latency\", \"latency of {}\".format(query)\n )\n\n url = f\"http://{args.marketstore_host}:{args.marketstore_port}/rpc\"\n delta = datetime.timedelta(seconds=args.lookback)\n\n cal = None\n if args.market:\n cal = tc.get_calendar(\"XNYS\")\n\n while True:\n client = pymkts.Client(url)\n\n end_dt = pd.Timestamp.utcnow()\n start_dt = end_dt - delta\n\n holiday = False\n if cal and cal.is_session(end_dt) is False:\n holiday = True\n\n total = 0\n for query in args.queries:\n if holiday:\n value, latency = (0, 0)\n else:\n (value, latency) = get_value(\n client, query, args.column, start_dt, end_dt\n )\n gauges_value[query].set(value)\n gauges_latency[query].set(latency)\n total += latency\n\n gauge_avg.set(total / len(args.queries))\n time.sleep(args.interval)\n\n\ndef exit_handler(signum, frame) -> None:\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"marketstore_value_exporter\")\n parser.add_argument(\n \"--port\", type=int, default=os.environ.get(\"PORT\", 8000), help=\"prometheus port\"\n )\n parser.add_argument(\n \"--interval\",\n type=int,\n default=os.environ.get(\"INTERVAL\", 60),\n help=\"get value interval seconds\",\n )\n parser.add_argument(\n \"--lookback\",\n type=int,\n default=os.environ.get(\"LOOKBACK\", 3600),\n help=\"lookback window size(seconds) to search result\",\n )\n parser.add_argument(\n \"--marketstore-host\",\n type=str,\n default=os.environ.get(\"MARKETSTORE_HOST\", \"localhost\"),\n help=\"marketstore host\",\n )\n 
parser.add_argument(\n \"--marketstore-port\",\n type=int,\n default=os.environ.get(\"MARKETSTORE_PORT\", 5993),\n help=\"marketstore port\",\n )\n parser.add_argument(\n \"--prefix\",\n type=str,\n default=os.environ.get(\"PREFIX\", \"marketstore\"),\n help=\"prometheus key prefix\",\n )\n parser.add_argument(\n \"--column\",\n type=str,\n default=os.environ.get(\"COLUMN\", \"price\"),\n help=\"column name to get\",\n )\n parser.add_argument(\n \"--market\",\n type=str,\n default=os.environ.get(\"MARKET\", \"\"),\n help=\"market to set holidays\",\n )\n\n parser.add_argument(\n \"queries\",\n metavar=\"USDJPY/1Sec/TICK\",\n type=str,\n nargs=\"+\",\n help=\"list of marketstore query\",\n )\n\n args = parser.parse_args()\n signal.signal(signal.SIGTERM, exit_handler)\n\n start_http_server(args.port)\n\n run(args)\n" ]
[ [ "pandas.Timestamp.utcnow" ] ]
jobsfan/pytorch
[ "221ae8e3673f8d2fbf0a58f40a30553c76084831" ]
[ "liuer/7.py" ]
[ "# 多维特征输入,目前还没搞懂啥意思,感觉y值不像是个分类,像是个回归\nimport numpy as np\nimport torch\n\nx_ = np.loadtxt('diabetes_data.csv.gz',delimiter=' ',dtype=np.float32)\ny_ = np.loadtxt('diabetes_target.csv.gz',delimiter=' ',dtype=np.float32)\ny_ = np.expand_dims(y_,axis=1)\n\nx_data = torch.from_numpy(x_)\ny_data = torch.from_numpy(y_)\n\nclass Model(torch.nn.Module):\n def __init__(self):\n super(Model,self).__init__()\n self.linear1 = torch.nn.Linear(10,8)\n self.linear2 = torch.nn.Linear(8,6)\n self.linear3 = torch.nn.Linear(6,4)\n self.linear4 = torch.nn.Linear(4,1)\n self.sigmoid = torch.nn.Sigmoid()\n\n def forward(self,x):\n x = self.sigmoid(self.linear1(x))\n x = self.sigmoid(self.linear2(x))\n x = self.sigmoid(self.linear3(x))\n x = self.sigmoid(self.linear4(x))\n return x\n\nmodel = Model()\n\ncriterion = torch.nn.BCELoss(reduction='mean')\n\noptimizer = torch.optim.SGD(model.parameters(),lr=0.1)\n\nfor epoch in range(1000):\n y_pred = model(x_data)\n loss = criterion(y_pred,y_data)\n print(epoch,loss.item())\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()" ]
[ [ "torch.nn.Linear", "torch.nn.Sigmoid", "torch.from_numpy", "numpy.loadtxt", "torch.nn.BCELoss", "numpy.expand_dims" ] ]
GeorgeMLP/deepfake-detection
[ "9038ef46cea0dc1cd65bf7dbf25ea276391692e7" ]
[ "Faces-HQ/DeepFake Detection Faces-HQ.py" ]
[ "import cv2\nimport numpy as np\nimport os\nimport radialProfile\nimport glob\nfrom matplotlib import pyplot as plt\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\n\n\n# uncomment following code to create the features on your own\n'''\npath = ['D:/Datasets/Faces-HQ/thispersondoesntexists_10K', 'D:/Datasets/Faces-HQ/100KFake_10K',\n 'D:/Datasets/Faces-HQ/Flickr-Faces-HQ_10K', 'D:/Datasets/Faces-HQ/celebA-HQ_10K']\nlabels = [1, 1, 0, 0]\nformat_file = ['jpg', 'jpg', 'jpg', 'jpg']\nepsilon = 1e-8\ndata = {}\n# number of samples from each dataset\nstop = 250\nnumber_iter = 4 * stop\npsd1D_total = np.zeros([number_iter, 722])\nlabel_total = np.zeros([number_iter])\niter_ = 0\nfor z in range(4):\n cont = 0\n psd1D_average_org = np.zeros(722)\n print(path[z])\n for filename in glob.glob(path[z] + \"/*.\" + format_file[z]):\n img = cv2.imread(filename, 0)\n f = np.fft.fft2(img)\n fshift = np.fft.fftshift(f)\n fshift += epsilon\n magnitude_spectrum = 20 * np.log(np.abs(fshift))\n # Calculate the azimuthally averaged 1D power spectrum\n psd1D = radialProfile.azimuthalAverage(magnitude_spectrum)\n psd1D_total[iter_, :] = psd1D\n label_total[iter_] = labels[z]\n cont += 1\n iter_ += 1\n if cont >= stop:\n break\ndata[\"data\"] = psd1D_total\ndata[\"label\"] = label_total\noutput = open('dataset_freq_1000.pkl', 'wb')\npickle.dump(data, output)\noutput.close()\nprint(\"DATA Saved\")\n'''\n\n# load feature file\npkl_file = open('dataset_freq_1000.pkl', 'rb')\ndata = pickle.load(pkl_file)\npkl_file.close()\nX = data[\"data\"]\ny = data[\"label\"]\nplt.plot(y)\nplt.show() # show the label distribution of the dataset\n\nnum = int(X.shape[0] / 2)\nnum_feat = X.shape[1]\npsd1D_org_0 = np.zeros((num, num_feat))\npsd1D_org_1 = np.zeros((num, num_feat))\npsd1D_org_0_mean = np.zeros(num_feat)\npsd1D_org_0_std = np.zeros(num_feat)\npsd1D_org_1_mean = np.zeros(num_feat)\npsd1D_org_1_std = np.zeros(num_feat)\ncont_0 = 0\ncont_1 = 0\n\n# separate real and fake using the label\nfor x in range(X.shape[0]):\n if y[x] == 0:\n psd1D_org_0[cont_0, :] = X[x, :]\n cont_0 += 1\n elif y[x] == 1:\n psd1D_org_1[cont_1, :] = X[x, :]\n cont_1 += 1\n\n# compute statistics\nfor x in range(num_feat):\n psd1D_org_0_mean[x] = np.mean(psd1D_org_0[:, x])\n psd1D_org_0_std[x] = np.std(psd1D_org_0[:, x])\n psd1D_org_1_mean[x] = np.mean(psd1D_org_1[:, x])\n psd1D_org_1_std[x] = np.std(psd1D_org_1[:, x])\n\n# Plot\nx = np.arange(0, num_feat, 1)\nfig, ax = plt.subplots(figsize=(15, 9))\nax.plot(x, psd1D_org_0_mean, alpha=0.5, color='red', label='Real', linewidth=2.0)\nax.fill_between(x, psd1D_org_0_mean - psd1D_org_0_std, psd1D_org_0_mean + psd1D_org_0_std, color='red', alpha=0.2)\nax.plot(x, psd1D_org_1_mean, alpha=0.5, color='blue', label='Fake', linewidth=2.0)\nax.fill_between(x, psd1D_org_1_mean - psd1D_org_1_std, psd1D_org_1_mean + psd1D_org_1_std, color='blue', alpha=0.2)\nax.legend()\nplt.tick_params(axis='x', labelsize=20)\nplt.tick_params(axis='y', labelsize=20)\nax.legend(loc='best', prop={'size': 20})\nplt.xlabel(\"Spatial Frequency\", fontsize=20)\nplt.ylabel(\"Power Spectrum\", fontsize=20)\nplt.show()\n\nnum = 10\nLR = 0\nSVM = 0\nfor z in range(num):\n # read python dict back from the file\n pkl_file = open('dataset_freq_1000.pkl', 'rb')\n data = pickle.load(pkl_file)\n pkl_file.close()\n X = data[\"data\"]\n y = data[\"label\"]\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n svclassifier = 
SVC(kernel='linear')\n svclassifier.fit(X_train, y_train)\n # print('Accuracy on test set: {:.3f}'.format(svclassifier.score(X_test, y_test)))\n logreg = LogisticRegression(solver='liblinear', max_iter=1000)\n logreg.fit(X_train, y_train)\n # print('Accuracy on test set: {:.3f}'.format(logreg.score(X_test, y_test)))\n SVM += svclassifier.score(X_test, y_test)\n LR += logreg.score(X_test, y_test)\n\nprint(\"Average SVM: \"+str(SVM/num))\nprint(\"Average LR: \"+str(LR/num))\n" ]
[ [ "numpy.zeros", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplots", "numpy.mean", "matplotlib.pyplot.tick_params", "numpy.std", "sklearn.svm.SVC", "sklearn.linear_model.LogisticRegression", "numpy.arange", "matplotlib.pyplot.ylabel", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.show" ] ]
npielawski/py_alpha_amd_release
[ "6fb5b3cdef65ba8902daea050785dd73970002c2" ]
[ "transforms/util.py" ]
[ "\n#\n# Py-Alpha-AMD Registration Framework\n# Author: Johan Ofverstedt\n# Reference: Fast and Robust Symmetric Image Registration Based on Distances Combining Intensity and Spatial Information\n#\n# Copyright 2019 Johan Ofverstedt\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\n\n#\n# Utility functions\n#\n\nimport numpy as np\n\nfrom .transform_base import TransformBase\nfrom .translation_transform import TranslationTransform\nfrom .composite_transform import CompositeTransform\n\ndef image_center_point(image, spacing = None):\n shape = image.shape\n if spacing is None:\n return (np.array(shape)-1) * 0.5\n else:\n return ((np.array(shape)-1) * spacing) * 0.5\n\ndef image_diagonal(image, spacing = None):\n shp = np.array(image.shape)-1\n if spacing is not None:\n shp = shp * spacing\n return np.sqrt(np.sum(np.square(shp)))\n\ndef make_centered_transform(t, cp1, cp2):\n dim = t.get_dim()\n t1 = TranslationTransform(dim)\n t2 = TranslationTransform(dim)\n t1.set_params(-cp1)\n t2.set_params(cp2)\n return CompositeTransform(dim, [t1, t, t2], [False, True, False])\n\ndef make_image_centered_transform(t, image1, image2, image1_spacing = None, image2_spacing = None):\n dim = image1.ndim\n t1 = TranslationTransform(dim)\n t2 = TranslationTransform(dim)\n t1.set_params(-image_center_point(image1, image1_spacing))\n t2.set_params(image_center_point(image2, image2_spacing))\n return CompositeTransform(dim, [t1, t, t2], [False, True, False])\n" ]
[ [ "numpy.square", "numpy.array" ] ]
yiliucs/flower
[ "db4d7db353a702b79cbef48b9d4fa5831d91eb00" ]
[ "src/flwr/strategy/qffedavg.py" ]
[ "# Copyright 2020 Adap GmbH. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"FAIR RESOURCE ALLOCATION IN FEDERATED LEARNING [Li et al., 2020] strategy.\nPaper: https://openreview.net/pdf?id=ByexElSYDr\n\"\"\"\n\n\nfrom typing import Callable, Dict, List, Optional, Tuple\n\nimport numpy as np\n\nfrom flwr.client_manager import ClientManager\nfrom flwr.client_proxy import ClientProxy\nfrom flwr.typing import EvaluateIns, EvaluateRes, FitIns, FitRes, Weights\n\nfrom .aggregate import aggregate_qffl, weighted_loss_avg\nfrom .fedavg import FedAvg\nfrom .parameter import parameters_to_weights, weights_to_parameters\n\n\nclass QffedAvg(FedAvg):\n \"\"\"Configurable QffedAvg strategy implementation.\"\"\"\n\n # pylint: disable-msg=too-many-arguments,too-many-instance-attributes\n def __init__(\n self,\n q_param: float = 0.2,\n qffl_learning_rate: float = 0.1,\n fraction_fit: float = 0.1,\n fraction_eval: float = 0.1,\n min_fit_clients: int = 1,\n min_eval_clients: int = 1,\n min_available_clients: int = 1,\n eval_fn: Optional[Callable[[Weights], Optional[Tuple[float, float]]]] = None,\n on_fit_config_fn: Optional[Callable[[int], Dict[str, str]]] = None,\n on_evaluate_config_fn: Optional[Callable[[int], Dict[str, str]]] = None,\n accept_failures: bool = True,\n ) -> None:\n super().__init__()\n self.min_fit_clients = min_fit_clients\n self.min_eval_clients = min_eval_clients\n self.fraction_fit = fraction_fit\n self.fraction_eval = fraction_eval\n self.min_available_clients = min_available_clients\n self.eval_fn = eval_fn\n self.on_fit_config_fn = on_fit_config_fn\n self.on_evaluate_config_fn = on_evaluate_config_fn\n self.accept_failures = accept_failures\n self.learning_rate = qffl_learning_rate\n self.q_param = q_param\n self.pre_weights: Optional[Weights] = None\n\n def __repr__(self) -> str:\n # pylint: disable-msg=line-too-long\n rep = f\"QffedAvg(learning_rate={self.learning_rate}, q_param={self.q_param}, pre_weights={self.pre_weights})\"\n return rep\n\n def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]:\n \"\"\"Return the sample size and the required number of available clients.\"\"\"\n num_clients = int(num_available_clients * self.fraction_fit)\n return max(num_clients, self.min_fit_clients), self.min_available_clients\n\n def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]:\n \"\"\"Use a fraction of available clients for evaluation.\"\"\"\n num_clients = int(num_available_clients * self.fraction_eval)\n return max(num_clients, self.min_eval_clients), self.min_available_clients\n\n def evaluate(self, weights: Weights) -> Optional[Tuple[float, float]]:\n \"\"\"Evaluate model weights using an evaluation function (if provided).\"\"\"\n if self.eval_fn is None:\n # No evaluation function provided\n return None\n return self.eval_fn(weights)\n\n def on_configure_fit(\n self, rnd: int, weights: Weights, client_manager: 
ClientManager\n ) -> List[Tuple[ClientProxy, FitIns]]:\n \"\"\"Configure the next round of training.\"\"\"\n self.pre_weights = weights\n parameters = weights_to_parameters(weights)\n config = {}\n if self.on_fit_config_fn is not None:\n # Custom fit config function provided\n config = self.on_fit_config_fn(rnd)\n fit_ins = (parameters, config)\n\n # Sample clients\n sample_size, min_num_clients = self.num_fit_clients(\n client_manager.num_available()\n )\n clients = client_manager.sample(\n num_clients=sample_size, min_num_clients=min_num_clients\n )\n\n # Return client/config pairs\n return [(client, fit_ins) for client in clients]\n\n def on_configure_evaluate(\n self, rnd: int, weights: Weights, client_manager: ClientManager\n ) -> List[Tuple[ClientProxy, EvaluateIns]]:\n \"\"\"Configure the next round of evaluation.\"\"\"\n # Do not configure federated evaluation if a centralized evaluation\n # function is provided\n if self.eval_fn is not None:\n return []\n\n # Parameters and config\n parameters = weights_to_parameters(weights)\n config = {}\n if self.on_evaluate_config_fn is not None:\n # Custom evaluation config function provided\n config = self.on_evaluate_config_fn(rnd)\n evaluate_ins = (parameters, config)\n\n # Sample clients\n sample_size, min_num_clients = self.num_evaluation_clients(\n client_manager.num_available()\n )\n clients = client_manager.sample(\n num_clients=sample_size, min_num_clients=min_num_clients\n )\n\n # Return client/config pairs\n return [(client, evaluate_ins) for client in clients]\n\n def on_aggregate_fit(\n self,\n rnd: int,\n results: List[Tuple[ClientProxy, FitRes]],\n failures: List[BaseException],\n ) -> Optional[Weights]:\n \"\"\"Aggregate fit results using weighted average.\"\"\"\n if not results:\n return None\n # Do not aggregate if there are failures and failures are not accepted\n if not self.accept_failures and failures:\n return None\n # Convert results\n\n def norm_grad(grad_list: List[Weights]) -> float:\n # input: nested gradients\n # output: square of the L-2 norm\n client_grads = grad_list[0]\n for i in range(1, len(grad_list)):\n client_grads = np.append(\n client_grads, grad_list[i]\n ) # output a flattened array\n return float(np.sum(np.square(client_grads)))\n\n deltas = []\n hs_ffl = []\n\n if self.pre_weights is None:\n raise Exception(\"QffedAvg pre_weights are None in on_aggregate_fit\")\n\n weights_before = self.pre_weights\n eval_result = self.evaluate(weights_before)\n if eval_result is not None:\n loss, _ = eval_result\n\n for _, (parameters, _, _, _) in results:\n new_weights = parameters_to_weights(parameters)\n # plug in the weight updates into the gradient\n grads = [\n (u - v) * 1.0 / self.learning_rate\n for u, v in zip(weights_before, new_weights)\n ]\n deltas.append(\n [np.float_power(loss + 1e-10, self.q_param) * grad for grad in grads]\n )\n # estimation of the local Lipschitz constant\n hs_ffl.append(\n self.q_param\n * np.float_power(loss + 1e-10, (self.q_param - 1))\n * norm_grad(grads)\n + (1.0 / self.learning_rate)\n * np.float_power(loss + 1e-10, self.q_param)\n )\n\n return aggregate_qffl(weights_before, deltas, hs_ffl)\n\n def on_aggregate_evaluate(\n self,\n rnd: int,\n results: List[Tuple[ClientProxy, EvaluateRes]],\n failures: List[BaseException],\n ) -> Optional[float]:\n \"\"\"Aggregate evaluation losses using weighted average.\"\"\"\n if not results:\n return None\n # Do not aggregate if there are failures and failures are not accepted\n if not self.accept_failures and failures:\n return None\n 
return weighted_loss_avg([evaluate_res for client, evaluate_res in results])\n\n def on_conclude_round(\n self, rnd: int, loss: Optional[float], acc: Optional[float]\n ) -> bool:\n \"\"\"Always continue training.\"\"\"\n return True\n" ]
[ [ "numpy.square", "numpy.float_power", "numpy.append" ] ]
echo-ray/Kashgari
[ "fc8ba49fd051bd8c09f0c4092aa96b656f27eced" ]
[ "kashgari/embeddings/bare_embedding.py" ]
[ "# encoding: utf-8\n\n# author: BrikerMan\n# contact: [email protected]\n# blog: https://eliyar.biz\n\n# file: bare_embedding.py\n# time: 2019-05-20 10:36\nimport logging\nfrom typing import Union, Optional\n\nfrom tensorflow import keras\n\nfrom kashgari.embeddings.base_embedding import Embedding\nfrom kashgari.processors.base_processor import BaseProcessor\n\nL = keras.layers\n\n\n# Todo: A better name for this class\nclass BareEmbedding(Embedding):\n\n \"\"\"Embedding layer without pre-training, train embedding layer while training model\"\"\"\n\n def __init__(self,\n task: str = None,\n sequence_length: Union[int, str] = 'auto',\n embedding_size: int = 100,\n processor: Optional[BaseProcessor] = None,\n from_saved_model: bool = False):\n \"\"\"\n Init bare embedding (embedding without pre-training)\n\n Args:\n sequence_length: ``'auto'``, ``'variable'`` or integer. When using ``'auto'``, use the 95% of corpus length\n as sequence length. When using ``'variable'``, model input shape will set to None, which can handle\n various length of input, it will use the length of max sequence in every batch for sequence length.\n If using an integer, let's say ``50``, the input output sequence length will set to 50.\n embedding_size: Dimension of the dense embedding.\n \"\"\"\n super(BareEmbedding, self).__init__(task=task,\n sequence_length=sequence_length,\n embedding_size=embedding_size,\n processor=processor,\n from_saved_model=from_saved_model)\n if not from_saved_model:\n self._build_model()\n\n def _build_model(self, **kwargs):\n if self.sequence_length == 0 or \\\n self.sequence_length == 'auto' or \\\n self.token_count == 0:\n logging.debug('need to build after build_word2idx')\n else:\n input_tensor = L.Input(shape=(self.sequence_length,),\n name=f'input')\n layer_embedding = L.Embedding(self.token_count,\n self.embedding_size,\n name=f'layer_embedding')\n\n embedded_tensor = layer_embedding(input_tensor)\n self.embed_model = keras.Model(input_tensor, embedded_tensor)\n\n\nif __name__ == \"__main__\":\n print('hello world')\n" ]
[ [ "tensorflow.keras.Model" ] ]
ehua7365/bn3d
[ "b2ab7411c32c836fe5d0e48900461c1911408774" ]
[ "tests/test_cli.py" ]
[ "import numpy as np\nimport pytest\nfrom click.testing import CliRunner\n\nfrom panqec.cli import cli, read_bias_ratios, read_range_input\n\n\[email protected]\ndef runner():\n \"\"\"Click CliRunner with isolated file system.\"\"\"\n _runner = CliRunner()\n with _runner.isolated_filesystem():\n yield _runner\n assert hasattr(_runner, 'invoke')\n\n\[email protected]('arguments', [\n [],\n ['--help'],\n ['--version'],\n])\ndef test_cli_basic(arguments, runner):\n result = runner.invoke(cli, arguments)\n assert result.exit_code == 0\n\n\nclass TestLS:\n\n def test_ls_all(self, runner):\n result = runner.invoke(cli, ['ls'])\n assert result.exit_code == 0\n assert 'Codes:' in result.output\n assert 'Error Models (Noise):' in result.output\n assert 'Decoders:' in result.output\n\n def test_ls_codes(self, runner):\n result = runner.invoke(cli, ['ls', 'codes'])\n assert result.exit_code == 0\n assert 'Codes:' in result.output\n assert 'Error Models (Noise):' not in result.output\n assert 'Decoders:' not in result.output\n\n def test_ls_noise(self, runner):\n result = runner.invoke(cli, ['ls', 'noise'])\n assert result.exit_code == 0\n assert 'Codes:' not in result.output\n assert 'Error Models (Noise):' in result.output\n assert 'Decoders:' not in result.output\n\n def test_ls_decoders(self, runner):\n result = runner.invoke(cli, ['ls', 'decoders'])\n assert result.exit_code == 0\n assert 'Codes:' not in result.output\n assert 'Error Models (Noise):' not in result.output\n assert 'Decoders:' in result.output\n\n\ndef test_read_bias_ratios():\n expected_bias_ratios = [0.5, 1, 3, 10, 30, 100, np.inf]\n eta_string = '0.5,1,3,10,30,100,inf'\n bias_ratios = read_bias_ratios(eta_string)\n assert len(bias_ratios) == len(expected_bias_ratios)\n for eta, expected_eta in zip(bias_ratios, expected_bias_ratios):\n assert eta == expected_eta\n assert type(eta) == type(expected_eta)\n\n\[email protected]('spec,expected_values', [\n ('0:0.6:0.005', np.arange(0, 0.605, 0.005).tolist()),\n ('1,2,3', [1.0, 2.0, 3.0]),\n ('13.21', [13.21]),\n ('1e-2', [0.01]),\n])\ndef test_read_range_input(spec, expected_values):\n values = read_range_input(spec)\n assert len(values) == len(expected_values)\n for value, expected_value in zip(values, expected_values):\n assert value == expected_value\n assert type(value) == type(expected_value)\n" ]
[ [ "numpy.arange" ] ]
jrm5100/clarite-python
[ "a39d4f56490418f148195d2ff5be4e2e63afbbba", "a39d4f56490418f148195d2ff5be4e2e63afbbba" ]
[ "clarite/modules/survey/survey_design.py", "tests/modify/test_modify.py" ]
[ "from typing import Optional, Union, Dict, Tuple\n\nimport click\nimport numpy as np\nimport pandas as pd\nfrom pandas.core.indexing import IndexingError\n\n\nclass SurveyDesignSpec:\n \"\"\"\n Holds parameters for building a statsmodels SurveyDesign object\n\n Parameters\n ----------\n survey_df: pd.DataFrame\n A DataFrame containing Cluster, Strata, and/or weights data.\n This should include all observations in the data analyzed using it (matching via index value)\n strata: string or None\n The name of the strata variable in the survey_df\n cluster: string or None\n The name of the cluster variable in the survey_df\n nest: bool, default False\n Whether or not the clusters are nested in the strata (The same cluster IDs are repeated in different strata)\n weights: string or dictionary(string:string)\n The name of the weights variable in the survey_df, or a dictionary mapping variable names to weight names\n fpc: string or None\n The name of the variable in the survey_df that contains the finite population correction information.\n This reduces variance when a substantial portion of the population is sampled.\n May be specified as the total population size, or the fraction of the population that was sampled.\n single_cluster: {'fail', 'adjust', 'average', 'certainty'}\n Setting controlling variance calculation in single-cluster ('lonely psu') strata\n 'fail': default, throw an error\n 'adjust': use the average of all observations (more conservative)\n 'average': use the average value of other strata\n 'certainty': that strata doesn't contribute to the variance (0 variance)\n drop_unweighted: bool, default False\n If True, drop observations that are missing a weight value. This may not be statistically sound.\n Otherwise the result for variables with missing weights (when the variable is not missing) is NULL.\n\n Attributes\n ----------\n\n Examples\n --------\n >>> import clarite\n >>> clarite.analyze.SurveyDesignSpec(survey_df=survey_design_replication,\n strata=\"SDMVSTRA\",\n cluster=\"SDMVPSU\",\n nest=True,\n weights=weights_replication,\n fpc=None,\n single_cluster='fail')\n \"\"\"\n\n def __init__(\n self,\n survey_df: pd.DataFrame,\n strata: Optional[str] = None,\n cluster: Optional[str] = None,\n nest: bool = False,\n weights: Union[str, Dict[str, str]] = None,\n fpc: Optional[str] = None,\n single_cluster: Optional[str] = \"fail\",\n drop_unweighted: bool = False,\n ):\n\n # Validate index\n if isinstance(survey_df.index, pd.MultiIndex):\n raise ValueError(\"survey_df: DataFrame must not have a multiindex\")\n survey_df.index.name = \"ID\"\n self.index = survey_df.index\n\n # At least one must be defined\n if all([x is None for x in (strata, cluster, weights)]):\n raise ValueError(\n \"\"\"At least one of strata, cluster, or weights must be provided\"\"\"\n )\n\n # Store parameters\n self.drop_unweighted = drop_unweighted\n self.single_cluster = single_cluster\n # Warn if drop_unweighted is set to True\n if self.drop_unweighted:\n click.echo(\n click.style(\n \"WARNING: Dropping observations with missing weights. 
\"\n \"This may not be statistically sound, and the cause of missing weights \"\n \"should be determined.\",\n fg=\"red\",\n )\n )\n # Validate single_cluster parameter\n if single_cluster not in {\"fail\", \"adjust\", \"average\", \"certainty\"}:\n raise ValueError(\n \"'single_cluster' must be one of 'fail', 'adjust', 'average', or 'certainty'.\"\n )\n\n # Defaults\n # Strata\n self.has_strata: bool = False\n self.strata_name: Optional[str] = None\n self.strata_values: Optional[pd.Series] = None\n self.n_strat: Optional[int] = None\n # Cluster\n self.has_cluster: bool = False\n self.cluster_name: Optional[str] = None\n self.cluster_values: Optional[pd.Series] = None\n self.n_clust: Optional[int] = None\n self.nested_clusters: bool = False\n # Weight\n self.single_weight: bool = False # If True, weight_values is a Series\n self.weight_name: Optional[str] = None\n self.multi_weight: bool = (\n False # If True, weight_values is a dict of weight name : Series\n )\n self.weight_names: Optional[\n Dict[str, str]\n ] = None # Dict of regression variable name : weight name\n self.weight_values: Optional[Union[pd.Series, Dict[str, pd.Series]]] = None\n # FPC\n self.has_fpc: bool = False\n self.fpc_name: Optional[str] = None\n self.fpc_original: Optional[pd.Series] = None\n self.fpc_values: Optional[pd.Series] = None\n\n # Process inputs\n self.process_strata(strata, survey_df)\n self.process_clusters(cluster, survey_df, nest)\n self.process_weights(weights, survey_df)\n self.process_fpc(fpc, survey_df)\n\n # Map relationships between inputs\n combined = pd.concat(\n [self.strata_values, self.cluster_values, self.fpc_values], axis=1\n )\n # The number of clusters per stratum\n self.clust_per_strat = combined.groupby(\"strat\")[\"clust\"].nunique()\n # The stratum for each cluster\n self.strat_for_clust = (\n combined.groupby(\"clust\")[\"strat\"].unique().apply(lambda l: l[0])\n )\n\n # Initialize the subset information (a boolean array of True, indicating every row is kept)\n self.subset_array = pd.Series(True, index=survey_df.index, name=\"subset\")\n self.subset_count = 0\n\n # Raise an error if single clusters were found but shouldn't be allowed\n if (\n self.has_strata\n and self.has_cluster\n and self.single_cluster not in {\"average\", \"certainty\", \"adjust\"}\n ):\n if self.clust_per_strat.min() < 2:\n single_clusters = [\n str(c)\n for c in self.clust_per_strat[\n self.clust_per_strat == 1\n ].index.values\n ]\n raise ValueError(\n f\"One or more strata have single clusters: {', '.join(single_clusters)}. 
\"\n f\"Adjust the 'single_cluster' SurveyDesignSpec parameter \"\n f\"or reassign the singular cluster to avoid this error.\"\n )\n\n def process_strata(self, strata, survey_df):\n \"\"\"\n Load Strata or generate default values\n \"\"\"\n if strata is None:\n self.strata_values = pd.Series(\n np.ones(len(self.index)), index=self.index, name=\"strat\"\n )\n else:\n self.strata_name = strata\n self.has_strata = True\n if strata not in survey_df:\n raise KeyError(\n f\"strata key ('{strata}') was not found in the survey_df\"\n )\n elif survey_df[strata].isna().any():\n raise ValueError(\n f\"{survey_df[strata].isna().sum():,} of {len(survey_df):,} strata values were missing\"\n )\n else:\n self.strata_values = survey_df[strata].rename(\"strat\")\n self.n_strat = len(self.strata_values.unique())\n\n # Make categorical\n self.strata_values = self.strata_values.astype(\"category\")\n\n def process_clusters(self, cluster, survey_df, nest):\n \"\"\"\n Load clusters or generate default values\n \"\"\"\n if cluster is None:\n self.cluster_values = pd.Series(\n np.arange(len(self.index)), index=self.index, name=\"clust\"\n )\n else:\n self.cluster_name = cluster\n self.has_cluster = True\n if cluster not in survey_df:\n raise KeyError(\n f\"cluster key ('{cluster}') was not found in the survey_df\"\n )\n elif survey_df[cluster].isna().any():\n raise ValueError(\n f\"{survey_df[cluster].isna().sum():,} of {len(survey_df):,} \"\n f\"cluster values were missing\"\n )\n else:\n self.cluster_values = survey_df[cluster].rename(\"clust\")\n self.n_clust = len(self.cluster_values.unique())\n\n # If 'nest', recode the PSUs to be sure that the same PSU ID in different strata are treated as distinct PSUs.\n if nest and self.has_strata and self.has_cluster:\n self.cluster_values = (\n self.strata_values.astype(str) + \"-\" + self.cluster_values.astype(str)\n )\n self.cluster_values = self.cluster_values.rename(\"clust\")\n self.nested_clusters = True\n\n # Make categorical\n self.cluster_values = self.cluster_values.astype(\"category\")\n\n def process_weights(self, weights, survey_df):\n if weights is None:\n self.weight_values = pd.Series(\n np.ones(len(self.index)), index=self.index, name=\"weights\"\n )\n elif type(weights) == dict:\n # self.weight_values will be a dictionary of weight_name: Series of weight values\n self.multi_weight = True\n self.weight_names = weights\n self.weight_values = dict() # dict of weight name : weight values\n for var_name, weight_name in weights.items():\n if weight_name not in survey_df:\n # Raise an error if the weight wasn't found in the survey dataframe\n raise KeyError(\n f\"weights key for '{var_name}' ('{weight_name}') was not found in the survey_df\"\n )\n elif weight_name not in self.weight_values:\n # If it hasn't already been processed (for another variable)\n # Replace zero/negative weights with a small number to avoid divide by zero\n zero_weights = survey_df[weight_name] <= 0\n survey_df.loc[zero_weights, weight_name] = 1e-99\n self.weight_values[weight_name] = survey_df[weight_name]\n elif type(weights) == str:\n # self.weight_values will be a Series of weight values\n self.single_weight = True\n self.weight_name = weights\n if self.weight_name not in survey_df:\n raise KeyError(\n f\"the weight ('{self.weight_name}') was not found in the survey_df\"\n )\n else:\n # Replace zero weights with a small number to avoid divide by zero\n zero_weights = survey_df[self.weight_name] <= 0\n survey_df.loc[zero_weights, self.weight_name] = 1e-99\n self.weight_values = 
survey_df[self.weight_name]\n else:\n raise ValueError(\n \"'weight' must be None, a weight name string, or a dictionary\"\n \" mapping variable name strings to weight name strings\"\n )\n\n def process_fpc(self, fpc, survey_df):\n \"\"\"\n FPC is passed in as\n \"\"\"\n if fpc is None:\n self.fpc_values = pd.Series(\n np.zeros(len(self.index)), index=self.index, name=\"fpc\"\n )\n else:\n # TODO: Should fpc scaling occuring after subsetting?\n self.fpc_name = fpc\n self.has_fpc = True\n if fpc not in survey_df:\n raise KeyError(f\"fpc key ('{fpc}') was not found in the survey_df\")\n elif survey_df[fpc].isna().any():\n raise ValueError(\n f\"{survey_df[fpc].isna().sum():,} of {len(survey_df):,} fpc values were missing\"\n )\n else:\n self.fpc_values_original = survey_df[fpc].rename(\n \"fpc\"\n ) # Need unmodified version for R code\n self.fpc_values = survey_df[fpc].rename(\"fpc\")\n # Validate\n if not all(self.fpc_values <= 1):\n # Assume these are actual population size, and convert to a fraction\n if self.has_strata:\n # Divide the sampled strata size by the fpc\n combined = pd.merge(\n self.strata_values,\n self.fpc_values,\n left_index=True,\n right_index=True,\n )\n sampled_strata_size = combined.groupby(\"strat\")[\n \"fpc\"\n ].transform(\"size\")\n self.fpc_values = sampled_strata_size / self.fpc_values\n elif self.has_cluster and not self.has_strata:\n # Clustered sampling: Divide sampled clusters by the fpc\n sampled_cluster_size = len(self.cluster_values.unique())\n self.fpc_values = sampled_cluster_size / self.fpc_values\n try:\n assert all((self.fpc_values >= 0) & (self.fpc_values <= 1))\n except AssertionError:\n raise ValueError(\"Error processing FPC- invalid values\")\n # Reindex to list fpc for each observation\n combined = pd.concat([self.cluster_values, self.fpc_values], axis=1)\n self.fpc_values = (\n combined.groupby(\"clust\")[\"fpc\"].unique().apply(lambda l: l[0])\n )\n\n def __str__(self):\n \"\"\"String version of the survey design specification, used in logging\"\"\"\n result = (\n f\"Survey Design\\n\\t{len(self.index):,} rows in the survey design data\\n\"\n )\n # Strata\n if self.has_strata:\n result += f\"\\tStrata: {len(self.strata_values.unique())} unique values of {self.strata_name}\\n\"\n else:\n result += \"\\tStrata: None\\n\"\n # Clusters\n if self.has_cluster:\n result += f\"\\tCluster: {len(self.cluster_values.unique())} unique values of {self.cluster_name}\"\n if self.nested_clusters:\n result += \" (nested)\\n\"\n else:\n result += \"\\n\"\n else:\n result += \"\\tCluster: None\\n\"\n # FPC\n if self.has_fpc:\n result += f\"\\tFPC: {self.fpc_name}\\n\"\n else:\n result += \"\\tFPC: None\\n\"\n # Weights\n if self.single_weight:\n result += (\n f\"\\tWeight: {self.weight_name}\\n\"\n f\"\\tDrop Unweighted: {self.drop_unweighted}\\n\"\n )\n elif self.multi_weight:\n result += (\n f\"\\tMultiple Weights: {len(set(self.weight_names.values())):,} \"\n f\"unique weights associated with {len(set(self.weight_names.keys())):,} variables\\n\"\n f\"\\tDrop Unweighted: {self.drop_unweighted}\\n\"\n )\n else:\n result += \"\\tWeights: None\\n\"\n # single cluster\n result += f\"\\tSingle Cluster ('Lonely PSU') Option: {self.single_cluster}\"\n\n result += (\n f\"\\n\\tSubsets: {self.subset_count:,} applied\"\n f\"\\n\\t\\tKeeping {self.subset_array.sum():,} of {len(self.subset_array):,} observations\"\n )\n\n return result\n\n def get_weights(\n self, regression_variable: str\n ) -> Tuple[bool, Optional[str], Optional[pd.Series]]:\n \"\"\"\n Return weight 
information for a specific regression variable, subset to match the data\n \"\"\"\n if self.single_weight:\n has_weights = True\n weight_name = self.weight_name\n weight_values = self.weight_values\n elif self.multi_weight:\n has_weights = True\n weight_name = self.weight_names.get(regression_variable, None)\n if weight_name is None:\n raise ValueError(\n f\"No weight found in the survey design for the '{regression_variable}' variable\"\n )\n else:\n weight_values = self.weight_values[weight_name]\n else:\n return False, None, None\n\n # Normalize weights before subsetting\n weight_values = weight_values.div(weight_values.mean())\n\n # Subset\n weight_values = weight_values.loc[self.subset_array]\n\n return has_weights, weight_name, weight_values\n\n def check_missing_weights(\n self, data: pd.DataFrame, regression_variable: str\n ) -> Tuple[Optional[str], Optional[pd.Series], Optional[str]]:\n \"\"\"\n Return:\n None, None, None if no weights are used\n weight_name, None, None if there are no missing weights\n weight_name, missing_weight_mask, warning if there are missing weights and 'drop_unweighted' is True\n Raise an error if there are missing weights and 'drop_unweighted' is False\n\n missing_weight_mask is True if the weight is missing\n \"\"\"\n # Get weight values\n has_weight, weight_name, weight_values = self.get_weights(regression_variable)\n if not has_weight:\n return None, None, None # No idx and no warning needed\n\n # Check if the survey design is missing weights when the variable value is not\n variable_na = data[regression_variable].isna()\n weight_na = weight_values.isna()\n missing_weight_mask = ~variable_na & weight_na\n\n if missing_weight_mask.sum() == 0:\n return weight_name, missing_weight_mask, None\n elif missing_weight_mask.sum() > 0 and self.drop_unweighted:\n # Warn, Drop observations with missing weights, and re-validate (for nonvarying covariates, for example)\n warning = (\n f\"Dropping {missing_weight_mask.sum():,} non-missing observation(s) due to missing weights\"\n f\" (f{weight_name})\"\n )\n weight_name += (\n f\" ({missing_weight_mask.sum()} observations are missing weights)\"\n )\n return weight_name, missing_weight_mask, warning\n elif missing_weight_mask.sum() > 0 and not self.drop_unweighted:\n # Get unique values\n values_with_missing_weight = data.loc[\n missing_weight_mask, regression_variable\n ]\n unique_missing = values_with_missing_weight.unique()\n unique_not_missing = data.loc[\n ~variable_na & ~weight_na, regression_variable\n ].unique()\n sometimes_missing = sorted(\n [str(v) for v in (set(unique_missing) & set(unique_not_missing))]\n )\n always_missing = sorted(\n [str(v) for v in (set(unique_missing) - set(unique_not_missing))]\n )\n\n # Build a detailed error string\n error = (\n f\"{missing_weight_mask.sum():,} observations are missing weights ({weight_name})\"\n f\" when the variable is not missing.\"\n )\n\n # Add more information to the error and raise it, skipping analysis of this variable\n if len(sometimes_missing) == 0:\n pass\n elif len(sometimes_missing) == 1:\n error += f\"\\n\\tOne value sometimes occurs in observations with missing weight: {sometimes_missing[0]}\"\n elif len(sometimes_missing) <= 5:\n error += (\n f\"\\n\\t{len(sometimes_missing)} values sometimes occur in observations with missing weight:\"\n f\" {', '.join(sometimes_missing)}\"\n )\n elif len(sometimes_missing) > 5:\n error += (\n f\"\\n\\t{len(sometimes_missing)} values sometimes occur in observations with missing weight:\"\n f\" {', 
'.join(sometimes_missing[:5])}, ...\"\n )\n # Log always missing values\n if len(always_missing) == 0:\n pass\n elif len(always_missing) == 1:\n error += (\n f\"\\n\\tOne value is only found in observations with missing weights: {always_missing[0]}.\"\n \" Should it be encoded as NaN?\"\n )\n elif len(always_missing) <= 5:\n error += (\n f\"\\n\\t{len(always_missing)} values are only found in observations with missing weights: \"\n f\"{', '.join(always_missing)}. Should they be encoded as NaN?\"\n )\n elif len(always_missing) > 5:\n error += (\n f\"\\n\\t{len(always_missing)} values are only found in observations with missing weights: \"\n f\"{', '.join(always_missing[:5])}, ... Should they be encoded as NaN?\"\n )\n raise ValueError(error)\n\n def validate(self, data: pd.DataFrame) -> Optional[str]:\n \"\"\"\n Validate that the survey design matches the data\n\n Parameters\n ----------\n data: pd.DataFrame\n Data being used with the survey design\n\n Returns\n -------\n error: str or None\n Any error message\n \"\"\"\n # Check to see if survey information or weights are included in the data\n msg = \" Survey design variables should not be included in the data.\"\n if self.has_strata:\n if self.strata_name in data.columns:\n return (\n f\"Strata variable ({self.strata_name}) found in the passed data.\"\n + msg\n )\n if self.has_cluster:\n if self.cluster_name in data.columns:\n return (\n f\"Cluster variable ({self.cluster_name}) found in the passed data.\"\n + msg\n )\n if self.has_fpc:\n if self.fpc_name in data.columns:\n return f\"FPC variable ({self.fpc_name}) found in the passed data.\" + msg\n if self.single_weight:\n if self.weight_name in data.columns:\n return (\n f\"Weight variable ({self.weight_name}) found in the passed data.\"\n + msg\n )\n if self.multi_weight:\n matched = set(self.weight_names.values()) & set(data.columns)\n if len(matched) == 1:\n return (\n f\"Weight variable ({list(matched)[0]}) found in the passed data.\"\n + msg\n )\n if len(matched) > 1:\n return (\n f\"{len(matched):,} Weight variables found in the passed data.\" + msg\n )\n # Validate that subsets apply to the data\n if self.subset_count > 0:\n try:\n data = data.loc[self.subset_array]\n except Exception as e:\n return f\"Error applying the subset to the provided data: {e}\"\n # Compare index values to the survey index (as included in self.cluster_values) after applying subsets to data\n missing_survey = ~data.index.isin(self.cluster_values.index)\n if any(missing_survey):\n return (\n f\"The survey design is missing information for {missing_survey.sum():,} rows in the data,\"\n f\" as matched by the index values. 
Here are the first few:\\n\"\n + str(data.loc[missing_survey].head())\n )\n return None\n\n def subset(self, bool_array: pd.Series) -> None:\n \"\"\"\n Subset the SurveyDesignSpec (in-place) to look only at a subpopulation.\n\n Example:\n design.subset(data['age'] > 18)\n \"\"\"\n # Confirm it is a boolean Series\n if type(bool_array) != pd.Series:\n raise ValueError(\n \"SurveyDesignSpec.subset takes a boolean pandas Series object\"\n )\n elif bool_array.dtype != bool:\n raise ValueError(\n \"SurveyDesignSpec.subset takes a boolean pandas Series object\"\n )\n\n # Try to subset, raising any indexing error\n try:\n # Update subset_array for tracking\n self.subset_array = self.subset_array & bool_array\n self.subset_count += 1\n except IndexingError:\n raise ValueError(\n \"The boolean array passed to `subset` could not be used:\"\n \" the index is incompatible with the survey design\"\n )\n\n def get_survey_design(self, regression_variable, complete_case_idx):\n \"\"\"\n Get a survey design based on the SurveyDesignSpec, but specific to the rv and data\n\n Parameters\n ----------\n regression_variable: str\n Name of the regression variable, used to match the weight if needed\n complete_case_idx: pd.Index\n Index of the data being analyzed. Either the same as the existing design arrays\n or smaller due to dropping rows with NA values during regression\n\n Returns\n -------\n sd: SurveyDesign\n\n \"\"\"\n # Get parameters using the built-in subset\n has_strata, strata_values = (\n self.has_strata,\n self.strata_values.loc[self.subset_array],\n )\n has_cluster, cluster_values = (\n self.has_cluster,\n self.cluster_values.loc[self.subset_array],\n )\n has_weights, weight_name, weight_values = self.get_weights(regression_variable)\n\n # Filter out any incomplete cases\n strata_values = strata_values.loc[complete_case_idx]\n cluster_values = cluster_values.loc[complete_case_idx]\n weight_values = weight_values.loc[complete_case_idx]\n\n # Initialize Survey Design\n sd = SurveyDesign(\n has_strata=has_strata,\n strat=strata_values,\n n_strat=self.n_strat,\n has_cluster=has_cluster,\n clust=cluster_values,\n n_clust=self.n_clust,\n has_weights=has_weights,\n weights=weight_values,\n has_fpc=self.has_fpc,\n fpc=self.fpc_values,\n single_cluster=self.single_cluster,\n clust_per_strat=self.clust_per_strat,\n strat_for_clust=self.strat_for_clust,\n )\n\n return sd\n\n\nclass SurveyDesign(object):\n \"\"\"\n Holds the same kind of data as SurveyDesignSpec, but specific to a single regression variable:\n - Only one weight value\n - Values subset to match complete cases in the data\n \"\"\"\n\n def __init__(\n self,\n has_strata,\n strat,\n n_strat,\n has_cluster,\n clust,\n n_clust,\n has_weights,\n weights,\n has_fpc,\n fpc,\n single_cluster,\n clust_per_strat,\n strat_for_clust,\n ):\n\n # Store values\n self.has_strata = has_strata\n self.strat = strat\n self.n_strat = n_strat\n self.has_cluster = has_cluster\n self.clust = clust\n self.n_clust = n_clust\n self.has_weights = has_weights\n self.weights = weights\n self.has_fpc = has_fpc\n self.fpc = fpc\n self.single_cluster = single_cluster\n self.clust_per_strat = clust_per_strat\n self.strat_for_clust = strat_for_clust\n\n if self.has_strata:\n self.n_strat = len(self.strat.unique())\n if self.has_cluster:\n self.n_clust = len(self.clust.unique())\n\n def __str__(self):\n \"\"\"\n The __str__ method for our data\n \"\"\"\n summary_list = [\n \"Number of observations: \",\n str(len(self.strat)),\n \"Sum of weights: \",\n str(self.weights.sum()),\n 
\"Number of strata: \",\n str(self.n_strat),\n \"Clusters per stratum: \",\n str(self.clust_per_strat),\n ]\n\n return \"\\n\".join(summary_list)\n\n def get_jackknife_rep_weights(self, dropped_clust):\n \"\"\"\n Computes 'delete 1' jackknife replicate weights\n\n Parameters\n ----------\n dropped_clust : string\n Which cluster to leave out when computing 'delete 1' jackknife replicate weights\n\n Returns\n -------\n w : ndarray\n Augmented weight\n \"\"\"\n # get stratum that the cluster belongs in\n s = self.strat_for_clust[dropped_clust]\n nh = self.clust_per_strat[s]\n w = self.weights.copy()\n # all weights within the stratum are modified\n w[self.strat == s] *= nh / float(nh - 1)\n # but if you're within the cluster to be removed, set as 0\n w[self.clust == dropped_clust] = 0\n return w\n\n def get_dof(self, X):\n \"\"\"\n Calculate degrees of freedom based on a subset of the design\n\n Parameters\n ----------\n X : pd.DataFrame\n Input data used in the calculation, possibly fewer rows than exist in the design\n\n Returns\n -------\n int\n Degrees of freedom\n \"\"\"\n # num of clusters minus num of strata minus (num of predictors - 1)\n if self.has_cluster and self.has_strata:\n return self.n_clust - self.n_strat - (X.shape[1] - 1)\n elif self.has_cluster and not self.has_strata:\n return self.n_clust - 1 - (X.shape[1] - 1)\n elif not self.has_cluster and self.has_strata:\n return X.shape[0] - self.n_strat - (X.shape[1] - 1)\n else:\n return X.shape[0] - (X.shape[1]) - 1\n", "import re\n\nimport pytest\nimport pandas as pd\n\nfrom clarite import modify\n\n\ndef test_make_binary(plantTraits, capfd):\n # Fail due to non-binary\n with pytest.raises(\n ValueError,\n match=re.escape(\n \"11 variable(s) did not have 2 unique values and couldn't be processed \"\n \"as a binary type: pdias, longindex, durflow, height, begflow, mycor, \"\n \"vegaer, vegsout, autopoll, insects, wind\"\n ),\n ):\n modify.make_binary(plantTraits)\n\n # Pass, selecting 5 columns known to be binary\n cols = [\"piq\", \"ros\", \"leafy\", \"winan\", \"suman\"]\n result = modify.make_binary(plantTraits, only=cols)\n out, err = capfd.readouterr()\n assert (\n out\n == \"================================================================================\\n\"\n \"Running make_binary\\n\"\n \"--------------------------------------------------------------------------------\\n\"\n \"================================================================================\\n\"\n \"Running make_binary\\n\"\n \"--------------------------------------------------------------------------------\\n\"\n \"Set 5 of 31 variable(s) as binary, each with 136 observations\\n\"\n \"================================================================================\\n\"\n )\n assert err == \"\"\n assert all(result[cols].dtypes == \"category\")\n\n\ndef test_make_categorical(plantTraits, capfd):\n \"\"\"Currently no validation for maximum unique values\"\"\"\n result = modify.make_categorical(plantTraits)\n out, err = capfd.readouterr()\n assert (\n out\n == \"================================================================================\\n\"\n \"Running make_categorical\\n\"\n \"--------------------------------------------------------------------------------\\n\"\n \"Set 31 of 31 variable(s) as categorical, each with 136 observations\\n\"\n \"================================================================================\\n\"\n )\n assert err == \"\"\n assert all(result.dtypes == \"category\")\n\n\ndef test_make_continuous(plantTraits, capfd):\n 
\"\"\"Currently no validation for minimum unique values\"\"\"\n result = modify.make_continuous(plantTraits)\n out, err = capfd.readouterr()\n assert (\n out\n == \"================================================================================\\n\"\n \"Running make_continuous\\n\"\n \"--------------------------------------------------------------------------------\\n\"\n \"Set 31 of 31 variable(s) as continuous, each with 136 observations\\n\"\n \"================================================================================\\n\"\n )\n assert err == \"\"\n assert all(result.dtypes != \"category\")\n\n\ndef test_merge(plantTraits):\n \"\"\"Merge the different parts of a dataframe and ensure they are merged back to the original\"\"\"\n df1 = plantTraits.loc[:, list(plantTraits)[:3]]\n df2 = plantTraits.loc[:, list(plantTraits)[3:6]]\n df3 = plantTraits.loc[:, list(plantTraits)[6:]]\n df = modify.merge_variables(df1, df2)\n df = modify.merge_variables(df, df3)\n assert all(df == plantTraits)\n\n\ndef test_colfilter_percent_zero(plantTraits, capfd):\n result = modify.colfilter_percent_zero(plantTraits)\n out, err = capfd.readouterr()\n assert (\n out\n == \"================================================================================\\n\"\n \"Running colfilter_percent_zero\\n\"\n \"--------------------------------------------------------------------------------\\n\"\n \"Testing 31 of 31 continuous variables\\n\"\n \"\\tRemoved 7 (22.58%) tested continuous variables which were equal to zero in at least 90.00% of non-NA observations.\\n\"\n \"================================================================================\\n\"\n )\n assert err == \"\"\n assert result.shape == (136, 24)\n\n\ndef test_colfilter_min_n(plantTraits, capfd):\n n = len(plantTraits)\n plantTraits[\"test\"] = [None] + [True] * 2 + [False] * (n - 3)\n plantTraits = modify.make_binary(data=plantTraits, only=[\"test\"])\n result = modify.colfilter_min_n(plantTraits, n=n)\n out, err = capfd.readouterr()\n assert (\n out\n == \"================================================================================\\n\"\n \"Running make_binary\\n\"\n \"--------------------------------------------------------------------------------\\n\"\n \"Set 1 of 32 variable(s) as binary, each with 136 observations\\n\"\n \"================================================================================\\n\"\n \"================================================================================\\n\"\n \"Running colfilter_min_n\\n\"\n \"--------------------------------------------------------------------------------\\n\"\n \"Testing 1 of 1 binary variables\\n\"\n \"\\tRemoved 1 (100.00%) tested binary variables which had less than 136 non-null values.\\n\"\n \"Testing 0 of 0 categorical variables\\n\"\n \"Testing 31 of 31 continuous variables\\n\"\n \"\\tRemoved 19 (61.29%) tested continuous variables which had less than 136 non-null values.\\n\"\n \"================================================================================\\n\"\n )\n assert err == \"\"\n assert result.shape == (136, 12)\n\n\ndef test_colfilter_min_cat_n(plantTraits, capfd):\n plantTraits[\"test\"] = (\n [\"cat1\"] * 2 + [\"cat2\"] * 6 + [\"cat3\"] * (len(plantTraits) - 8)\n )\n plantTraits[\"test2\"] = (\n [\"cat1\"] * 3 + [\"cat2\"] * 6 + [\"cat3\"] * (len(plantTraits) - 9)\n )\n plantTraits = modify.make_categorical(data=plantTraits, only=[\"test\", \"test2\"])\n result = modify.colfilter_min_cat_n(plantTraits, n=3)\n out, err = capfd.readouterr()\n assert (\n out\n == 
\"================================================================================\\n\"\n \"Running make_categorical\\n\"\n \"--------------------------------------------------------------------------------\\n\"\n \"Set 2 of 33 variable(s) as categorical, each with 136 observations\\n\"\n \"================================================================================\\n\"\n \"================================================================================\\n\"\n \"Running colfilter_min_cat_n\\n\"\n \"--------------------------------------------------------------------------------\\n\"\n \"Testing 0 of 0 binary variables\\n\"\n \"Testing 2 of 2 categorical variables\\n\"\n \"\\tRemoved 1 (50.00%) tested categorical variables which had a category with less than 3 values.\\n\"\n \"================================================================================\\n\"\n )\n assert err == \"\"\n assert result.shape == (136, 32)\n\n\ndef test_rowfilter_incomplete_obs(plantTraits, capfd):\n col_names = list(plantTraits)[:4]\n plantTraits.iloc[0, 0] = None\n plantTraits.iloc[2, 5:7] = None\n result = modify.rowfilter_incomplete_obs(plantTraits, only=col_names)\n\n out, err = capfd.readouterr()\n assert (\n out\n == \"================================================================================\\n\"\n \"Running rowfilter_incomplete_obs\\n\"\n \"--------------------------------------------------------------------------------\\n\"\n \"Removed 45 of 136 observations (33.09%) due to NA values in any of 4 variables\\n\"\n \"================================================================================\\n\"\n )\n assert err == \"\"\n assert result.shape == (91, 31)\n\n\ndef test_recode_values(plantTraits):\n # TODO\n return\n\n\ndef test_remove_outliers_gaussian(plantTraits):\n # Gaussian\n result = modify.remove_outliers(plantTraits, method=\"gaussian\", skip=[\"durflow\"])\n assert result.isna().sum()[\"longindex\"] == plantTraits.isna().sum()[\"longindex\"]\n assert result.isna().sum()[\"durflow\"] == plantTraits.isna().sum()[\"durflow\"]\n assert result.isna().sum()[\"vegaer\"] == plantTraits.isna().sum()[\"vegaer\"] + 12\n return\n\n\ndef test_remove_outliers_iqr(plantTraits):\n # Inter-Quartile Range\n result = modify.remove_outliers(\n plantTraits, method=\"iqr\", cutoff=1.5, skip=[\"durflow\"]\n )\n assert result.isna().sum()[\"longindex\"] == plantTraits.isna().sum()[\"longindex\"]\n assert result.isna().sum()[\"durflow\"] == plantTraits.isna().sum()[\"durflow\"]\n assert result.isna().sum()[\"vegaer\"] == plantTraits.isna().sum()[\"vegaer\"] + 17\n return\n\n\ndef test_categorize(plantTraits, capfd):\n # TODO\n modify.categorize(plantTraits)\n return\n\n\ndef test_transform(plantTraits, capfd):\n \"\"\"Test a log10 transform\"\"\"\n df = pd.DataFrame(\n {\"a\": [10, 100, 1000], \"b\": [100, 1000, 10000], \"c\": [True, False, True]}\n )\n\n result = modify.transform(df, \"log10\", skip=[\"c\"])\n\n assert all(result[\"a\"] == [1, 2, 3])\n assert all(result[\"b\"] == [2, 3, 4])\n assert all(result[\"c\"] == [True, False, True])\n return\n\n\ndef test_categorize_many_string():\n \"\"\"\n Ensure an error isn't thrown when attempting to make a string column continuous\n \"\"\"\n df = pd.DataFrame(\n {\"a\": range(100), \"b\": range(100), \"c\": [str(n) + \"ABC\" for n in range(100)]}\n )\n categorized = modify.categorize(df)\n # Dtypes and data shouldn't have actually changed. 
'c' will remain an 'unknown' type.\n assert (categorized.dtypes == df.dtypes).all()\n assert (categorized == df).all().all()\n return\n" ]
[ [ "pandas.Series", "pandas.merge", "pandas.concat" ], [ "pandas.DataFrame" ] ]
zeuseyera/pysc2
[ "df837baa43afd486d9f70a83c64bf12ff1962781" ]
[ "pysc2/lib/colors.py" ]
[ "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A basic Color class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport math\nimport random\n\nfrom future.builtins import range # pylint: disable=redefined-builtin\nimport numpy\n\nfrom pysc2.lib import static_data\n\n\nclass Color(collections.namedtuple(\"Color\", [\"r\", \"g\", \"b\"])):\n \"\"\"A basic Color class.\"\"\"\n __slots__ = ()\n\n def set(self, r=None, g=None, b=None):\n return Color(r or self.r, b or self.b, g or self.g)\n\n def round(self):\n return Color(int(round(self.r)), int(round(self.g)), int(round(self.b)))\n\n def floor(self):\n return Color(int(math.floor(self.r)), int(math.floor(self.g)),\n int(math.floor(self.b)))\n\n def ceil(self):\n return Color(int(math.ceil(self.r)), int(math.ceil(self.g)),\n int(math.ceil(self.b)))\n\n def __str__(self):\n return \"%d,%d,%d\" % self\n\n def __add__(self, o):\n return Color(self.r + o.r, self.g + o.g, self.b + o.b)\n\n def __sub__(self, o):\n return Color(self.r - o.r, self.g - o.g, self.b - o.b)\n\n def __mul__(self, val):\n return Color(self.r * val, self.g * val, self.b * val)\n\n def __truediv__(self, val):\n return Color(self.r / val, self.g / val, self.b / val)\n\n def __floordiv__(self, val):\n return Color(self.r // val, self.g // val, self.b // val)\n\n __div__ = __truediv__\n\nblack = Color(0, 0, 0)\nwhite = Color(255, 255, 255)\nred = Color(255, 0, 0)\ngreen = Color(0, 255, 0)\nblue = Color(0, 0, 255)\ncyan = Color(0, 255, 255)\nyellow = Color(255, 255, 0)\npurple = Color(255, 0, 255)\n\n\ndef smooth_hue_palette(scale):\n \"\"\"Takes an array of ints and returns a corresponding colored rgb array.\"\"\"\n # http://en.wikipedia.org/wiki/HSL_and_HSV#From_HSL\n # Based on http://stackoverflow.com/a/17382854 , with simplifications and\n # optimizations. Assumes S=1, L=0.5, meaning C=1 and m=0.\n # 0 stays black, everything else moves into a hue.\n\n # Some initial values and scaling. 
Check wikipedia for variable meanings.\n array = numpy.arange(scale)\n h = array * (6 / scale) # range of [0,6)\n x = 255 * (1 - numpy.absolute(numpy.mod(h, 2) - 1))\n c = 255\n\n # Initialize outputs to zero/black\n out = numpy.zeros(h.shape + (3,), float)\n r = out[..., 0]\n g = out[..., 1]\n b = out[..., 2]\n\n mask = (0 < h) & (h < 1)\n r[mask] = c\n g[mask] = x[mask]\n\n mask = (1 <= h) & (h < 2)\n r[mask] = x[mask]\n g[mask] = c\n\n mask = (2 <= h) & (h < 3)\n g[mask] = c\n b[mask] = x[mask]\n\n mask = (3 <= h) & (h < 4)\n g[mask] = x[mask]\n b[mask] = c\n\n mask = (4 <= h) & (h < 5)\n r[mask] = x[mask]\n b[mask] = c\n\n mask = 5 <= h\n r[mask] = c\n b[mask] = x[mask]\n\n return out\n\n\ndef shuffled_hue(scale):\n palette = list(smooth_hue_palette(scale))\n random.shuffle(palette, lambda: 0.5) # Return a fixed shuffle\n return numpy.array(palette)\n\n\ndef piece_wise_linear(scale, points):\n \"\"\"Create a palette that is piece-wise linear given some colors at points.\"\"\"\n assert len(points) >= 2\n assert points[0][0] == 0\n assert points[-1][0] == 1\n assert all(i < j for i, j in zip(points[:-1], points[1:]))\n out = numpy.zeros((scale, 3))\n p1, c1 = points[0]\n p2, c2 = points[1]\n next_pt = 2\n\n for i in range(1, scale):\n v = i / scale\n if v > p2:\n p1, c1 = p2, c2\n p2, c2 = points[next_pt]\n next_pt += 1\n frac = (v - p1) / (p2 - p1)\n out[i, :] = c1 * (1 - frac) + c2 * frac\n return out\n\n\ndef winter(scale):\n return piece_wise_linear(scale, [(0, Color(0, 0.5, 0.4) * 255),\n (1, Color(1, 1, 0.4) * 255)])\n\n\ndef hot(scale):\n return piece_wise_linear(scale, [(0, Color(0.5, 0, 0) * 255),\n (0.2, Color(1, 0, 0) * 255),\n (0.6, Color(1, 1, 0) * 255),\n (1, Color(1, 1, 1) * 255)])\n\n\n# Palette used to color player_relative features.\nPLAYER_RELATIVE_PALETTE = numpy.array([\n black, # Background.\n Color(0, 142, 0), # Self. (Green).\n yellow, # Ally.\n Color(129, 166, 196), # Neutral. (Cyan.)\n Color(113, 25, 34), # Enemy. 
(Red).\n])\n\nPLAYER_ABSOLUTE_PALETTE = numpy.array([\n black, # Background\n Color(0, 142, 0), # 1: Green\n Color(113, 25, 34), # 2: Red\n Color(223, 215, 67), # 3: Yellow\n Color(66, 26, 121), # 4: Purple\n Color(222, 144, 50), # 5: Orange\n Color(46, 72, 237), # 6: Blue\n Color(207, 111, 176), # 7: Pink\n Color(189, 251, 157), # 8: Light green\n white * 0.1, # 9: Does the game ever have more than 8 players?\n white * 0.1, # 10: Does the game ever have more than 8 players?\n white * 0.1, # 11: Does the game ever have more than 8 players?\n white * 0.1, # 12: Does the game ever have more than 8 players?\n white * 0.1, # 13: Does the game ever have more than 8 players?\n white * 0.1, # 14: Does the game ever have more than 8 players?\n white * 0.1, # 15: Does the game ever have more than 8 players?\n Color(129, 166, 196), # 16 Neutral: Cyan\n])\n\nVISIBILITY_PALETTE = numpy.array([\n black, # Hidden\n white * 0.25, # Fogged\n white * 0.6, # Visible\n])\n\nCAMERA_PALETTE = numpy.array([black, white * 0.6])\nCREEP_PALETTE = numpy.array([black, purple * 0.4])\nPOWER_PALETTE = numpy.array([black, cyan * 0.7])\nSELECTED_PALETTE = numpy.array([black, green * 0.7])\n\n\ndef unit_type(scale=None):\n \"\"\"Returns a palette that maps unit types to rgb colors.\"\"\"\n # Can specify a scale to match the api or to accept unknown unit types.\n palette_size = scale or max(static_data.UNIT_TYPES) + 1\n palette = shuffled_hue(palette_size)\n assert len(static_data.UNIT_TYPES) <= len(distinct_colors)\n for i, v in enumerate(static_data.UNIT_TYPES):\n palette[v] = distinct_colors[i]\n return palette\n\n\neffects = numpy.array([\n [0, 0, 0],\n [72, 173, 207],\n [203, 76, 49],\n [122, 98, 209],\n [109, 183, 67],\n [192, 80, 181],\n [86, 185, 138],\n [211, 63, 115],\n [81, 128, 60],\n [182, 135, 208],\n [182, 174, 73],\n [95, 123, 196],\n [220, 146, 71],\n [187, 102, 147],\n [138, 109, 48],\n [197, 103, 99],\n])\n\n\n# Generated with http://tools.medialab.sciences-po.fr/iwanthue/\n# 255 colors: H: 0-360, C: 0-100, L: 35-100; then shuffled.\ndistinct_colors = numpy.array([\n [85, 238, 255],\n [79, 84, 36],\n [227, 117, 255],\n [255, 86, 137],\n [210, 0, 141],\n [152, 51, 0],\n [255, 233, 174],\n [125, 149, 0],\n [198, 0, 57],\n [169, 26, 0],\n [0, 84, 234],\n [215, 255, 144],\n [0, 108, 123],\n [1, 150, 136],\n [185, 88, 255],\n [255, 49, 42],\n [137, 124, 255],\n [244, 84, 255],\n [231, 191, 255],\n [255, 171, 174],\n [229, 255, 231],\n [172, 0, 205],\n [198, 20, 0],\n [212, 159, 0],\n [0, 98, 46],\n [176, 102, 0],\n [203, 175, 255],\n [133, 49, 102],\n [195, 255, 124],\n [1, 224, 129],\n [151, 39, 51],\n [49, 81, 135],\n [249, 176, 0],\n [255, 203, 125],\n [0, 169, 192],\n [1, 59, 221],\n [165, 194, 255],\n [0, 164, 74],\n [99, 106, 0],\n [217, 200, 255],\n [255, 134, 79],\n [255, 150, 143],\n [147, 25, 115],\n [150, 0, 154],\n [122, 86, 0],\n [2, 143, 194],\n [255, 29, 80],\n [149, 32, 89],\n [1, 150, 227],\n [255, 153, 66],\n [40, 88, 88],\n [0, 125, 211],\n [0, 180, 84],\n [60, 53, 221],\n [219, 218, 255],\n [183, 103, 255],\n [0, 90, 160],\n [138, 103, 255],\n [208, 0, 94],\n [0, 189, 237],\n [90, 77, 91],\n [255, 83, 45],\n [121, 66, 51],\n [173, 254, 255],\n [130, 58, 66],\n [237, 117, 0],\n [2, 172, 234],\n [85, 81, 59],\n [78, 173, 255],\n [255, 147, 174],\n [255, 50, 155],\n [255, 170, 53],\n [0, 112, 242],\n [224, 79, 0],\n [1, 122, 129],\n [31, 210, 24],\n [127, 63, 31],\n [240, 255, 76],\n [112, 72, 31],\n [255, 93, 24],\n [117, 67, 69],\n [74, 84, 72],\n [253, 255, 222],\n [1, 253, 168],\n [255, 
93, 89],\n [181, 0, 117],\n [58, 120, 0],\n [1, 83, 191],\n [141, 110, 0],\n [188, 164, 0],\n [180, 226, 0],\n [66, 83, 95],\n [1, 135, 28],\n [169, 255, 176],\n [16, 92, 75],\n [158, 26, 36],\n [255, 130, 253],\n [0, 199, 138],\n [229, 255, 107],\n [255, 104, 109],\n [93, 255, 235],\n [35, 91, 58],\n [0, 161, 255],\n [1, 85, 174],\n [2, 211, 246],\n [0, 122, 97],\n [156, 255, 140],\n [111, 196, 0],\n [0, 143, 2],\n [160, 3, 81],\n [255, 244, 154],\n [255, 66, 15],\n [255, 175, 114],\n [133, 225, 0],\n [255, 176, 98],\n [123, 70, 0],\n [120, 22, 187],\n [1, 199, 179],\n [236, 0, 13],\n [213, 151, 255],\n [160, 105, 0],\n [255, 114, 141],\n [255, 118, 193],\n [67, 138, 0],\n [114, 72, 5],\n [114, 50, 154],\n [167, 127, 0],\n [128, 65, 239],\n [101, 136, 255],\n [177, 209, 255],\n [143, 27, 211],\n [143, 0, 165],\n [1, 116, 178],\n [255, 247, 199],\n [255, 241, 244],\n [255, 202, 88],\n [237, 255, 151],\n [196, 1, 166],\n [199, 255, 199],\n [255, 185, 205],\n [1, 79, 210],\n [138, 53, 44],\n [250, 255, 249],\n [255, 233, 100],\n [255, 151, 123],\n [194, 76, 0],\n [72, 80, 106],\n [255, 106, 206],\n [132, 44, 125],\n [255, 109, 68],\n [98, 143, 0],\n [0, 156, 162],\n [255, 169, 218],\n [255, 219, 68],\n [79, 255, 177],\n [171, 85, 0],\n [184, 120, 255],\n [237, 255, 199],\n [214, 0, 80],\n [168, 213, 0],\n [98, 78, 38],\n [138, 54, 32],\n [106, 69, 94],\n [129, 43, 136],\n [116, 60, 115],\n [167, 252, 31],\n [255, 194, 92],\n [224, 233, 255],\n [0, 132, 69],\n [255, 247, 50],\n [255, 200, 216],\n [144, 145, 0],\n [97, 215, 255],\n [1, 212, 166],\n [254, 166, 255],\n [255, 29, 131],\n [84, 85, 0],\n [93, 79, 54],\n [200, 255, 160],\n [42, 92, 16],\n [1, 214, 106],\n [137, 207, 255],\n [183, 191, 0],\n [255, 132, 225],\n [210, 255, 106],\n [36, 248, 255],\n [1, 193, 196],\n [136, 255, 111],\n [0, 82, 241],\n [124, 169, 255],\n [0, 141, 237],\n [171, 255, 224],\n [255, 246, 134],\n [0, 92, 100],\n [145, 255, 170],\n [255, 172, 77],\n [0, 88, 119],\n [255, 194, 175],\n [0, 98, 21],\n [192, 195, 255],\n [61, 97, 0],\n [150, 255, 203],\n [71, 53, 202],\n [216, 67, 246],\n [120, 255, 208],\n [88, 82, 13],\n [210, 0, 115],\n [189, 119, 0],\n [255, 171, 157],\n [215, 171, 0],\n [238, 104, 0],\n [115, 104, 0],\n [160, 229, 255],\n [0, 166, 116],\n [0, 127, 147],\n [222, 1, 27],\n [85, 57, 181],\n [255, 178, 148],\n [100, 75, 70],\n [255, 81, 106],\n [39, 240, 75],\n [247, 0, 54],\n [27, 69, 189],\n [77, 146, 255],\n [255, 66, 206],\n [242, 0, 174],\n [255, 217, 216],\n [161, 255, 244],\n [159, 20, 58],\n [176, 143, 255],\n [255, 161, 39],\n [0, 214, 199],\n [163, 93, 255],\n [88, 68, 142],\n [131, 122, 0],\n [206, 0, 46],\n [224, 47, 230],\n [51, 89, 69],\n [50, 90, 41],\n [211, 227, 0],\n [255, 195, 238],\n [176, 255, 134],\n [196, 247, 255],\n [48, 78, 147],\n [79, 68, 156],\n [1, 105, 200],\n [255, 117, 230],\n [2, 225, 235],\n [255, 72, 230],\n [1, 132, 97],\n [255, 213, 155],\n [151, 33, 73],\n [1, 185, 30],\n [255, 159, 221],\n [0, 141, 86],\n])\n" ]
[ [ "numpy.array", "numpy.arange", "numpy.zeros", "numpy.mod" ] ]
JiaminRen/RandWireNN
[ "8c729f62be049bc20db4f27cafa6866ba4744296" ]
[ "model.py" ]
[ "import torch.nn as nn\nfrom utils import Node, get_graph_info, build_graph, save_graph, load_graph\nimport torch\nimport math\nimport os\n\n\n\nclass depthwise_separable_conv_3x3(nn.Module):\n def __init__(self, nin, nout, stride):\n super(depthwise_separable_conv_3x3, self).__init__()\n self.depthwise = nn.Conv2d(nin, nin, kernel_size=3, stride=stride, padding=1, groups=nin)\n self.pointwise = nn.Conv2d(nin, nout, kernel_size=1)\n\n def forward(self, x):\n out = self.depthwise(x)\n out = self.pointwise(out)\n return out\n\n\nclass Triplet_unit(nn.Module):\n def __init__(self, inplanes, outplanes, stride=1):\n super(Triplet_unit, self).__init__()\n self.relu = nn.ReLU()\n self.conv = depthwise_separable_conv_3x3(inplanes, outplanes, stride)\n self.bn = nn.BatchNorm2d(outplanes)\n\n def forward(self, x):\n out = self.relu(x)\n out = self.conv(out)\n out = self.bn(out)\n return out\n\n\nclass Node_OP(nn.Module):\n def __init__(self, Node, inplanes, outplanes):\n super(Node_OP, self).__init__()\n self.is_input_node = Node.type == 0\n self.input_nums = len(Node.inputs)\n if self.input_nums > 1:\n self.mean_weight = nn.Parameter(torch.ones(self.input_nums))\n self.sigmoid = nn.Sigmoid()\n if self.is_input_node:\n self.conv = Triplet_unit(inplanes, outplanes, stride=2)\n else:\n self.conv = Triplet_unit(outplanes, outplanes, stride=1)\n\n def forward(self, *input):\n if self.input_nums > 1:\n out = self.sigmoid(self.mean_weight[0]) * input[0]\n for i in range(1, self.input_nums):\n out = out + self.sigmoid(self.mean_weight[i]) * input[i]\n else:\n out = input[0]\n out = self.conv(out)\n return out\n\n\nclass StageBlock(nn.Module):\n def __init__(self, graph, inplanes, outplanes):\n super(StageBlock, self).__init__()\n self.nodes, self.input_nodes, self.output_nodes = get_graph_info(graph)\n self.nodeop = nn.ModuleList()\n for node in self.nodes:\n self.nodeop.append(Node_OP(node, inplanes, outplanes))\n\n def forward(self, x):\n results = {}\n for id in self.input_nodes:\n results[id] = self.nodeop[id](x)\n for id, node in enumerate(self.nodes):\n if id not in self.input_nodes:\n results[id] = self.nodeop[id](*[results[_id] for _id in node.inputs])\n result = results[self.output_nodes[0]]\n for idx, id in enumerate(self.output_nodes):\n if idx > 0:\n result = result + results[id]\n result = result / len(self.output_nodes)\n return result\n\nclass CNN(nn.Module):\n def __init__(self, args, num_classes=1000):\n super(CNN, self).__init__()\n self.conv1 = depthwise_separable_conv_3x3(3, args.channels // 2, 2)\n self.bn1 = nn.BatchNorm2d(args.channels // 2)\n if args.net_type == 'small':\n self.conv2 = Triplet_unit(args.channels // 2, args.channels, 2)\n if args.resume:\n graph = load_graph(os.path.join(args.model_dir, 'conv3.yaml'))\n else:\n graph = build_graph(args.nodes, args)\n save_graph(graph, os.path.join(args.model_dir, 'conv3.yaml'))\n self.conv3 = StageBlock(graph, args.channels, args.channels)\n if args.resume:\n graph = load_graph(os.path.join(args.model_dir, 'conv4.yaml'))\n else:\n graph = build_graph(args.nodes, args)\n save_graph(graph, os.path.join(args.model_dir, 'conv4.yaml'))\n self.conv4 = StageBlock(graph, args.channels, args.channels *2)\n if args.resume:\n graph = load_graph(os.path.join(args.model_dir, 'conv5.yaml'))\n else:\n graph = build_graph(args.nodes, args)\n save_graph(graph, os.path.join(args.model_dir, 'conv5.yaml'))\n self.conv5 = StageBlock(graph, args.channels * 2, args.channels * 4)\n self.relu = nn.ReLU()\n self.conv = nn.Conv2d(args.channels * 4, 1280, 
kernel_size=1)\n self.bn2 = nn.BatchNorm2d(1280)\n elif args.net_type == 'regular':\n if args.resume:\n graph = load_graph(os.path.join(args.model_dir, 'conv2.yaml'))\n else:\n graph = build_graph(args.nodes // 2, args)\n save_graph(graph, os.path.join(args.model_dir, 'conv2.yaml'))\n self.conv2 = StageBlock(graph, args.channels // 2, args.channels)\n if args.resume:\n graph = load_graph(os.path.join(args.model_dir, 'conv3.yaml'))\n else:\n graph = build_graph(args.nodes, args)\n save_graph(graph, os.path.join(args.model_dir, 'conv3.yaml'))\n self.conv3 = StageBlock(graph, args.channels, args.channels * 2)\n if args.resume:\n graph = load_graph(os.path.join(args.model_dir, 'conv4.yaml'))\n else:\n graph = build_graph(args.nodes, args)\n save_graph(graph, os.path.join(args.model_dir, 'conv4.yaml'))\n self.conv4 = StageBlock(graph, args.channels * 2, args.channels * 4)\n if args.resume:\n graph = load_graph(os.path.join(args.model_dir, 'conv5.yaml'))\n else:\n graph = build_graph(args.nodes, args)\n save_graph(graph, os.path.join(args.model_dir, 'conv5.yaml'))\n self.conv5 = StageBlock(graph, args.channels * 4, args.channels * 8)\n self.relu = nn.ReLU()\n self.conv = nn.Conv2d(args.channels * 8, 1280, kernel_size=1)\n self.bn2 = nn.BatchNorm2d(1280)\n self.avgpool = nn.AvgPool2d(7, stride=1)\n self.fc = nn.Linear(1280, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.conv5(x)\n x = self.relu(x)\n x = self.conv(x)\n x = self.bn2(x)\n x = self.relu(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\n" ]
[ [ "torch.nn.Linear", "torch.nn.ModuleList", "torch.nn.Sigmoid", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.ones", "torch.nn.ReLU", "torch.nn.Conv2d" ] ]
aksakalli/incubator-superset
[ "e21a354b3b560465a866c25dd687fbeef73eee31" ]
[ "tests/utils_tests.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# isort:skip_file\nimport unittest\nimport uuid\nfrom datetime import date, datetime, time, timedelta\nfrom decimal import Decimal\nimport hashlib\nimport json\nimport os\nimport re\nfrom unittest.mock import Mock, patch\n\nimport numpy\nfrom flask import Flask, g\nimport marshmallow\nfrom sqlalchemy.exc import ArgumentError\n\nimport tests.test_app\nfrom superset import app, db, security_manager\nfrom superset.exceptions import CertificateException, SupersetException\nfrom superset.models.core import Database, Log\nfrom superset.models.dashboard import Dashboard\nfrom superset.models.slice import Slice\nfrom superset.utils.core import (\n base_json_conv,\n cast_to_num,\n convert_legacy_filters_into_adhoc,\n create_ssl_cert_file,\n format_timedelta,\n get_form_data_token,\n get_iterable,\n get_email_address_list,\n get_or_create_db,\n get_since_until,\n get_stacktrace,\n json_int_dttm_ser,\n json_iso_dttm_ser,\n JSONEncodedDict,\n memoized,\n merge_extra_filters,\n merge_request_params,\n parse_ssl_cert,\n parse_human_timedelta,\n parse_js_uri_path_item,\n parse_past_timedelta,\n split,\n TimeRangeEndpoint,\n validate_json,\n zlib_compress,\n zlib_decompress,\n)\nfrom superset.utils import schema\nfrom superset.views.utils import (\n build_extra_filters,\n get_form_data,\n get_time_range_endpoints,\n)\nfrom tests.base_tests import SupersetTestCase\n\nfrom .fixtures.certificates import ssl_certificate\n\n\ndef mock_parse_human_datetime(s):\n if s == \"now\":\n return datetime(2016, 11, 7, 9, 30, 10)\n elif s == \"today\":\n return datetime(2016, 11, 7)\n elif s == \"yesterday\":\n return datetime(2016, 11, 6)\n elif s == \"tomorrow\":\n return datetime(2016, 11, 8)\n elif s == \"Last year\":\n return datetime(2015, 11, 7)\n elif s == \"Last week\":\n return datetime(2015, 10, 31)\n elif s == \"Last 5 months\":\n return datetime(2016, 6, 7)\n elif s == \"Next 5 months\":\n return datetime(2017, 4, 7)\n elif s in [\"5 days\", \"5 days ago\"]:\n return datetime(2016, 11, 2)\n elif s == \"2018-01-01T00:00:00\":\n return datetime(2018, 1, 1)\n elif s == \"2018-12-31T23:59:59\":\n return datetime(2018, 12, 31, 23, 59, 59)\n\n\ndef mock_to_adhoc(filt, expressionType=\"SIMPLE\", clause=\"where\"):\n result = {\"clause\": clause.upper(), \"expressionType\": expressionType}\n\n if expressionType == \"SIMPLE\":\n result.update(\n {\"comparator\": filt[\"val\"], \"operator\": filt[\"op\"], \"subject\": filt[\"col\"]}\n )\n elif expressionType == \"SQL\":\n result.update({\"sqlExpression\": filt[clause]})\n\n return result\n\n\nclass TestUtils(SupersetTestCase):\n def test_json_int_dttm_ser(self):\n dttm = datetime(2020, 1, 1)\n ts = 1577836800000.0\n assert json_int_dttm_ser(dttm) == ts\n assert 
json_int_dttm_ser(date(2020, 1, 1)) == ts\n assert json_int_dttm_ser(datetime(1970, 1, 1)) == 0\n assert json_int_dttm_ser(date(1970, 1, 1)) == 0\n assert json_int_dttm_ser(dttm + timedelta(milliseconds=1)) == (ts + 1)\n\n with self.assertRaises(TypeError):\n json_int_dttm_ser(\"this is not a date\")\n\n def test_json_iso_dttm_ser(self):\n dttm = datetime(2020, 1, 1)\n dt = date(2020, 1, 1)\n t = time()\n assert json_iso_dttm_ser(dttm) == dttm.isoformat()\n assert json_iso_dttm_ser(dt) == dt.isoformat()\n assert json_iso_dttm_ser(t) == t.isoformat()\n\n with self.assertRaises(TypeError):\n json_iso_dttm_ser(\"this is not a date\")\n\n def test_base_json_conv(self):\n assert isinstance(base_json_conv(numpy.bool_(1)), bool) is True\n assert isinstance(base_json_conv(numpy.int64(1)), int) is True\n assert isinstance(base_json_conv(numpy.array([1, 2, 3])), list) is True\n assert isinstance(base_json_conv(set([1])), list) is True\n assert isinstance(base_json_conv(Decimal(\"1.0\")), float) is True\n assert isinstance(base_json_conv(uuid.uuid4()), str) is True\n assert isinstance(base_json_conv(timedelta(0)), str) is True\n\n @patch(\"superset.utils.core.datetime\")\n def test_parse_human_timedelta(self, mock_datetime):\n mock_datetime.now.return_value = datetime(2019, 4, 1)\n mock_datetime.side_effect = lambda *args, **kw: datetime(*args, **kw)\n self.assertEqual(parse_human_timedelta(\"now\"), timedelta(0))\n self.assertEqual(parse_human_timedelta(\"1 year\"), timedelta(366))\n self.assertEqual(parse_human_timedelta(\"-1 year\"), timedelta(-365))\n self.assertEqual(parse_human_timedelta(None), timedelta(0))\n\n @patch(\"superset.utils.core.datetime\")\n def test_parse_past_timedelta(self, mock_datetime):\n mock_datetime.now.return_value = datetime(2019, 4, 1)\n mock_datetime.side_effect = lambda *args, **kw: datetime(*args, **kw)\n self.assertEqual(parse_past_timedelta(\"1 year\"), timedelta(365))\n self.assertEqual(parse_past_timedelta(\"-1 year\"), timedelta(365))\n self.assertEqual(parse_past_timedelta(\"52 weeks\"), timedelta(364))\n self.assertEqual(parse_past_timedelta(\"1 month\"), timedelta(31))\n\n def test_zlib_compression(self):\n json_str = '{\"test\": 1}'\n blob = zlib_compress(json_str)\n got_str = zlib_decompress(blob)\n self.assertEqual(json_str, got_str)\n\n @patch(\"superset.utils.core.to_adhoc\", mock_to_adhoc)\n def test_merge_extra_filters(self):\n # does nothing if no extra filters\n form_data = {\"A\": 1, \"B\": 2, \"c\": \"test\"}\n expected = {\"A\": 1, \"B\": 2, \"c\": \"test\"}\n merge_extra_filters(form_data)\n self.assertEqual(form_data, expected)\n # empty extra_filters\n form_data = {\"A\": 1, \"B\": 2, \"c\": \"test\", \"extra_filters\": []}\n expected = {\"A\": 1, \"B\": 2, \"c\": \"test\", \"adhoc_filters\": []}\n merge_extra_filters(form_data)\n self.assertEqual(form_data, expected)\n # copy over extra filters into empty filters\n form_data = {\n \"extra_filters\": [\n {\"col\": \"a\", \"op\": \"in\", \"val\": \"someval\"},\n {\"col\": \"B\", \"op\": \"==\", \"val\": [\"c1\", \"c2\"]},\n ]\n }\n expected = {\n \"adhoc_filters\": [\n {\n \"clause\": \"WHERE\",\n \"comparator\": \"someval\",\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": \"a\",\n },\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"c1\", \"c2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"==\",\n \"subject\": \"B\",\n },\n ]\n }\n merge_extra_filters(form_data)\n self.assertEqual(form_data, expected)\n # adds extra filters to existing filters\n form_data = {\n 
\"extra_filters\": [\n {\"col\": \"a\", \"op\": \"in\", \"val\": \"someval\"},\n {\"col\": \"B\", \"op\": \"==\", \"val\": [\"c1\", \"c2\"]},\n ],\n \"adhoc_filters\": [\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"G1\", \"g2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"!=\",\n \"subject\": \"D\",\n }\n ],\n }\n expected = {\n \"adhoc_filters\": [\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"G1\", \"g2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"!=\",\n \"subject\": \"D\",\n },\n {\n \"clause\": \"WHERE\",\n \"comparator\": \"someval\",\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": \"a\",\n },\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"c1\", \"c2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"==\",\n \"subject\": \"B\",\n },\n ]\n }\n merge_extra_filters(form_data)\n self.assertEqual(form_data, expected)\n # adds extra filters to existing filters and sets time options\n form_data = {\n \"extra_filters\": [\n {\"col\": \"__time_range\", \"op\": \"in\", \"val\": \"1 year ago :\"},\n {\"col\": \"__time_col\", \"op\": \"in\", \"val\": \"birth_year\"},\n {\"col\": \"__time_grain\", \"op\": \"in\", \"val\": \"years\"},\n {\"col\": \"A\", \"op\": \"like\", \"val\": \"hello\"},\n {\"col\": \"__time_origin\", \"op\": \"in\", \"val\": \"now\"},\n {\"col\": \"__granularity\", \"op\": \"in\", \"val\": \"90 seconds\"},\n ]\n }\n expected = {\n \"adhoc_filters\": [\n {\n \"clause\": \"WHERE\",\n \"comparator\": \"hello\",\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"like\",\n \"subject\": \"A\",\n }\n ],\n \"time_range\": \"1 year ago :\",\n \"granularity_sqla\": \"birth_year\",\n \"time_grain_sqla\": \"years\",\n \"granularity\": \"90 seconds\",\n \"druid_time_origin\": \"now\",\n }\n merge_extra_filters(form_data)\n self.assertEqual(form_data, expected)\n\n @patch(\"superset.utils.core.to_adhoc\", mock_to_adhoc)\n def test_merge_extra_filters_ignores_empty_filters(self):\n form_data = {\n \"extra_filters\": [\n {\"col\": \"a\", \"op\": \"in\", \"val\": \"\"},\n {\"col\": \"B\", \"op\": \"==\", \"val\": []},\n ]\n }\n expected = {\"adhoc_filters\": []}\n merge_extra_filters(form_data)\n self.assertEqual(form_data, expected)\n\n @patch(\"superset.utils.core.to_adhoc\", mock_to_adhoc)\n def test_merge_extra_filters_ignores_nones(self):\n form_data = {\n \"adhoc_filters\": [\n {\n \"clause\": \"WHERE\",\n \"comparator\": \"\",\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": None,\n }\n ],\n \"extra_filters\": [{\"col\": \"B\", \"op\": \"==\", \"val\": []}],\n }\n expected = {\n \"adhoc_filters\": [\n {\n \"clause\": \"WHERE\",\n \"comparator\": \"\",\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": None,\n }\n ]\n }\n merge_extra_filters(form_data)\n self.assertEqual(form_data, expected)\n\n @patch(\"superset.utils.core.to_adhoc\", mock_to_adhoc)\n def test_merge_extra_filters_ignores_equal_filters(self):\n form_data = {\n \"extra_filters\": [\n {\"col\": \"a\", \"op\": \"in\", \"val\": \"someval\"},\n {\"col\": \"B\", \"op\": \"==\", \"val\": [\"c1\", \"c2\"]},\n {\"col\": \"c\", \"op\": \"in\", \"val\": [\"c1\", 1, None]},\n ],\n \"adhoc_filters\": [\n {\n \"clause\": \"WHERE\",\n \"comparator\": \"someval\",\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": \"a\",\n },\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"c1\", \"c2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"==\",\n \"subject\": \"B\",\n },\n {\n \"clause\": \"WHERE\",\n 
\"comparator\": [\"c1\", 1, None],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": \"c\",\n },\n ],\n }\n expected = {\n \"adhoc_filters\": [\n {\n \"clause\": \"WHERE\",\n \"comparator\": \"someval\",\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": \"a\",\n },\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"c1\", \"c2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"==\",\n \"subject\": \"B\",\n },\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"c1\", 1, None],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": \"c\",\n },\n ]\n }\n merge_extra_filters(form_data)\n self.assertEqual(form_data, expected)\n\n @patch(\"superset.utils.core.to_adhoc\", mock_to_adhoc)\n def test_merge_extra_filters_merges_different_val_types(self):\n form_data = {\n \"extra_filters\": [\n {\"col\": \"a\", \"op\": \"in\", \"val\": [\"g1\", \"g2\"]},\n {\"col\": \"B\", \"op\": \"==\", \"val\": [\"c1\", \"c2\"]},\n ],\n \"adhoc_filters\": [\n {\n \"clause\": \"WHERE\",\n \"comparator\": \"someval\",\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": \"a\",\n },\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"c1\", \"c2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"==\",\n \"subject\": \"B\",\n },\n ],\n }\n expected = {\n \"adhoc_filters\": [\n {\n \"clause\": \"WHERE\",\n \"comparator\": \"someval\",\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": \"a\",\n },\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"c1\", \"c2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"==\",\n \"subject\": \"B\",\n },\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"g1\", \"g2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": \"a\",\n },\n ]\n }\n merge_extra_filters(form_data)\n self.assertEqual(form_data, expected)\n form_data = {\n \"extra_filters\": [\n {\"col\": \"a\", \"op\": \"in\", \"val\": \"someval\"},\n {\"col\": \"B\", \"op\": \"==\", \"val\": [\"c1\", \"c2\"]},\n ],\n \"adhoc_filters\": [\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"g1\", \"g2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": \"a\",\n },\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"c1\", \"c2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"==\",\n \"subject\": \"B\",\n },\n ],\n }\n expected = {\n \"adhoc_filters\": [\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"g1\", \"g2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": \"a\",\n },\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"c1\", \"c2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"==\",\n \"subject\": \"B\",\n },\n {\n \"clause\": \"WHERE\",\n \"comparator\": \"someval\",\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": \"a\",\n },\n ]\n }\n merge_extra_filters(form_data)\n self.assertEqual(form_data, expected)\n\n @patch(\"superset.utils.core.to_adhoc\", mock_to_adhoc)\n def test_merge_extra_filters_adds_unequal_lists(self):\n form_data = {\n \"extra_filters\": [\n {\"col\": \"a\", \"op\": \"in\", \"val\": [\"g1\", \"g2\", \"g3\"]},\n {\"col\": \"B\", \"op\": \"==\", \"val\": [\"c1\", \"c2\", \"c3\"]},\n ],\n \"adhoc_filters\": [\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"g1\", \"g2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": \"a\",\n },\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"c1\", \"c2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"==\",\n \"subject\": 
\"B\",\n },\n ],\n }\n expected = {\n \"adhoc_filters\": [\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"g1\", \"g2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": \"a\",\n },\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"c1\", \"c2\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"==\",\n \"subject\": \"B\",\n },\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"g1\", \"g2\", \"g3\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": \"a\",\n },\n {\n \"clause\": \"WHERE\",\n \"comparator\": [\"c1\", \"c2\", \"c3\"],\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"==\",\n \"subject\": \"B\",\n },\n ]\n }\n merge_extra_filters(form_data)\n self.assertEqual(form_data, expected)\n\n def test_merge_request_params_when_url_params_undefined(self):\n form_data = {\"since\": \"2000\", \"until\": \"now\"}\n url_params = {\"form_data\": form_data, \"dashboard_ids\": \"(1,2,3,4,5)\"}\n merge_request_params(form_data, url_params)\n self.assertIn(\"url_params\", form_data.keys())\n self.assertIn(\"dashboard_ids\", form_data[\"url_params\"])\n self.assertNotIn(\"form_data\", form_data.keys())\n\n def test_merge_request_params_when_url_params_predefined(self):\n form_data = {\n \"since\": \"2000\",\n \"until\": \"now\",\n \"url_params\": {\"abc\": \"123\", \"dashboard_ids\": \"(1,2,3)\"},\n }\n url_params = {\"form_data\": form_data, \"dashboard_ids\": \"(1,2,3,4,5)\"}\n merge_request_params(form_data, url_params)\n self.assertIn(\"url_params\", form_data.keys())\n self.assertIn(\"abc\", form_data[\"url_params\"])\n self.assertEqual(\n url_params[\"dashboard_ids\"], form_data[\"url_params\"][\"dashboard_ids\"]\n )\n\n def test_format_timedelta(self):\n self.assertEqual(format_timedelta(timedelta(0)), \"0:00:00\")\n self.assertEqual(format_timedelta(timedelta(days=1)), \"1 day, 0:00:00\")\n self.assertEqual(format_timedelta(timedelta(minutes=-6)), \"-0:06:00\")\n self.assertEqual(\n format_timedelta(timedelta(0) - timedelta(days=1, hours=5, minutes=6)),\n \"-1 day, 5:06:00\",\n )\n self.assertEqual(\n format_timedelta(timedelta(0) - timedelta(days=16, hours=4, minutes=3)),\n \"-16 days, 4:03:00\",\n )\n\n def test_json_encoded_obj(self):\n obj = {\"a\": 5, \"b\": [\"a\", \"g\", 5]}\n val = '{\"a\": 5, \"b\": [\"a\", \"g\", 5]}'\n jsonObj = JSONEncodedDict()\n resp = jsonObj.process_bind_param(obj, \"dialect\")\n self.assertIn('\"a\": 5', resp)\n self.assertIn('\"b\": [\"a\", \"g\", 5]', resp)\n self.assertEqual(jsonObj.process_result_value(val, \"dialect\"), obj)\n\n def test_validate_json(self):\n valid = '{\"a\": 5, \"b\": [1, 5, [\"g\", \"h\"]]}'\n self.assertIsNone(validate_json(valid))\n invalid = '{\"a\": 5, \"b\": [1, 5, [\"g\", \"h]]}'\n with self.assertRaises(SupersetException):\n validate_json(invalid)\n\n def test_memoized_on_functions(self):\n watcher = {\"val\": 0}\n\n @memoized\n def test_function(a, b, c):\n watcher[\"val\"] += 1\n return a * b * c\n\n result1 = test_function(1, 2, 3)\n result2 = test_function(1, 2, 3)\n self.assertEqual(result1, result2)\n self.assertEqual(watcher[\"val\"], 1)\n\n def test_memoized_on_methods(self):\n class test_class:\n def __init__(self, num):\n self.num = num\n self.watcher = 0\n\n @memoized\n def test_method(self, a, b, c):\n self.watcher += 1\n return a * b * c * self.num\n\n instance = test_class(5)\n result1 = instance.test_method(1, 2, 3)\n result2 = instance.test_method(1, 2, 3)\n self.assertEqual(result1, result2)\n self.assertEqual(instance.watcher, 1)\n instance.num = 10\n 
self.assertEqual(result2, instance.test_method(1, 2, 3))\n\n def test_memoized_on_methods_with_watches(self):\n class test_class:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.watcher = 0\n\n @memoized(watch=(\"x\", \"y\"))\n def test_method(self, a, b, c):\n self.watcher += 1\n return a * b * c * self.x * self.y\n\n instance = test_class(3, 12)\n result1 = instance.test_method(1, 2, 3)\n result2 = instance.test_method(1, 2, 3)\n self.assertEqual(result1, result2)\n self.assertEqual(instance.watcher, 1)\n result3 = instance.test_method(2, 3, 4)\n self.assertEqual(instance.watcher, 2)\n result4 = instance.test_method(2, 3, 4)\n self.assertEqual(instance.watcher, 2)\n self.assertEqual(result3, result4)\n self.assertNotEqual(result3, result1)\n instance.x = 1\n result5 = instance.test_method(2, 3, 4)\n self.assertEqual(instance.watcher, 3)\n self.assertNotEqual(result5, result4)\n result6 = instance.test_method(2, 3, 4)\n self.assertEqual(instance.watcher, 3)\n self.assertEqual(result6, result5)\n instance.x = 10\n instance.y = 10\n result7 = instance.test_method(2, 3, 4)\n self.assertEqual(instance.watcher, 4)\n self.assertNotEqual(result7, result6)\n instance.x = 3\n instance.y = 12\n result8 = instance.test_method(1, 2, 3)\n self.assertEqual(instance.watcher, 4)\n self.assertEqual(result1, result8)\n\n @patch(\"superset.utils.core.parse_human_datetime\", mock_parse_human_datetime)\n def test_get_since_until(self):\n result = get_since_until()\n expected = None, datetime(2016, 11, 7)\n self.assertEqual(result, expected)\n\n result = get_since_until(\" : now\")\n expected = None, datetime(2016, 11, 7, 9, 30, 10)\n self.assertEqual(result, expected)\n\n result = get_since_until(\"yesterday : tomorrow\")\n expected = datetime(2016, 11, 6), datetime(2016, 11, 8)\n self.assertEqual(result, expected)\n\n result = get_since_until(\"2018-01-01T00:00:00 : 2018-12-31T23:59:59\")\n expected = datetime(2018, 1, 1), datetime(2018, 12, 31, 23, 59, 59)\n self.assertEqual(result, expected)\n\n result = get_since_until(\"Last year\")\n expected = datetime(2015, 11, 7), datetime(2016, 11, 7)\n self.assertEqual(result, expected)\n\n result = get_since_until(\"Last 5 months\")\n expected = datetime(2016, 6, 7), datetime(2016, 11, 7)\n self.assertEqual(result, expected)\n\n result = get_since_until(\"Next 5 months\")\n expected = datetime(2016, 11, 7), datetime(2017, 4, 7)\n self.assertEqual(result, expected)\n\n result = get_since_until(since=\"5 days\")\n expected = datetime(2016, 11, 2), datetime(2016, 11, 7)\n self.assertEqual(result, expected)\n\n result = get_since_until(since=\"5 days ago\", until=\"tomorrow\")\n expected = datetime(2016, 11, 2), datetime(2016, 11, 8)\n self.assertEqual(result, expected)\n\n result = get_since_until(time_range=\"yesterday : tomorrow\", time_shift=\"1 day\")\n expected = datetime(2016, 11, 5), datetime(2016, 11, 7)\n self.assertEqual(result, expected)\n\n result = get_since_until(time_range=\"5 days : now\")\n expected = datetime(2016, 11, 2), datetime(2016, 11, 7, 9, 30, 10)\n self.assertEqual(result, expected)\n\n result = get_since_until(\"Last week\", relative_end=\"now\")\n expected = datetime(2016, 10, 31), datetime(2016, 11, 7, 9, 30, 10)\n self.assertEqual(result, expected)\n\n result = get_since_until(\"Last week\", relative_start=\"now\")\n expected = datetime(2016, 10, 31, 9, 30, 10), datetime(2016, 11, 7)\n self.assertEqual(result, expected)\n\n result = get_since_until(\"Last week\", relative_start=\"now\", relative_end=\"now\")\n expected = 
datetime(2016, 10, 31, 9, 30, 10), datetime(2016, 11, 7, 9, 30, 10)\n self.assertEqual(result, expected)\n\n with self.assertRaises(ValueError):\n get_since_until(time_range=\"tomorrow : yesterday\")\n\n @patch(\"superset.utils.core.to_adhoc\", mock_to_adhoc)\n def test_convert_legacy_filters_into_adhoc_where(self):\n form_data = {\"where\": \"a = 1\"}\n expected = {\n \"adhoc_filters\": [\n {\"clause\": \"WHERE\", \"expressionType\": \"SQL\", \"sqlExpression\": \"a = 1\"}\n ]\n }\n convert_legacy_filters_into_adhoc(form_data)\n self.assertEqual(form_data, expected)\n\n @patch(\"superset.utils.core.to_adhoc\", mock_to_adhoc)\n def test_convert_legacy_filters_into_adhoc_filters(self):\n form_data = {\"filters\": [{\"col\": \"a\", \"op\": \"in\", \"val\": \"someval\"}]}\n expected = {\n \"adhoc_filters\": [\n {\n \"clause\": \"WHERE\",\n \"comparator\": \"someval\",\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"in\",\n \"subject\": \"a\",\n }\n ]\n }\n convert_legacy_filters_into_adhoc(form_data)\n self.assertEqual(form_data, expected)\n\n @patch(\"superset.utils.core.to_adhoc\", mock_to_adhoc)\n def test_convert_legacy_filters_into_adhoc_having(self):\n form_data = {\"having\": \"COUNT(1) = 1\"}\n expected = {\n \"adhoc_filters\": [\n {\n \"clause\": \"HAVING\",\n \"expressionType\": \"SQL\",\n \"sqlExpression\": \"COUNT(1) = 1\",\n }\n ]\n }\n convert_legacy_filters_into_adhoc(form_data)\n self.assertEqual(form_data, expected)\n\n @patch(\"superset.utils.core.to_adhoc\", mock_to_adhoc)\n def test_convert_legacy_filters_into_adhoc_having_filters(self):\n form_data = {\"having_filters\": [{\"col\": \"COUNT(1)\", \"op\": \"==\", \"val\": 1}]}\n expected = {\n \"adhoc_filters\": [\n {\n \"clause\": \"HAVING\",\n \"comparator\": 1,\n \"expressionType\": \"SIMPLE\",\n \"operator\": \"==\",\n \"subject\": \"COUNT(1)\",\n }\n ]\n }\n convert_legacy_filters_into_adhoc(form_data)\n self.assertEqual(form_data, expected)\n\n @patch(\"superset.utils.core.to_adhoc\", mock_to_adhoc)\n def test_convert_legacy_filters_into_adhoc_present_and_empty(self):\n form_data = {\"adhoc_filters\": [], \"where\": \"a = 1\"}\n expected = {\n \"adhoc_filters\": [\n {\"clause\": \"WHERE\", \"expressionType\": \"SQL\", \"sqlExpression\": \"a = 1\"}\n ]\n }\n convert_legacy_filters_into_adhoc(form_data)\n self.assertEqual(form_data, expected)\n\n @patch(\"superset.utils.core.to_adhoc\", mock_to_adhoc)\n def test_convert_legacy_filters_into_adhoc_present_and_nonempty(self):\n form_data = {\n \"adhoc_filters\": [\n {\"clause\": \"WHERE\", \"expressionType\": \"SQL\", \"sqlExpression\": \"a = 1\"}\n ],\n \"filters\": [{\"col\": \"a\", \"op\": \"in\", \"val\": \"someval\"}],\n \"having\": \"COUNT(1) = 1\",\n \"having_filters\": [{\"col\": \"COUNT(1)\", \"op\": \"==\", \"val\": 1}],\n }\n expected = {\n \"adhoc_filters\": [\n {\"clause\": \"WHERE\", \"expressionType\": \"SQL\", \"sqlExpression\": \"a = 1\"}\n ]\n }\n convert_legacy_filters_into_adhoc(form_data)\n self.assertEqual(form_data, expected)\n\n def test_parse_js_uri_path_items_eval_undefined(self):\n self.assertIsNone(parse_js_uri_path_item(\"undefined\", eval_undefined=True))\n self.assertIsNone(parse_js_uri_path_item(\"null\", eval_undefined=True))\n self.assertEqual(\"undefined\", parse_js_uri_path_item(\"undefined\"))\n self.assertEqual(\"null\", parse_js_uri_path_item(\"null\"))\n\n def test_parse_js_uri_path_items_unquote(self):\n self.assertEqual(\"slashed/name\", parse_js_uri_path_item(\"slashed%2fname\"))\n self.assertEqual(\n \"slashed%2fname\", 
parse_js_uri_path_item(\"slashed%2fname\", unquote=False)\n )\n\n def test_parse_js_uri_path_items_item_optional(self):\n self.assertIsNone(parse_js_uri_path_item(None))\n self.assertIsNotNone(parse_js_uri_path_item(\"item\"))\n\n def test_get_stacktrace(self):\n with app.app_context():\n app.config[\"SHOW_STACKTRACE\"] = True\n try:\n raise Exception(\"NONONO!\")\n except Exception:\n stacktrace = get_stacktrace()\n self.assertIn(\"NONONO\", stacktrace)\n\n app.config[\"SHOW_STACKTRACE\"] = False\n try:\n raise Exception(\"NONONO!\")\n except Exception:\n stacktrace = get_stacktrace()\n assert stacktrace is None\n\n def test_split(self):\n self.assertEqual(list(split(\"a b\")), [\"a\", \"b\"])\n self.assertEqual(list(split(\"a,b\", delimiter=\",\")), [\"a\", \"b\"])\n self.assertEqual(list(split(\"a,(b,a)\", delimiter=\",\")), [\"a\", \"(b,a)\"])\n self.assertEqual(\n list(split('a,(b,a),\"foo , bar\"', delimiter=\",\")),\n [\"a\", \"(b,a)\", '\"foo , bar\"'],\n )\n self.assertEqual(\n list(split(\"a,'b,c'\", delimiter=\",\", quote=\"'\")), [\"a\", \"'b,c'\"]\n )\n self.assertEqual(list(split('a \"b c\"')), [\"a\", '\"b c\"'])\n self.assertEqual(list(split(r'a \"b \\\" c\"')), [\"a\", r'\"b \\\" c\"'])\n\n def test_get_or_create_db(self):\n get_or_create_db(\"test_db\", \"sqlite:///superset.db\")\n database = db.session.query(Database).filter_by(database_name=\"test_db\").one()\n self.assertIsNotNone(database)\n self.assertEqual(database.sqlalchemy_uri, \"sqlite:///superset.db\")\n self.assertIsNotNone(\n security_manager.find_permission_view_menu(\"database_access\", database.perm)\n )\n # Test change URI\n get_or_create_db(\"test_db\", \"sqlite:///changed.db\")\n database = db.session.query(Database).filter_by(database_name=\"test_db\").one()\n self.assertEqual(database.sqlalchemy_uri, \"sqlite:///changed.db\")\n db.session.delete(database)\n db.session.commit()\n\n def test_get_or_create_db_invalid_uri(self):\n with self.assertRaises(ArgumentError):\n get_or_create_db(\"test_db\", \"yoursql:superset.db/()\")\n\n def test_get_time_range_endpoints(self):\n self.assertEqual(\n get_time_range_endpoints(form_data={}),\n (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.EXCLUSIVE),\n )\n\n self.assertEqual(\n get_time_range_endpoints(\n form_data={\"time_range_endpoints\": [\"inclusive\", \"inclusive\"]}\n ),\n (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.INCLUSIVE),\n )\n\n self.assertEqual(\n get_time_range_endpoints(form_data={\"datasource\": \"1_druid\"}),\n (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.EXCLUSIVE),\n )\n\n slc = Mock()\n slc.datasource.database.get_extra.return_value = {}\n\n self.assertEqual(\n get_time_range_endpoints(form_data={\"datasource\": \"1__table\"}, slc=slc),\n (TimeRangeEndpoint.UNKNOWN, TimeRangeEndpoint.INCLUSIVE),\n )\n\n slc.datasource.database.get_extra.return_value = {\n \"time_range_endpoints\": [\"inclusive\", \"inclusive\"]\n }\n\n self.assertEqual(\n get_time_range_endpoints(form_data={\"datasource\": \"1__table\"}, slc=slc),\n (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.INCLUSIVE),\n )\n\n self.assertIsNone(get_time_range_endpoints(form_data={}, slc=slc))\n\n with app.app_context():\n app.config[\"SIP_15_GRACE_PERIOD_END\"] = date.today() + timedelta(days=1)\n\n self.assertEqual(\n get_time_range_endpoints(form_data={\"datasource\": \"1__table\"}, slc=slc),\n (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.INCLUSIVE),\n )\n\n app.config[\"SIP_15_GRACE_PERIOD_END\"] = date.today()\n\n self.assertEqual(\n 
get_time_range_endpoints(form_data={\"datasource\": \"1__table\"}, slc=slc),\n (TimeRangeEndpoint.INCLUSIVE, TimeRangeEndpoint.EXCLUSIVE),\n )\n\n def test_get_iterable(self):\n self.assertListEqual(get_iterable(123), [123])\n self.assertListEqual(get_iterable([123]), [123])\n self.assertListEqual(get_iterable(\"foo\"), [\"foo\"])\n\n def test_build_extra_filters(self):\n world_health = db.session.query(Dashboard).filter_by(slug=\"world_health\").one()\n layout = json.loads(world_health.position_json)\n filter_ = db.session.query(Slice).filter_by(slice_name=\"Region Filter\").one()\n world = db.session.query(Slice).filter_by(slice_name=\"World's Population\").one()\n box_plot = db.session.query(Slice).filter_by(slice_name=\"Box plot\").one()\n treemap = db.session.query(Slice).filter_by(slice_name=\"Treemap\").one()\n\n filter_scopes = {\n str(filter_.id): {\n \"region\": {\"scope\": [\"ROOT_ID\"], \"immune\": [treemap.id]},\n \"country_name\": {\n \"scope\": [\"ROOT_ID\"],\n \"immune\": [treemap.id, box_plot.id],\n },\n }\n }\n\n default_filters = {\n str(filter_.id): {\n \"region\": [\"North America\"],\n \"country_name\": [\"United States\"],\n }\n }\n\n # immune to all filters\n assert (\n build_extra_filters(layout, filter_scopes, default_filters, treemap.id)\n == []\n )\n\n # in scope\n assert build_extra_filters(\n layout, filter_scopes, default_filters, world.id\n ) == [\n {\"col\": \"region\", \"op\": \"==\", \"val\": \"North America\"},\n {\"col\": \"country_name\", \"op\": \"in\", \"val\": [\"United States\"]},\n ]\n\n assert build_extra_filters(\n layout, filter_scopes, default_filters, box_plot.id\n ) == [{\"col\": \"region\", \"op\": \"==\", \"val\": \"North America\"}]\n\n def test_ssl_certificate_parse(self):\n parsed_certificate = parse_ssl_cert(ssl_certificate)\n self.assertEqual(parsed_certificate.serial_number, 12355228710836649848)\n self.assertRaises(CertificateException, parse_ssl_cert, \"abc\" + ssl_certificate)\n\n def test_ssl_certificate_file_creation(self):\n path = create_ssl_cert_file(ssl_certificate)\n expected_filename = hashlib.md5(ssl_certificate.encode(\"utf-8\")).hexdigest()\n self.assertIn(expected_filename, path)\n self.assertTrue(os.path.exists(path))\n\n def test_get_email_address_list(self):\n self.assertEqual(get_email_address_list(\"a@a\"), [\"a@a\"])\n self.assertEqual(get_email_address_list(\" a@a \"), [\"a@a\"])\n self.assertEqual(get_email_address_list(\"a@a\\n\"), [\"a@a\"])\n self.assertEqual(get_email_address_list(\",a@a;\"), [\"a@a\"])\n self.assertEqual(\n get_email_address_list(\",a@a; b@b c@c a-c@c; d@d, f@f\"),\n [\"a@a\", \"b@b\", \"c@c\", \"a-c@c\", \"d@d\", \"f@f\"],\n )\n\n def test_get_form_data_default(self) -> None:\n with app.test_request_context():\n form_data, slc = get_form_data()\n\n self.assertEqual(\n form_data,\n {\"time_range_endpoints\": get_time_range_endpoints(form_data={}),},\n )\n\n self.assertEqual(slc, None)\n\n def test_get_form_data_request_args(self) -> None:\n with app.test_request_context(\n query_string={\"form_data\": json.dumps({\"foo\": \"bar\"})}\n ):\n form_data, slc = get_form_data()\n\n self.assertEqual(\n form_data,\n {\n \"foo\": \"bar\",\n \"time_range_endpoints\": get_time_range_endpoints(form_data={}),\n },\n )\n\n self.assertEqual(slc, None)\n\n def test_get_form_data_request_form(self) -> None:\n with app.test_request_context(data={\"form_data\": json.dumps({\"foo\": \"bar\"})}):\n form_data, slc = get_form_data()\n\n self.assertEqual(\n form_data,\n {\n \"foo\": \"bar\",\n 
\"time_range_endpoints\": get_time_range_endpoints(form_data={}),\n },\n )\n\n self.assertEqual(slc, None)\n\n def test_get_form_data_request_args_and_form(self) -> None:\n with app.test_request_context(\n data={\"form_data\": json.dumps({\"foo\": \"bar\"})},\n query_string={\"form_data\": json.dumps({\"baz\": \"bar\"})},\n ):\n form_data, slc = get_form_data()\n\n self.assertEqual(\n form_data,\n {\n \"baz\": \"bar\",\n \"foo\": \"bar\",\n \"time_range_endpoints\": get_time_range_endpoints(form_data={}),\n },\n )\n\n self.assertEqual(slc, None)\n\n def test_get_form_data_globals(self) -> None:\n with app.test_request_context():\n g.form_data = {\"foo\": \"bar\"}\n form_data, slc = get_form_data()\n delattr(g, \"form_data\")\n\n self.assertEqual(\n form_data,\n {\n \"foo\": \"bar\",\n \"time_range_endpoints\": get_time_range_endpoints(form_data={}),\n },\n )\n\n self.assertEqual(slc, None)\n\n def test_log_this(self) -> None:\n # TODO: Add additional scenarios.\n self.login(username=\"admin\")\n slc = self.get_slice(\"Girls\", db.session)\n dashboard_id = 1\n\n resp = self.get_json_resp(\n f\"/superset/explore_json/{slc.datasource_type}/{slc.datasource_id}/\"\n + f'?form_data={{\"slice_id\": {slc.id}}}&dashboard_id={dashboard_id}',\n {\"form_data\": json.dumps(slc.viz.form_data)},\n )\n\n record = (\n db.session.query(Log)\n .filter_by(action=\"explore_json\", slice_id=slc.id)\n .order_by(Log.dttm.desc())\n .first()\n )\n\n self.assertEqual(record.dashboard_id, dashboard_id)\n self.assertEqual(json.loads(record.json)[\"dashboard_id\"], str(dashboard_id))\n self.assertEqual(json.loads(record.json)[\"form_data\"][\"slice_id\"], slc.id)\n\n self.assertEqual(\n json.loads(record.json)[\"form_data\"][\"viz_type\"],\n slc.viz.form_data[\"viz_type\"],\n )\n\n def test_schema_validate_json(self):\n valid = '{\"a\": 5, \"b\": [1, 5, [\"g\", \"h\"]]}'\n self.assertIsNone(schema.validate_json(valid))\n invalid = '{\"a\": 5, \"b\": [1, 5, [\"g\", \"h]]}'\n self.assertRaises(marshmallow.ValidationError, schema.validate_json, invalid)\n\n def test_schema_one_of_case_insensitive(self):\n validator = schema.OneOfCaseInsensitive(choices=[1, 2, 3, \"FoO\", \"BAR\", \"baz\"])\n self.assertEqual(1, validator(1))\n self.assertEqual(2, validator(2))\n self.assertEqual(\"FoO\", validator(\"FoO\"))\n self.assertEqual(\"FOO\", validator(\"FOO\"))\n self.assertEqual(\"bar\", validator(\"bar\"))\n self.assertEqual(\"BaZ\", validator(\"BaZ\"))\n self.assertRaises(marshmallow.ValidationError, validator, \"qwerty\")\n self.assertRaises(marshmallow.ValidationError, validator, 4)\n\n def test_cast_to_num(self) -> None:\n assert cast_to_num(\"5\") == 5\n assert cast_to_num(\"5.2\") == 5.2\n assert cast_to_num(10) == 10\n assert cast_to_num(10.1) == 10.1\n assert cast_to_num(None) is None\n assert cast_to_num(\"this is not a string\") is None\n\n def test_get_form_data_token(self):\n assert get_form_data_token({\"token\": \"token_abcdefg1\"}) == \"token_abcdefg1\"\n generated_token = get_form_data_token({})\n assert re.match(r\"^token_[a-z0-9]{8}$\", generated_token) is not None\n" ]
[ [ "numpy.int64", "numpy.array", "numpy.bool_" ] ]
ICOS-Carbon-Portal/jupyter
[ "628c16b18352411a6c5cd9b44ed0c01aad9cf3ac" ]
[ "notebooks/icos_jupyter_notebooks/station_characterization/stc_functions.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 9 08:04:31 2020\n\n@author: Ida Storm \n\nFunctions to run the station characterization notebook on exploredata.\n\n\"\"\"\n\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport math\nimport numpy as np\nfrom netCDF4 import Dataset\nimport textwrap\nimport datetime as dt\nimport os\nimport six\nimport requests\nimport tex\nfrom IPython.core.display import display, HTML \nimport netCDF4 as cdf\nimport cartopy\ncartopy.config['data_dir'] = '/data/project/cartopy/'\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport cartopy.io.shapereader as shpreader\nfrom cartopy.feature import ShapelyFeature\nimport warnings\nwarnings.filterwarnings('ignore')\nimport json\n\n#path to data such as population data and land cover data\nstcDataPath='/data/project/stc/'\n\n#path to footprints\npathFP='/data/stiltweb/stations/'\n\n#added to not show the land cover bar graph that is being saved for the PDF which is different size than the one displayed\nmatplotlib.pyplot.ioff()\n\n#Earth's radius in km (for calculating distances between the station and cells)\nR = 6373.8\n\n#Colors for the land cover plots\ndictionary_color = {'Broad leaf forest': {'color': '#4c9c5e'}, 'Coniferous forest':{'color':'#CAE0AB'}, 'Mixed forest':{'color':'#90C987'}, 'Ocean':{'color':'#1964B0'}, 'Other':{'color':'#882E72'}, 'Grass/shrubland':{'color':'#F1932D'}, 'Cropland':{'color': '#521A13'}, 'Pasture':{'color':'#F7F056'}, 'Urban':{'color':'#DC050C'}, 'Unknown':{'color':'#777777'}}\n\n#function to read and aggregate footprints for given date range\ndef read_aggreg_footprints(station, date_range):\n \n # loop over all dates and read netcdf files\n\n # path to footprint files in new stiltweb directory structure\n pathFP='/data/stiltweb/stations/'\n \n fp=[]\n nfp=0\n first = True\n for date in date_range:\n\n filename=(pathFP+station+'/'+str(date.year)+'/'+str(date.month).zfill(2)+'/'\n +str(date.year)+'x'+str(date.month).zfill(2)+'x'+str(date.day).zfill(2)+'x'+str(date.hour).zfill(2)+'/foot')\n \n if os.path.isfile(filename):\n f_fp = cdf.Dataset(filename)\n \n if (first):\n fp=f_fp.variables['foot'][:,:,:]\n lon=f_fp.variables['lon'][:]\n lat=f_fp.variables['lat'][:]\n first = False\n else:\n fp=fp+f_fp.variables['foot'][:,:,:]\n f_fp.close()\n nfp+=1\n #else:\n #print ('file does not exist: ',filename)\n if nfp > 0:\n fp=fp/nfp\n title = 'not used'\n \n return nfp, fp, lon, lat, title\n\n else:\n\n return 0, None, None, None, None\n\n# function to read STILT concentration time series (new format of STILT results)\ndef read_stilt_timeseries(station,date_range,timeselect_list):\n url = 'https://stilt.icos-cp.eu/viewer/stiltresult'\n headers = {'Content-Type': 'application/json', 'Accept-Charset': 'UTF-8'}\n\n new_range=[]\n \n for date in date_range:\n if os.path.exists(pathFP+station+'/'+str(date.year)+'/'+str(date.month).zfill(2)+'/'\n +str(date.year)+'x'+str(date.month).zfill(2)+'x'+str(date.day).zfill(2)+'x'+str(date.hour).zfill(2)+'/'):\n new_range.append(date)\n \n if len(new_range) > 0:\n date_range = new_range\n fromDate = date_range[0].strftime('%Y-%m-%d')\n toDate = date_range[-1].strftime('%Y-%m-%d')\n columns = ('[\"isodate\",\"co2.stilt\",\"co2.fuel\",\"co2.bio\",\"co2.bio.gee\",\"co2.bio.resp\",\"co2.fuel.coal\",\"co2.fuel.oil\",'+\n '\"co2.fuel.gas\",\"co2.fuel.bio\",\"co2.energy\",\"co2.transport\", \"co2.industry\",'+\n '\"co2.others\", \"co2.cement\", \"co2.background\",'+\n 
'\"co.stilt\",\"co.fuel\",\"co.bio\",\"co.fuel.coal\",\"co.fuel.oil\",'+\n '\"co.fuel.gas\",\"co.fuel.bio\",\"co.energy\",\"co.transport\", \"co.industry\",'+\n '\"co.others\", \"co.cement\", \"co.background\",'+\n '\"rn\", \"rn.era\",\"rn.noah\",\"wind.dir\",\"wind.u\",\"wind.v\",\"latstart\",\"lonstart\"]')\n data = '{\"columns\": '+columns+', \"fromDate\": \"'+fromDate+'\", \"toDate\": \"'+toDate+'\", \"stationId\": \"'+station+'\"}'\n #print (data)\n response = requests.post(url, headers=headers, data=data)\n if response.status_code != 500:\n #print (response.json())\n output=np.asarray(response.json())\n df = pd.DataFrame(output[:,:], columns=eval(columns))\n df = df.replace('null',np.NaN)\n df = df.astype(float)\n df['date'] = pd.to_datetime(df['isodate'], unit='s')\n df.set_index(['date'],inplace=True)\n df['name'] = station\n df['model'] = 'STILT'\n df['wind.speed']=np.sqrt((df['wind.u']**2)+(df['wind.v']**2))\n #print (df.columns)\n else:\n df=pd.DataFrame({'A' : []})\n\n df=df[(df['co2.fuel'].index.hour.isin(timeselect_list))]\n\n return df\n\n#given the input - create an updated pandas date range with only hours in timeselect_list\ndef date_range_hour_filtered(start_date, end_date, timeselect_list):\n \n date_range = pd.date_range(start_date, end_date, freq='3H')\n\n #depending on how many input (max 8 for 0 3 6 9 12 15 18 21), filter to include hours.\n for time_value in timeselect_list:\n if len(timeselect_list)==1:\n date_range = date_range[(timeselect_list[0] == date_range.hour)]\n #df_nine = df.loc[(timeselect_list[count_timeselect] == df.index.hour)]\n if len(timeselect_list)==2:\n date_range = date_range[(timeselect_list[0] == date_range.hour)] | date_range[(timeselect_list[1] == date_range.hour)]\n if len(timeselect_list)==3:\n date_range = date_range[(timeselect_list[0] == date_range.hour)] | date_range[(timeselect_list[1] == date_range.hour)] \\\n | date_range[(timeselect_list[2] == date_range.hour)]\n\n if len(timeselect_list)==4:\n date_range = date_range[(timeselect_list[0] == date_range.hour)] | date_range[(timeselect_list[1] == date_range.hour)] \\\n | date_range[(timeselect_list[2] == date_range.hour)] | date_range[(timeselect_list[3] == date_range.hour)]\n\n if len(timeselect_list)==5:\n date_range = date_range[(timeselect_list[0] == date_range.hour)] | date_range[(timeselect_list[1] == date_range.hour)] \\\n | date_range[(timeselect_list[2] == date_range.hour)] | date_range[(timeselect_list[3] == date_range.hour)]\\\n | date_range[(timeselect_list[4] == date_range.hour)]\n\n if len(timeselect_list)==6:\n date_range = date_range[(timeselect_list[0] == date_range.hour)] | date_range[(timeselect_list[1] == date_range.hour)] \\\n | date_range[(timeselect_list[2] == date_range.hour)] | date_range[(timeselect_list[3] == date_range.hour)]\\\n | date_range[(timeselect_list[4] == date_range.hour)] | date_range[(timeselect_list[5] == date_range.hour)]\n\n if len(timeselect_list)==7:\n date_range = date_range[(timeselect_list[0] == date_range.hour)] | date_range[(timeselect_list[1] == date_range.hour)] \\\n | date_range[(timeselect_list[2] == date_range.hour)] | date_range[(timeselect_list[3] == date_range.hour)]\\\n | date_range[(timeselect_list[4] == date_range.hour)] | date_range[(timeselect_list[5] == date_range.hour)]\\\n | date_range[(timeselect_list[6] == date_range.hour)]\n \n if len(timeselect_list)==8:\n date_range = date_range[(timeselect_list[0] == date_range.hour)] | date_range[(timeselect_list[1] == date_range.hour)] \\\n | date_range[(timeselect_list[2] == 
date_range.hour)] | date_range[(timeselect_list[3] == date_range.hour)]\\\n | date_range[(timeselect_list[4] == date_range.hour)] | date_range[(timeselect_list[5] == date_range.hour)]\\\n | date_range[(timeselect_list[6] == date_range.hour)] | date_range[(timeselect_list[7] == date_range.hour)]\n \n #consider return timeselect\n return date_range\n\ndef import_landcover_HILDA(year='2018'):\n \n name_data = 'hilda_lulc_'+ year +'.nc' \n \n all_hilda_classes= Dataset(stcDataPath + name_data)\n\n #access all the different land cover classes in the .nc files:\n cropland = all_hilda_classes.variables['cropland'][:,:]\n ocean = all_hilda_classes.variables['ocean'][:,:]\n forest_decidious_broad_leaf = all_hilda_classes.variables['f_de_br_le'][:,:]\n forest_decidious_needle_leaf = all_hilda_classes.variables['f_de_ne_le'][:,:]\n forest_evergreen_broad_leaf = all_hilda_classes.variables['f_eg_br_le'][:,:]\n forest_evergreen_needle_leaf = all_hilda_classes.variables['f_eg_ne_le'][:,:]\n mixed_forest = all_hilda_classes.variables['forest_mix'][:,:]\n forest_unknown = all_hilda_classes.variables['forest_unk'][:,:]\n grass_shrub = all_hilda_classes.variables['grass_shru'][:,:]\n other_land = all_hilda_classes.variables['other_land'][:,:]\n pasture = all_hilda_classes.variables['pasture'][:,:]\n urban = all_hilda_classes.variables['urban'][:,:]\n water = all_hilda_classes.variables['water'][:,:]\n unknown = all_hilda_classes.variables['unknown'][:,:]\n \n # aggregated classes:\n broad_leaf_forest = forest_decidious_broad_leaf + forest_evergreen_broad_leaf \n coniferous_forest = forest_decidious_needle_leaf+ forest_evergreen_needle_leaf\n mixed_forest = mixed_forest + forest_unknown\n other = other_land + water\n \n return broad_leaf_forest, coniferous_forest, mixed_forest, ocean, other, grass_shrub, cropland, pasture, urban, unknown\n\ndef import_population_data(year=2018):\n \n pop_data= Dataset(stcDataPath + 'GEOSTAT_population_2011_2018.nc')\n fp_pop=pop_data.variables[str(year)][:,:]\n return fp_pop\n\ndef import_point_source_data():\n\n point_source_data= Dataset(stcDataPath+ 'E_PRTR_pointsource_2017.nc')\n\n #emissions in kg/year in the variable \"Sum_Tota_1\"\n fp_point_source=point_source_data.variables['Sum_Tota_1'][:,:]\n\n # different from population data: can translate the emissions within each stilt cell\n # to the effect it will have to the final CO2 concentrations at the stations.\n # just need to get it in the right unit (micromole/m2s) and multiply by the individual or aggregated footprints\n\n # divide by the molar weight in kg. 12 (C)+16(O)+16(O) =44 0.044 in kg.\n # get number of moles of C this way. Want it in micromole though: 1 mole= 1000000 micromole\n fp_point_source_moles_C=fp_point_source/0.044\n\n #how many micro-mole is that? 
multiply by 1000000\n fp_point_source_micromoles_C=fp_point_source_moles_C*1000000\n\n #a NetCDF file with the grid size calues in m2\n f_gridarea = cdf.Dataset(stcDataPath + 'gridareaSTILT.nc')\n\n #area stored in \"cell_area\"\n gridarea = f_gridarea.variables['cell_area'][:]\n\n fp_point_source_m2= fp_point_source_micromoles_C/gridarea\n\n #how many micro moles let out per second (have yearly data)\n fp_point_source_m2_s= fp_point_source_m2/31536000\n \n return fp_point_source_m2_s\n\ndef date_and_time_string_for_title(date_range, timeselect_list):\n \n date_index_number=len(date_range) - 1\n \n timeselect_string=[str(value) for value in timeselect_list]\n timeselect_string =':00, '.join(timeselect_string) + ':00'\n\n date_and_time_string=('\\n' + str(date_range[0].year) + '-' + str(date_range[0].month) + '-' + str(date_range[0].day)\\\n + ' to ' + str(date_range[date_index_number].year) + '-' + str(date_range[date_index_number].month) + '-' \\\n + str(date_range[date_index_number].day)+ ', Hour(s): ' + timeselect_string+ '\\n')\n return date_and_time_string\n\n#function to generate maps with cells binned by defined intervals and direction\ndef nondirection_labels(bins, units): \n labels = []\n \n #for the label - want bin before and after (range)\n for left, right in zip(bins[:-1], bins[1:]):\n \n #if the last object - everything above (>value unit)\n if np.isinf(right):\n labels.append('>{} {}'.format(left, units))\n else:\n \n #how the labels normally look (value - value unit)\n labels.append('{} - {} {}'.format(left, right, units))\n\n return list(labels)\n\ndef compass_bearing(pointA, pointB):\n \"\"\"\n Calculates the bearing between two points.\n The formulae used is the following:\n θ = atan2(sin(Δlong).cos(lat2),\n cos(lat1).sin(lat2) − sin(lat1).cos(lat2).cos(Δlong))\n :Parameters:\n - `pointA: The tuple representing the latitude/longitude for the\n first point. Latitude and longitude must be in decimal degrees\n - `pointB: The tuple representing the latitude/longitude for the\n second point. Latitude and longitude must be in decimal degrees\n :Returns:\n The bearing in degrees\n :Returns Type:\n float\n \"\"\"\n if (type(pointA) != tuple) or (type(pointB) != tuple):\n raise TypeError(\"Only tuples are supported as arguments\")\n\n lat1 = math.radians(pointA[0])\n lat2 = math.radians(pointB[0])\n\n diffLong = math.radians(pointB[1] - pointA[1])\n\n x = math.sin(diffLong) * math.cos(lat2)\n y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1)\n * math.cos(lat2) * math.cos(diffLong))\n\n initial_bearing = math.atan2(x, y)\n\n # Now we have the initial bearing but math.atan2 return values\n # from -180° to + 180° which is not what we want for a compass bearing\n # The solution is to normalize the initial bearing as shown below\n initial_bearing = math.degrees(initial_bearing)\n compass_bearing = (initial_bearing + 360) % 360\n\n return compass_bearing\n\ndef define_bins_maprose(km_intervals, bin_size):\n \n #the number of bins\n number_bins=round((5000/km_intervals),0)\n \n #start at 0 km of the station. 
Then append to this list\n interval_bins=[0]\n \n #ex 100, 200, 300 if km_intervals=100\n for number in range(1, int(number_bins)):\n interval_bins.append(km_intervals*number)\n \n #the last number is infinity - however far the domain stretches marks the end of the last bin.\n interval_bins.append(np.inf)\n \n #labels: not used in map - but used in the grouping\n interval_labels = nondirection_labels(interval_bins, units='km')\n\n #direction: using the input (degree_bin) to set the bins so that the first bin has \"north (0 degrees) in the middle\"\n #\"from_degree\" becomes a negative value (half of the degree value \"to the left\" of 0)\n from_degree=-(bin_size/2)\n\n #\"to_degree\" is a vale to indicate the last bins ending. Must check values all the way to 360 which means the last bin \n #will go past 360 and later be joined with the \"0\" bin (replace function in next cell)\n to_degree= 360 + (bin_size/2) \n\n #the bin_size is the \"step\". generate an array with all the direction bins\n dir_bins = np.arange(from_degree, (to_degree+1), bin_size)\n\n #the direction bin is the first bin + the next bin divided by two:\n dir_labels = (dir_bins[:-1] + dir_bins[1:]) / 2\n \n #return these values to use in the function map_representation_polar_graph\n return interval_bins, interval_labels, dir_bins, dir_labels\n\n# function to convert station longitude and latitude (slat, slon) to indices of STILT model grid (ix,jy)\ndef lonlat_2_ixjy(slon,slat,mlon,mlat):\n #slon, slat: longitude and latitude of station\n #mlon, mlat: 1-dim. longitude and latitude of model grid\n ix = (np.abs(mlon-slon)).argmin()\n jy = (np.abs(mlat-slat)).argmin()\n return ix,jy\n\ndef plot_maps(myStation, field, title='', label='', linlog='linear', zoom='', \n vmin=0.0001, vmax=None, colors='GnBu'): \n\n station=myStation.stationId\n lon=myStation.lon\n lat=myStation.lat\n unit=myStation.settings['unit']\n if unit=='percent':\n unit='%'\n else:\n if label=='point source contribution':\n unit='(ppm)'\n if label=='population sensitivity':\n unit='(population * (ppm /(μmol / (m²s))))'\n if label=='sensitivity':\n unit='(ppm /(μmol / (m²s)))'\n\n fp_lon=myStation.fpLon\n fp_lat=myStation.fpLat\n \n \n mcolor='m'\n \n # Set scale for features from Natural Earth\n NEscale = '50m'\n\n # Create a feature for Countries at 1:50m from Natural Earth\n countries = cfeature.NaturalEarthFeature(\n category='cultural',\n name='admin_0_countries',\n scale=NEscale,\n facecolor='none')\n\n fig = plt.figure(figsize=(18,10))\n\n # set up a map\n ax = plt.subplot(1, 2, 1, projection=ccrs.PlateCarree())\n img_extent = (fp_lon.min(), fp_lon.max(), fp_lat.min(), fp_lat.max())\n ax.set_extent([fp_lon.min(), fp_lon.max(), fp_lat.min(), fp_lat.max()],crs=ccrs.PlateCarree())\n \n ax.add_feature(countries, edgecolor='grey', linewidth=0.5)\n \n reader = shpreader.Reader('/data/project/cartopy/shapefiles/natural_earth/cultural/ne_10m_admin_0_countries.shp')\n \n # Color countries that miss data for population and point source respectively\n if label == 'point source contribution': \n list_countries_to_add = ['Russian Federation', 'Belarus', 'Ukraine', 'Moldova', 'Turkey', 'Tunisia', 'Algeria', 'Morocco','Bosnia and Herzegovina', 'Serbia', 'Montenegro', 'Kosovo', 'Albania', 'Macedonia']\n legend_title= 'Countries with no point source data'\n \n if label == 'population sensitivity':\n \n list_countries_to_add = ['Russian Federation', 'Belarus', 'Ukraine', 'Moldova', 'Turkey', 'Tunisia', 'Algeria', 'Morocco']\n legend_title= 'Countries with no population 
data'\n \n if label == 'point source contribution' or label == 'population sensitivity':\n \n for country_to_add in list_countries_to_add:\n\n country_information = [country for country in reader.records() if country.attributes[\"NAME_LONG\"] == country_to_add][0]\n country_shape = ShapelyFeature([country_information.geometry], ccrs.PlateCarree(), facecolor=\"white\", hatch=\"/\", edgecolor='lightgrey', lw=0.3) \n ax.add_feature(country_shape)\n \n # add a legend \n proxy_artist = mpatches.Rectangle((0, 0), 1, 0.1, facecolor=\"white\", hatch=\"/\", edgecolor='lightgrey', lw=0.5)\n ax.legend([proxy_artist], [legend_title], loc='upper left', fancybox=True)\n \n cmap = plt.get_cmap(colors)\n cmap.set_under(color='white') \n \n if linlog == 'linear':\n \n im = ax.imshow(field[:,:],interpolation=None,origin='lower', extent=img_extent,cmap=cmap,vmin=vmin,vmax=vmax)\n cbar=plt.colorbar(im,orientation='horizontal',pad=0.03,fraction=0.055,extend='neither')\n cbar.set_label(label+' '+unit)\n\n else:\n \n im = ax.imshow(np.log10(field)[:,:],interpolation='none',origin='lower', extent=img_extent,cmap=cmap,vmin=vmin,vmax=vmax)\n cbar=plt.colorbar(im,orientation='horizontal',pad=0.03,fraction=0.055,extend='neither')\n cbar.set_label(label+' log$_{10}$ '+unit)\n \n ax.text(0.01, -0.25, 'min: %.2f' % np.min(field[:,:]), horizontalalignment='left',transform=ax.transAxes)\n ax.text(0.99, -0.25, 'max: %.2f' % np.max(field[:,:]), horizontalalignment='right',transform=ax.transAxes)\n \n #show station location if station is provided\n if station != '':\n\n ax.plot(lon,lat,'+',color=mcolor,ms=10,markeredgewidth=1,transform=ccrs.PlateCarree())\n\n \n return fig \n \n#or min_lat, max_lat, step (in degrees) \ndef distances_from_point_to_grid_cells(station_lat, station_lon, grid_lat, grid_lon):\n \n x = [math.radians(station_lon-lon)*math.cos(math.radians(station_lat+lat)/2) for lat in grid_lat for lon in grid_lon]\n\n y = [math.radians(station_lat-lat) for lat in grid_lat for lon in grid_lon]\n\n distance=[math.sqrt((x[index]*x[index])+(y[index]*y[index])) * R for index in range(len(x))]\n \n #return list with distances.\n return distance\n\ndef degrees_from_point_to_grid_cells(station_lat, station_lon, grid_lat, grid_lon):\n \n degrees_0_360=[compass_bearing((station_lat, station_lon), (lat, lon)) for lat in grid_lat for lon in grid_lon]\n \n #return list with degrees\n return degrees_0_360\n\n\ndef polar_graph(myStation, rose_type, colorbar='gist_heat_r', zoom=''): \n \n \"\"\"\n function contained - can make shorter. not global. 
\n \"\"\"\n \n interval_bins=myStation.intervalBins\n interval_labels=myStation.intervalLabels\n dir_bins=myStation.dirBins\n dir_labels=myStation.dirLabels\n fp=myStation.fp\n unit=myStation.settings['unit']\n \n fp_pop= import_population_data(year=2018)\n fp_point= import_point_source_data()\n \n #same function used to all three types of map\n if rose_type=='sensitivity':\n \n #only want to look at the aggregated footprint - not multiplied by anciallary datalayer \n grid_to_display=fp\n \n elif rose_type=='point source contribution':\n \n grid_to_display=fp*fp_point\n \n elif rose_type=='population sensitivity':\n \n grid_to_display=fp*fp_pop\n \n sens_value = []\n for sublist in grid_to_display[0].tolist():\n sens_value.extend(sublist)\n\n\n #putting it into a dataframe - to perform groupby etc\n df_sensitivity_map = pd.DataFrame()\n df_sensitivity_map['distance'] = myStation.distances\n df_sensitivity_map['sensitivity'] = sens_value\n df_sensitivity_map['degrees'] = myStation.degrees\n\n #for % later - sensitivity within certain bin (distance and direction)\n total_sensitivity= df_sensitivity_map['sensitivity'].sum()\n \n #binning - by the distace intervals and degree intervals. Summarize these. \n rosedata=df_sensitivity_map.assign(Interval_bins=lambda df: pd.cut(df['distance'], bins=interval_bins, labels=interval_labels, right=True))\n\n rosedata=rosedata.assign(Degree_bins=lambda df: pd.cut(df['degrees'], bins=dir_bins, labels=dir_labels, right=False))\n \n #the 360 degree are the same as 0:\n rosedata=rosedata.replace({'Degree_bins': {360: 0}})\n\n #the combination of the distance and direction columns is used to create a unique column value for all cells\n #with certain direction/distance combination.\n #make it to string to be able to combine.\n rosedata['key']=rosedata['Interval_bins'].astype(str) + ' ' + rosedata['Degree_bins'].astype(str) \n\n #group by the unique combination of direction and distance\n rosedata_groupby=rosedata.groupby(by=['key'], as_index=False)['sensitivity'].sum().reset_index()\n\n #merge between the 192000 cells and the \"groupedby\" values: each cell in a specific direction and distance will\n #get the sum of the cells in that same specific bin. Same color on the map corresponing to % or absolute sensitivity.\n #reset_index() creates a column with the original index of the dataframes that are joined. Needed to sort the dataframe\n #in the next spted because default is to sort by the key used. \n rosedata=rosedata.reset_index().merge(rosedata_groupby, left_on='key', right_on='key', sort=False)\n\n #sort by the original index of the 192000 cells: \n rosedata=rosedata.sort_values(by=['index_x'])\n\n #x is the \"fist\" (rosedata.merge) dataframe that was merged (the 192000 individual cells) \n #y is the dataframe that is merged to the first. Both columns name \"sensitivity\". 
\n #sensitivity_y is the merged data - the summarized sensitivity value for the whole bin (direction and distance bin)\n rosedata_list=rosedata['sensitivity_y'].tolist()\n\n #now starts the process of \"packing it back up\" so that it can be displayed as a map (same format as the netCDF files with 480\n #lists of lists - the first list is all tha values that has \"the first\" latitude value and all 400 different longitude values)\n #calculate the % sensitivity - can be changed to absolute sensitivity\n if unit=='percent':\n rosedata_list=[(sensitivity_value/total_sensitivity)*100 for sensitivity_value in rosedata_list]\n\n #the \"netcdf simulation\" (see text above)\n rosedata_list_of_lists=[]\n\n index=0\n while index<192000:\n index_to=index+400\n\n #for each list: need to grab the 400 values that are the combination of the same latitude value\n #but different longitude values\n rosedata_list_of_lists.append(rosedata_list[index:index_to])\n\n #start at the next 400 in the list in the next turn of the loop:\n index=index+400\n\n #numpy array works to display in map\n rosedata_array=np.array(rosedata_list_of_lists) \n \n caption=(unit.capitalize() + ' ' + rose_type + ' given direction and distance')\n \n polar_map=plot_maps(myStation, rosedata_array, title=caption, label=rose_type, \n linlog='linear', zoom='', vmin=0.0001, vmax=None, colors=colorbar)\n \n return polar_map, caption\n \n\ndef land_cover_bar_graph(myStation): \n \n fp_lon=myStation.fpLon\n fp_lat=myStation.fpLat\n degrees=myStation.degrees\n fp=myStation.fp\n\n #get all the land cover data from netcdfs \n broad_leaf_forest, coniferous_forest, mixed_forest, ocean, other, grass_shrub, cropland, pasture, urban, unknown = import_landcover_HILDA(year='2018')\n \n #land cover classes (imported in the land cover section):\n broad_leaf_forest=fp*broad_leaf_forest\n coniferous_forest=fp*coniferous_forest\n mixed_forest=fp*mixed_forest\n ocean=fp*ocean\n other=fp*other\n grass_shrub=fp*grass_shrub\n cropland=fp*cropland\n pasture=fp*pasture\n urban=fp*urban\n unknown=fp*unknown\n \n #lists of these values\n broad_leaf_forest_values = [item for sublist in broad_leaf_forest[0] for item in sublist]\n coniferous_forest_values = [item for sublist in coniferous_forest[0] for item in sublist]\n mixed_forest_values = [item for sublist in mixed_forest[0] for item in sublist]\n ocean_values = [item for sublist in ocean[0] for item in sublist]\n other_values = [item for sublist in other[0] for item in sublist]\n grass_shrub_values = [item for sublist in grass_shrub[0] for item in sublist]\n cropland_values = [item for sublist in cropland[0] for item in sublist]\n pasture_values = [item for sublist in pasture[0] for item in sublist]\n urban_values = [item for sublist in urban[0] for item in sublist]\n unknown_values = [item for sublist in unknown[0] for item in sublist]\n \n \n #putting it into a dataframe: initially 192000 values (one per cell) for each of the aggregated land cover classes\n #into same dataframe - have the same coulmn heading. 
\"landcover_type\" will be used in \"groupby\" together with the \"slice\" (in degrees)\n df_broad_leaf_forest = pd.DataFrame({'landcover_vals': broad_leaf_forest_values,\n 'degrees': degrees,\n 'landcover_type':'Broad leaf forest'})\n \n df_coniferous_forest = pd.DataFrame({'landcover_vals': coniferous_forest_values,\n 'degrees': degrees,\n 'landcover_type':'Coniferous forest'})\n \n df_mixed_forest = pd.DataFrame({'landcover_vals': mixed_forest_values,\n 'degrees': degrees,\n 'landcover_type':'Mixed forest'})\n \n df_ocean = pd.DataFrame({'landcover_vals': ocean_values,\n 'degrees': degrees,\n 'landcover_type':'Ocean'})\n \n df_other = pd.DataFrame({'landcover_vals': other_values,\n 'degrees': degrees,\n 'landcover_type':'Other'})\n \n df_grass_shrub = pd.DataFrame({'landcover_vals': grass_shrub_values,\n 'degrees': degrees,\n 'landcover_type':'Grass/shrubland'})\n \n df_cropland = pd.DataFrame({'landcover_vals': cropland_values,\n 'degrees': degrees,\n 'landcover_type':'Cropland'})\n \n df_pasture = pd.DataFrame({'landcover_vals': pasture_values,\n 'degrees': degrees,\n 'landcover_type':'Pasture'})\n \n df_urban = pd.DataFrame({'landcover_vals': urban_values,\n 'degrees': degrees,\n 'landcover_type':'Urban'})\n \n df_unknown = pd.DataFrame({'landcover_vals': unknown_values,\n 'degrees': degrees,\n 'landcover_type':'Unknown'})\n \n\n #into one dataframe\n df_all = df_cropland.append([df_broad_leaf_forest, df_coniferous_forest, df_mixed_forest, df_ocean, df_other, df_grass_shrub, df_pasture, df_urban, df_unknown])\n \n\n #not change with user input\n dir_bins = np.arange(22.5, 383.5, 45)\n dir_bins= np.asarray([0, 22.5,67.5,112.5,157.5,202.5,247.5,292.5,337.5,383.5])\n dir_labels= np.asarray([0, 22.5,67.5,112.5,157.5,202.5,247.5,292.5,337.5])\n\n #get columns - for each degree\n rosedata=df_all.assign(Degree_bins=lambda df: pd.cut(df['degrees'], bins=dir_bins, labels=dir_labels, right=False))\n\n rosedata=rosedata.replace({'Degree_bins': {0.0: 337.5}})\n\n #group the data by the distance bins, and again by the direction bins. The value to be summed in the sensitivity values.\n rosedata=rosedata.groupby(by=['landcover_type', 'Degree_bins'])['landcover_vals'].sum()\n\n #changes the format:\n rosedata=rosedata.unstack(level='landcover_type')\n\n #want to sort the dataframe so that the land cover the station is the most\n #sensitive to is first. 
\n rosedata_sum=rosedata.sum() \n rosedata_sum_sorted=rosedata_sum.sort_values(ascending=False)\n list_land_cover_names_sorted=list(rosedata_sum_sorted.index)\n rosedata=rosedata[list_land_cover_names_sorted] \n\n #for all values: want the % of the total sensitivity (one value for each distance for each direction)\n total_all=sum(rosedata_sum)\n\n rosedata= rosedata.applymap(lambda x: x / total_all * 100)\n\n list_land_cover_values=[]\n for land_cover_type in list_land_cover_names_sorted:\n list_land_cover_values.append(rosedata[land_cover_type].values)\n \n matplotlib.rcParams.update({'font.size': 20})\n fig = plt.figure(figsize=(11,13)) \n ax = fig.add_subplot(1,1,1)\n N = 8\n ind = np.arange(N) \n width = 0.35 \n\n p1 = ax.bar(ind, list_land_cover_values[0], width, color=dictionary_color[list_land_cover_names_sorted[0]]['color'])\n p2 = ax.bar(ind, list_land_cover_values[1], width, color=dictionary_color[list_land_cover_names_sorted[1]]['color'],\n bottom=list_land_cover_values[0])\n p3 = ax.bar(ind, list_land_cover_values[2], width, color=dictionary_color[list_land_cover_names_sorted[2]]['color'],\n bottom=list_land_cover_values[0]+list_land_cover_values[1])\n p4 = ax.bar(ind, list_land_cover_values[3], width, color=dictionary_color[list_land_cover_names_sorted[3]]['color'],\n bottom=list_land_cover_values[0]+list_land_cover_values[1]+list_land_cover_values[2])\n p5 = ax.bar(ind, list_land_cover_values[4], width, color=dictionary_color[list_land_cover_names_sorted[4]]['color'],\n bottom=list_land_cover_values[0]+list_land_cover_values[1]+list_land_cover_values[2]+list_land_cover_values[3])\n p6 = ax.bar(ind, list_land_cover_values[5], width, color=dictionary_color[list_land_cover_names_sorted[5]]['color'],\n bottom=list_land_cover_values[0]+list_land_cover_values[1]+list_land_cover_values[2]+list_land_cover_values[3]+\\\n list_land_cover_values[4])\n p7 = ax.bar(ind, list_land_cover_values[6], width, color=dictionary_color[list_land_cover_names_sorted[6]]['color'],\n bottom=list_land_cover_values[0]+list_land_cover_values[1]+list_land_cover_values[2]+list_land_cover_values[3]+\\\n list_land_cover_values[4]+list_land_cover_values[5])\n \n p8 = ax.bar(ind, list_land_cover_values[7], width, color=dictionary_color[list_land_cover_names_sorted[7]]['color'], \n bottom=list_land_cover_values[0]+list_land_cover_values[1]+list_land_cover_values[2]+list_land_cover_values[3]+\\\n list_land_cover_values[4]+list_land_cover_values[5]+list_land_cover_values[6])\n \n p9 = ax.bar(ind, list_land_cover_values[8], width, color=dictionary_color[list_land_cover_names_sorted[8]]['color'], bottom=list_land_cover_values[0]+list_land_cover_values[1]+list_land_cover_values[2]+list_land_cover_values[3]+\\\n list_land_cover_values[4]+list_land_cover_values[5]+list_land_cover_values[6]+list_land_cover_values[7])\n \n p10 = ax.bar(ind, list_land_cover_values[9], width, color=dictionary_color[list_land_cover_names_sorted[9]]['color'], bottom=list_land_cover_values[0]+list_land_cover_values[1]+list_land_cover_values[2]+list_land_cover_values[3]+\\\n list_land_cover_values[4]+list_land_cover_values[5]+list_land_cover_values[6]+ list_land_cover_values[7]+list_land_cover_values[8])\n\n #want to reverese the order (ex if oceans at the \"bottom\" in the graph - ocean label should be furthest down)\n handles=(p1[0], p2[0], p3[0], p4[0], p5[0], p6[0], p7[0], p8[0], p9[0], p10[0])\n\n index=0\n list_labels=[]\n for land_cover_name in list_land_cover_names_sorted:\n\n for_lable= (land_cover_name + ' (' + str(\"%.1f\" % 
sum(list_land_cover_values[index])) + '%)')\n list_labels.append(for_lable)\n index=index+1\n\n labels=[textwrap.fill(text,20) for text in list_labels]\n\n plt.legend(handles[::-1], labels[::-1],bbox_to_anchor=(1, 0.52))\n \n plt.ylabel('Percent')\n \n #first one is not north (in rosedata - rather 22.5 to 67.5 (NE). \n plt.xticks(ind, ('NE', 'E','SE', 'S', 'SW','W', 'NW', 'N'))\n\n ax.yaxis.grid(True)\n \n caption='Land cover within average footprint aggregated by direction'\n fig.set_size_inches(11, 13)\n\n return fig, caption\n\n\n#14 font before\ndef render_mpl_seasonal_table(myStation, data, station, col_width=2, row_height=0.625, font_size=16,\n header_color='#40466e', row_colors=['#f1f1f2', 'w'], edge_color='w',\n bbox=[0, 0, 1, 1], header_columns=0, \n ax=None):\n if ax is None:\n size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])\n fig, ax = plt.subplots(figsize=size)\n ax.axis('off')\n\n mpl_table = ax.table(cellText=data.values, cellLoc='center', bbox=bbox, colLabels=data.columns, colWidths=[2.5,1.5,2.5,2,2,2,3.5])\n\n mpl_table.auto_set_font_size(True)\n mpl_table.set_fontsize(font_size)\n\n for k, cell in six.iteritems(mpl_table._cells):\n cell.set_edgecolor(edge_color)\n if k[0] == 0 or k[1] < header_columns:\n cell.set_text_props(weight='bold', color='w')\n cell.set_facecolor(header_color)\n else:\n cell.set_facecolor(row_colors[k[0]%len(row_colors) ])\n\n return fig\n\n\ndef seasonal_table(myStation):\n \n station=myStation.stationId\n year=myStation.settings['startYear']\n available_STILT= myStation.settings['stilt']\n months= available_STILT[str(year)]['months']\n var_load=pd.read_csv(stcDataPath + 'seasonal_table_values.csv') \n station_year= station +'_' + str(year)\n \n if station_year in set(var_load.station_year):\n \n sens_whole= var_load.loc[var_load['station_year'] == station_year, 'sens_whole'].iloc[0]\n sens_diff_winter = var_load.loc[var_load['station_year'] == station_year, 'sens_diff_winter'].iloc[0]\n sens_diff_spring = var_load.loc[var_load['station_year'] == station_year, 'sens_diff_spring'].iloc[0]\n sens_diff_summer = var_load.loc[var_load['station_year'] == station_year, 'sens_diff_summer'].iloc[0]\n sens_diff_fall = var_load.loc[var_load['station_year'] == station_year, 'sens_diff_fall'].iloc[0]\n\n point_whole = var_load.loc[var_load['station_year'] == station_year, 'point_whole'].iloc[0]\n point_diff_winter = var_load.loc[var_load['station_year'] == station_year, 'point_diff_winter'].iloc[0]\n point_diff_spring = var_load.loc[var_load['station_year'] == station_year, 'point_diff_spring'].iloc[0]\n point_diff_summer = var_load.loc[var_load['station_year'] == station_year, 'point_diff_summer'].iloc[0] \n point_diff_fall = var_load.loc[var_load['station_year'] == station_year, 'point_diff_fall'].iloc[0]\n\n pop_whole = var_load.loc[var_load['station_year'] == station_year, 'pop_whole'].iloc[0]\n pop_diff_winter = var_load.loc[var_load['station_year'] == station_year, 'pop_diff_winter'].iloc[0]\n pop_diff_spring = var_load.loc[var_load['station_year'] == station_year, 'pop_diff_spring'].iloc[0] \n pop_diff_summer = var_load.loc[var_load['station_year'] == station_year, 'pop_diff_summer'].iloc[0] \n pop_diff_fall = var_load.loc[var_load['station_year'] == station_year, 'pop_diff_fall'].iloc[0]\n\n gee_whole = var_load.loc[var_load['station_year'] == station_year, 'gee_whole'].iloc[0] \n gee_diff_winter = var_load.loc[var_load['station_year'] == station_year, 'gee_diff_winter'].iloc[0]\n gee_diff_spring = 
var_load.loc[var_load['station_year'] == station_year, 'gee_diff_spring'].iloc[0]\n gee_diff_summer = var_load.loc[var_load['station_year'] == station_year, 'gee_diff_summer'].iloc[0]\n gee_diff_fall = var_load.loc[var_load['station_year'] == station_year, 'gee_diff_fall'].iloc[0]\n\n resp_whole = var_load.loc[var_load['station_year'] == station_year, 'resp_whole'].iloc[0]\n resp_diff_winter = var_load.loc[var_load['station_year'] == station_year, 'resp_diff_winter'].iloc[0]\n resp_diff_spring = var_load.loc[var_load['station_year'] == station_year, 'resp_diff_spring'].iloc[0]\n resp_diff_summer = var_load.loc[var_load['station_year'] == station_year, 'resp_diff_summer'].iloc[0]\n resp_diff_fall = var_load.loc[var_load['station_year'] == station_year, 'resp_diff_fall'].iloc[0]\n\n anthro_whole = var_load.loc[var_load['station_year'] == station_year, 'anthro_whole'].iloc[0]\n anthro_diff_winter = var_load.loc[var_load['station_year'] == station_year, 'anthro_diff_winter'].iloc[0]\n anthro_diff_spring = var_load.loc[var_load['station_year'] == station_year, 'anthro_diff_spring'].iloc[0]\n anthro_diff_summer = var_load.loc[var_load['station_year'] == station_year, 'anthro_diff_summer'].iloc[0]\n anthro_diff_fall = var_load.loc[var_load['station_year'] == station_year, 'anthro_diff_fall'].iloc[0]\n\n else:\n \n fp_pop= import_population_data(year=2018)\n\n fp_point = import_point_source_data()\n\n # if full year avaialbe - go ahead and create the tabel - else a message will be sent to the user\n if len(months)==12:\n #all hours for the date ranges... could update with date_range_hour_filtered\n winter_date_range1=pd.date_range(dt.datetime(year,1,1,0), (dt.datetime(year, 3, 1,0)-dt.timedelta(hours=3)), freq='3H')\n winter_date_range2=pd.date_range(dt.datetime(year,12,1,0), (dt.datetime(year+1, 1, 1,0)-dt.timedelta(hours=3)), freq='3H')\n spring_date_range=pd.date_range(dt.datetime(year,3,1,0), (dt.datetime(year, 6, 1,0)-dt.timedelta(hours=3)), freq='3H')\n summer_date_range=pd.date_range(dt.datetime(year,6,1,0), (dt.datetime(year, 9, 1,0)-dt.timedelta(hours=3)), freq='3H')\n fall_date_range=pd.date_range(dt.datetime(year,9,1,0), (dt.datetime(year, 12, 1,0)-dt.timedelta(hours=3)), freq='3H')\n\n #the average footprints given the selected date range\n nfp_winter1, fp_winter1, fp_lon, fp_lat, title_not_used = read_aggreg_footprints(station, winter_date_range1)\n nfp_winter2, fp_winter2, fp_lon, fp_lat, title_not_used = read_aggreg_footprints(station, winter_date_range2)\n nfp_winter = nfp_winter1 + nfp_winter2\n fp_winter = (fp_winter1 * (nfp_winter1/nfp_winter)) + (fp_winter2 * (nfp_winter2/nfp_winter))\n nfp_spring, fp_spring, fp_lon, fp_lat, title_not_used = read_aggreg_footprints(station, spring_date_range)\n nfp_summer, fp_summer, fp_lon, fp_lat, title_not_used = read_aggreg_footprints(station, summer_date_range)\n nfp_fall, fp_fall, fp_lon, fp_lat, title_not_used = read_aggreg_footprints(station, fall_date_range)\n\n #want also the whole year - get from the footprints of the seasons.\n nfp_total = nfp_winter + nfp_spring + nfp_summer + nfp_fall\n\n part_winter = nfp_winter/nfp_total\n part_spring = nfp_spring/nfp_total\n part_summer = nfp_summer/nfp_total\n part_fall = nfp_fall/nfp_total\n\n fp_whole=(fp_winter*part_winter)+ (fp_spring*part_spring)+(fp_summer*part_summer)+(fp_fall*part_fall)\n sens_whole = fp_whole[0].sum()\n\n sens_diff_winter=((fp_winter[0].sum()/sens_whole)*100)-100\n sens_diff_spring=((fp_spring[0].sum()/sens_whole)*100)-100\n 
sens_diff_summer=((fp_summer[0].sum()/sens_whole)*100)-100\n sens_diff_fall=((fp_fall[0].sum()/sens_whole)*100)-100\n\n #point source \n point_whole=(fp_whole*fp_point)[0].sum()\n\n point_diff_winter=(((fp_winter*fp_point)[0].sum()/point_whole)*100)-100\n point_diff_spring=(((fp_spring*fp_point)[0].sum()/point_whole)*100)-100\n point_diff_summer=(((fp_summer*fp_point)[0].sum()/point_whole)*100)-100\n point_diff_fall=(((fp_fall*fp_point)[0].sum()/point_whole)*100)-100\n\n #population \n pop_whole=(fp_whole*fp_pop)[0].sum()\n pop_diff_winter=(((fp_winter*fp_pop)[0].sum()/pop_whole)*100)-100\n pop_diff_spring=(((fp_spring*fp_pop)[0].sum()/pop_whole)*100)-100\n pop_diff_summer=(((fp_summer*fp_pop)[0].sum()/pop_whole)*100)-100\n pop_diff_fall=(((fp_fall*fp_pop)[0].sum()/pop_whole)*100)-100\n\n #get the modelled concentration values\n timeselect_list=[0, 3, 6, 9, 12, 15, 18, 21]\n df_winter1 = read_stilt_timeseries(station, winter_date_range1, timeselect_list)\n df_winter2 = read_stilt_timeseries(station, winter_date_range2, timeselect_list)\n df_winter = df_winter1.append(df_winter2)\n df_spring = read_stilt_timeseries(station, spring_date_range, timeselect_list)\n df_summer = read_stilt_timeseries(station, summer_date_range, timeselect_list)\n df_fall = read_stilt_timeseries(station, fall_date_range, timeselect_list)\n\n #averages of the modelled concentration values.\n df_winter_mean=df_winter.mean()\n df_spring_mean=df_spring.mean()\n df_summer_mean=df_summer.mean()\n df_fall_mean=df_fall.mean()\n\n df_whole_mean=(df_winter_mean*part_winter)+(df_spring_mean*part_spring)+(df_summer_mean*part_summer)+(df_fall_mean*part_fall)\n\n gee_whole=df_whole_mean['co2.bio.gee']\n\n gee_diff_winter=((df_winter_mean['co2.bio.gee']/gee_whole)*100)-100\n gee_diff_spring=((df_spring_mean['co2.bio.gee']/gee_whole)*100)-100\n gee_diff_summer=((df_summer_mean['co2.bio.gee']/gee_whole)*100)-100\n gee_diff_fall=((df_fall_mean['co2.bio.gee']/gee_whole)*100)-100\n\n #respiration\n resp_whole=df_whole_mean['co2.bio.resp']\n\n resp_diff_winter=((df_winter_mean['co2.bio.resp']/resp_whole)*100)-100\n resp_diff_spring=((df_spring_mean['co2.bio.resp']/resp_whole)*100)-100\n resp_diff_summer=((df_summer_mean['co2.bio.resp']/resp_whole)*100)-100\n resp_diff_fall=((df_fall_mean['co2.bio.resp']/resp_whole)*100)-100\n\n #anthropogenic\n anthro_whole=df_whole_mean['co2.industry']+df_whole_mean['co2.energy']+ df_whole_mean['co2.transport']+ df_whole_mean['co2.others']\n\n anthro_diff_winter=(((df_winter_mean['co2.industry']+df_winter_mean['co2.energy']+ df_winter_mean['co2.transport']+ df_winter_mean['co2.others'])/anthro_whole)*100)-100\n anthro_diff_spring=(((df_spring_mean['co2.industry']+df_spring_mean['co2.energy']+ df_spring_mean['co2.transport']+ df_spring_mean['co2.others'])/anthro_whole)*100)-100\n anthro_diff_summer=(((df_summer_mean['co2.industry']+df_summer_mean['co2.energy']+ df_summer_mean['co2.transport']+ df_summer_mean['co2.others'])/anthro_whole)*100)-100\n anthro_diff_fall=(((df_fall_mean['co2.industry']+df_fall_mean['co2.energy']+ df_fall_mean['co2.transport']+ df_fall_mean['co2.others'])/anthro_whole)*100)-100\n\n #where there is no information in loaded file, and not all footpritns \n else:\n seasonal_table = None\n caption = 'No seasonal table, footprints are not available for the whole (start-)year'\n return seasonal_table, caption \n #here have values either from loaded file or calculated\n year_var=str(year) \n df_seasonal_table = pd.DataFrame(columns=['Variable', year_var, 'Jan + Feb + Dec', 'Mar-May', 
'Jun-Aug','Sep-Nov', 'Unit'], index=['Sensitivity', 'Population','Point source', 'GEE', 'Respiration', 'Anthropogenic'])\n\n df_seasonal_table.loc['Sensitivity'] = pd.Series({'Variable': 'Sensitivity', year_var:(\"%.2f\" % sens_whole), 'Jan + Feb + Dec':(\"%+.2f\" % sens_diff_winter+ '%'), 'Mar-May':(\"%+.2f\" % sens_diff_spring+ '%'), \n 'Jun-Aug':(\"%+.2f\" % sens_diff_summer + '%'), 'Sep-Nov':(\"%+.2f\" % sens_diff_fall+ '%'), 'Unit': 'ppm / ($\\mu$mol / (m$^{2}$s))'})\n\n df_seasonal_table.loc['Population'] = pd.Series({'Variable': 'Population', year_var:(\"%.0f\" % pop_whole), 'Jan + Feb + Dec':(\"%+.2f\" % pop_diff_winter+ '%'), 'Mar-May':(\"%+.2f\" % pop_diff_spring+ '%'), \n 'Jun-Aug':(\"%+.2f\" % pop_diff_summer+ '%'), 'Sep-Nov':(\"%+.2f\" % pop_diff_fall+ '%'), 'Unit': 'pop*(ppm / ($\\mu$mol / (m$^{2}$s)))'})\n\n\n df_seasonal_table.loc['Point source'] = pd.Series({'Variable': 'Point source', year_var:(\"%.2f\" % point_whole), 'Jan + Feb + Dec':(\"%+.2f\" % point_diff_winter+ '%'), 'Mar-May':(\"%+.2f\" % point_diff_spring+ '%'), \n 'Jun-Aug':(\"%+.2f\" % point_diff_summer+ '%'), 'Sep-Nov':(\"%+.2f\" % point_diff_fall+ '%'), 'Unit': 'ppm'})\n\n\n df_seasonal_table.loc['GEE'] = pd.Series({'Variable': 'GEE','Unit': 'ppm (uptake)', year_var:(\"%.2f\" % gee_whole), 'Jan + Feb + Dec':(\"%+.2f\" % gee_diff_winter+ '%'), 'Mar-May':(\"%+.2f\" % gee_diff_spring+ '%'), \n 'Jun-Aug':(\"%+.2f\" % gee_diff_summer+ '%'), 'Sep-Nov':(\"%+.2f\" % gee_diff_fall+ '%'), 'Unit': 'ppm (uptake)'})\n\n df_seasonal_table.loc['Respiration'] = pd.Series({'Variable': 'Respiration', year_var:(\"%.2f\" % resp_whole), 'Jan + Feb + Dec':(\"%+.2f\" % resp_diff_winter+ '%'), 'Mar-May':(\"%+.2f\" % resp_diff_spring+ '%'), \n 'Jun-Aug':(\"%+.2f\" % resp_diff_summer+ '%'), 'Sep-Nov':(\"%+.2f\" % resp_diff_fall+ '%'), 'Unit': 'ppm'})\n\n\n df_seasonal_table.loc['Anthropogenic'] = pd.Series({'Variable': 'Anthropogenic', year_var:(\"%.2f\" % anthro_whole), 'Jan + Feb + Dec':(\"%+.2f\" % anthro_diff_winter+ '%'), 'Mar-May':(\"%+.2f\" % anthro_diff_spring+ '%'), \n 'Jun-Aug':(\"%+.2f\" % anthro_diff_summer+ '%'), 'Sep-Nov':(\"%+.2f\" % anthro_diff_fall+ '%'), 'Unit': 'ppm'})\n\n caption = 'Seasonal variation year ' + str(year) \n\n seasonal_table=render_mpl_seasonal_table(myStation, df_seasonal_table, station, header_columns=0, col_width=2.5)\n\n return seasonal_table, caption\n \n#land cover polar graph:\n\ndef land_cover_polar_graph(myStation):\n \n station=myStation.stationId\n station_name=myStation.stationName\n date_range=myStation.dateRange\n timeselect=myStation.settings['timeOfDay']\n fp_lon=myStation.fpLon\n fp_lat=myStation.fpLat\n fp=myStation.fp\n \n title=myStation.settings['titles']\n bin_size=myStation.settings['binSize']\n degrees=myStation.degrees\n polargraph_label= myStation.settings['labelPolar']\n\n #get all the land cover data from netcdfs \n broad_leaf_forest, coniferous_forest, mixed_forest, ocean, other, grass_shrub, cropland, pasture, urban, unknown = import_landcover_HILDA(year='2018')\n \n dir_bins, dir_labels=define_bins_landcover_polar_graph(bin_size=bin_size)\n \n #land cover classes (imported in the land cover section):\n broad_leaf_forest=fp*broad_leaf_forest\n coniferous_forest=fp*coniferous_forest\n mixed_forest=fp*mixed_forest\n ocean=fp*ocean\n other=fp*other\n grass_shrub=fp*grass_shrub\n cropland=fp*cropland\n pasture=fp*pasture\n urban=fp*urban\n unknown=fp*unknown\n \n \n #lists of these values\n broad_leaf_forest_values = [item for sublist in broad_leaf_forest[0] for item in 
sublist]\n coniferous_forest_values = [item for sublist in coniferous_forest[0] for item in sublist]\n mixed_forest_values = [item for sublist in mixed_forest[0] for item in sublist]\n ocean_values = [item for sublist in ocean[0] for item in sublist]\n other_values = [item for sublist in other[0] for item in sublist]\n grass_shrub_values = [item for sublist in grass_shrub[0] for item in sublist]\n cropland_values = [item for sublist in cropland[0] for item in sublist]\n pasture_values = [item for sublist in pasture[0] for item in sublist]\n urban_values = [item for sublist in urban[0] for item in sublist]\n unknown_values = [item for sublist in unknown[0] for item in sublist]\n \n \n #putting it into a dataframe: initially 192000 values (one per cell) for each of the aggregated land cover classes\n #into same dataframe - have the same coulmn heading. \"landcover_type\" will be used in \"groupby\" together with the \"slice\" (in degrees)\n df_broad_leaf_forest = pd.DataFrame({'landcover_vals': broad_leaf_forest_values,\n 'degrees': degrees,\n 'landcover_type':'Broad leaf forest'})\n \n df_coniferous_forest = pd.DataFrame({'landcover_vals': coniferous_forest_values,\n 'degrees': degrees,\n 'landcover_type':'Coniferous forest'})\n \n df_mixed_forest = pd.DataFrame({'landcover_vals': mixed_forest_values,\n 'degrees': degrees,\n 'landcover_type':'Mixed forest'})\n \n df_ocean = pd.DataFrame({'landcover_vals': ocean_values,\n 'degrees': degrees,\n 'landcover_type':'Ocean'})\n \n df_other = pd.DataFrame({'landcover_vals': other_values,\n 'degrees': degrees,\n 'landcover_type':'Other'})\n \n df_grass_shrub = pd.DataFrame({'landcover_vals': grass_shrub_values,\n 'degrees': degrees,\n 'landcover_type':'Grass/shrubland'})\n \n df_cropland = pd.DataFrame({'landcover_vals': cropland_values,\n 'degrees': degrees,\n 'landcover_type':'Cropland'})\n \n df_pasture = pd.DataFrame({'landcover_vals': pasture_values,\n 'degrees': degrees,\n 'landcover_type':'Pasture'})\n \n df_urban = pd.DataFrame({'landcover_vals': urban_values,\n 'degrees': degrees,\n 'landcover_type':'Urban'})\n \n df_unknown = pd.DataFrame({'landcover_vals': unknown_values,\n 'degrees': degrees,\n 'landcover_type':'Unknown'})\n \n\n #into one dataframe\n df_all = df_cropland.append([df_broad_leaf_forest, df_coniferous_forest, df_mixed_forest, df_ocean, df_other, df_grass_shrub, df_pasture, df_urban, df_unknown])\n\n #already have the different land cover classes in one cell (no need to use \"pandas.cut\" to generate new column with information for groupby)\n #still need a column with the different direction bins - defined in last cell - to use for the groupby (slice)\n rosedata=df_all.assign(Degree_bins=lambda df: pd.cut(df['degrees'], bins=dir_bins, labels=dir_labels, right=False))\n\n #the 360 degrees are the same as 0:\n rosedata=rosedata.replace({'Degree_bins': {360: 0}})\n\n #group the data by the distance bins, and again by the direction bins. The value to be summed in the sensitivity values.\n rosedata=rosedata.groupby(by=['landcover_type', 'Degree_bins'])['landcover_vals'].sum()\n\n #changes the format:\n rosedata=rosedata.unstack(level='landcover_type')\n\n #want to sort the dataframe so that the land cover the station is the most\n #sensitive to is first. 
\n rosedata_sum_per_class=rosedata.sum() \n rosedata_sum_per_class_sorted=rosedata_sum_per_class.sort_values(ascending=False)\n list_land_cover_names_sorted=list(rosedata_sum_per_class_sorted.index)\n rosedata=rosedata[list_land_cover_names_sorted] \n\n #for all values: want the % of the total sensitivity (one value for each distance for each direction)\n total_all=sum(rosedata_sum_per_class_sorted)\n #for all values: want the % of the total sensitivity (one value for each distance for each direction)\n rosedata= rosedata.applymap(lambda x: x / total_all * 100)\n \n directions = np.arange(0, 360, bin_size)\n \n if title=='yes':\n \n date_and_time_string= date_and_time_string_for_title(date_range, timeselect)\n for_title=('Station: ' + station_name + ' (' + station + ')' + '<br>' + 'Area corresponding to land cover sensitivity (%)<br>' + date_and_time_string)\n \n else:\n for_title=''\n \n matplotlib.rcParams.update({'font.size': 18})\n \n #change the data so that each % values is mapped as area:\n #first step - make the \"cumsum\" value for each direction.\n #ex if the innermost value represent cropland, that remains, the next class \n #will be that value + the cropland valueand so on...\n index=0\n #loop over all land cover classes except the fitst one (nothing to add to)\n for land_cover_class in list_land_cover_names_sorted[1:]:\n land_cover_before = list_land_cover_names_sorted[index]\n index=index+1\n rosedata[land_cover_class]=rosedata[land_cover_before].values + rosedata[land_cover_class].values\n \n #the max radius is the max value in the last column (ex the \"others\" column \n #if that one is the one with the smalles contribution = mapped the furthest \n #from the station)\n max_radius=max(rosedata[list_land_cover_names_sorted[-1]].values)\n\n #the area given the \"max radius\" (area of that slice by dividing it by number of directions)\n area_max=(math.pi*max_radius*max_radius)/len(directions)\n\n #all other values mapped in relation to this: \n #first: what is the \"area value\" for specific class given the max area\n rosedata=rosedata.applymap(lambda x: (x/max_radius)*area_max)\n \n #second: given that area value, what is the radius? (=where it should be placed in the graph)\n rosedata=rosedata.applymap(lambda x: math.sqrt(x / math.pi))\n \n #bar direction and height\n bar_dir, bar_width = _convert_dir(directions)\n\n fig, ax = plt.subplots(figsize=(12, 10), subplot_kw=dict(polar=True))\n ax.set_theta_direction('clockwise')\n ax.set_theta_zero_location('N')\n \n def update_yticks(x, pos):\n area=x*x*math.pi\n area_part=area/area_max\n label=max_radius*area_part\n\n return (str(\"%.2f\" % label) + '%')\n\n def update_yticks_none(x, pos):\n return (str('')) \n \n if polargraph_label=='yes':\n ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(update_yticks))\n\n #need this, else the labels will show up\n else:\n ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(update_yticks_none))\n\n labels=list_land_cover_names_sorted \n #max 20 characters in lable - if more it will be on a new line\n labels=[textwrap.fill(text,20) for text in labels]\n \n first=True\n index=0\n #loop over all except the last one. \n for land_cover_class in list_land_cover_names_sorted[:-1]:\n land_cover_after = list_land_cover_names_sorted[index+1]\n index=index+1\n \n if first:\n ax.bar(bar_dir, rosedata[land_cover_class].values,\n #bar width always the same --> depending on how many slices. 
Each slice same size.\n width=bar_width,\n color=dictionary_color[land_cover_class]['color'],\n label=land_cover_class,\n linewidth=0)\n first=False\n \n #values are accumulated\n ax.bar(bar_dir, (rosedata[land_cover_after].values-rosedata[land_cover_class].values), \n width=bar_width, \n #all the values \"leading up\" to this one. then add \"the different (see above) on top\n bottom=rosedata[land_cover_class].values,\n color=dictionary_color[land_cover_after]['color'],\n label=land_cover_after,\n linewidth=0)\n \n \n ax.set_xticklabels(['N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW']) \n ax.legend(labels, bbox_to_anchor=(1.5, 0), ncol=1, loc=4)\n \n caption = \"Land cover within average footprint aggregated by direction (polar)\"\n \n return fig, caption\n\n \n#given the directions (and number of them), get the direction of the bars and their width\n#function used in the final step of generating a graph\ndef _convert_dir(directions, N=None):\n if N is None:\n N = directions.shape[0]\n barDir = directions * np.pi/180. - np.pi/N\n barWidth = 2 * np.pi / N\n return barDir, barWidth\n \ndef define_bins_landcover_polar_graph(bin_size):\n \n #direction: using the input (bin_size) to set the bins so that the first bin has \"north (0 degrees) in the middle\"\n #\"from_degree\" becomes a negative value (half of the degree value \"to the left\" of 0)\n from_degree=-(bin_size/2)\n\n #\"to_degree\" is a vale to indicate the last bins ending. Must check values all the way to 360 which means the last bin \n #will go past 360 and later be joined with the \"0\" bin (replace function in next cell)\n to_degree= 360 + (bin_size/2) \n\n #the \"degree_bin\" is the \"step\".\n dir_bins = np.arange(from_degree, (to_degree+1), bin_size)\n\n #the direction bin is the first bin + the next bin divided by two:\n dir_labels = (dir_bins[:-1] + dir_bins[1:]) / 2\n \n return dir_bins, dir_labels\n \n#multiple variables graph\ndef values_multiple_variable_graph(all_stations, selected_station, date_range, timeselect_list, df_saved):\n\n df_new_values = pd.DataFrame(columns=['Station','Sensitivity','GEE','Respiration','Anthro','Point source','Population'])\n \n start_date = min(date_range)\n \n fp_pop= import_population_data(year=2018)\n\n fp_point= import_point_source_data()\n\n list_stations_without_footprints=[]\n index=0\n #need to compute for all stations that will be shown together with the \"selected stations\"\n for station in all_stations:\n \n #could use aggregated station footprint.\n #lon, lat, title not needed\n nfp, fp_station, lon, lat, title = read_aggreg_footprints(station, date_range)\n \n if nfp > 0:\n\n percent_footprints=(nfp/len(date_range))*100\n \n if percent_footprints<75:\n \n display(HTML('<p style=\"font-size:12px;\">' + selected_station + ' (' + str(nfp) + '/' + str(len(date_range)) +' footprints)</p>')) \n\n #total average sensitivity for specific station:\n average_sensitivity=fp_station[0].sum()\n\n #read the modelled concentration data - for anthro and bio values\n #using the updated version of read_stilt_timeseries allows for filtering out different hours of the days\n df_modelled_concentrations = read_stilt_timeseries(station, date_range, timeselect_list)\n\n #averages of the values --> default skip nan\n df_mean=df_modelled_concentrations.mean()\n\n average_gee=df_mean['co2.bio.gee']\n\n average_respiration=df_mean['co2.bio.resp']\n\n #anthro:\n average_anthro=(df_mean['co2.industry']+df_mean['co2.energy']+ df_mean['co2.transport']+ df_mean['co2.others'])\n\n #point source for specific 
station \n fp_pointsource_multiplied=fp_station*fp_point\n average_pointsource=fp_pointsource_multiplied.sum()\n\n #population for specific station\n fp_pop_multiplied=fp_station*fp_pop\n average_population=fp_pop_multiplied.sum()\n \n df_new_values.loc[index] = [station, average_sensitivity, average_gee, average_respiration, average_anthro, average_pointsource, average_population]\n \n index=index+1\n \n else:\n \n list_stations_without_footprints.append(station)\n \n continue \n\n #list the reference stations without footprints\n if len(list_stations_without_footprints)>0:\n \n stations_without_footprints_string = ', '.join(list_stations_without_footprints)\n \n display(HTML('<p style=\"font-size:12px;\">Reference stations without footprints and not included for the multiple variables graph: ' + stations_without_footprints_string + '</p>'))\n \n df_saved=pd.concat([df_saved, df_new_values], ignore_index=True)\n \n #these are returned to the function \"multiple_variables_graph\"\n return df_saved\n\n\ndef values_multiple_variable_graph_land_cover(all_stations, selected_station, date_range, timeselect_list, df_saved):\n \n df_new_values = pd.DataFrame(columns=['Station', 'Broad leaf forest','Coniferous forest','Mixed forest','Natural grassland','Cropland','Pasture','Urban', 'Ocean', 'Unknown'])\n \n broad_leaf_forest, coniferous_forest, mixed_forest, ocean, other, grass_shrub, cropland, pasture, urban, unknown = import_landcover_HILDA(year='2018')\n \n list_stations_without_footprints = []\n index=0\n #need to compute for all stations that will be shown together with the \"selected stations\"\n for station in all_stations:\n\n #could use aggregated station footprint.\n #lon, lat, title not needed\n nfp, fp_station, lon, lat, title = read_aggreg_footprints(station, date_range)\n \n if nfp > 0:\n\n percent_footprints=(nfp/len(date_range))*100\n \n if percent_footprints<75:\n \n display(HTML('<p style=\"font-size:12px;\">' + selected_station + ' (' + str(nfp) + '/' + str(len(date_range)) +' footprints)</p>')) \n\n broad_leaf_forest_station = (fp_station * broad_leaf_forest).sum()\n coniferous_forest_station = (fp_station * coniferous_forest).sum()\n mixed_forest_station = (fp_station * mixed_forest).sum()\n ocean_station = (fp_station * ocean).sum()\n other_station = (fp_station * other).sum()\n natural_grassland_station = (fp_station * grass_shrub).sum()\n cropland_station = (fp_station * cropland).sum()\n pasture_station = (fp_station * pasture).sum()\n urban_station = (fp_station * urban).sum()\n unknown_station = (fp_station * unknown).sum()\n\n df_new_values.loc[index] = [station, broad_leaf_forest_station, coniferous_forest_station, mixed_forest_station, natural_grassland_station, cropland_station, pasture_station, urban_station, ocean_station, unknown_station]\n \n index=index+1\n \n else:\n \n list_stations_without_footprints.append(station)\n \n continue \n\n #list the reference stations without footprints\n if len(list_stations_without_footprints)>0:\n \n stations_without_footprints_string = ', '.join(list_stations_without_footprints)\n \n display(HTML('<p style=\"font-size:12px;\">Reference stations without footprints and not included for the multiple variables graph: ' + stations_without_footprints_string + '</p>'))\n \n df_saved=pd.concat([df_saved, df_new_values], ignore_index=True)\n \n #these are returned to the function \"multiple_variables_graph\"\n return df_saved\n\n \ndef compute_normalized(df_saved_for_normalized, station, column, min_value, range_value):\n\n 
value=df_saved_for_normalized.loc[df_saved_for_normalized['Station'] == station, column]\n\n value=value.values[0]\n\n if value==min_value:\n value_normalized=0\n df_saved_for_normalized.loc[df_saved_for_normalized['Station'] == station, column]=0\n else:\n #min and range in dictionary to column? \n #min GEE is the value with the highest contirubtion to co2. \n if column=='GEE':\n \n value_normalized = ((abs(value)-abs(min_value))/range_value)*100\n \n else:\n \n value_normalized=((value-min_value)/range_value)*100\n df_saved_for_normalized.loc[df_saved_for_normalized['Station'] == station, column]=value_normalized\n\n \n return df_saved_for_normalized\n \ndef multiple_variables_graph(myStation):\n \n selected_station=myStation.stationId\n station_name=[myStation.stationName]\n timeselect_list=myStation.settings['timeOfDay'] \n date_range=myStation.dateRange\n \n\n all_stations=['TRN180', 'SVB150', 'TOH147', 'SMR125', 'LUT', 'KRE250', 'IPR100', 'JFJ', 'KIT200', 'GAT344']\n \n if selected_station not in all_stations:\n all_stations.append(selected_station)\n \n start_date=min(date_range)\n end_date=max(date_range)\n\n #if the user selection is to use all footprints of 2017 or 2018, use saved values for all the \n #reference stations (and a few more- all the stations used in Storm(2020))\n if start_date==pd.Timestamp(2018, 1, 1, 0) and end_date==pd.Timestamp(2018,12,31,0) and len(timeselect_list)==8:\n df_saved=pd.read_csv(stcDataPath + 'multiple_variables_graph_values_2018.csv')\n predefined=True\n \n elif start_date==pd.Timestamp(2017, 1, 1, 0) and end_date==pd.Timestamp(2017,12,31,0) and len(timeselect_list)==8:\n df_saved=pd.read_csv(stcDataPath + 'multiple_variables_graph_values_2017.csv')\n predefined=True\n\n #if different date-range, need to compute variable values for all.\n else:\n \n #what would have gotten from saved file. Here create empty datafram which\n #will be appended to in compute_values_multiple_varaible_graph_upd\n df_saved=pd.DataFrame(columns=['Station','Sensitivity','GEE','Respiration','Anthro','Point source','Population'])\n \n #\"all_stations\" contains all reference stations as well as the selected station (possibly one of the reference stations). Selected station needed seperate also. \n df_saved= values_multiple_variable_graph(all_stations, selected_station, date_range, timeselect_list, df_saved)\n \n predefined=False\n \n #if all of 2017 or all of 2018 (predefined) - create the list_of_lists_all mainly from the saved dataframes \n if predefined:\n\n list_stations_from_saved=df_saved['Station'].tolist() \n \n #check so all stations we want. defined in all_stations (+selected station if not in there)\n for station in all_stations:\n \n #if a station is not in the list with pre-computed data, need to compute it\n if station not in list_stations_from_saved:\n \n #also update df_saved... used for 1st, 2nd, 3rd quartile\n df_saved= values_multiple_variable_graph([station],selected_station,date_range, timeselect_list, df_saved)\n \n #only the selected station (and later append selected station if not already among the saved. 
\n df_saved_upd = df_saved[df_saved['Station'].isin(all_stations)]\n \n #DONE GETTING ALL THE DATA: \n #sensitivity is the first attribut (list_item[0]) in the each of the lists (one list per station)\n min_sens=min(df_saved_upd['Sensitivity'])\n range_sens=max(df_saved_upd['Sensitivity'])-min_sens\n\n #these lists (list_sensitivity, list_population, list_point_source) will be used to generate texts \n #for the station characterization PDFs (if choose to create a PDF)\n #--> hence into list here, and not for GEE, respiration and anthropogenic contribution\n min_gee=max(df_saved_upd['GEE'])\n range_gee=abs(min_gee-min(df_saved_upd['GEE']))\n\n min_resp=min(df_saved_upd['Respiration'])\n range_resp=max(df_saved_upd['Respiration'])-min_resp\n \n min_anthro=min(df_saved_upd['Anthro'])\n range_anthro=max(df_saved_upd['Anthro'])-min_anthro\n \n min_pointsource=min(df_saved_upd['Point source'])\n range_pointsource=max(df_saved_upd['Point source'])-min_pointsource\n \n min_population=min(df_saved_upd['Population'])\n range_population=max(df_saved_upd['Population'])-min_population\n\n df_saved_for_normalized=df_saved_upd.copy()\n\n #for station in all_stations:\n \n for station in df_saved_for_normalized['Station']:\n \n df_saved_for_normalized=compute_normalized(df_saved_for_normalized, station, 'Sensitivity', min_sens, range_sens)\n df_saved_for_normalized=compute_normalized(df_saved_for_normalized, station, 'GEE', min_gee, range_gee)\n df_saved_for_normalized=compute_normalized(df_saved_for_normalized, station, 'Respiration', min_resp, range_resp)\n\n df_saved_for_normalized=compute_normalized(df_saved_for_normalized, station, 'Anthro', min_anthro, range_anthro)\n df_saved_for_normalized=compute_normalized(df_saved_for_normalized, station, 'Point source', min_pointsource, range_pointsource)\n df_saved_for_normalized=compute_normalized(df_saved_for_normalized, station, 'Population', min_population, range_population)\n\n #create the figure\n matplotlib.rcParams.update({'font.size': 14})\n fig = plt.figure(figsize=(10,9)) \n ax = fig.add_subplot(111)\n\n #added - get on the right side of the plot\n ax.yaxis.tick_right()\n\n ax.yaxis.set_label_position(\"right\")\n\n #remove the ticks (lenght 0 - keep the names)\n ax.tick_params(axis='both', which='both', length=0)\n \n #what will go as labels along the x-axis. Blank space next to station for more room. \n list_attributes=['Station', '', 'Sensitivity', 'Population', 'Point source contribution', 'GEE (uptake)', 'Respiration', 'Anthropogenic contribution']\n\n #max 15 characters in lable\n list_attributes=[textwrap.fill(text,15) for text in list_attributes]\n\n #incremented for each station.. 0, 10, 20... etc. Where station name and \"line\" should start along the y-axis. \n place_on_axis=0\n for station in df_saved_for_normalized['Station']:\n \n #get all the values for station (row in dataframe)\n station_values=df_saved_for_normalized.loc[df_saved_for_normalized['Station'] == station]\n\n #place them in the order we want them in the graph. List that will be used for the line. 
(one per station)\n station_values_list =[place_on_axis, place_on_axis, station_values['Sensitivity'].values[0], \n station_values['Population'].values[0], station_values['Point source'].values[0],\n station_values['GEE'].values[0], station_values['Respiration'].values[0], \n station_values['Anthro'].values[0]]\n \n if station==selected_station:\n\n plt.plot(list_attributes, station_values_list, linestyle='-', marker='o', lw=3, color= 'black', label=station_name)\n \n #on 'Stationä position (along the x-axis). \n #station_values_list[0] --> where on y-axis (place_on_axis). +1 for selected_station (bold text, need more room)\n ax.text('Station', station_values_list[0]+1, station)\n \n else:\n plt.plot(list_attributes, station_values_list, linestyle=':', lw=0.6, color= 'blue', label=station_name)\n \n ax.text('Station', station_values_list[0], station)\n \n place_on_axis=place_on_axis+10\n\n ax.set_ylabel('% of max')\n\n ax.tick_params(axis='y')\n\n #vertical labels except \"station\" which also has different font\n list_attributes_upd=list_attributes\n list_attributes_upd[0]=''\n ax.set_xticklabels(list_attributes_upd, rotation='vertical')\n\n #label for station (furthest to the left in graph\n ax.text(0, -10, 'Station', fontsize=15,weight = 'bold')\n\n ax.yaxis.grid(True)\n \n columns_need_quartiles=['Sensitivity','Population','Point source']\n \n for column in columns_need_quartiles:\n \n #df saved - the original values. \n quartile_df=df_saved[column].quantile([0.25,0.5,0.75])\n\n q1=quartile_df[0.25]\n q2=quartile_df[0.5]\n q3=quartile_df[0.75]\n \n value_selected_station = df_saved.loc[df_saved['Station'] == selected_station, column]\n value_selected_station= value_selected_station.values[0]\n \n if value_selected_station<q1:\n pdf_text='first quartile'\n elif value_selected_station>=q1 and value_selected_station<q2:\n pdf_text='second quartile'\n elif value_selected_station>=q2 and value_selected_station<q3:\n pdf_text='third quartile'\n else:\n pdf_text='fourth quartile'\n\n myStation.settings[column] = pdf_text\n \n caption=('Selected station relative to reference atmospheric stations')\n\n return fig, caption\n\ndef multiple_variables_graph_land_cover(myStation):\n selected_station=myStation.stationId\n station_name=[myStation.stationName]\n timeselect_list=myStation.settings['timeOfDay'] \n date_range=myStation.dateRange\n\n all_stations=['TRN180', 'SVB150', 'TOH147', 'SMR125', 'LUT', 'KRE250', 'IPR100', 'JFJ', 'KIT200', 'GAT344']\n \n if selected_station not in all_stations:\n all_stations.append(selected_station)\n \n start_date=min(date_range)\n end_date=max(date_range)\n \n df_saved_upd=pd.DataFrame(columns=['Station', 'Broad leaf forest','Coniferous forest','Mixed forest','Natural grassland','Cropland','Pasture','Urban', 'Ocean', 'Unknown'])\n\n #\"all_stations\" contains all reference stations as well as the selected station (possibly one of the reference stations). Selected station needed seperate also. 
\n df_saved_upd= values_multiple_variable_graph_land_cover(all_stations, selected_station, date_range, timeselect_list, df_saved_upd)\n\n #DONE GETTING ALL THE DATA: \n #sensitivity is the first attribut (list_item[0]) in the each of the lists (one list per station)\n min_broad_leaf_forest=min(df_saved_upd['Broad leaf forest'])\n range_broad_leaf_forest=max(df_saved_upd['Broad leaf forest'])-min_broad_leaf_forest\n\n min_coniferous_forest=min(df_saved_upd['Coniferous forest'])\n range_coniferous_forest=max(df_saved_upd['Coniferous forest'])-min_coniferous_forest\n \n min_mixed_forest=min(df_saved_upd['Mixed forest'])\n range_mixed_forest=max(df_saved_upd['Mixed forest'])-min_mixed_forest\n \n min_natural_grassland=min(df_saved_upd['Natural grassland'])\n range_natural_grassland=max(df_saved_upd['Natural grassland'])-min_natural_grassland\n \n min_cropland=min(df_saved_upd['Cropland'])\n range_cropland=max(df_saved_upd['Cropland'])-min_cropland\n \n min_pasture=min(df_saved_upd['Pasture'])\n range_pasture=max(df_saved_upd['Pasture'])-min_pasture\n \n min_urban=min(df_saved_upd['Urban'])\n range_urban=max(df_saved_upd['Urban'])-min_urban\n \n min_ocean=min(df_saved_upd['Ocean'])\n range_ocean=max(df_saved_upd['Ocean'])-min_ocean\n \n min_unknown=min(df_saved_upd['Unknown'])\n range_unknown=max(df_saved_upd['Unknown'])-min_unknown\n\n df_saved_for_normalized=df_saved_upd.copy()\n\n #for station in all_stations: \n for station in df_saved_for_normalized['Station']:\n \n df_saved_for_normalized=compute_normalized(df_saved_for_normalized, station, 'Broad leaf forest', min_broad_leaf_forest, range_broad_leaf_forest)\n df_saved_for_normalized=compute_normalized(df_saved_for_normalized, station, 'Coniferous forest', min_coniferous_forest, range_coniferous_forest)\n df_saved_for_normalized=compute_normalized(df_saved_for_normalized, station, 'Mixed forest', min_mixed_forest, range_mixed_forest)\n df_saved_for_normalized=compute_normalized(df_saved_for_normalized, station, 'Natural grassland', min_natural_grassland, range_natural_grassland)\n df_saved_for_normalized=compute_normalized(df_saved_for_normalized, station, 'Cropland', min_cropland, range_cropland)\n df_saved_for_normalized=compute_normalized(df_saved_for_normalized, station, 'Pasture', min_pasture, range_pasture)\n df_saved_for_normalized=compute_normalized(df_saved_for_normalized, station, 'Urban', min_urban, range_urban)\n df_saved_for_normalized=compute_normalized(df_saved_for_normalized, station, 'Ocean', min_ocean, range_ocean)\n df_saved_for_normalized=compute_normalized(df_saved_for_normalized, station, 'Unknown', min_unknown, range_unknown)\n\n #create the figure\n matplotlib.rcParams.update({'font.size': 14})\n fig = plt.figure(figsize=(10,9)) \n ax = fig.add_subplot(111)\n\n #added - get on the right side of the plot\n ax.yaxis.tick_right()\n\n ax.yaxis.set_label_position(\"right\")\n\n #remove the ticks (lenght 0 - keep the names)\n ax.tick_params(axis='both', which='both', length=0)\n \n #what will go as labels along the x-axis. Blank space next to station for more room. \n list_attributes=['Station', '', 'Broad leaf forest','Coniferous forest','Mixed forest','Natural grassland','Cropland','Pasture','Urban', 'Ocean', 'Unknown']\n\n #max 15 characters in lable\n list_attributes=[textwrap.fill(text,15) for text in list_attributes]\n print('df_saved_for_normalized', df_saved_for_normalized)\n #incremented for each station.. 0, 10, 20... etc. Where station name and \"line\" should start along the y-axis. 
\n place_on_axis=0\n for station in df_saved_for_normalized['Station']:\n \n #get all the values for station (row in dataframe)\n station_values=df_saved_for_normalized.loc[df_saved_for_normalized['Station'] == station]\n\n #place them in the order we want them in the graph. List that will be used for the line. (one per station)\n station_values_list =[place_on_axis, place_on_axis, station_values['Broad leaf forest'].values[0], \n station_values['Coniferous forest'].values[0], station_values['Mixed forest'].values[0],\n station_values['Natural grassland'].values[0], station_values['Cropland'].values[0], \n station_values['Pasture'].values[0], station_values['Urban'].values[0], station_values['Ocean'].values[0], \n station_values['Unknown'].values[0]]\n \n if station==selected_station:\n\n plt.plot(list_attributes, station_values_list, linestyle='-', marker='o', lw=3, color= 'black', label=station_name)\n \n #on 'Station position (along the x-axis). \n #station_values_list[0] --> where on y-axis (place_on_axis). +1 for selected_station (bold text, need more room)\n ax.text('Station', station_values_list[0]+1, station)\n \n else:\n plt.plot(list_attributes, station_values_list, linestyle=':', lw=0.6, color= 'blue', label=station_name)\n \n ax.text('Station', station_values_list[0], station)\n \n place_on_axis=place_on_axis+10\n\n ax.set_ylabel('% of max')\n\n ax.tick_params(axis='y')\n\n #vertical labels except \"station\" which also has different font\n list_attributes_upd=list_attributes\n list_attributes_upd[0]=''\n ax.set_xticklabels(list_attributes_upd, rotation='vertical')\n\n #label for station (furthest to the left in graph\n ax.text(0, -10, 'Station', fontsize=15,weight = 'bold')\n\n ax.yaxis.grid(True)\n \n caption=('Selected station relative to reference atmospheric stations')\n\n return df_saved_for_normalized, fig, caption\n\n\n\ndef save(stc, fmt='pdf'):\n \"\"\"\n provide a station characterisation object, with all the figures.\n all figures will be saved\n \n\n Parameters\n ----------\n stc : station characterisation object format. Instance of class(stationchar)\n fmt : STR, image filename ending, used to infer format ('pdf' | 'png')\n\n Returns\n -------\n None.\n\n \"\"\"\n # stc.figures is a dictionary...like {1: [fig, caption, shortname]}\n captions = {}\n \n for f in stc.figures:\n fig, cap, name = stc.figures[f] \n \n if not fig: continue\n \n stc.figures[f][2] = name + '.' + fmt\n filename = os.path.join(stc.settings['output_folder'], (name + '.' 
+ fmt))\n \n # keep the captions for json output\n captions[name] = cap\n \n # special settings for individual figures\n if f==4: #'landcover_rose'\n ax = fig.gca()\n ax.legend(bbox_to_anchor=(1.9, 0.25), ncol=2)\n \n if f==7: #'landcover_bar'\n fig.set_size_inches(12, 11) \n \n #common for all figures\n fig.savefig(filename,dpi=100,bbox_inches='tight')\n captions[name] = cap\n \n # save captions as json file \n file = os.path.join(stc.settings['output_folder'],'captions.json')\n with open(file, 'w') as f:\n json.dump(captions, f, indent=4)\n \n # save settings as json file\n file = os.path.join(stc.settings['output_folder'],'settings.json')\n with open(file, 'w') as f:\n json.dump(stc.settings, f, indent=4)\n \n # save PDF\n tex_string=tex.generate_full(stc)\n \n tex_file=os.path.join(stc.settings['output_folder'], (stc.settings['date/time generated']+stc.stationId+'.tex'))\n \n with open(tex_file,\"w\") as file:\n file.write(tex_string) \n \n output_folder = stc.settings['output_folder']\n\n a = os.system(('pdflatex -output-directory=' + output_folder + ' ' + tex_file))\n \n \n if a!=0:\n print('problem generating the output PDF')\n\n else:\n files_to_remove = ['.aux', '.log', '.out']\n for file_ext in files_to_remove:\n remove = stc.settings['date/time generated']+stc.stationId+ file_ext\n os.remove(output_folder + '/' + remove)\n\n \n \n\n \n " ]
[ [ "numpy.min", "pandas.Timestamp", "pandas.concat", "matplotlib.patches.Rectangle", "pandas.read_csv", "matplotlib.ticker.FuncFormatter", "matplotlib.pyplot.xticks", "numpy.max", "matplotlib.pyplot.colorbar", "pandas.DataFrame", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.subplots", "numpy.arange", "numpy.sqrt", "numpy.log10", "pandas.to_datetime", "numpy.array", "matplotlib.pyplot.figure", "matplotlib.rcParams.update", "numpy.isinf", "pandas.cut", "numpy.asarray", "pandas.date_range", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.ylabel", "pandas.Series", "numpy.abs", "matplotlib.pyplot.ioff" ] ]
nathanlct/FCGF
[ "d9d100aeb92d16e33b610fd1031c5861ee72d2d6" ]
[ "classify.py" ]
[ "import numpy as np\nimport tensorflow as tf\n\n\nfor voxel_size in [0.01, 0.05, 0.1, 0.15, 0.2, 0.4, 0.7, 1.0]:\n print('----------------------------------------------')\n print('TRAINING WITH VOXEL SIZE ', voxel_size)\n print('----------------------------------------------')\n\n batch_size = 64\n\n model = tf.keras.Sequential([\n tf.keras.layers.Dropout(0.3),\n tf.keras.layers.Dense(32, activation='relu', activity_regularizer=tf.keras.regularizers.l1(0.01)),\n tf.keras.layers.Dropout(0.4),\n tf.keras.layers.Dense(7, activation='softmax', activity_regularizer=tf.keras.regularizers.l1(0.05))\n ])\n\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n metrics=['sparse_categorical_accuracy'])\n\n # only take reduced versions for training\n lille1_features = np.load(f'train_data/MiniLille1_features_reduced_{str(voxel_size)}.npy')\n lille2_features = np.load(f'train_data/MiniLille2_features_{str(voxel_size)}.npy')\n paris_features = np.load(f'train_data/MiniParis1_features_reduced_{str(voxel_size)}.npy')\n\n lille1_labels = np.load(f'train_data/MiniLille1_labels_reduced_{str(voxel_size)}.npy')\n lille2_labels = np.load(f'train_data/MiniLille2_labels_{str(voxel_size)}.npy')\n paris_labels = np.load(f'train_data/MiniParis1_labels_reduced_{str(voxel_size)}.npy')\n\n train_features = np.vstack((lille1_features, paris_features))\n train_labels = np.append(lille1_labels, paris_labels)\n\n test_features = lille2_features\n test_labels = lille2_labels\n\n n_train = len(train_features) - (len(train_features) % batch_size)\n train_features = train_features[:n_train]\n train_labels = train_labels[:n_train]\n\n n_test = len(test_features) - (len(test_features) % batch_size)\n test_features = test_features[:n_test]\n test_labels = test_labels[:n_test]\n\n print('train:', train_features.shape, train_labels.shape)\n print('test:', test_features.shape, test_labels.shape)\n\n for e in range(1):\n print('STARTING EPOCH', e)\n\n # shuffle dataset\n inds = list(range(len(train_features)))\n np.random.shuffle(inds)\n train_features = train_features[inds]\n train_labels = train_labels[inds]\n del inds\n\n print('Starting training')\n for i in range(len(train_features) // batch_size):\n if i % 10000 == 0:\n print(f'batch {i}/{len(train_features)//batch_size}')\n batch_start = i * batch_size\n batch_end = (i + 1) * batch_size\n model.train_on_batch(train_features[batch_start:batch_end], train_labels[batch_start:batch_end])\n print('Training ended')\n print('Epoch stats:')\n\n y_pred = np.array([])\n for i in range(len(test_features) // batch_size):\n if i % 10000 == 0:\n print(f'batch {i}/{len(test_features)//batch_size}')\n batch_start = i * batch_size\n batch_end = (i + 1) * batch_size\n y = model.predict_on_batch(test_features[batch_start:batch_end])\n y_pred = np.append(y_pred, np.argmax(y, axis=1))\n\n # stats \n avg_precision = 0\n avg_recall = 0\n avg_FI = 0\n avg_IoU = 0\n\n for i in range(7):\n TP = np.count_nonzero(np.logical_and(y_pred == i, test_labels == i))\n TN = np.count_nonzero(np.logical_and(y_pred != i, test_labels != i))\n FP = np.count_nonzero(np.logical_and(y_pred == i, test_labels != i))\n FN = np.count_nonzero(np.logical_and(y_pred != i, test_labels == i))\n\n precision = TP / (TP + FP) if TP + FP != 0 else 0\n recall = TP / (TP + FN) if TP + FN != 0 else 0\n FI = 2 * recall * precision / (recall + precision) if recall + precision != 0 else 0\n IoU = TP / (TP + FP + FN) if TP + FP + FN != 0 else 0\n\n print(f'{i}: 
{np.count_nonzero(y_pred == i)} predicted, {np.count_nonzero(test_labels == i)} total, TP={TP}, TN={TN}, FP={FP}, FN={FN}')\n print(f'\\tprecision={precision}, recall={recall}, FI={FI}, IoU={IoU}')\n\n coef = np.count_nonzero(test_labels == i) / len(test_labels)\n avg_precision += coef * precision\n avg_recall += coef * recall\n avg_FI += coef * FI\n avg_IoU += coef * IoU\n\n print('Averaged stats:')\n print(f'\\tPrecision={avg_precision}, recall={avg_recall}, FI={avg_FI}, IoU={avg_IoU}')\n print('\\tAccuracy:', np.count_nonzero(y_pred == test_labels) / len(test_labels))\n\n print()\n print()\n" ]
[ [ "numpy.array", "numpy.count_nonzero", "tensorflow.keras.regularizers.l1", "numpy.random.shuffle", "numpy.logical_and", "tensorflow.keras.layers.Dropout", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "numpy.argmax", "numpy.append", "tensorflow.keras.optimizers.Adam", "numpy.vstack" ] ]
the21st/pymatting
[ "c6a48e2887e61d1f074a830e69b52c34fea9d7af" ]
[ "pymatting/laplacian/lbdm_laplacian.py" ]
[ "import numpy as np\nfrom numba import njit\nimport scipy.sparse\n\n\n@njit(\"f8[:, :](f8[:, :], f8)\")\ndef calculate_kernel_matrix(X, v):\n n, m = X.shape\n K = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n K[i, j] = np.exp(-np.sqrt(v) * np.sum(np.square(X[i] - X[j])))\n return K\n\n\n@njit(\"Tuple((f8[:], i4[:], i4[:]))(f8[:, :, :], f8, i4)\")\ndef _lbdm(image, epsilon, r):\n h, w = image.shape[:2]\n n = h * w\n\n area = (2 * r + 1) ** 2\n\n indices = np.arange(n).reshape(h, w)\n\n values = np.zeros((n, area ** 2))\n i_inds = np.zeros((n, area ** 2), dtype=np.int32)\n j_inds = np.zeros((n, area ** 2), dtype=np.int32)\n\n gray = (image[:, :, 0] + image[:, :, 1] + image[:, :, 2]) / 3.0\n v = np.std(gray)\n\n for y in range(r, h - r):\n for x in range(r, w - r):\n i = x + y * w\n\n X = np.ones((area, 3 + 1))\n\n k = 0\n for y2 in range(y - r, y + r + 1):\n for x2 in range(x - r, x + r + 1):\n for c in range(3):\n X[k, c] = image[y2, x2, c]\n k += 1\n\n window_indices = indices[y - r : y + r + 1, x - r : x + r + 1].flatten()\n\n # does not produce better results than no kernel\n # K = calculate_kernel_matrix(X, v)\n\n K = np.dot(X, X.T)\n\n f = np.linalg.solve(K + epsilon * np.eye(area), K)\n\n tmp2 = np.eye(f.shape[0]) - f\n tmp3 = tmp2.dot(tmp2.T)\n\n for k in range(area):\n i_inds[i, k::area] = window_indices\n j_inds[i, k * area : k * area + area] = window_indices\n values[i] = tmp3.ravel()\n\n return values.ravel(), i_inds.ravel(), j_inds.ravel()\n\n\ndef lbdm_laplacian(image, epsilon=1e-7, radius=1):\n \"\"\"\n Calculate a Laplacian matrix based on :cite:`zheng2009learning`.\n\n Parameters\n ----------\n image: numpy.ndarray\n Image with shape :math:`h\\\\times w \\\\times 3`\n epsilon: float\n Regularization strength\n radius: int\n Radius of local window size\n\n Returns\n -------\n L: scipy.sparse.csr_matrix\n Matting Laplacian\n \"\"\"\n h, w = image.shape[:2]\n n = h * w\n\n values, i_inds, j_inds = _lbdm(image, epsilon, radius)\n\n L = scipy.sparse.csr_matrix((values, (i_inds, j_inds)), shape=(n, n))\n\n return L\n" ]
[ [ "numpy.square", "numpy.dot", "numpy.zeros", "numpy.ones", "numpy.eye", "numpy.std", "numpy.arange", "numpy.sqrt" ] ]
mc51/Corona-Test-Comparison
[ "e237a7138f0b1c8c293f2d388f5788bd95b72072" ]
[ "app.py" ]
[ "import json\nimport glob\nimport logging\nimport sys\nimport dateparser\nimport subprocess\nimport os\nimport pandas as pd\nfrom pathlib import Path\nfrom flask import Flask, render_template\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\n\napp = Flask(__name__)\n\n\ndef get_version() -> str:\n \"\"\"get latest version from git tag\"\"\"\n try:\n r = subprocess.check_output(\n [\"git\", \"describe\", \"--tags\", \"--abbrev=0\"], encoding=\"utf8\"\n )\n except subprocess.CalledProcessError:\n return \"\"\n return r\n\n\nMAIL = os.getenv(\"MAIL\")\nVERSION = get_version()\nDIR_DATA = \"./data\"\nDIR_DATA_FINAL = DIR_DATA + \"/final\"\n\n\n# Read most current file (use date as filename prefix)\ntry:\n file = sorted(glob.glob(f\"{DIR_DATA_FINAL}/*.csv\"), reverse=True)[0]\nexcept IndexError:\n log.exception(f\"\\nMake sure there is a .csv file in {DIR_DATA_FINAL}/\\n\")\n sys.exit(1)\ndf = pd.read_csv(file)\n\nlog.debug(f\"File date to parse: {Path(file).name[0:8]}\")\nDATE = dateparser.parse(Path(file).name[0:8], [\"%Y%m%d\"]).date()\nTITLE = \"Corona Antigen Test Comparison\"\nSUBTITLE = f\"Data last updated on: {DATE}\" if DATE else \"\"\n\nCOL = df.columns\nDATATABLES_CONFIG = [\n {\"name\": COL[0], \"searchable\": \"false\", \"orderable\": \"false\"},\n {\"name\": COL[1], \"searchable\": \"false\", \"orderable\": \"false\"},\n {\"name\": COL[2], \"searchable\": \"false\", \"orderable\": \"false\"},\n {\"name\": COL[3], \"searchable\": \"true\", \"orderable\": \"true\"},\n {\"name\": COL[4], \"searchable\": \"true\", \"orderable\": \"true\"},\n {\"name\": COL[5], \"searchable\": \"false\", \"orderable\": \"true\"},\n {\"name\": COL[6], \"span\": \"Cq <=25\", \"searchable\": \"false\", \"orderable\": \"true\"},\n {\"name\": COL[7], \"span\": \"Cq 25-30\", \"searchable\": \"false\", \"orderable\": \"true\"},\n {\"name\": COL[8], \"span\": \"Cq >=30\", \"searchable\": \"false\", \"orderable\": \"true\"},\n {\"name\": COL[9], \"searchable\": \"false\", \"orderable\": \"true\"},\n]\n\n\[email protected](\"/\")\ndef index():\n return render_template(\n \"index.html\",\n title=TITLE,\n subtitle=SUBTITLE,\n table_config=DATATABLES_CONFIG,\n version=VERSION,\n )\n\n\[email protected](\"/about\")\ndef about():\n return render_template(\"about.html\", version=VERSION, mail=MAIL)\n\n\[email protected](\"/help\")\ndef help():\n return render_template(\"help.html\", version=VERSION)\n\n\[email protected](\"/api/data\")\ndef data():\n return {\"data\": json.loads(df.to_json(orient=\"records\"))}\n\n\nif __name__ == \"__main__\":\n app.run()\n" ]
[ [ "pandas.read_csv" ] ]
OrthoDex/PCGrad-PyTorch
[ "957380c93807c43d6d7ebffa341789f0f7ac367c" ]
[ "tests/model.py" ]
[ "\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\n\nimport numpy as np\nimport random\n\n## dummy net to test code\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n # 1 input image channel, 6 output channels, 3x3 square convolution\n # kernel\n self.fc1 = nn.Linear(3, 3)\n\n def forward(self, x):\n # Max pooling over a (2, 2) window\n x = F.relu(self.fc1(x))\n return x\n\n def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features" ]
[ [ "torch.nn.Linear" ] ]
ethanwhite/torchgeo
[ "cb20e1abfd9213f9ee7700df972385db13568642" ]
[ "torchgeo/datasets/nasa_marine_debris.py" ]
[ "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"NASA Marine Debris dataset.\"\"\"\n\nimport os\nfrom typing import Callable, Dict, List, Optional\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport rasterio\nimport torch\nfrom torch import Tensor\nfrom torchvision.utils import draw_bounding_boxes\n\nfrom .geo import VisionDataset\nfrom .utils import download_radiant_mlhub_dataset, extract_archive\n\n\nclass NASAMarineDebris(VisionDataset):\n \"\"\"NASA Marine Debris dataset.\n\n The `NASA Marine Debris <https://mlhub.earth/data/nasa_marine_debris>`_\n dataset is a dataset for detection of floating marine debris in satellite imagery.\n\n Dataset features:\n\n * 707 patches with 3 m per pixel resolution (256x256 px)\n * three spectral bands - RGB\n * 1 object class: marine_debris\n * images taken by Planet Labs PlanetScope satellites\n * imagery taken from 2016-2019 from coasts of Greece, Honduras, and Ghana\n\n Dataset format:\n\n * images are three-channel geotiffs in uint8 format\n * labels are numpy files (.npy) containing bounding box (xyxy) coordinates\n * additional: images in jpg format and labels in geojson format\n\n If you use this dataset in your research, please cite the following paper:\n\n * https://doi.org/10.34911/rdnt.9r6ekg\n\n .. note::\n\n This dataset requires the following additional library to be installed:\n\n * `radiant-mlhub <https://pypi.org/project/radiant-mlhub/>`_ to download the\n imagery and labels from the Radiant Earth MLHub\n\n .. versionadded:: 0.2\n \"\"\"\n\n dataset_id = \"nasa_marine_debris\"\n directories = [\"nasa_marine_debris_source\", \"nasa_marine_debris_labels\"]\n filenames = [\"nasa_marine_debris_source.tar.gz\", \"nasa_marine_debris_labels.tar.gz\"]\n md5s = [\"fe8698d1e68b3f24f0b86b04419a797d\", \"d8084f5a72778349e07ac90ec1e1d990\"]\n class_label = \"marine_debris\"\n\n def __init__(\n self,\n root: str = \"data\",\n transforms: Optional[Callable[[Dict[str, Tensor]], Dict[str, Tensor]]] = None,\n download: bool = False,\n api_key: Optional[str] = None,\n checksum: bool = False,\n verbose: bool = False,\n ) -> None:\n \"\"\"Initialize a new NASA Marine Debris Dataset instance.\n\n Args:\n root: root directory where dataset can be found\n transforms: a function/transform that takes input sample and its target as\n entry and returns a transformed version\n download: if True, download dataset and store it in the root directory\n api_key: a RadiantEarth MLHub API key to use for downloading the dataset\n checksum: if True, check the MD5 of the downloaded files (may be slow)\n verbose: if True, print messages when new tiles are loaded\n \"\"\"\n self.root = root\n self.transforms = transforms\n self.download = download\n self.api_key = api_key\n self.checksum = checksum\n self.verbose = verbose\n self._verify()\n self.files = self._load_files()\n\n def __getitem__(self, index: int) -> Dict[str, Tensor]:\n \"\"\"Return an index within the dataset.\n\n Args:\n index: index to return\n\n Returns:\n data and labels at that index\n \"\"\"\n image = self._load_image(self.files[index][\"image\"])\n boxes = self._load_target(self.files[index][\"target\"])\n sample = {\"image\": image, \"boxes\": boxes}\n\n if self.transforms is not None:\n sample = self.transforms(sample)\n\n return sample\n\n def __len__(self) -> int:\n \"\"\"Return the number of data points in the dataset.\n\n Returns:\n length of the dataset\n \"\"\"\n return len(self.files)\n\n def _load_image(self, path: str) -> 
Tensor:\n \"\"\"Load a single image.\n\n Args:\n path: path to the image\n\n Returns:\n the image\n \"\"\"\n with rasterio.open(path) as f:\n array = f.read()\n tensor: Tensor = torch.from_numpy(array) # type: ignore[attr-defined]\n return tensor\n\n def _load_target(self, path: str) -> Tensor:\n \"\"\"Load the target bounding boxes for a single image.\n\n Args:\n path: path to the labels\n\n Returns:\n the target boxes\n \"\"\"\n array = np.load(path)\n # boxes contain unecessary value of 1 after xyxy coords\n array = array[:, :4]\n tensor: Tensor = torch.from_numpy(array) # type: ignore[attr-defined]\n return tensor\n\n def _load_files(self) -> List[Dict[str, str]]:\n \"\"\"Load a image and label files.\n\n Returns:\n list of dicts containing image and label files\n \"\"\"\n image_root = os.path.join(self.root, self.directories[0])\n target_root = os.path.join(self.root, self.directories[1])\n image_folders = sorted(\n [f for f in os.listdir(image_root) if not f.endswith(\"json\")]\n )\n\n files = []\n for folder in image_folders:\n files.append(\n {\n \"image\": os.path.join(image_root, folder, \"image_geotiff.tif\"),\n \"target\": os.path.join(\n target_root,\n folder.replace(\"source\", \"labels\"),\n \"pixel_bounds.npy\",\n ),\n }\n )\n return files\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\n\n Raises:\n RuntimeError: if ``download=False`` but dataset is missing or checksum fails\n \"\"\"\n # Check if the files already exist\n exists = [\n os.path.exists(os.path.join(self.root, directory))\n for directory in self.directories\n ]\n if all(exists):\n return\n\n # Check if zip file already exists (if so then extract)\n exists = []\n for filename in self.filenames:\n filepath = os.path.join(self.root, filename)\n if os.path.exists(filepath):\n exists.append(True)\n extract_archive(filepath)\n else:\n exists.append(False)\n\n if all(exists):\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise RuntimeError(\n \"Dataset not found in `root` directory and `download=False`, \"\n \"either specify a different `root` directory or use `download=True` \"\n \"to automaticaly download the dataset.\"\n )\n\n # TODO: need a checksum check in here post downloading\n # Download and extract the dataset\n download_radiant_mlhub_dataset(self.dataset_id, self.root, self.api_key)\n for filename in self.filenames:\n filepath = os.path.join(self.root, filename)\n extract_archive(filepath)\n\n def plot(\n self,\n sample: Dict[str, Tensor],\n show_titles: bool = True,\n suptitle: Optional[str] = None,\n ) -> plt.Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample returned by :meth:`__getitem__`\n show_titles: flag indicating whether to show titles above each panel\n suptitle: optional string to use as a suptitle\n\n Returns:\n a matplotlib Figure with the rendered sample\n \"\"\"\n ncols = 1\n\n image = draw_bounding_boxes(image=sample[\"image\"], boxes=sample[\"boxes\"])\n image = image.permute((1, 2, 0)).numpy()\n\n if \"prediction_boxes\" in sample:\n ncols += 1\n preds = draw_bounding_boxes(\n image=sample[\"image\"], boxes=sample[\"prediction_boxes\"]\n )\n preds = preds.permute((1, 2, 0)).numpy()\n\n fig, axs = plt.subplots(ncols=ncols, figsize=(ncols * 10, 10))\n if ncols < 2:\n axs.imshow(image)\n axs.axis(\"off\")\n if show_titles:\n axs.set_title(\"Ground Truth\")\n else:\n axs[0].imshow(image)\n axs[0].axis(\"off\")\n axs[1].imshow(preds)\n axs[1].axis(\"off\")\n\n if show_titles:\n 
axs[0].set_title(\"Ground Truth\")\n axs[1].set_title(\"Predictions\")\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n\n return fig\n" ]
[ [ "matplotlib.pyplot.suptitle", "numpy.load", "matplotlib.pyplot.subplots", "torch.from_numpy" ] ]
srio/OASYS1-PHOTOLAB
[ "919dc56d3dd159a949f739feb37508b31d0309b0" ]
[ "orangecontrib/photolab/widgets/gui/ow_photolab_widget.py" ]
[ "import sys\n\nfrom oasys.widgets import widget\nfrom PyQt5 import QtWidgets\n\nfrom orangewidget import gui\nfrom orangewidget.settings import Setting\n\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtCore import QRect\nfrom PyQt5.QtGui import QTextCursor\n\nimport oasys.widgets.gui as oasysgui\nfrom oasys.widgets.gui import ConfirmDialog\n\nfrom PyQt5.QtWidgets import QApplication, QMessageBox, QSizePolicy\n\nfrom orangecontrib.photolab.widgets.gui.python_script import PythonScript\nfrom oasys.util.oasys_util import TriggerIn, EmittingStream\n\n# import matplotlib.pyplot as plt\nimport matplotlib\nimport matplotlib.image as mpimg\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\n\n\nclass OWPhotolabWidget(widget.OWWidget):\n\n want_main_area=1\n\n is_automatic_run = Setting(True)\n\n error_id = 0\n warning_id = 0\n info_id = 0\n\n\n IMAGE_WIDTH = 760\n IMAGE_HEIGHT = 545\n MAX_WIDTH = 1320\n MAX_HEIGHT = 700\n CONTROL_AREA_WIDTH = 405\n TABS_AREA_HEIGHT = 560\n\n view_type = Setting(1)\n\n def __init__(self, show_general_option_box=True, show_automatic_box=False, show_view_options=True, show_script_tab=True):\n super().__init__()\n\n self.leftWidgetPart.setSizePolicy(QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding))\n self.leftWidgetPart.setMaximumWidth(self.CONTROL_AREA_WIDTH + 20)\n self.leftWidgetPart.updateGeometry()\n\n\n geom = QApplication.desktop().availableGeometry()\n self.setGeometry(QRect(round(geom.width()*0.05),\n round(geom.height()*0.05),\n round(min(geom.width()*0.98, self.MAX_WIDTH)),\n round(min(geom.height()*0.95, self.MAX_HEIGHT))))\n\n self.setMaximumHeight(self.geometry().height())\n self.setMaximumWidth(self.geometry().width())\n\n # CONTROL AREA\n self.controlArea.setFixedWidth(self.CONTROL_AREA_WIDTH)\n\n gui.button(self.controlArea, self, \"Process\", callback=self.process, height=45)\n gui.separator(self.controlArea)\n\n\n self.general_options_box = gui.widgetBox(self.controlArea, \"General Options\", addSpace=True, orientation=\"vertical\")\n self.general_options_box.setVisible(show_general_option_box)\n\n if show_automatic_box :\n gui.checkBox(self.general_options_box, self, 'is_automatic_run', 'Automatic Execution')\n\n\n # MAIN AREA\n self.main_tabs = oasysgui.tabWidget(self.mainArea)\n\n ##\n tab_preview = oasysgui.createTabPage(self.main_tabs, \"Preview\")\n\n\n ######################\n if show_view_options == True:\n view_box = oasysgui.widgetBox(tab_preview, \"Results Options\", addSpace=False, orientation=\"horizontal\")\n view_box_1 = oasysgui.widgetBox(view_box, \"\", addSpace=False, orientation=\"vertical\", width=350)\n\n self.view_type_combo = gui.comboBox(view_box_1, self, \"view_type\", label=\"View Results\",\n labelWidth=220,\n items=[\"No\", \"Yes\"],\n callback=self.set_ViewType, sendSelectedValue=False, orientation=\"horizontal\")\n else:\n self.view_type = 1\n\n\n\n self.preview_id = gui.widgetBox(tab_preview, \"\", addSpace=True, orientation=\"vertical\")\n\n ##\n tab_info = oasysgui.createTabPage(self.main_tabs, \"Info\")\n self.photolab_output = oasysgui.textArea() #height=self.IMAGE_HEIGHT-35)\n info_box = oasysgui.widgetBox(tab_info, \"\", addSpace=True, orientation=\"horizontal\") #, height = self.IMAGE_HEIGHT-20, width = self.IMAGE_WIDTH-20)\n info_box.layout().addWidget(self.photolab_output)\n\n #\n # add script tab to tabs panel\n #\n if 
show_script_tab:\n script_tab = oasysgui.createTabPage(self.main_tabs, \"Script\")\n self.photolab_python_script = PythonScript()\n self.photolab_python_script.code_area.setFixedHeight(400)\n script_box = gui.widgetBox(script_tab, \"Python script\", addSpace=True, orientation=\"horizontal\")\n script_box.layout().addWidget(self.photolab_python_script)\n\n\n def callResetSettings(self):\n if ConfirmDialog.confirmed(parent=self, message=\"Confirm Reset of the Fields?\"):\n try:\n self.resetSettings()\n except:\n pass\n\n def process(self):\n self.photolab_output.setText(\"\")\n self.progressBarInit()\n\n sys.stdout = EmittingStream(textWritten=self.writeStdOut)\n\n if self.input_data is None: raise Exception(\"No Input Data\")\n\n self.process_specific()\n\n self.progressBarFinished()\n\n def preview(self, current_image):\n\n if self.view_type == 1:\n if current_image is None:\n raise Exception(\"Please load an image....\")\n\n f = Figure()\n figure_canvas = FigureCanvasQTAgg(f)\n toolbar = NavigationToolbar(figure_canvas, self)\n ax = f.add_subplot(111)\n ax.imshow(current_image[:,:,:])\n ax.set_xticks([], minor=False)\n ax.set_yticks([], minor=False)\n\n\n try:\n self.preview_id.layout().removeItem(self.preview_id.layout().itemAt(1))\n self.preview_id.layout().removeItem(self.preview_id.layout().itemAt(0))\n except:\n pass\n self.preview_id.layout().addWidget(toolbar)\n self.preview_id.layout().addWidget(figure_canvas)\n\n\n def writeStdOut(self, text):\n cursor = self.photolab_output.textCursor()\n cursor.movePosition(QTextCursor.End)\n cursor.insertText(text)\n self.photolab_output.setTextCursor(cursor)\n self.photolab_output.ensureCursorVisible()\n\n\n def set_ViewType(self):\n try:\n self.preview_id.layout().removeItem(self.preview_id.layout().itemAt(1))\n self.preview_id.layout().removeItem(self.preview_id.layout().itemAt(0))\n\n except:\n pass" ]
[ [ "matplotlib.figure.Figure", "matplotlib.backends.backend_qt5agg.NavigationToolbar2QT", "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg" ] ]
stefan-niculae/gplearn
[ "46b77e80b5294d95e757769ea0177544d91d5c31" ]
[ "gplearn/tests/test_genetic.py" ]
[ "\"\"\"Testing the Genetic Programming module's underlying datastructure\n(gplearn.genetic._Program) as well as the classes that use it,\ngplearn.genetic.SymbolicRegressor and gplearn.genetic.SymbolicTransformer.\"\"\"\n\n# Author: Trevor Stephens <trevorstephens.com>\n#\n# License: BSD 3 clause\n\nimport pickle\nimport sys\n\nimport numpy as np\nfrom scipy.stats import pearsonr, spearmanr\nfrom sklearn.externals.six.moves import StringIO\nfrom sklearn.datasets import load_boston\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.utils.estimator_checks import check_estimator\nfrom sklearn.utils.testing import assert_false, assert_true\nfrom sklearn.utils.testing import assert_greater\nfrom sklearn.utils.testing import assert_equal, assert_almost_equal\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_warns\nfrom sklearn.utils.validation import check_random_state\n\nfrom gplearn.genetic import SymbolicRegressor, SymbolicTransformer\nfrom gplearn.fitness import weighted_pearson, weighted_spearman\nfrom gplearn._program import _Program\nfrom gplearn.fitness import _fitness_map\nfrom gplearn.functions import (add2, sub2, mul2, div2, sqrt1, log1, abs1, max2,\n min2)\nfrom gplearn.functions import _Function\n\n# load the boston dataset and randomly permute it\nrng = check_random_state(0)\nboston = load_boston()\nperm = rng.permutation(boston.target.size)\nboston.data = boston.data[perm]\nboston.target = boston.target[perm]\n\n\ndef test_sklearn_estimator_checks_regressor():\n \"\"\"Run the sklearn estimator validation checks on SymbolicRegressor\"\"\"\n\n check_estimator(SymbolicRegressor)\n\n\ndef test_sklearn_estimator_checks_transformer():\n \"\"\"Run the sklearn estimator validation checks on SymbolicTransformer\"\"\"\n\n check_estimator(SymbolicTransformer)\n\n\ndef test_weighted_correlations():\n \"\"\"Check weighted Pearson correlation coefficient matches scipy\"\"\"\n\n random_state = check_random_state(415)\n x1 = random_state.uniform(size=500)\n x2 = random_state.uniform(size=500)\n w1 = np.ones(500)\n w2 = random_state.uniform(size=500)\n\n # Pearson's correlation coefficient\n scipy_pearson = pearsonr(x1, x2)[0]\n # Check with constant weights (should be equal)\n gplearn_pearson = weighted_pearson(x1, x2, w1)\n assert_almost_equal(scipy_pearson, gplearn_pearson)\n # Check with irregular weights (should be different)\n gplearn_pearson = weighted_pearson(x1, x2, w2)\n assert_true(abs(scipy_pearson - gplearn_pearson) > 0.01)\n\n # Spearman's correlation coefficient\n scipy_spearman = spearmanr(x1, x2)[0]\n # Check with constant weights (should be equal)\n gplearn_spearman = weighted_spearman(x1, x2, w1)\n assert_almost_equal(scipy_spearman, gplearn_spearman)\n # Check with irregular weights (should be different)\n gplearn_spearman = weighted_pearson(x1, x2, w2)\n assert_true(abs(scipy_spearman - gplearn_spearman) > 0.01)\n\n\ndef test_program_init_method():\n \"\"\"Check 'full' creates longer and deeper programs than other methods\"\"\"\n\n params = {'function_set': [add2, sub2, mul2, div2, sqrt1, log1, abs1, max2,\n min2],\n 'arities': {1: [sqrt1, log1, abs1],\n 2: [add2, sub2, mul2, div2, max2, min2]},\n 'init_depth': (2, 6),\n 
'n_features': 10,\n 'const_range': (-1.0, 1.0),\n 'metric': 'mean absolute error',\n 'p_point_replace': 0.05,\n 'parsimony_coefficient': 0.1}\n random_state = check_random_state(415)\n programs = []\n for i in range(20):\n programs.append(_Program(init_method='full',\n random_state=random_state, **params))\n full_length = np.mean([gp.length_ for gp in programs])\n full_depth = np.mean([gp.depth_ for gp in programs])\n programs = []\n for i in range(20):\n programs.append(_Program(init_method='half and half',\n random_state=random_state, **params))\n hnh_length = np.mean([gp.length_ for gp in programs])\n hnh_depth = np.mean([gp.depth_ for gp in programs])\n programs = []\n for i in range(20):\n programs.append(_Program(init_method='grow',\n random_state=random_state, **params))\n grow_length = np.mean([gp.length_ for gp in programs])\n grow_depth = np.mean([gp.depth_ for gp in programs])\n\n assert_greater(full_length, hnh_length)\n assert_greater(hnh_length, grow_length)\n assert_greater(full_depth, hnh_depth)\n assert_greater(hnh_depth, grow_depth)\n\n\ndef test_program_init_depth():\n \"\"\"Check 'full' creates constant depth programs for single depth limit\"\"\"\n\n params = {'function_set': [add2, sub2, mul2, div2, sqrt1, log1, abs1, max2,\n min2],\n 'arities': {1: [sqrt1, log1, abs1],\n 2: [add2, sub2, mul2, div2, max2, min2]},\n 'init_depth': (6, 6),\n 'n_features': 10,\n 'const_range': (-1.0, 1.0),\n 'metric': 'mean absolute error',\n 'p_point_replace': 0.05,\n 'parsimony_coefficient': 0.1}\n random_state = check_random_state(415)\n programs = []\n for i in range(20):\n programs.append(_Program(init_method='full',\n random_state=random_state, **params))\n full_depth = np.bincount([gp.depth_ for gp in programs])\n programs = []\n for i in range(20):\n programs.append(_Program(init_method='half and half',\n random_state=random_state, **params))\n hnh_depth = np.bincount([gp.depth_ for gp in programs])\n programs = []\n for i in range(20):\n programs.append(_Program(init_method='grow',\n random_state=random_state, **params))\n grow_depth = np.bincount([gp.depth_ for gp in programs])\n\n assert_true(full_depth[-1] == 20)\n assert_false(hnh_depth[-1] == 20)\n assert_false(grow_depth[-1] == 20)\n\n\ndef test_validate_program():\n \"\"\"Check that valid programs are accepted & invalid ones raise error\"\"\"\n\n function_set = [add2, sub2, mul2, div2, sqrt1, log1, abs1, max2, min2]\n arities = {1: [sqrt1, log1, abs1],\n 2: [add2, sub2, mul2, div2, max2, min2]},\n init_depth = (2, 6)\n init_method = 'half and half'\n n_features = 10\n const_range = (-1.0, 1.0)\n metric = 'mean absolute error'\n p_point_replace = 0.05\n parsimony_coefficient = 0.1\n\n random_state = check_random_state(415)\n test_gp = [sub2, abs1, sqrt1, log1, log1, sqrt1, 7, abs1, abs1, abs1, log1,\n sqrt1, 2]\n\n # This one should be fine\n _ = _Program(function_set, arities, init_depth, init_method, n_features,\n const_range, metric, p_point_replace, parsimony_coefficient,\n random_state, test_gp)\n\n # Now try a couple that shouldn't be\n assert_raises(ValueError, _Program, function_set, arities, init_depth,\n init_method, n_features, const_range, metric,\n p_point_replace, parsimony_coefficient, random_state,\n test_gp[:-1])\n assert_raises(ValueError, _Program, function_set, arities, init_depth,\n init_method, n_features, const_range, metric,\n p_point_replace, parsimony_coefficient, random_state,\n test_gp + [1])\n\n\ndef test_print_overloading():\n \"\"\"Check that printing a program object results in 'pretty' 
output\"\"\"\n\n params = {'function_set': [add2, sub2, mul2, div2],\n 'arities': {2: [add2, sub2, mul2, div2]},\n 'init_depth': (2, 6),\n 'init_method': 'half and half',\n 'n_features': 10,\n 'const_range': (-1.0, 1.0),\n 'metric': 'mean absolute error',\n 'p_point_replace': 0.05,\n 'parsimony_coefficient': 0.1}\n random_state = check_random_state(415)\n\n test_gp = [mul2, div2, 8, 1, sub2, 9, .5]\n\n gp = _Program(random_state=random_state, program=test_gp, **params)\n\n orig_stdout = sys.stdout\n try:\n out = StringIO()\n sys.stdout = out\n print(gp)\n output = out.getvalue().strip()\n finally:\n sys.stdout = orig_stdout\n\n lisp = \"mul(div(X8, X1), sub(X9, 0.500))\"\n assert_true(output == lisp)\n\n\ndef test_export_graphviz():\n \"\"\"Check output of a simple program to Graphviz\"\"\"\n\n params = {'function_set': [add2, sub2, mul2, div2],\n 'arities': {2: [add2, sub2, mul2, div2]},\n 'init_depth': (2, 6),\n 'init_method': 'half and half',\n 'n_features': 10,\n 'const_range': (-1.0, 1.0),\n 'metric': 'mean absolute error',\n 'p_point_replace': 0.05,\n 'parsimony_coefficient': 0.1}\n random_state = check_random_state(415)\n\n # Test for a small program\n test_gp = [mul2, div2, 8, 1, sub2, 9, .5]\n gp = _Program(random_state=random_state, program=test_gp, **params)\n output = gp.export_graphviz()\n tree = 'digraph program {\\n' \\\n 'node [style=filled]0 [label=\"mul\", fillcolor=\"#136ed4\"] ;\\n' \\\n '1 [label=\"div\", fillcolor=\"#136ed4\"] ;\\n' \\\n '2 [label=\"X8\", fillcolor=\"#60a6f6\"] ;\\n' \\\n '3 [label=\"X1\", fillcolor=\"#60a6f6\"] ;\\n' \\\n '1 -> 3 ;\\n1 -> 2 ;\\n' \\\n '4 [label=\"sub\", fillcolor=\"#136ed4\"] ;\\n' \\\n '5 [label=\"X9\", fillcolor=\"#60a6f6\"] ;\\n' \\\n '6 [label=\"0.500\", fillcolor=\"#60a6f6\"] ;\\n' \\\n '4 -> 6 ;\\n4 -> 5 ;\\n0 -> 4 ;\\n0 -> 1 ;\\n}'\n assert_true(output == tree)\n\n # Test with fade_nodes\n output = gp.export_graphviz(fade_nodes=[0, 1, 2, 3])\n tree = 'digraph program {\\n' \\\n 'node [style=filled]0 [label=\"mul\", fillcolor=\"#cecece\"] ;\\n' \\\n '1 [label=\"div\", fillcolor=\"#cecece\"] ;\\n' \\\n '2 [label=\"X8\", fillcolor=\"#cecece\"] ;\\n' \\\n '3 [label=\"X1\", fillcolor=\"#cecece\"] ;\\n' \\\n '1 -> 3 ;\\n1 -> 2 ;\\n' \\\n '4 [label=\"sub\", fillcolor=\"#136ed4\"] ;\\n' \\\n '5 [label=\"X9\", fillcolor=\"#60a6f6\"] ;\\n' \\\n '6 [label=\"0.500\", fillcolor=\"#60a6f6\"] ;\\n' \\\n '4 -> 6 ;\\n4 -> 5 ;\\n0 -> 4 ;\\n0 -> 1 ;\\n}'\n assert_true(output == tree)\n\n # Test a degenerative single-node program\n test_gp = [1]\n gp = _Program(random_state=random_state, program=test_gp, **params)\n output = gp.export_graphviz()\n tree = 'digraph program {\\n' \\\n 'node [style=filled]0 [label=\"X1\", fillcolor=\"#60a6f6\"] ;\\n}'\n assert_true(output == tree)\n\n\ndef test_execute():\n \"\"\"Check executing the program works\"\"\"\n\n params = {'function_set': [add2, sub2, mul2, div2],\n 'arities': {2: [add2, sub2, mul2, div2]},\n 'init_depth': (2, 6),\n 'init_method': 'half and half',\n 'n_features': 10,\n 'const_range': (-1.0, 1.0),\n 'metric': 'mean absolute error',\n 'p_point_replace': 0.05,\n 'parsimony_coefficient': 0.1}\n random_state = check_random_state(415)\n\n # Test for a small program\n test_gp = [mul2, div2, 8, 1, sub2, 9, .5]\n X = np.reshape(random_state.uniform(size=50), (5, 10))\n gp = _Program(random_state=random_state, program=test_gp, **params)\n result = gp.execute(X)\n expected = [-0.19656208, 0.78197782, -1.70123845, -0.60175969, -0.01082618]\n assert_array_almost_equal(result, expected)\n\n\ndef 
test_all_metrics():\n \"\"\"Check all supported metrics work\"\"\"\n\n params = {'function_set': [add2, sub2, mul2, div2],\n 'arities': {2: [add2, sub2, mul2, div2]},\n 'init_depth': (2, 6),\n 'init_method': 'half and half',\n 'n_features': 10,\n 'const_range': (-1.0, 1.0),\n 'metric': 'mean absolute error',\n 'p_point_replace': 0.05,\n 'parsimony_coefficient': 0.1}\n random_state = check_random_state(415)\n\n # Test for a small program\n test_gp = [mul2, div2, 8, 1, sub2, 9, .5]\n gp = _Program(random_state=random_state, program=test_gp, **params)\n X = np.reshape(random_state.uniform(size=50), (5, 10))\n y = random_state.uniform(size=5)\n sample_weight = np.ones(5)\n expected = [1.48719809776, 1.82389179833, 1.76013763179, -0.2928200724,\n -0.5]\n result = []\n for m in ['mean absolute error', 'mse', 'rmse', 'pearson', 'spearman']:\n gp.metric = _fitness_map[m]\n gp.raw_fitness_ = gp.raw_fitness(X, y, sample_weight)\n result.append(gp.fitness())\n assert_array_almost_equal(result, expected)\n\n\ndef test_get_subtree():\n \"\"\"Check that get subtree does the same thing for self and new programs\"\"\"\n\n params = {'function_set': [add2, sub2, mul2, div2],\n 'arities': {2: [add2, sub2, mul2, div2]},\n 'init_depth': (2, 6),\n 'init_method': 'half and half',\n 'n_features': 10,\n 'const_range': (-1.0, 1.0),\n 'metric': 'mean absolute error',\n 'p_point_replace': 0.05,\n 'parsimony_coefficient': 0.1}\n random_state = check_random_state(415)\n\n # Test for a small program\n test_gp = [mul2, div2, 8, 1, sub2, 9, .5]\n gp = _Program(random_state=random_state, program=test_gp, **params)\n\n self_test = gp.get_subtree(check_random_state(0))\n external_test = gp.get_subtree(check_random_state(0), test_gp)\n\n assert_equal(self_test, external_test)\n\n\ndef test_genetic_operations():\n \"\"\"Check all genetic operations are stable and don't change programs\"\"\"\n\n params = {'function_set': [add2, sub2, mul2, div2],\n 'arities': {2: [add2, sub2, mul2, div2]},\n 'init_depth': (2, 6),\n 'init_method': 'half and half',\n 'n_features': 10,\n 'const_range': (-1.0, 1.0),\n 'metric': 'mean absolute error',\n 'p_point_replace': 0.05,\n 'parsimony_coefficient': 0.1}\n random_state = check_random_state(415)\n\n # Test for a small program\n test_gp = [mul2, div2, 8, 1, sub2, 9, .5]\n donor = [add2, 0.1, sub2, 2, 7]\n\n gp = _Program(random_state=random_state, program=test_gp, **params)\n\n assert_equal([f.name if isinstance(f, _Function) else f\n for f in gp.reproduce()],\n ['mul', 'div', 8, 1, 'sub', 9, 0.5])\n assert_equal(gp.program, test_gp)\n assert_equal([f.name if isinstance(f, _Function) else f\n for f in gp.crossover(donor, random_state)[0]],\n ['sub', 2, 7])\n assert_equal(gp.program, test_gp)\n assert_equal([f.name if isinstance(f, _Function) else f\n for f in gp.subtree_mutation(random_state)[0]],\n ['mul', 'div', 8, 1, 'sub', 'sub', 3, 5, 'add', 6, 3])\n assert_equal(gp.program, test_gp)\n assert_equal([f.name if isinstance(f, _Function) else f\n for f in gp.hoist_mutation(random_state)[0]],\n ['div', 8, 1])\n assert_equal(gp.program, test_gp)\n assert_equal([f.name if isinstance(f, _Function) else f\n for f in gp.point_mutation(random_state)[0]],\n ['mul', 'div', 8, 1, 'sub', 9, 0.5])\n assert_equal(gp.program, test_gp)\n\n\ndef test_program_input_validation():\n \"\"\"Check that guarded input validation raises errors\"\"\"\n\n for Symbolic in (SymbolicRegressor, SymbolicTransformer):\n # Check too much proba\n est = Symbolic(p_point_mutation=.5)\n assert_raises(ValueError, est.fit, 
boston.data, boston.target)\n\n # Check invalid init_method\n est = Symbolic(init_method='ni')\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n\n # Check invalid const_ranges\n est = Symbolic(const_range=2)\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n est = Symbolic(const_range=[2, 2])\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n est = Symbolic(const_range=(2, 2, 2))\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n est = Symbolic(const_range='ni')\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n # And check acceptable, but strange, representations of init_depth\n est = Symbolic(const_range=(2, 2))\n est.fit(boston.data, boston.target)\n est = Symbolic(const_range=(4, 2))\n est.fit(boston.data, boston.target)\n\n # Check invalid init_depth\n est = Symbolic(init_depth=2)\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n est = Symbolic(init_depth=2)\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n est = Symbolic(init_depth=[2, 2])\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n est = Symbolic(init_depth=(2, 2, 2))\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n est = Symbolic(init_depth='ni')\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n est = Symbolic(init_depth=(4, 2))\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n # And check acceptable, but strange, representations of init_depth\n est = Symbolic(init_depth=(2, 2))\n est.fit(boston.data, boston.target)\n\n # Check hall_of_fame and n_components for transformer\n est = SymbolicTransformer(hall_of_fame=2000)\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n est = SymbolicTransformer(n_components=2000)\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n est = SymbolicTransformer(hall_of_fame=0)\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n est = SymbolicTransformer(n_components=0)\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n\n # Check regressor metrics\n for m in ['mean absolute error', 'mse', 'rmse']:\n est = SymbolicRegressor(generations=2, metric=m)\n est.fit(boston.data, boston.target)\n # And check the transformer metrics as well as a fake one\n for m in ['pearson', 'spearman', 'the larch']:\n est = SymbolicRegressor(generations=2, metric=m)\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n # Check transformer metrics\n for m in ['pearson', 'spearman']:\n est = SymbolicTransformer(generations=2, metric=m)\n est.fit(boston.data, boston.target)\n # And check the regressor metrics as well as a fake one\n for m in ['mean absolute error', 'mse', 'rmse', 'the larch']:\n est = SymbolicTransformer(generations=2, metric=m)\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n\n\ndef test_sample_weight():\n \"\"\"Check sample_weight param works\"\"\"\n\n # Check constant sample_weight has no effect\n sample_weight = np.ones(boston.target.shape[0])\n est1 = SymbolicRegressor(generations=2, random_state=0)\n est1.fit(boston.data, boston.target)\n est2 = SymbolicRegressor(generations=2, random_state=0)\n est2.fit(boston.data, boston.target, sample_weight=sample_weight)\n # And again with a scaled sample_weight\n est3 = SymbolicRegressor(generations=2, random_state=0)\n est3.fit(boston.data, boston.target, sample_weight=sample_weight * 1.1)\n\n assert_almost_equal(est1._program.fitness_, est2._program.fitness_)\n 
assert_almost_equal(est1._program.fitness_, est3._program.fitness_)\n\n # And again for the transformer\n sample_weight = np.ones(boston.target.shape[0])\n est1 = SymbolicTransformer(generations=2, random_state=0)\n est1 = est1.fit_transform(boston.data, boston.target)\n est2 = SymbolicTransformer(generations=2, random_state=0)\n est2 = est2.fit_transform(boston.data, boston.target,\n sample_weight=sample_weight)\n\n assert_array_almost_equal(est1, est2)\n\n\ndef test_trigonometric():\n \"\"\"Check that using trig functions work and that results differ\"\"\"\n\n est1 = SymbolicRegressor(random_state=0)\n est1.fit(boston.data[:400, :], boston.target[:400])\n est1 = mean_absolute_error(est1.predict(boston.data[400:, :]),\n boston.target[400:])\n\n est2 = SymbolicRegressor(function_set=['add', 'sub', 'mul', 'div',\n 'sin', 'cos', 'tan'],\n random_state=0)\n est2.fit(boston.data[:400, :], boston.target[:400])\n est2 = mean_absolute_error(est2.predict(boston.data[400:, :]),\n boston.target[400:])\n\n assert_true(abs(est1 - est2) > 0.01)\n\n\ndef test_subsample():\n \"\"\"Check that subsample work and that results differ\"\"\"\n\n est1 = SymbolicRegressor(max_samples=1.0, random_state=0)\n est1.fit(boston.data[:400, :], boston.target[:400])\n est1 = mean_absolute_error(est1.predict(boston.data[400:, :]),\n boston.target[400:])\n\n est2 = SymbolicRegressor(max_samples=0.7, random_state=0)\n est2.fit(boston.data[:400, :], boston.target[:400])\n est2 = mean_absolute_error(est2.predict(boston.data[400:, :]),\n boston.target[400:])\n\n assert_true(abs(est1 - est2) > 0.01)\n\n\ndef test_parsimony_coefficient():\n \"\"\"Check that parsimony coefficients work and that results differ\"\"\"\n\n est1 = SymbolicRegressor(parsimony_coefficient=0.001, random_state=0)\n est1.fit(boston.data[:400, :], boston.target[:400])\n est1 = mean_absolute_error(est1.predict(boston.data[400:, :]),\n boston.target[400:])\n\n est2 = SymbolicRegressor(parsimony_coefficient=0.1, random_state=0)\n est2.fit(boston.data[:400, :], boston.target[:400])\n est2 = mean_absolute_error(est2.predict(boston.data[400:, :]),\n boston.target[400:])\n\n est3 = SymbolicRegressor(parsimony_coefficient='auto', random_state=0)\n est3.fit(boston.data[:400, :], boston.target[:400])\n est3 = mean_absolute_error(est3.predict(boston.data[400:, :]),\n boston.target[400:])\n\n assert_true(abs(est1 - est2) > 0.01)\n assert_true(abs(est1 - est3) > 0.01)\n assert_true(abs(est2 - est3) > 0.01)\n\n\ndef test_early_stopping():\n \"\"\"Check that early stopping works\"\"\"\n\n est1 = SymbolicRegressor(stopping_criteria=10, random_state=0)\n est1.fit(boston.data[:400, :], boston.target[:400])\n assert_true(len(est1._programs) == 1)\n\n est1 = SymbolicTransformer(stopping_criteria=0.5, random_state=0)\n est1.fit(boston.data[:400, :], boston.target[:400])\n assert_true(len(est1._programs) == 1)\n\n\ndef test_verbose_output():\n \"\"\"Check verbose=1 does not cause error\"\"\"\n\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n est = SymbolicRegressor(random_state=0, verbose=1)\n est.fit(boston.data, boston.target)\n verbose_output = sys.stdout\n sys.stdout = old_stdout\n\n # check output\n verbose_output.seek(0)\n header1 = verbose_output.readline().rstrip()\n true_header = '%4s|%-25s|%-42s|' % (' ', 'Population Average'.center(25),\n 'Best Individual'.center(42))\n assert_equal(true_header, header1)\n\n header2 = verbose_output.readline().rstrip()\n true_header = '-' * 4 + ' ' + '-' * 25 + ' ' + '-' * 42 + ' ' + '-' * 10\n assert_equal(true_header, 
header2)\n\n header3 = verbose_output.readline().rstrip()\n header_fields = ('Gen', 'Length', 'Fitness', 'Length', 'Fitness',\n 'OOB Fitness', 'Time Left')\n true_header = '%4s %8s %16s %8s %16s %16s %10s' % header_fields\n assert_equal(true_header, header3)\n\n n_lines = sum(1 for l in verbose_output.readlines())\n assert_equal(20, n_lines)\n\n\ndef test_verbose_with_oob():\n \"\"\"Check oob scoring for subsample does not cause error\"\"\"\n\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n est = SymbolicRegressor(max_samples=0.9, random_state=0, verbose=1)\n est.fit(boston.data, boston.target)\n verbose_output = sys.stdout\n sys.stdout = old_stdout\n\n # check output\n verbose_output.seek(0)\n header1 = verbose_output.readline().rstrip()\n header2 = verbose_output.readline().rstrip()\n header3 = verbose_output.readline().rstrip()\n\n n_lines = sum(1 for l in verbose_output.readlines())\n assert_equal(20, n_lines)\n\n\ndef test_more_verbose_output():\n \"\"\"Check verbose=2 does not cause error\"\"\"\n\n old_stdout = sys.stdout\n old_stderr = sys.stderr\n sys.stdout = StringIO()\n sys.stderr = StringIO()\n est = SymbolicRegressor(random_state=0, verbose=2)\n est.fit(boston.data, boston.target)\n verbose_output = sys.stdout\n joblib_output = sys.stderr\n sys.stdout = old_stdout\n sys.stderr = old_stderr\n\n # check output\n verbose_output.seek(0)\n header1 = verbose_output.readline().rstrip()\n header2 = verbose_output.readline().rstrip()\n header3 = verbose_output.readline().rstrip()\n\n n_lines = sum(1 for l in verbose_output.readlines())\n assert_equal(20, n_lines)\n\n joblib_output.seek(0)\n n_lines = sum(1 for l in joblib_output.readlines())\n # New version of joblib appears to output sys.stderr \n assert_equal(0, n_lines % 10)\n\n\ndef test_parallel_train():\n \"\"\"Check predictions are the same for different n_jobs\"\"\"\n\n # Check the regressor\n ests = [\n SymbolicRegressor(population_size=100, generations=4, n_jobs=n_jobs,\n random_state=0).fit(boston.data[:100, :],\n boston.target[:100])\n for n_jobs in [1, 2, 3, 8, 16]\n ]\n\n preds = [e.predict(boston.data[500:, :]) for e in ests]\n for pred1, pred2 in zip(preds, preds[1:]):\n assert_array_almost_equal(pred1, pred2)\n lengths = np.array([[gp.length_ for gp in e._programs[-1]] for e in ests])\n for len1, len2 in zip(lengths, lengths[1:]):\n assert_array_almost_equal(len1, len2)\n\n # Check the transformer\n ests = [\n SymbolicTransformer(population_size=100, hall_of_fame=50,\n generations=4, n_jobs=n_jobs,\n random_state=0).fit(boston.data[:100, :],\n boston.target[:100])\n for n_jobs in [1, 2, 3, 8, 16]\n ]\n\n preds = [e.transform(boston.data[500:, :]) for e in ests]\n for pred1, pred2 in zip(preds, preds[1:]):\n assert_array_almost_equal(pred1, pred2)\n lengths = np.array([[gp.length_ for gp in e._programs[-1]] for e in ests])\n for len1, len2 in zip(lengths, lengths[1:]):\n assert_array_almost_equal(len1, len2)\n\n\ndef test_pickle():\n \"\"\"Check pickability\"\"\"\n\n # Check the regressor\n est = SymbolicRegressor(generations=2, random_state=0)\n est.fit(boston.data[:100, :], boston.target[:100])\n score = est.score(boston.data[500:, :], boston.target[500:])\n pickle_object = pickle.dumps(est)\n\n est2 = pickle.loads(pickle_object)\n assert_equal(type(est2), est.__class__)\n score2 = est2.score(boston.data[500:, :], boston.target[500:])\n assert_equal(score, score2)\n\n # Check the transformer\n est = SymbolicTransformer(generations=2, random_state=0)\n est.fit(boston.data[:100, :], boston.target[:100])\n X_new = 
est.transform(boston.data[500:, :])\n pickle_object = pickle.dumps(est)\n\n est2 = pickle.loads(pickle_object)\n assert_equal(type(est2), est.__class__)\n X_new2 = est2.transform(boston.data[500:, :])\n assert_array_almost_equal(X_new, X_new2)\n\n\ndef test_memory_layout():\n \"\"\"Check that it works no matter the memory layout\"\"\"\n\n for Symbolic in [SymbolicTransformer, SymbolicRegressor]:\n for dtype in [np.float64, np.float32]:\n est = Symbolic(generations=2, random_state=0)\n\n # Nothing\n X = np.asarray(boston.data, dtype=dtype)\n y = boston.target\n est.fit(X, y)\n\n # C-order\n X = np.asarray(boston.data, order=\"C\", dtype=dtype)\n y = boston.target\n est.fit(X, y)\n\n # F-order\n X = np.asarray(boston.data, order=\"F\", dtype=dtype)\n y = boston.target\n est.fit(X, y)\n\n # Contiguous\n X = np.ascontiguousarray(boston.data, dtype=dtype)\n y = boston.target\n est.fit(X, y)\n\n # Strided\n X = np.asarray(boston.data[::3], dtype=dtype)\n y = boston.target[::3]\n est.fit(X, y)\n\n\ndef test_input_shape():\n \"\"\"Check changed dimensions cause failure\"\"\"\n\n random_state = check_random_state(415)\n X = np.reshape(random_state.uniform(size=50), (5, 10))\n y = random_state.uniform(size=5)\n X2 = np.reshape(random_state.uniform(size=45), (5, 9))\n\n # Check the regressor\n est = SymbolicRegressor(generations=2, random_state=0)\n est.fit(X, y)\n assert_raises(ValueError, est.predict, X2)\n\n # Check the transformer\n est = SymbolicTransformer(generations=2, random_state=0)\n est.fit(X, y)\n assert_raises(ValueError, est.transform, X2)\n\n\ndef test_output_shape():\n \"\"\"Check output shape is as expected\"\"\"\n\n random_state = check_random_state(415)\n X = np.reshape(random_state.uniform(size=50), (5, 10))\n y = random_state.uniform(size=5)\n\n # Check the transformer\n est = SymbolicTransformer(n_components=5, generations=2, random_state=0)\n est.fit(X, y)\n assert_true(est.transform(X).shape == (5, 5))\n\n\ndef test_gridsearch():\n \"\"\"Check that SymbolicRegressor can be grid-searched\"\"\"\n\n # Grid search parsimony_coefficient\n parameters = {'parsimony_coefficient': [0.001, 0.1, 'auto']}\n clf = SymbolicRegressor(population_size=50, generations=5,\n tournament_size=5, random_state=0)\n grid = GridSearchCV(clf, parameters, scoring='neg_mean_absolute_error')\n grid.fit(boston.data, boston.target)\n expected = {'parsimony_coefficient': 0.001}\n assert_equal(grid.best_params_, expected)\n\n\ndef test_pipeline():\n \"\"\"Check that SymbolicRegressor/Transformer can work in a pipeline\"\"\"\n\n # Check the regressor\n est = make_pipeline(StandardScaler(),\n SymbolicRegressor(population_size=50,\n generations=5,\n tournament_size=5,\n random_state=0))\n est.fit(boston.data, boston.target)\n assert_almost_equal(est.score(boston.data, boston.target), -4.00270923)\n\n # Check the transformer\n est = make_pipeline(SymbolicTransformer(population_size=50,\n hall_of_fame=20,\n generations=5,\n tournament_size=5,\n random_state=0),\n DecisionTreeRegressor())\n est.fit(boston.data, boston.target)\n assert_almost_equal(est.score(boston.data, boston.target), 1.0)\n\n\ndef test_transformer_iterable():\n \"\"\"Check that the transformer is iterable\"\"\"\n\n random_state = check_random_state(415)\n X = np.reshape(random_state.uniform(size=50), (5, 10))\n y = random_state.uniform(size=5)\n function_set = ['add', 'sub', 'mul', 'div', 'sqrt', 'log', 'abs', 'neg',\n 'inv', 'max', 'min']\n est = SymbolicTransformer(population_size=500, generations=2,\n function_set=function_set, 
random_state=0)\n\n # Check unfitted\n unfitted_len = len(est)\n unfitted_iter = [gp.length_ for gp in est]\n expected_iter = []\n\n assert_true(unfitted_len == 0)\n assert_true(unfitted_iter == expected_iter)\n\n # Check fitted\n est.fit(X, y)\n fitted_len = len(est)\n fitted_iter = [gp.length_ for gp in est]\n expected_iter = [8, 12, 2, 29, 9, 33, 9, 8, 4, 22]\n\n assert_true(fitted_len == 10)\n assert_true(fitted_iter == expected_iter)\n\n # Check IndexError\n assert_raises(IndexError, est.__getitem__, 10)\n\n\ndef test_print_overloading_estimator():\n \"\"\"Check that printing a fitted estimator results in 'pretty' output\"\"\"\n\n random_state = check_random_state(415)\n X = np.reshape(random_state.uniform(size=50), (5, 10))\n y = random_state.uniform(size=5)\n\n # Check the regressor\n est = SymbolicRegressor(generations=2, random_state=0)\n\n # Unfitted\n orig_stdout = sys.stdout\n try:\n out = StringIO()\n sys.stdout = out\n print(est)\n output_unfitted = out.getvalue().strip()\n finally:\n sys.stdout = orig_stdout\n\n # Fitted\n est.fit(X, y)\n orig_stdout = sys.stdout\n try:\n out = StringIO()\n sys.stdout = out\n print(est)\n output_fitted = out.getvalue().strip()\n finally:\n sys.stdout = orig_stdout\n\n orig_stdout = sys.stdout\n try:\n out = StringIO()\n sys.stdout = out\n print(est._program)\n output_program = out.getvalue().strip()\n finally:\n sys.stdout = orig_stdout\n\n assert_true(output_unfitted != output_fitted)\n assert_true(output_unfitted == est.__repr__())\n assert_true(output_fitted == output_program)\n\n # Check the transformer\n est = SymbolicTransformer(generations=2, random_state=0)\n\n # Unfitted\n orig_stdout = sys.stdout\n try:\n out = StringIO()\n sys.stdout = out\n print(est)\n output_unfitted = out.getvalue().strip()\n finally:\n sys.stdout = orig_stdout\n\n # Fitted\n est.fit(X, y)\n orig_stdout = sys.stdout\n try:\n out = StringIO()\n sys.stdout = out\n print(est)\n output_fitted = out.getvalue().strip()\n finally:\n sys.stdout = orig_stdout\n\n orig_stdout = sys.stdout\n try:\n out = StringIO()\n sys.stdout = out\n output = str([gp.__str__() for gp in est])\n print(output.replace(\"',\", \",\\n\").replace(\"'\", \"\"))\n output_program = out.getvalue().strip()\n finally:\n sys.stdout = orig_stdout\n\n assert_true(output_unfitted != output_fitted)\n assert_true(output_unfitted == est.__repr__())\n assert_true(output_fitted == output_program)\n\n\ndef test_validate_functions():\n \"\"\"Check that valid functions are accepted & invalid ones raise error\"\"\"\n\n random_state = check_random_state(415)\n X = np.reshape(random_state.uniform(size=50), (5, 10))\n y = random_state.uniform(size=5)\n\n for Symbolic in (SymbolicRegressor, SymbolicTransformer):\n # These should be fine\n est = Symbolic(generations=2, random_state=0,\n function_set=(add2, sub2, mul2, div2))\n est.fit(boston.data, boston.target)\n est = Symbolic(generations=2, random_state=0,\n function_set=('add', 'sub', 'mul', div2))\n est.fit(boston.data, boston.target)\n\n # These should fail\n est = Symbolic(generations=2, random_state=0,\n function_set=('ni', 'sub', 'mul', div2))\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n est = Symbolic(generations=2, random_state=0,\n function_set=(7, 'sub', 'mul', div2))\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n est = Symbolic(generations=2, random_state=0, function_set=())\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n\n\ndef test_indices():\n \"\"\"Check that indices are stable when 
generated on the fly.\"\"\"\n\n params = {'function_set': [add2, sub2, mul2, div2],\n 'arities': {2: [add2, sub2, mul2, div2]},\n 'init_depth': (2, 6),\n 'init_method': 'half and half',\n 'n_features': 10,\n 'const_range': (-1.0, 1.0),\n 'metric': 'mean absolute error',\n 'p_point_replace': 0.05,\n 'parsimony_coefficient': 0.1}\n random_state = check_random_state(415)\n test_gp = [mul2, div2, 8, 1, sub2, 9, .5]\n gp = _Program(random_state=random_state, program=test_gp, **params)\n\n assert_raises(ValueError, gp.get_all_indices)\n assert_raises(ValueError, gp._indices)\n\n def get_indices_property():\n return gp.indices_\n\n assert_raises(ValueError, get_indices_property)\n\n indices, _ = gp.get_all_indices(10, 7, random_state)\n\n assert_array_equal(indices, gp.get_all_indices()[0])\n assert_array_equal(indices, gp._indices())\n assert_array_equal(indices, gp.indices_)\n\n\ndef test_warm_start():\n \"\"\"Check the warm_start functionality works as expected.\"\"\"\n\n est = SymbolicRegressor(generations=20, random_state=415)\n est.fit(boston.data, boston.target)\n cold_fitness = est._program.fitness_\n cold_program = est._program.__str__()\n\n # Check fitting fewer generations raises error\n est.set_params(generations=5, warm_start=True)\n assert_raises(ValueError, est.fit, boston.data, boston.target)\n\n # Check fitting the same number of generations warns\n est.set_params(generations=20, warm_start=True)\n assert_warns(UserWarning, est.fit, boston.data, boston.target)\n\n # Check warm starts get the same result\n est = SymbolicRegressor(generations=10, random_state=415)\n est.fit(boston.data, boston.target)\n est.set_params(generations=20, warm_start=True)\n est.fit(boston.data, boston.target)\n warm_fitness = est._program.fitness_\n warm_program = est._program.__str__()\n assert_almost_equal(cold_fitness, warm_fitness)\n assert_equal(cold_program, warm_program)\n\n\nif __name__ == \"__main__\":\n import nose\n nose.runmodule()\n" ]
[ [ "sklearn.utils.testing.assert_false", "sklearn.model_selection.GridSearchCV", "numpy.mean", "scipy.stats.pearsonr", "sklearn.tree.DecisionTreeRegressor", "sklearn.datasets.load_boston", "numpy.bincount", "sklearn.utils.validation.check_random_state", "numpy.array", "sklearn.utils.testing.assert_almost_equal", "sklearn.utils.testing.assert_equal", "sklearn.utils.testing.assert_raises", "sklearn.utils.testing.assert_array_almost_equal", "numpy.asarray", "sklearn.preprocessing.StandardScaler", "numpy.ascontiguousarray", "sklearn.utils.testing.assert_greater", "numpy.ones", "scipy.stats.spearmanr", "sklearn.utils.testing.assert_true", "sklearn.utils.estimator_checks.check_estimator", "sklearn.utils.testing.assert_array_equal", "sklearn.utils.testing.assert_warns", "sklearn.externals.six.moves.StringIO" ] ]
bingrao/deeplearning
[ "8488478a4355a7f56d49c5126f529c21d5a95798" ]
[ "benchmarks/learning_fix/train.py" ]
[ "import torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport torch\r\nfrom nmt.data.batch import custom_collate_fn\r\nfrom nmt.model.transformer.model import build_model\r\nfrom nmt.utils.context import Context\r\nfrom benchmarks.learning_fix.preprocess import dataset_generation\r\nfrom torch.utils.data import DataLoader\r\nimport numpy as np\r\nimport time\r\nfrom nmt.data.batch import Batch\r\n\r\n\r\nclass NoamOpt:\r\n \"\"\"Optim wrapper that implements rate.\"\"\"\r\n\r\n def __init__(self, model, factor, warmup, optimizer):\r\n self.optimizer = optimizer\r\n self._step = 0\r\n self.warmup = warmup\r\n self.factor = factor\r\n self.model_size = model.src_embed[0].d_model\r\n self._rate = 0\r\n\r\n def step(self):\r\n \"\"\"Update parameters and rate\"\"\"\r\n self._step += 1\r\n rate = self.rate()\r\n for p in self.optimizer.param_groups:\r\n p['lr'] = rate\r\n self._rate = rate\r\n self.optimizer.step()\r\n\r\n def rate(self, step=None):\r\n \"\"\"Implement `lrate` above\"\"\"\r\n if step is None:\r\n step = self._step\r\n return self.factor * \\\r\n (self.model_size ** (-0.5) *\r\n min(step ** (-0.5), step * self.warmup ** (-1.5)))\r\n\r\n\r\ndef get_std_opt(model):\r\n return NoamOpt(model, factor=1, warmup=2000,\r\n optimizer=torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))\r\n\r\n\r\nPAD_INDEX = 0\r\n\r\n\r\ndef input_target_collate_fn(batch):\r\n \"\"\"merges a list of samples to form a mini-batch.\"\"\"\r\n\r\n sources_lengths = [len(sources) for sources, targets in batch]\r\n targets_lengths = [len(targets) for sources, targets in batch]\r\n\r\n sources_max_length = max(sources_lengths)\r\n targets_max_length = max(targets_lengths)\r\n\r\n sources_padded = [sources + [PAD_INDEX] * (sources_max_length - len(sources)) for sources, targets in batch]\r\n targets_padded = [targets + [PAD_INDEX] * (targets_max_length - len(targets)) for sources, targets in batch]\r\n\r\n sources_tensor = torch.tensor(sources_padded)\r\n targets_tensor = torch.tensor(targets_padded)\r\n\r\n return Batch(sources_tensor, targets_tensor, PAD_INDEX)\r\n\r\n\r\nclass SimpleLossComputeWithLablSmoothing:\r\n \"\"\"A simple loss compute and train function.\"\"\"\r\n\r\n def __init__(self, generator, criterion, devices=None, opt=None):\r\n self.generator = generator\r\n self.criterion = criterion\r\n self.opt = opt\r\n\r\n def __call__(self, x, y, norm=1):\r\n # print(f\" Before input x {x}, y {y}\")\r\n x = self.generator(x)\r\n # print(f\" After input x {x.size()}, y {y.size()}\")\r\n loss = self.criterion(x.contiguous().view(-1, x.size(-1)),\r\n y.contiguous().view(-1)) / norm\r\n # print(f\" Ouput x {x.size()}, loss {loss}\")\r\n loss.backward()\r\n if self.opt is not None:\r\n self.opt.step()\r\n self.opt.optimizer.zero_grad()\r\n # https://github.com/pytorch/pytorch/issues/15585\r\n # return loss.data[0] * norm\r\n return loss.data.item() * norm\r\n\r\n\r\nclass SimpleLossCompute:\r\n \"\"\"A simple loss compute and train function.\"\"\"\r\n\r\n def __init__(self, generator, criterion, opt=None):\r\n self.generator = generator\r\n self.criterion = criterion\r\n self.opt = opt\r\n self.base_loss_function = nn.CrossEntropyLoss(reduction='sum', ignore_index=0)\r\n\r\n def __call__(self, x, y, norm=1):\r\n batch_size, seq_len, vocabulary_size = x.size()\r\n\r\n outputs_flat = x.view(batch_size * seq_len, vocabulary_size)\r\n targets_flat = y.view(batch_size * seq_len)\r\n\r\n loss = self.base_loss_function(outputs_flat, targets_flat)\r\n count = (y != 
0).sum().item()\r\n\r\n loss.backward()\r\n if self.opt is not None:\r\n self.opt.step()\r\n self.opt.optimizer.zero_grad()\r\n return loss, count\r\n\r\n\r\nclass LabelSmoothing(nn.Module):\r\n \"\"\"Implement label smoothing.\"\"\"\r\n\r\n def __init__(self, size, padding_idx, smoothing=0.0):\r\n super(LabelSmoothing, self).__init__()\r\n self.criterion = nn.KLDivLoss(size_average=False)\r\n self.padding_idx = padding_idx\r\n self.confidence = 1.0 - smoothing\r\n self.smoothing = smoothing\r\n self.size = size\r\n self.true_dist = None\r\n\r\n def forward(self, x, target):\r\n # print(f\" input x {x}, target {target.size()} self size {self.size}\")\r\n assert x.size(1) == self.size\r\n true_dist = x.data.clone()\r\n true_dist.fill_(self.smoothing / (self.size - 2))\r\n true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)\r\n true_dist[:, self.padding_idx] = 0\r\n mask = torch.nonzero(target.data == self.padding_idx)\r\n if mask.dim() > 0:\r\n true_dist.index_fill_(0, mask.squeeze(), 0.0)\r\n self.true_dist = true_dist\r\n # print(f\"true_dist {true_dist}\")\r\n return self.criterion(x, Variable(true_dist, requires_grad=False))\r\n\r\n\r\nclass DataProcessEngine:\r\n def __init__(self, context):\r\n self.context = context\r\n self.logger = context.logger\r\n\r\n self.train_iter = None\r\n self.eval_iter = None\r\n self.test_iter = None\r\n\r\n self.src_vocab = None\r\n self.tgt_vocab = None\r\n\r\n self.model = None\r\n self.nums_batch = context.batch_size\r\n self.epochs = context.epochs\r\n self.padding_index = context.padding_index\r\n self.device = context.device # cpu or gpu\r\n self.device_id = context.device_id # [0, 1, 2, 3]\r\n\r\n def preprocess(self, data_source_type=\"small\"):\r\n\r\n self.logger.info(f\"Loading {data_source_type} data from disk and parse it as a bunch of batches ...\")\r\n train_dataset, eval_dataset, test_dataset = dataset_generation(self.context, data_type=data_source_type)\r\n\r\n self.logger.info(\"Build iteral dataset ... 
\")\r\n self.train_iter = DataLoader(train_dataset,\r\n batch_size=self.nums_batch,\r\n shuffle=True,\r\n collate_fn=input_target_collate_fn)\r\n\r\n self.eval_iter = DataLoader(eval_dataset,\r\n batch_size=self.nums_batch,\r\n shuffle=True,\r\n collate_fn=input_target_collate_fn)\r\n\r\n self.test_iter = DataLoader(test_dataset,\r\n batch_size=self.nums_batch,\r\n shuffle=True,\r\n collate_fn=custom_collate_fn)\r\n\r\n self.logger.info(\"Build src/tgt Vocabulary ...\")\r\n self.src_vocab = train_dataset.src_vocab\r\n self.tgt_vocab = train_dataset.tgt_vocab\r\n\r\n self.logger.info(\"Build transformer model ...\")\r\n self.model = build_model(self.context, len(self.src_vocab), len(self.tgt_vocab))\r\n self.model.cuda() if self.context.is_cuda else None\r\n self.logger.debug(self.model)\r\n\r\n def run(self, loss_func=None, opt=None):\r\n criterion = LabelSmoothing(size=len(self.tgt_vocab), padding_idx=self.padding_index, smoothing=0.1)\r\n criterion.cuda() if self.context.is_cuda else None\r\n\r\n self.logger.info(\"Training Process is begining ...\")\r\n for epoch in range(self.epochs):\r\n # Set model in train\r\n self.model.train()\r\n self.run_epoch(self.train_iter,\r\n loss_func(self.model.generator, criterion, opt=opt))\r\n\r\n # Evaluation Model\r\n self.model.eval()\r\n # Get loss\r\n loss = self.run_epoch(self.eval_iter,\r\n loss_func(self.model.generator, criterion, opt=None))\r\n\r\n self.logger.info(\"The model loss is %d\", loss)\r\n\r\n def run_epoch(self, data_iter, loss_compute):\r\n \"\"\"\r\n Standard Training and Logging Function\r\n \"\"\"\r\n start = time.time()\r\n total_tokens = 0\r\n total_loss = 0\r\n tokens = 0\r\n for i, batch in enumerate(data_iter):\r\n\r\n src = batch.src.to(self.context.device) if self.context.is_cuda else batch.src\r\n trg = batch.trg.to(self.context.device) if self.context.is_cuda else batch.trg\r\n trg_y = batch.trg_y.to(self.context.device) if self.context.is_cuda else batch.trg_y\r\n src_mask = batch.src_mask.to(self.context.device) if self.context.is_cuda else batch.src_mask\r\n tgt_mask = batch.trg_mask.to(self.context.device) if self.context.is_cuda else batch.trg_mask\r\n\r\n # Model forward and output result\r\n out = self.model(src, trg, src_mask, tgt_mask)\r\n\r\n # Get loss for this iteration and backward weight to model\r\n loss = loss_compute(out, trg_y, batch.ntokens)\r\n\r\n total_loss += loss\r\n total_tokens += batch.ntokens\r\n tokens += batch.ntokens\r\n if i % 50 == 1:\r\n elapsed = time.time() - start\r\n self.logger.info(\"Epoch Step: %d Loss: %f Tokens per Sec: %f\",\r\n i, loss / batch.ntokens, tokens / elapsed)\r\n start = time.time()\r\n tokens = 0\r\n\r\n return total_loss / total_tokens\r\n\r\n def postprocess(self):\r\n pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ctx = Context(desc=\"Learning-fix based on Transformer\")\r\n logger = ctx.logger\r\n\r\n logger.info(\"Build Data Process Engine based on input parsed dataset ...\")\r\n engine = DataProcessEngine(ctx)\r\n\r\n logger.info(\"Preparing dataset and build model for trani ...\")\r\n engine.preprocess(data_source_type=\"small\")\r\n\r\n logger.info(\"Training and evaluating the model ...\")\r\n engine.run(loss_func=SimpleLossComputeWithLablSmoothing, opt=get_std_opt(engine.model))\r\n\r\n logger.info(\"Testing and data clean ...\")\r\n engine.postprocess()\r\n" ]
[ [ "torch.nonzero", "torch.autograd.Variable", "torch.tensor", "torch.utils.data.DataLoader", "torch.nn.KLDivLoss", "torch.nn.CrossEntropyLoss" ] ]
HEPonHPC/pandana
[ "8ee68071892f2a34b54a09ac54033f5d14d42019" ]
[ "pandana/core/cut.py" ]
[ "import pandas as pd\n\n\nclass Cut:\n \"\"\"Represents a selection criterion to be applied to a dataframe.\"\"\"\n\n def __init__(self, cut):\n self._cut = cut\n\n self._CurrDF = None\n self._CurrTab = None\n\n # Remember result for each instance of tables\n def __call__(self, tables):\n if tables is not self._CurrTab:\n self._CurrDF = self._cut(tables)\n self._CurrTab = tables\n return self._CurrDF\n\n def __invert__(self):\n return Cut(lambda tables: ~self(tables))\n\n def __and__(self, other):\n def AndCut(tables):\n df1 = self(tables)\n df2 = other(tables)\n\n if not df1.index.equals(df2.index):\n df2, df1 = df2.align(df1, axis=0, join=\"inner\")\n ret = df1.to_numpy() & df2.to_numpy()\n ret = pd.Series(ret, index=df1.index)\n\n return ret\n\n return Cut(AndCut)\n\n def __or__(self, other):\n def OrCut(tables):\n df1 = self(tables)\n df2 = other(tables)\n\n if not df1.index.equals(df2.index):\n df2, df1 = df2.align(df1, axis=0, join=\"inner\")\n ret = df1.to_numpy() | df2.to_numpy()\n ret = pd.Series(ret, index=df1.index)\n\n return ret\n\n return Cut(OrCut)\n" ]
[ [ "pandas.Series" ] ]
ltrottier/tensorflow-object-recognition
[ "cbf011f0241277411fb44ce78bbd09a6b4e5734f" ]
[ "stats.py" ]
[ "import numpy as np\nimport tensorflow as tf\n\n\ndef create_n_observations(network_output_tensor, modes):\n with tf.variable_scope('n_observations'):\n n_observations = tf.get_variable(\n 'variable',\n [],\n initializer=tf.initializers.zeros(),\n trainable=False,\n collections=['stats'])\n n_observations_cur = tf.cast(tf.shape(network_output_tensor)[0], tf.float32)\n n_observations_update_asgn = tf.assign_add(n_observations, n_observations_cur)\n n_observations_init = tf.variables_initializer([n_observations])\n n_observations_summary_protobuf = tf.summary.scalar('summary', n_observations)\n\n for mode in modes:\n tf.add_to_collection(\"{}_begin\".format(mode), n_observations_init)\n tf.add_to_collection(\"{}_step\".format(mode), n_observations_update_asgn)\n tf.add_to_collection(\"{}_summary\".format(mode), n_observations_summary_protobuf)\n\n\ndef create_error_rate(target_tensor, network_output_tensor, modes):\n\n # number of observations\n n_observations = tf.get_variable('n_observations/variable')\n\n with tf.variable_scope('error_rate'):\n # number of errors\n n_errors = tf.get_variable(\n 'n_errors',\n [],\n initializer=tf.initializers.zeros(),\n trainable=False,\n collections=['stats'])\n n_errors_init = tf.variables_initializer([n_errors])\n n_errors_cur = tf.argmax(network_output_tensor, 1)\n n_errors_cur = tf.not_equal(n_errors_cur, tf.reshape(target_tensor, [-1]))\n n_errors_cur = tf.reduce_sum(tf.cast(n_errors_cur, tf.float32))\n n_errors_update_asgn = tf.assign_add(n_errors, n_errors_cur)\n\n # error rate\n error_rate_tensor = n_errors / n_observations\n error_rate_summary_protobuf = tf.summary.scalar('summary', error_rate_tensor)\n\n # collection\n for mode in modes:\n tf.add_to_collection(\"{}_begin\".format(mode), n_errors_init)\n tf.add_to_collection(\"{}_step\".format(mode), n_errors_update_asgn)\n tf.add_to_collection(\"{}_summary\".format(mode), error_rate_summary_protobuf)\n\n\ndef create_loss_average(network_output_tensor, loss_tensor, modes):\n\n # number of observations\n n_observations = tf.get_variable('n_observations/variable')\n\n with tf.variable_scope('loss_average'):\n loss_sum = tf.get_variable(\n 'loss_sum',\n [],\n initializer=tf.initializers.constant(0),\n trainable=False)\n loss_sum_init = tf.variables_initializer([loss_sum])\n n_observations_cur = tf.cast(tf.shape(network_output_tensor)[0], tf.float32)\n loss_sum_update_asgn = tf.assign_add(loss_sum, loss_tensor * n_observations_cur)\n loss_average = loss_sum / n_observations\n loss_average_summary_protobuf = tf.summary.scalar('summary', loss_average)\n\n for mode in modes:\n tf.add_to_collection(\"{}_begin\".format(mode), loss_sum_init)\n tf.add_to_collection(\"{}_step\".format(mode), loss_sum_update_asgn)\n tf.add_to_collection(\"{}_summary\".format(mode), loss_average_summary_protobuf)\n\n\ndef create_input_image_visualization(input_tensor, modes):\n\n # input summary\n with tf.variable_scope('input_image_visualization'):\n input_image = tf.get_variable(\n 'variable',\n [],\n initializer=tf.initializers.zeros(),\n trainable=False,\n validate_shape=False)\n\n first_batch = tf.get_variable(\n 'condition',\n [],\n dtype=tf.bool,\n initializer=tf.initializers.constant(True),\n trainable=False)\n\n def true_fn():\n first_batch_asgn = tf.assign(first_batch, False)\n input_image_asgn = tf.assign(input_image, input_tensor, validate_shape=False)\n return [first_batch_asgn, input_image_asgn]\n\n def false_fn():\n return [first_batch, input_image]\n\n [first_batch_cond_asgn, input_image_cond_asgn] = 
tf.cond(tf.equal(first_batch, True), true_fn, false_fn)\n\n input_image_summary_protobuf = tf.summary.image('image', input_image, 3)\n first_batch_init = tf.variables_initializer([first_batch])\n\n for mode in modes:\n tf.add_to_collection(\"{}_begin\".format(mode), first_batch_init)\n tf.add_to_collection(\"{}_step\".format(mode), first_batch_cond_asgn)\n tf.add_to_collection(\"{}_step\".format(mode), input_image_cond_asgn)\n tf.add_to_collection(\"{}_summary\".format(mode), input_image_summary_protobuf)\n\n\n\ndef create_from_list(stats_train_list, stats_test_list, input_tensor, target_tensor, network_output_tensor, loss_tensor):\n\n # since we compute stats per epoch, always create the stats for the number of observations\n stats_train_list = ['n_observations'] + stats_train_list\n stats_test_list = ['n_observations'] + stats_test_list\n\n with tf.variable_scope('stats', reuse=tf.AUTO_REUSE):\n\n def add_stats(name, target_tensor, network_output_tensor, loss_tensor, modes):\n if name == 'n_observations':\n create_n_observations(network_output_tensor, modes)\n elif name == 'error_rate':\n create_error_rate(target_tensor, network_output_tensor, modes)\n elif name == 'loss_average':\n create_loss_average(network_output_tensor, loss_tensor, modes)\n elif name == 'input_image_visualization':\n create_input_image_visualization(input_tensor, modes)\n else:\n raise Exception(\"Invalid stats name: {}, for modes: {}\".format(name, modes))\n\n # train stats\n for stats_name in stats_train_list:\n modes = ['train', 'test'] if stats_name in stats_test_list else ['train']\n add_stats(stats_name, target_tensor, network_output_tensor, loss_tensor, modes)\n\n # test stats\n for stats_name in stats_test_list:\n if stats_name in stats_train_list:\n continue\n add_stats(stats_name, target_tensor, network_output_tensor, loss_tensor, ['test'])\n" ]
[ [ "tensorflow.initializers.zeros", "tensorflow.summary.image", "tensorflow.assign", "tensorflow.shape", "tensorflow.summary.scalar", "tensorflow.argmax", "tensorflow.equal", "tensorflow.reshape", "tensorflow.assign_add", "tensorflow.variable_scope", "tensorflow.variables_initializer", "tensorflow.get_variable", "tensorflow.initializers.constant", "tensorflow.cast" ] ]
pgiank28/jina
[ "f96030cf2e0d4393c03c206adc4717e328d069e7" ]
[ "tests/unit/test_gateway.py" ]
[ "import functools\nimport time\nfrom threading import Thread\n\nimport numpy as np\nimport pytest\nimport requests\n\nfrom jina.flow import Flow\n\nconcurrency = 10\n\n\n# @pytest.mark.skip('this tests hang up for unknown reason on github')\ndef test_rest_gateway_concurrency():\n def _request(status_codes, durations, index):\n resp = requests.post(\n f'http://0.0.0.0:{f.port_expose}/api/index',\n json={\n 'data': [\n 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AxWcWRUeCEeBO68T3u1qLWarHqMaxDnxhAEaLh0Ssu6ZGfnKcjP4CeDLoJok3o4aOPYAJocsjktZfo4Z7Q/WR1UTgppAAdguAhR+AUm9AnqRH2jgdBZ0R+kKxAFoAME32BL7fwQbcLzhw+dXMmY9BS9K8EarXyWLH8VYK1MACkxlLTY4Eh69XfjpROqjE7P0AeBx6DGmA8/lRRlTCmPkL196pC0aWBkVs2wyjqb/LABVYL8Xgeomjl3VtEMxAeaUrGvnIawVh/oBAAD///GwU6v3yCoVAAAAAElFTkSuQmCC',\n 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AvdGjTZeOlQq07xSYPgJjlWRwfWEBx2+CgAVrPrP+O5ghhOa+a0cocoWnaMJFAsBuCQCgiJOKDBcIQTiLieOrPD/cp/6iZ/Iu4HqAh5dGzggIQVJI3WqTxwVTDjs5XJOy38AlgHoaKgY+xJEXeFTyR7FOfF7JNWjs3b8evQE6B2dTDvQZx3n3Rz6rgOtVlaZRLvR9geCAxuY3G+0mepEAhrTISES3bwPWYYi48OUrQOc//IaJeij9xZGGmDIG9kc73fNI7eA8VMBAAD//0SxXMMT90UdAAAAAElFTkSuQmCC']})\n durations[index] = resp.elapsed.total_seconds()\n status_codes[index] = resp.status_code\n\n f = Flow(rest_api=True).add(\n uses='_pass',\n parallel=2)\n with f:\n concurrency = 50\n threads = []\n status_codes = [None] * concurrency\n durations = [None] * concurrency\n for i in range(concurrency):\n t = Thread(target=_request, args=(status_codes, durations, i))\n t.daemon = True\n t.start()\n threads.append(t)\n\n for t in threads:\n t.join()\n\n success = status_codes.count(200)\n failed = len(status_codes) - success\n print(\n f'\\nmin roundtrip time: {np.min(durations)}\\n',\n f'max roundtrip time: {np.max(durations)}\\n'\n f'mean roundtrip time: {np.mean(durations)}\\n'\n )\n assert success >= 1\n # In some slow environments, a certain degree of failed\n # requests will occur. 
Here we limit the degree of failed\n # requests.\n rate = failed / success\n assert rate < 0.1\n\n\[email protected]('raw grpc gateway is not stable enough under high concurrency')\ndef test_grpc_gateway_concurrency():\n def _input_fn():\n return iter([\n 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AxWcWRUeCEeBO68T3u1qLWarHqMaxDnxhAEaLh0Ssu6ZGfnKcjP4CeDLoJok3o4aOPYAJocsjktZfo4Z7Q/WR1UTgppAAdguAhR+AUm9AnqRH2jgdBZ0R+kKxAFoAME32BL7fwQbcLzhw+dXMmY9BS9K8EarXyWLH8VYK1MACkxlLTY4Eh69XfjpROqjE7P0AeBx6DGmA8/lRRlTCmPkL196pC0aWBkVs2wyjqb/LABVYL8Xgeomjl3VtEMxAeaUrGvnIawVh/oBAAD///GwU6v3yCoVAAAAAElFTkSuQmCC',\n 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AvdGjTZeOlQq07xSYPgJjlWRwfWEBx2+CgAVrPrP+O5ghhOa+a0cocoWnaMJFAsBuCQCgiJOKDBcIQTiLieOrPD/cp/6iZ/Iu4HqAh5dGzggIQVJI3WqTxwVTDjs5XJOy38AlgHoaKgY+xJEXeFTyR7FOfF7JNWjs3b8evQE6B2dTDvQZx3n3Rz6rgOtVlaZRLvR9geCAxuY3G+0mepEAhrTISES3bwPWYYi48OUrQOc//IaJeij9xZGGmDIG9kc73fNI7eA8VMBAAD//0SxXMMT90UdAAAAAElFTkSuQmCC'])\n\n def _validate(req, start, status_codes, durations, index):\n end = time.time()\n durations[index] = (end - start)\n status_codes[index] = req.status.code\n\n def _request(f, status_codes, durations, index):\n start = time.time()\n f.index(\n input_fn=_input_fn,\n output_fn=functools.partial(\n _validate,\n start=start,\n status_codes=status_codes,\n durations=durations,\n index=index\n ))\n\n f = Flow().add(\n uses='_pass',\n parallel=2)\n with f:\n threads = []\n status_codes = [None] * concurrency\n durations = [None] * concurrency\n for i in range(concurrency):\n t = Thread(\n target=_request, args=(\n f, status_codes, durations, i))\n threads.append(t)\n t.start()\n\n for t in threads:\n t.join()\n print(f'terminate {t}')\n\n success = status_codes.count(0)\n failed = len(status_codes) - success\n print(\n f'\\nmin roundtrip time: {np.min(durations)}\\n',\n f'max roundtrip time: {np.max(durations)}\\n'\n f'mean roundtrip time: {np.mean(durations)}\\n'\n )\n assert success >= 1\n # In some slow environments, a certain degree of failed\n # requests will occur. Here we limit the degree of failed\n # requests.\n rate = failed / success\n assert rate < 0.1\n" ]
[ [ "numpy.max", "numpy.min", "numpy.mean" ] ]
jiazhi412/stylegan2-ada-pytorch
[ "d6bc5a2bf8ec28970a117168cf79c3c71c032eca" ]
[ "projector.py" ]
[ "# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\n\"\"\"Project given image to the latent space of pretrained network pickle.\"\"\"\n\nimport copy\nimport os\nfrom time import perf_counter\n\nimport click\nimport imageio\nimport numpy as np\nimport PIL.Image\nimport torch\nimport torch.nn.functional as F\n\nimport dnnlib\nimport legacy\n\ndef project(\n G,\n target: torch.Tensor, # [C,H,W] and dynamic range [0,255], W & H must match G output resolution\n *,\n num_steps = 1000,\n w_avg_samples = 10000,\n initial_learning_rate = 0.1,\n initial_noise_factor = 0.05,\n lr_rampdown_length = 0.25,\n lr_rampup_length = 0.05,\n noise_ramp_length = 0.75,\n regularize_noise_weight = 1e5,\n verbose = False,\n device: torch.device\n):\n assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution)\n\n def logprint(*args):\n if verbose:\n print(*args)\n\n G = copy.deepcopy(G).eval().requires_grad_(False).to(device) # type: ignore\n\n # Compute w stats.\n logprint(f'Computing W midpoint and stddev using {w_avg_samples} samples...')\n z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim)\n w_samples = G.mapping(torch.from_numpy(z_samples).to(device), None) # [N, L, C]\n w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32) # [N, 1, C]\n w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, 1, C]\n w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5\n\n # Setup noise inputs.\n noise_bufs = { name: buf for (name, buf) in G.synthesis.named_buffers() if 'noise_const' in name }\n\n # Load VGG16 feature detector.\n url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'\n with dnnlib.util.open_url(url) as f:\n vgg16 = torch.jit.load(f).eval().to(device)\n\n # Features for target image.\n target_images = target.unsqueeze(0).to(device).to(torch.float32)\n if target_images.shape[2] > 256:\n target_images = F.interpolate(target_images, size=(256, 256), mode='area')\n target_features = vgg16(target_images, resize_images=False, return_lpips=True)\n\n w_opt = torch.tensor(w_avg, dtype=torch.float32, device=device, requires_grad=True) # pylint: disable=not-callable\n w_out = torch.zeros([num_steps] + list(w_opt.shape[1:]), dtype=torch.float32, device=device)\n optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999), lr=initial_learning_rate)\n\n # Init noise.\n for buf in noise_bufs.values():\n buf[:] = torch.randn_like(buf)\n buf.requires_grad = True\n\n for step in range(num_steps):\n # Learning rate schedule.\n t = step / num_steps\n w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2\n lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)\n lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)\n lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)\n lr = initial_learning_rate * lr_ramp\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n # Synth images from opt_w.\n w_noise = torch.randn_like(w_opt) * w_noise_scale\n ws = (w_opt + w_noise).repeat([1, G.mapping.num_ws, 1])\n synth_images = G.synthesis(ws, noise_mode='const')\n\n # Downsample image to 256x256 if it's larger 
than that. VGG was built for 224x224 images.\n synth_images = (synth_images + 1) * (255/2)\n if synth_images.shape[2] > 256:\n synth_images = F.interpolate(synth_images, size=(256, 256), mode='area')\n\n # Features for synth images.\n synth_features = vgg16(synth_images, resize_images=False, return_lpips=True)\n dist = (target_features - synth_features).square().sum()\n\n # Noise regularization.\n reg_loss = 0.0\n for v in noise_bufs.values():\n noise = v[None,None,:,:] # must be [1,1,H,W] for F.avg_pool2d()\n while True:\n reg_loss += (noise*torch.roll(noise, shifts=1, dims=3)).mean()**2\n reg_loss += (noise*torch.roll(noise, shifts=1, dims=2)).mean()**2\n if noise.shape[2] <= 8:\n break\n noise = F.avg_pool2d(noise, kernel_size=2)\n loss = dist + reg_loss * regularize_noise_weight\n\n # Step\n optimizer.zero_grad(set_to_none=True)\n loss.backward()\n optimizer.step()\n logprint(f'step {step+1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')\n\n # Save projected W for each optimization step.\n w_out[step] = w_opt.detach()[0]\n\n # Normalize noise.\n with torch.no_grad():\n for buf in noise_bufs.values():\n buf -= buf.mean()\n buf *= buf.square().mean().rsqrt()\n\n return w_out.repeat([1, G.mapping.num_ws, 1])\n\n#----------------------------------------------------------------------------\n\[email protected]()\[email protected]('--network', 'network_pkl', help='Network pickle filename', required=True)\[email protected]('--target', 'target_fname', help='Target image file to project to', required=True, metavar='FILE')\[email protected]('--num-steps', help='Number of optimization steps', type=int, default=1000, show_default=True)\[email protected]('--seed', help='Random seed', type=int, default=303, show_default=True)\[email protected]('--save-video', help='Save an mp4 video of optimization progress', type=bool, default=True, show_default=True)\[email protected]('--outdir', help='Where to save the output images', required=True, metavar='DIR')\ndef run_projection(\n network_pkl: str,\n target_fname: str,\n outdir: str,\n save_video: bool,\n seed: int,\n num_steps: int\n):\n \"\"\"Project given image to the latent space of pretrained network pickle.\n\n Examples:\n\n \\b\n python projector.py --outdir=out --target=~/mytargetimg.png \\\\\n --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/ffhq.pkl\n \"\"\"\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n # Load networks.\n print('Loading networks from \"%s\"...' 
% network_pkl)\n device = torch.device('cuda')\n with dnnlib.util.open_url(network_pkl) as fp:\n G = legacy.load_network_pkl(fp)['G_ema'].requires_grad_(False).to(device) # type: ignore\n\n # Load target image.\n target_pil = PIL.Image.open(target_fname).convert('RGB')\n w, h = target_pil.size\n s = min(w, h)\n target_pil = target_pil.crop(((w - s) // 2, (h - s) // 2, (w + s) // 2, (h + s) // 2))\n target_pil = target_pil.resize((G.img_resolution, G.img_resolution), PIL.Image.LANCZOS)\n target_uint8 = np.array(target_pil, dtype=np.uint8)\n\n # Optimize projection.\n start_time = perf_counter()\n projected_w_steps = project(\n G,\n target=torch.tensor(target_uint8.transpose([2, 0, 1]), device=device), # pylint: disable=not-callable\n num_steps=num_steps,\n device=device,\n verbose=True\n )\n print (f'Elapsed: {(perf_counter()-start_time):.1f} s')\n\n img_index = target_fname.split(\"/\")[-1].split(\".\")[0]\n\n # Render debug output: optional video and projected image and W vector.\n os.makedirs(outdir, exist_ok=True)\n if save_video:\n video = imageio.get_writer(f'{outdir}/proj_{img_index}.mp4', mode='I', fps=10, codec='libx264', bitrate='16M')\n print (f'Saving optimization progress video \"{outdir}/proj_{img_index}.mp4\"')\n for projected_w in projected_w_steps:\n synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')\n synth_image = (synth_image + 1) * (255/2)\n synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()\n video.append_data(np.concatenate([target_uint8, synth_image], axis=1))\n video.close()\n\n # Save final projected frame and W vector.\n target_pil.save(f'{outdir}/target_{img_index}.png')\n projected_w = projected_w_steps[-1]\n synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')\n synth_image = (synth_image + 1) * (255/2)\n synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()\n PIL.Image.fromarray(synth_image, 'RGB').save(f'{outdir}/proj_{img_index}.png')\n np.savez(f'{outdir}/projected_w_{img_index}.npz', w=projected_w.unsqueeze(0).cpu().numpy())\n\n#----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n run_projection() # pylint: disable=no-value-for-parameter\n\n#----------------------------------------------------------------------------\n" ]
[ [ "numpy.concatenate", "torch.device", "numpy.array", "torch.nn.functional.avg_pool2d", "torch.roll", "numpy.random.RandomState", "numpy.random.seed", "numpy.sum", "torch.nn.functional.interpolate", "torch.no_grad", "numpy.mean", "torch.from_numpy", "torch.manual_seed", "torch.jit.load", "torch.randn_like", "torch.tensor", "numpy.cos" ] ]
mdca-aux-loss/MDCA-Calibration
[ "de8d0986a3b8b5794df316b577bc21c8c501ef36" ]
[ "experiments/rotated_mnist.py" ]
[ "import os\nfrom utils.misc import AverageMeter\n\nimport torch\nfrom utils import Logger, parse_args\nfrom solvers.runners import test\n\nfrom models import model_dict\nfrom datasets import corrupted_dataloader_dict, dataset_nclasses_dict, dataset_classname_dict, corrupted_dataset_dict\nfrom datasets.mnist import get_rotated_set\n\nimport logging\n\nif __name__ == \"__main__\":\n \n args = parse_args()\n logging.basicConfig(level=logging.INFO, \n format=\"%(levelname)s: %(message)s\",\n handlers=[\n logging.StreamHandler()\n ])\n # set up dataset\n logging.info(f\"Using dataset : {args.dataset}\")\n num_classes = dataset_nclasses_dict[args.dataset]\n classes_name_list = dataset_classname_dict[args.dataset]\n \n # prepare model\n logging.info(f\"Using model : {args.model}\")\n\n model_path_list = open(\"tmux_runs/model_paths.txt\", \"r\").readlines()\n \n criterion = torch.nn.CrossEntropyLoss()\n\n # set up loggers\n path_to_log = \"results/ood_test_{}_{}.txt\".format(args.dataset, args.model)\n global_logger = Logger(path_to_log, resume=os.path.exists(path_to_log))\n global_logger.set_names(['method', 'test_nll', 'top1', 'top3', 'top5', 'SCE', 'ECE'])\n\n for path in model_path_list:\n path = path.rstrip(\"\\n\")\n\n assert path, \"Please provide a trained model file\"\n try:\n assert os.path.isfile(path)\n except:\n print(f\"{path} does not exist.\")\n continue\n logging.info(f'Resuming from saved checkpoint: {path}')\n \n checkpoint_folder = os.path.dirname(path)\n saved_model_dict = torch.load(path)\n\n metric_log_path = os.path.join(checkpoint_folder, \"ood_test.txt\")\n logger = Logger(metric_log_path, resume=os.path.exists(metric_log_path))\n logger.set_names(['method', 'test_nll', 'top1', 'top3', 'top5', 'SCE', 'ECE'])\n\n model = model_dict[args.model](num_classes=num_classes)\n model.load_state_dict(saved_model_dict['state_dict'])\n model.cuda()\n\n # read corruptions\n corruption_list = [0, 15, 30, 45, 60, 75]\n\n top1_avg = AverageMeter()\n top3_avg = AverageMeter()\n sce_avg = AverageMeter()\n ece_avg = AverageMeter()\n test_nll_avg = AverageMeter()\n\n for angle in corruption_list:\n testloader = get_rotated_set(args, angle)\n test_loss, top1, top3, top5, cce_score, ece_score = test(testloader, model, criterion)\n method_name = f\"angle={angle}\"\n logger.append([method_name, test_loss, top1, top3, top5, cce_score, ece_score])\n global_logger.append([f\"{path}_angle={angle}\", test_loss, top1, top3, top5, cce_score, ece_score])\n\n top1_avg.update(top1)\n top3_avg.update(top3)\n sce_avg.update(cce_score)\n ece_avg.update(ece_score)\n test_nll_avg.update(test_loss)\n\n logger.append([f\"avg_angles\", test_nll_avg.avg, top1_avg.avg, top3_avg.avg, top3_avg.avg, sce_avg.avg, ece_avg.avg])\n logger.close()\n global_logger.append([f\"{path}_avg_angles\", test_nll_avg.avg, top1_avg.avg, top3_avg.avg, top3_avg.avg, sce_avg.avg, ece_avg.avg])\n\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.load" ] ]
jbusecke/pygmt
[ "9ef6338dbb9bdd4c31dda94da6d4126852a6cd85" ]
[ "pygmt/clib/session.py" ]
[ "\"\"\"\nDefines the Session class to create and destroy a GMT API session and provides\naccess to the API functions.\n\nUses ctypes to wrap most of the core functions from the C API.\n\"\"\"\nimport ctypes as ctp\nimport sys\nfrom contextlib import contextmanager\n\nimport numpy as np\nimport pandas as pd\nfrom packaging.version import Version\nfrom pygmt.clib.conversion import (\n array_to_datetime,\n as_c_contiguous,\n dataarray_to_matrix,\n kwargs_to_ctypes_array,\n vectors_to_arrays,\n)\nfrom pygmt.clib.loading import load_libgmt\nfrom pygmt.exceptions import (\n GMTCLibError,\n GMTCLibNoSessionError,\n GMTInvalidInput,\n GMTVersionError,\n)\nfrom pygmt.helpers import data_kind, dummy_context, fmt_docstring, tempfile_from_geojson\n\nFAMILIES = [\n \"GMT_IS_DATASET\",\n \"GMT_IS_GRID\",\n \"GMT_IS_PALETTE\",\n \"GMT_IS_MATRIX\",\n \"GMT_IS_VECTOR\",\n]\n\nVIAS = [\"GMT_VIA_MATRIX\", \"GMT_VIA_VECTOR\"]\n\nGEOMETRIES = [\n \"GMT_IS_NONE\",\n \"GMT_IS_POINT\",\n \"GMT_IS_LINE\",\n \"GMT_IS_POLYGON\",\n \"GMT_IS_PLP\",\n \"GMT_IS_SURFACE\",\n]\n\nMETHODS = [\"GMT_IS_DUPLICATE\", \"GMT_IS_REFERENCE\"]\n\nMODES = [\"GMT_CONTAINER_ONLY\", \"GMT_IS_OUTPUT\"]\n\nREGISTRATIONS = [\"GMT_GRID_PIXEL_REG\", \"GMT_GRID_NODE_REG\"]\n\nDTYPES = {\n np.float64: \"GMT_DOUBLE\",\n np.float32: \"GMT_FLOAT\",\n np.int64: \"GMT_LONG\",\n np.int32: \"GMT_INT\",\n np.uint64: \"GMT_ULONG\",\n np.uint32: \"GMT_UINT\",\n np.datetime64: \"GMT_DATETIME\",\n np.str_: \"GMT_TEXT\",\n}\n\n\nclass Session:\n \"\"\"\n A GMT API session where most operations involving the C API happen.\n\n Works as a context manager (for use in a ``with`` block) to create a GMT C\n API session and destroy it in the end to clean up memory.\n\n Functions of the shared library are exposed as methods of this class. Most\n methods MUST be used with an open session (inside a ``with`` block). If\n creating GMT data structures to communicate data, put that code inside the\n same ``with`` block as the API calls that will use the data.\n\n By default, will let :mod:`ctypes` try to find the GMT shared library\n (``libgmt``). If the environment variable ``GMT_LIBRARY_PATH`` is set, will\n look for the shared library in the directory specified by it.\n\n A ``GMTVersionError`` exception will be raised if the GMT shared library\n reports a version older than the required minimum GMT version.\n\n The ``session_pointer`` attribute holds a ctypes pointer to the currently\n open session.\n\n Raises\n ------\n GMTCLibNotFoundError\n If there was any problem loading the library (couldn't find it or\n couldn't access the functions).\n GMTCLibNoSessionError\n If you try to call a method outside of a 'with' block.\n GMTVersionError\n If the minimum required version of GMT is not found.\n\n Examples\n --------\n\n >>> from pygmt.datasets import load_earth_relief\n >>> from pygmt.helpers import GMTTempFile\n >>> grid = load_earth_relief()\n >>> type(grid)\n <class 'xarray.core.dataarray.DataArray'>\n >>> # Create a session and destroy it automatically when exiting the \"with\"\n >>> # block.\n >>> with Session() as ses:\n ... # Create a virtual file and link to the memory block of the grid.\n ... with ses.virtualfile_from_grid(grid) as fin:\n ... # Create a temp file to use as output.\n ... with GMTTempFile() as fout:\n ... # Call the grdinfo module with the virtual file as input\n ... # and the temp file as output.\n ... ses.call_module(\"grdinfo\", f\"{fin} -C ->{fout.name}\")\n ... # Read the contents of the temp file before it's deleted.\n ... 
print(fout.read().strip())\n ...\n -180 180 -90 90 -8182 5651.5 1 1 360 180 1 1\n \"\"\"\n\n # The minimum version of GMT required\n required_version = \"6.3.0\"\n\n @property\n def session_pointer(self):\n \"\"\"\n The :class:`ctypes.c_void_p` pointer to the current open GMT session.\n\n Raises\n ------\n GMTCLibNoSessionError\n If trying to access without a currently open GMT session (i.e.,\n outside of the context manager).\n \"\"\"\n if not hasattr(self, \"_session_pointer\") or self._session_pointer is None:\n raise GMTCLibNoSessionError(\"No currently open GMT API session.\")\n return self._session_pointer\n\n @session_pointer.setter\n def session_pointer(self, session):\n \"\"\"\n Set the session void pointer.\n \"\"\"\n self._session_pointer = session\n\n @property\n def info(self):\n \"\"\"\n Dictionary with the GMT version and default paths and parameters.\n \"\"\"\n if not hasattr(self, \"_info\"):\n self._info = {\n \"version\": self.get_default(\"API_VERSION\"),\n \"padding\": self.get_default(\"API_PAD\"),\n \"binary dir\": self.get_default(\"API_BINDIR\"),\n \"share dir\": self.get_default(\"API_SHAREDIR\"),\n # This segfaults for some reason\n # 'data dir': self.get_default(\"API_DATADIR\"),\n \"plugin dir\": self.get_default(\"API_PLUGINDIR\"),\n \"library path\": self.get_default(\"API_LIBRARY\"),\n \"cores\": self.get_default(\"API_CORES\"),\n # API_IMAGE_LAYOUT not defined if GMT is not compiled with GDAL\n # \"image layout\": self.get_default(\"API_IMAGE_LAYOUT\"),\n \"grid layout\": self.get_default(\"API_GRID_LAYOUT\"),\n }\n return self._info\n\n def __enter__(self):\n \"\"\"\n Create a GMT API session and check the libgmt version.\n\n Calls :meth:`pygmt.clib.Session.create`.\n\n Raises\n ------\n GMTVersionError\n If the version reported by libgmt is less than\n ``Session.required_version``. Will destroy the session before\n raising the exception.\n \"\"\"\n self.create(\"pygmt-session\")\n # Need to store the version info because 'get_default' won't work after\n # the session is destroyed.\n version = self.info[\"version\"]\n if Version(version) < Version(self.required_version):\n self.destroy()\n raise GMTVersionError(\n f\"Using an incompatible GMT version {version}. \"\n f\"Must be equal or newer than {self.required_version}.\"\n )\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"\n Destroy the currently open GMT API session.\n\n Calls :meth:`pygmt.clib.Session.destroy`.\n \"\"\"\n self.destroy()\n\n def __getitem__(self, name):\n \"\"\"\n Get the value of a GMT constant (C enum) from gmt_resources.h.\n\n Used to set configuration values for other API calls. Wraps\n ``GMT_Get_Enum``.\n\n Parameters\n ----------\n name : str\n The name of the constant (e.g., ``\"GMT_SESSION_EXTERNAL\"``)\n\n Returns\n -------\n constant : int\n Integer value of the constant. Do not rely on this value because it\n might change.\n\n Raises\n ------\n GMTCLibError\n If the constant doesn't exist.\n \"\"\"\n c_get_enum = self.get_libgmt_func(\n \"GMT_Get_Enum\", argtypes=[ctp.c_void_p, ctp.c_char_p], restype=ctp.c_int\n )\n\n # The C lib introduced the void API pointer to GMT_Get_Enum so that\n # it's consistent with other functions. It doesn't use the pointer so\n # we can pass in None (NULL pointer). 
We can't give it the actual\n # pointer because we need to call GMT_Get_Enum when creating a new API\n # session pointer (chicken-and-egg type of thing).\n session = None\n\n value = c_get_enum(session, name.encode())\n\n if value is None or value == -99999:\n raise GMTCLibError(f\"Constant '{name}' doesn't exist in libgmt.\")\n\n return value\n\n def get_libgmt_func(self, name, argtypes=None, restype=None):\n \"\"\"\n Get a ctypes function from the libgmt shared library.\n\n Assigns the argument and return type conversions for the function.\n\n Use this method to access a C function from libgmt.\n\n Parameters\n ----------\n name : str\n The name of the GMT API function.\n argtypes : list\n List of ctypes types used to convert the Python input arguments for\n the API function.\n restype : ctypes type\n The ctypes type used to convert the input returned by the function\n into a Python type.\n\n Returns\n -------\n function\n The GMT API function.\n\n Examples\n --------\n\n >>> from ctypes import c_void_p, c_int\n >>> with Session() as lib:\n ... func = lib.get_libgmt_func(\n ... \"GMT_Destroy_Session\", argtypes=[c_void_p], restype=c_int\n ... )\n ...\n >>> type(func)\n <class 'ctypes.CDLL.__init__.<locals>._FuncPtr'>\n \"\"\"\n if not hasattr(self, \"_libgmt\"):\n self._libgmt = load_libgmt()\n function = getattr(self._libgmt, name)\n if argtypes is not None:\n function.argtypes = argtypes\n if restype is not None:\n function.restype = restype\n return function\n\n def create(self, name):\n \"\"\"\n Create a new GMT C API session.\n\n This is required before most other methods of\n :class:`pygmt.clib.Session` can be called.\n\n .. warning::\n\n Usage of :class:`pygmt.clib.Session` as a context manager in a\n ``with`` block is preferred over calling\n :meth:`pygmt.clib.Session.create` and\n :meth:`pygmt.clib.Session.destroy` manually.\n\n Calls ``GMT_Create_Session`` and generates a new ``GMTAPI_CTRL``\n struct, which is a :class:`ctypes.c_void_p` pointer. Sets the\n ``session_pointer`` attribute to this pointer.\n\n Remember to terminate the current session using\n :meth:`pygmt.clib.Session.destroy` before creating a new one.\n\n Parameters\n ----------\n name : str\n A name for this session. Doesn't really affect the outcome.\n \"\"\"\n try:\n # Won't raise an exception if there is a currently open session\n self.session_pointer # pylint: disable=pointless-statement\n # In this case, fail to create a new session until the old one is\n # destroyed\n raise GMTCLibError(\n \"Failed to create a GMT API session: There is a currently open session.\"\n \" Must destroy it fist.\"\n )\n # If the exception is raised, this means that there is no open session\n # and we're free to create a new one.\n except GMTCLibNoSessionError:\n pass\n\n c_create_session = self.get_libgmt_func(\n \"GMT_Create_Session\",\n argtypes=[ctp.c_char_p, ctp.c_uint, ctp.c_uint, ctp.c_void_p],\n restype=ctp.c_void_p,\n )\n\n # Capture the output printed by GMT into this list. 
Will use it later\n # to generate error messages for the exceptions raised by API calls.\n self._error_log = []\n\n @ctp.CFUNCTYPE(ctp.c_int, ctp.c_void_p, ctp.c_char_p)\n def print_func(file_pointer, message): # pylint: disable=unused-argument\n \"\"\"\n Callback function that the GMT C API will use to print log and\n error messages.\n\n We'll capture the messages and print them to stderr so that they\n will show up on the Jupyter notebook.\n \"\"\"\n message = message.decode().strip()\n self._error_log.append(message)\n # flush to make sure the messages are printed even if we have a\n # crash.\n print(message, file=sys.stderr, flush=True)\n return 0\n\n # Need to store a copy of the function because ctypes doesn't and it\n # will be garbage collected otherwise\n self._print_callback = print_func\n\n padding = self[\"GMT_PAD_DEFAULT\"]\n session_type = self[\"GMT_SESSION_EXTERNAL\"]\n\n session = c_create_session(name.encode(), padding, session_type, print_func)\n\n if session is None:\n raise GMTCLibError(\n f\"Failed to create a GMT API session:\\n{self._error_message}\"\n )\n\n self.session_pointer = session\n\n @property\n def _error_message(self):\n \"\"\"\n A string with all error messages emitted by the C API.\n\n Only includes messages with the string ``\"[ERROR]\"`` in them.\n \"\"\"\n msg = \"\"\n if hasattr(self, \"_error_log\"):\n msg = \"\\n\".join(line for line in self._error_log if \"[ERROR]\" in line)\n return msg\n\n def destroy(self):\n \"\"\"\n Destroy the currently open GMT API session.\n\n .. warning::\n\n Usage of :class:`pygmt.clib.Session` as a context manager in a\n ``with`` block is preferred over calling\n :meth:`pygmt.clib.Session.create` and\n :meth:`pygmt.clib.Session.destroy` manually.\n\n Calls ``GMT_Destroy_Session`` to terminate and free the memory of a\n registered ``GMTAPI_CTRL`` session (the pointer for this struct is\n stored in the ``session_pointer`` attribute).\n\n Always use this method after you are done using a C API session. The\n session needs to be destroyed before creating a new one. 
Otherwise,\n some of the configuration files might be left behind and can influence\n subsequent API calls.\n\n Sets the ``session_pointer`` attribute to ``None``.\n \"\"\"\n c_destroy_session = self.get_libgmt_func(\n \"GMT_Destroy_Session\", argtypes=[ctp.c_void_p], restype=ctp.c_int\n )\n\n status = c_destroy_session(self.session_pointer)\n if status:\n raise GMTCLibError(\n f\"Failed to destroy GMT API session:\\n{self._error_message}\"\n )\n\n self.session_pointer = None\n\n def get_default(self, name):\n \"\"\"\n Get the value of a GMT default parameter (library version, paths, etc).\n\n Possible default parameter names include:\n\n * ``\"API_VERSION\"``: The GMT version\n * ``\"API_PAD\"``: The grid padding setting\n * ``\"API_BINDIR\"``: The binary file directory\n * ``\"API_SHAREDIR\"``: The share directory\n * ``\"API_DATADIR\"``: The data directory\n * ``\"API_PLUGINDIR\"``: The plugin directory\n * ``\"API_LIBRARY\"``: The core library path\n * ``\"API_CORES\"``: The number of cores\n * ``\"API_IMAGE_LAYOUT\"``: The image/band layout\n * ``\"API_GRID_LAYOUT\"``: The grid layout\n\n Parameters\n ----------\n name : str\n The name of the default parameter (e.g., ``\"API_VERSION\"``)\n\n Returns\n -------\n value : str\n The default value for the parameter.\n\n Raises\n ------\n GMTCLibError\n If the parameter doesn't exist.\n \"\"\"\n c_get_default = self.get_libgmt_func(\n \"GMT_Get_Default\",\n argtypes=[ctp.c_void_p, ctp.c_char_p, ctp.c_char_p],\n restype=ctp.c_int,\n )\n\n # Make a string buffer to get a return value\n value = ctp.create_string_buffer(10000)\n\n status = c_get_default(self.session_pointer, name.encode(), value)\n\n if status != 0:\n raise GMTCLibError(\n f\"Error getting default value for '{name}' (error code {status}).\"\n )\n\n return value.value.decode()\n\n def call_module(self, module, args):\n \"\"\"\n Call a GMT module with the given arguments.\n\n Makes a call to ``GMT_Call_Module`` from the C API using mode\n ``GMT_MODULE_CMD`` (arguments passed as a single string).\n\n Most interactions with the C API are done through this function.\n\n Parameters\n ----------\n module : str\n Module name (``'coast'``, ``'basemap'``, etc).\n args : str\n String with the command line arguments that will be passed to the\n module (for example, ``'-R0/5/0/10 -JM'``).\n\n Raises\n ------\n GMTCLibError\n If the returned status code of the function is non-zero.\n \"\"\"\n c_call_module = self.get_libgmt_func(\n \"GMT_Call_Module\",\n argtypes=[ctp.c_void_p, ctp.c_char_p, ctp.c_int, ctp.c_void_p],\n restype=ctp.c_int,\n )\n\n mode = self[\"GMT_MODULE_CMD\"]\n status = c_call_module(\n self.session_pointer, module.encode(), mode, args.encode()\n )\n if status != 0:\n raise GMTCLibError(\n f\"Module '{module}' failed with status code {status}:\\n{self._error_message}\"\n )\n\n def create_data(self, family, geometry, mode, **kwargs):\n \"\"\"\n Create an empty GMT data container.\n\n Parameters\n ----------\n family : str\n A valid GMT data family name (e.g., ``'GMT_IS_DATASET'``). See the\n ``FAMILIES`` attribute for valid names.\n geometry : str\n A valid GMT data geometry name (e.g., ``'GMT_IS_POINT'``). See the\n ``GEOMETRIES`` attribute for valid names.\n mode : str\n A valid GMT data mode (e.g., ``'GMT_IS_OUTPUT'``). See the\n ``MODES`` attribute for valid names.\n dim : list of 4 integers\n The dimensions of the dataset. See the documentation for the GMT C\n API function ``GMT_Create_Data`` (``src/gmt_api.c``) for the full\n range of options regarding 'dim'. 
If ``None``, will pass in the\n NULL pointer.\n ranges : list of 4 floats\n The dataset extent. Also a bit of a complicated argument. See the C\n function documentation. It's called ``range`` in the C function but\n it would conflict with the Python built-in ``range`` function.\n inc : list of 2 floats\n The increments between points of the dataset. See the C function\n documentation.\n registration : str\n The node registration (what the coordinates mean). Can be\n ``'GMT_GRID_PIXEL_REG'`` or ``'GMT_GRID_NODE_REG'``. Defaults to\n ``'GMT_GRID_NODE_REG'``.\n pad : int\n The grid padding. Defaults to ``GMT_PAD_DEFAULT``.\n\n Returns\n -------\n data_ptr : int\n A ctypes pointer (an integer) to the allocated ``GMT_Dataset``\n object.\n \"\"\"\n c_create_data = self.get_libgmt_func(\n \"GMT_Create_Data\",\n argtypes=[\n ctp.c_void_p, # API\n ctp.c_uint, # family\n ctp.c_uint, # geometry\n ctp.c_uint, # mode\n ctp.POINTER(ctp.c_uint64), # dim\n ctp.POINTER(ctp.c_double), # range\n ctp.POINTER(ctp.c_double), # inc\n ctp.c_uint, # registration\n ctp.c_int, # pad\n ctp.c_void_p,\n ], # data\n restype=ctp.c_void_p,\n )\n\n family_int = self._parse_constant(family, valid=FAMILIES, valid_modifiers=VIAS)\n mode_int = self._parse_constant(\n mode,\n valid=MODES,\n valid_modifiers=[\"GMT_GRID_IS_CARTESIAN\", \"GMT_GRID_IS_GEO\"],\n )\n geometry_int = self._parse_constant(geometry, valid=GEOMETRIES)\n registration_int = self._parse_constant(\n kwargs.get(\"registration\", \"GMT_GRID_NODE_REG\"), valid=REGISTRATIONS\n )\n\n # Convert dim, ranges, and inc to ctypes arrays if given (will be None\n # if not given to represent NULL pointers)\n dim = kwargs_to_ctypes_array(\"dim\", kwargs, ctp.c_uint64 * 4)\n ranges = kwargs_to_ctypes_array(\"ranges\", kwargs, ctp.c_double * 4)\n inc = kwargs_to_ctypes_array(\"inc\", kwargs, ctp.c_double * 2)\n\n # Use a NULL pointer (None) for existing data to indicate that the\n # container should be created empty. Fill it in later using put_vector\n # and put_matrix.\n data_ptr = c_create_data(\n self.session_pointer,\n family_int,\n geometry_int,\n mode_int,\n dim,\n ranges,\n inc,\n registration_int,\n self._parse_pad(family, kwargs),\n None,\n )\n\n if data_ptr is None:\n raise GMTCLibError(\"Failed to create an empty GMT data pointer.\")\n\n return data_ptr\n\n def _parse_pad(self, family, kwargs):\n \"\"\"\n Parse and return an appropriate value for pad if none is given.\n\n Pad is a bit tricky because, for matrix types, pad control the matrix\n ordering (row or column major). Using the default pad will set it to\n column major and mess things up with the numpy arrays.\n \"\"\"\n pad = kwargs.get(\"pad\", None)\n if pad is None:\n if \"MATRIX\" in family:\n pad = 0\n else:\n pad = self[\"GMT_PAD_DEFAULT\"]\n return pad\n\n def _parse_constant(self, constant, valid, valid_modifiers=None):\n \"\"\"\n Parse a constant, convert it to an int, and validate it.\n\n The GMT C API takes certain defined constants, like ``'GMT_IS_GRID'``,\n that need to be validated and converted to integer values using\n :meth:`pygmt.clib.Session.__getitem__`.\n\n The constants can also take a modifier by appending another constant\n name, e.g. ``'GMT_IS_GRID|GMT_VIA_MATRIX'``. The two parts must be\n converted separately and their values are added.\n\n If valid modifiers are not given, then will assume that modifiers are\n not allowed. 
In this case, will raise a\n :class:`pygmt.exceptions.GMTInvalidInput` exception if given a\n modifier.\n\n Parameters\n ----------\n constant : str\n The name of a valid GMT API constant, with an optional modifier.\n valid : list of str\n A list of valid values for the constant. Will raise a\n :class:`pygmt.exceptions.GMTInvalidInput` exception if the given\n value is not on the list.\n \"\"\"\n parts = constant.split(\"|\")\n name = parts[0]\n nmodifiers = len(parts) - 1\n if nmodifiers > 1:\n raise GMTInvalidInput(\n f\"Only one modifier is allowed in constants, {nmodifiers} given: '{constant}'\"\n )\n if nmodifiers > 0 and valid_modifiers is None:\n raise GMTInvalidInput(\n \"Constant modifiers not allowed since valid values were not \"\n + f\"given: '{constant}'\"\n )\n if name not in valid:\n raise GMTInvalidInput(\n f\"Invalid constant argument '{name}'. Must be one of {str(valid)}.\"\n )\n if (\n nmodifiers > 0\n and valid_modifiers is not None\n and parts[1] not in valid_modifiers\n ):\n raise GMTInvalidInput(\n f\"Invalid constant modifier '{parts[1]}'. Must be one of {str(valid_modifiers)}.\"\n )\n integer_value = sum(self[part] for part in parts)\n return integer_value\n\n def _check_dtype_and_dim(self, array, ndim):\n \"\"\"\n Check that a numpy array has the given dimensions and is a valid data\n type.\n\n Parameters\n ----------\n array : numpy array\n The array to be tested.\n ndim : int\n The desired dimension of the array.\n\n Returns\n -------\n gmt_type : int\n The GMT constant value representing this data type.\n\n Raises\n ------\n GMTCLibError\n If the array has the wrong dimensions or is an unsupported data\n type.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> data = np.array([1, 2, 3], dtype=\"float64\")\n >>> with Session() as ses:\n ... gmttype = ses._check_dtype_and_dim(data, ndim=1)\n ... gmttype == ses[\"GMT_DOUBLE\"]\n ...\n True\n >>> data = np.ones((5, 2), dtype=\"float32\")\n >>> with Session() as ses:\n ... gmttype = ses._check_dtype_and_dim(data, ndim=2)\n ... gmttype == ses[\"GMT_FLOAT\"]\n ...\n True\n \"\"\"\n # check the array has the given dimension\n if array.ndim != ndim:\n raise GMTInvalidInput(f\"Expected a numpy 1d array, got {array.ndim}d.\")\n\n # check the array has a valid/known data type\n if array.dtype.type not in DTYPES:\n try:\n # Try to convert any unknown numpy data types to np.datetime64\n array = np.asarray(array, dtype=np.datetime64)\n except ValueError as e:\n raise GMTInvalidInput(\n f\"Unsupported numpy data type '{array.dtype.type}'.\"\n ) from e\n return self[DTYPES[array.dtype.type]]\n\n def put_vector(self, dataset, column, vector):\n r\"\"\"\n Attach a numpy 1D array as a column on a GMT dataset.\n\n Use this function to attach numpy array data to a GMT dataset and pass\n it to GMT modules. Wraps ``GMT_Put_Vector``.\n\n The dataset must be created by :meth:`pygmt.clib.Session.create_data`\n first. Use ``family='GMT_IS_DATASET|GMT_VIA_VECTOR'``.\n\n Not at all numpy dtypes are supported, only: float64, float32, int64,\n int32, uint64, uint32, datetime64 and str\\_.\n\n .. warning::\n The numpy array must be C contiguous in memory. If it comes from a\n column slice of a 2d array, for example, you will have to make a\n copy. Use :func:`numpy.ascontiguousarray` to make sure your vector\n is contiguous (it won't copy if it already is).\n\n Parameters\n ----------\n dataset : :class:`ctypes.c_void_p`\n The ctypes void pointer to a ``GMT_Dataset``. 
Create it with\n :meth:`pygmt.clib.Session.create_data`.\n column : int\n The column number of this vector in the dataset (starting from 0).\n vector : numpy 1d-array\n The array that will be attached to the dataset. Must be a 1d C\n contiguous array.\n\n Raises\n ------\n GMTCLibError\n If given invalid input or ``GMT_Put_Vector`` exits with status !=\n 0.\n \"\"\"\n c_put_vector = self.get_libgmt_func(\n \"GMT_Put_Vector\",\n argtypes=[ctp.c_void_p, ctp.c_void_p, ctp.c_uint, ctp.c_uint, ctp.c_void_p],\n restype=ctp.c_int,\n )\n\n gmt_type = self._check_dtype_and_dim(vector, ndim=1)\n if gmt_type in (self[\"GMT_TEXT\"], self[\"GMT_DATETIME\"]):\n vector_pointer = (ctp.c_char_p * len(vector))()\n if gmt_type == self[\"GMT_DATETIME\"]:\n vector_pointer[:] = np.char.encode(\n np.datetime_as_string(array_to_datetime(vector))\n )\n else:\n vector_pointer[:] = np.char.encode(vector)\n else:\n vector_pointer = vector.ctypes.data_as(ctp.c_void_p)\n status = c_put_vector(\n self.session_pointer, dataset, column, gmt_type, vector_pointer\n )\n if status != 0:\n raise GMTCLibError(\n (\n f\"Failed to put vector of type {vector.dtype} \"\n f\"in column {column} of dataset.\"\n )\n )\n\n def put_strings(self, dataset, family, strings):\n \"\"\"\n Attach a numpy 1D array of dtype str as a column on a GMT dataset.\n\n Use this function to attach string type numpy array data to a GMT\n dataset and pass it to GMT modules. Wraps ``GMT_Put_Strings``.\n\n The dataset must be created by :meth:`pygmt.clib.Session.create_data`\n first.\n\n .. warning::\n The numpy array must be C contiguous in memory. If it comes from a\n column slice of a 2d array, for example, you will have to make a\n copy. Use :func:`numpy.ascontiguousarray` to make sure your vector\n is contiguous (it won't copy if it already is).\n\n Parameters\n ----------\n dataset : :class:`ctypes.c_void_p`\n The ctypes void pointer to a ``GMT_Dataset``. Create it with\n :meth:`pygmt.clib.Session.create_data`.\n family : str\n The family type of the dataset. Can be either ``GMT_IS_VECTOR`` or\n ``GMT_IS_MATRIX``.\n strings : numpy 1d-array\n The array that will be attached to the dataset. Must be a 1d C\n contiguous array.\n\n Raises\n ------\n GMTCLibError\n If given invalid input or ``GMT_Put_Strings`` exits with status !=\n 0.\n \"\"\"\n c_put_strings = self.get_libgmt_func(\n \"GMT_Put_Strings\",\n argtypes=[\n ctp.c_void_p,\n ctp.c_uint,\n ctp.c_void_p,\n ctp.POINTER(ctp.c_char_p),\n ],\n restype=ctp.c_int,\n )\n\n strings_pointer = (ctp.c_char_p * len(strings))()\n strings_pointer[:] = np.char.encode(strings)\n\n family_int = self._parse_constant(\n family, valid=FAMILIES, valid_modifiers=METHODS\n )\n\n status = c_put_strings(\n self.session_pointer, family_int, dataset, strings_pointer\n )\n if status != 0:\n raise GMTCLibError(\n f\"Failed to put strings of type {strings.dtype} into dataset\"\n )\n\n def put_matrix(self, dataset, matrix, pad=0):\n \"\"\"\n Attach a numpy 2D array to a GMT dataset.\n\n Use this function to attach numpy array data to a GMT dataset and pass\n it to GMT modules. Wraps ``GMT_Put_Matrix``.\n\n The dataset must be created by :meth:`pygmt.clib.Session.create_data`\n first. Use ``|GMT_VIA_MATRIX'`` in the family.\n\n Not at all numpy dtypes are supported, only: float64, float32, int64,\n int32, uint64, and uint32.\n\n .. warning::\n The numpy array must be C contiguous in memory. 
Use\n :func:`numpy.ascontiguousarray` to make sure your vector is\n contiguous (it won't copy if it already is).\n\n Parameters\n ----------\n dataset : :class:`ctypes.c_void_p`\n The ctypes void pointer to a ``GMT_Dataset``. Create it with\n :meth:`pygmt.clib.Session.create_data`.\n matrix : numpy 2d-array\n The array that will be attached to the dataset. Must be a 2d C\n contiguous array.\n pad : int\n The amount of padding that should be added to the matrix. Use when\n creating grids for modules that require padding.\n\n Raises\n ------\n GMTCLibError\n If given invalid input or ``GMT_Put_Matrix`` exits with status !=\n 0.\n \"\"\"\n c_put_matrix = self.get_libgmt_func(\n \"GMT_Put_Matrix\",\n argtypes=[ctp.c_void_p, ctp.c_void_p, ctp.c_uint, ctp.c_int, ctp.c_void_p],\n restype=ctp.c_int,\n )\n\n gmt_type = self._check_dtype_and_dim(matrix, ndim=2)\n matrix_pointer = matrix.ctypes.data_as(ctp.c_void_p)\n status = c_put_matrix(\n self.session_pointer, dataset, gmt_type, pad, matrix_pointer\n )\n if status != 0:\n raise GMTCLibError(f\"Failed to put matrix of type {matrix.dtype}.\")\n\n def write_data(self, family, geometry, mode, wesn, output, data):\n \"\"\"\n Write a GMT data container to a file.\n\n The data container should be created by\n :meth:`pygmt.clib.Session.create_data`.\n\n Wraps ``GMT_Write_Data`` but only allows writing to a file. So the\n ``method`` argument is omitted.\n\n Parameters\n ----------\n family : str\n A valid GMT data family name (e.g., ``'GMT_IS_DATASET'``). See the\n ``FAMILIES`` attribute for valid names. Don't use the\n ``GMT_VIA_VECTOR`` or ``GMT_VIA_MATRIX`` constructs for this. Use\n ``GMT_IS_VECTOR`` and ``GMT_IS_MATRIX`` instead.\n geometry : str\n A valid GMT data geometry name (e.g., ``'GMT_IS_POINT'``). See the\n ``GEOMETRIES`` attribute for valid names.\n mode : str\n How the data is to be written to the file. This option varies\n depending on the given family. See the GMT API documentation for\n details.\n wesn : list or numpy array\n [xmin, xmax, ymin, ymax, zmin, zmax] of the data. Must have 6\n elements.\n output : str\n The output file name.\n data : :class:`ctypes.c_void_p`\n Pointer to the data container created by\n :meth:`pygmt.clib.Session.create_data`.\n\n Raises\n ------\n GMTCLibError\n For invalid input arguments or if the GMT API functions returns a\n non-zero status code.\n \"\"\"\n c_write_data = self.get_libgmt_func(\n \"GMT_Write_Data\",\n argtypes=[\n ctp.c_void_p,\n ctp.c_uint,\n ctp.c_uint,\n ctp.c_uint,\n ctp.c_uint,\n ctp.POINTER(ctp.c_double),\n ctp.c_char_p,\n ctp.c_void_p,\n ],\n restype=ctp.c_int,\n )\n\n family_int = self._parse_constant(family, valid=FAMILIES, valid_modifiers=VIAS)\n geometry_int = self._parse_constant(geometry, valid=GEOMETRIES)\n status = c_write_data(\n self.session_pointer,\n family_int,\n self[\"GMT_IS_FILE\"],\n geometry_int,\n self[mode],\n (ctp.c_double * 6)(*wesn),\n output.encode(),\n data,\n )\n if status != 0:\n raise GMTCLibError(f\"Failed to write dataset to '{output}'\")\n\n @contextmanager\n def open_virtual_file(self, family, geometry, direction, data):\n \"\"\"\n Open a GMT Virtual File to pass data to and from a module.\n\n GMT uses a virtual file scheme to pass in data to API modules. Use it\n to pass in your GMT data structure (created using\n :meth:`pygmt.clib.Session.create_data`) to a module that expects an\n input or output file.\n\n Use in a ``with`` block. Will automatically close the virtual file when\n leaving the ``with`` block. 
Because of this, no wrapper for\n ``GMT_Close_VirtualFile`` is provided.\n\n Parameters\n ----------\n family : str\n A valid GMT data family name (e.g., ``'GMT_IS_DATASET'``). Should\n be the same as the one you used to create your data structure.\n geometry : str\n A valid GMT data geometry name (e.g., ``'GMT_IS_POINT'``). Should\n be the same as the one you used to create your data structure.\n direction : str\n Either ``'GMT_IN'`` or ``'GMT_OUT'`` to indicate if passing data to\n GMT or getting it out of GMT, respectively.\n By default, GMT can modify the data you pass in. Add modifier\n ``'GMT_IS_REFERENCE'`` to tell GMT the data are read-only, or\n ``'GMT_IS_DUPLICATE'`` to tell GMT to duplicate the data.\n data : int\n The ctypes void pointer to your GMT data structure.\n\n Yields\n ------\n vfname : str\n The name of the virtual file that you can pass to a GMT module.\n\n Examples\n --------\n\n >>> from pygmt.helpers import GMTTempFile\n >>> import os\n >>> import numpy as np\n >>> x = np.array([0, 1, 2, 3, 4])\n >>> y = np.array([5, 6, 7, 8, 9])\n >>> with Session() as lib:\n ... family = \"GMT_IS_DATASET|GMT_VIA_VECTOR\"\n ... geometry = \"GMT_IS_POINT\"\n ... dataset = lib.create_data(\n ... family=family,\n ... geometry=geometry,\n ... mode=\"GMT_CONTAINER_ONLY\",\n ... dim=[2, 5, 1, 0], # columns, lines, segments, type\n ... )\n ... lib.put_vector(dataset, column=0, vector=x)\n ... lib.put_vector(dataset, column=1, vector=y)\n ... # Add the dataset to a virtual file\n ... vfargs = (family, geometry, \"GMT_IN|GMT_IS_REFERENCE\", dataset)\n ... with lib.open_virtual_file(*vfargs) as vfile:\n ... # Send the output to a temp file so that we can read it\n ... with GMTTempFile() as ofile:\n ... args = f\"{vfile} ->{ofile.name}\"\n ... lib.call_module(\"info\", args)\n ... print(ofile.read().strip())\n ...\n <vector memory>: N = 5 <0/4> <5/9>\n \"\"\"\n c_open_virtualfile = self.get_libgmt_func(\n \"GMT_Open_VirtualFile\",\n argtypes=[\n ctp.c_void_p,\n ctp.c_uint,\n ctp.c_uint,\n ctp.c_uint,\n ctp.c_void_p,\n ctp.c_char_p,\n ],\n restype=ctp.c_int,\n )\n\n c_close_virtualfile = self.get_libgmt_func(\n \"GMT_Close_VirtualFile\",\n argtypes=[ctp.c_void_p, ctp.c_char_p],\n restype=ctp.c_int,\n )\n\n family_int = self._parse_constant(family, valid=FAMILIES, valid_modifiers=VIAS)\n geometry_int = self._parse_constant(geometry, valid=GEOMETRIES)\n direction_int = self._parse_constant(\n direction, valid=[\"GMT_IN\", \"GMT_OUT\"], valid_modifiers=METHODS\n )\n\n buff = ctp.create_string_buffer(self[\"GMT_VF_LEN\"])\n\n status = c_open_virtualfile(\n self.session_pointer, family_int, geometry_int, direction_int, data, buff\n )\n\n if status != 0:\n raise GMTCLibError(\"Failed to create a virtual file.\")\n\n vfname = buff.value.decode()\n\n try:\n yield vfname\n finally:\n status = c_close_virtualfile(self.session_pointer, vfname.encode())\n if status != 0:\n raise GMTCLibError(f\"Failed to close virtual file '{vfname}'.\")\n\n @contextmanager\n def virtualfile_from_vectors(self, *vectors):\n \"\"\"\n Store 1d arrays as columns of a table inside a virtual file.\n\n Use the virtual file name to pass in the data in your vectors to a GMT\n module.\n\n Context manager (use in a ``with`` block). Yields the virtual file name\n that you can pass as an argument to a GMT module call. 
Closes the\n virtual file upon exit of the ``with`` block.\n\n Use this instead of creating the data container and virtual file by\n hand with :meth:`pygmt.clib.Session.create_data`,\n :meth:`pygmt.clib.Session.put_vector`, and\n :meth:`pygmt.clib.Session.open_virtual_file`.\n\n If the arrays are C contiguous blocks of memory, they will be passed\n without copying to GMT. If they are not (e.g., they are columns of a 2D\n array), they will need to be copied to a contiguous block.\n\n Parameters\n ----------\n vectors : 1d arrays\n The vectors that will be included in the array. All must be of the\n same size.\n\n Yields\n ------\n fname : str\n The name of virtual file. Pass this as a file name argument to a\n GMT module.\n\n Examples\n --------\n\n >>> from pygmt.helpers import GMTTempFile\n >>> import numpy as np\n >>> import pandas as pd\n >>> x = [1, 2, 3]\n >>> y = np.array([4, 5, 6])\n >>> z = pd.Series([7, 8, 9])\n >>> with Session() as ses:\n ... with ses.virtualfile_from_vectors(x, y, z) as fin:\n ... # Send the output to a file so that we can read it\n ... with GMTTempFile() as fout:\n ... ses.call_module(\"info\", f\"{fin} ->{fout.name}\")\n ... print(fout.read().strip())\n ...\n <vector memory>: N = 3 <1/3> <4/6> <7/9>\n \"\"\"\n # Conversion to a C-contiguous array needs to be done here and not in\n # put_vector or put_strings because we need to maintain a reference to\n # the copy while it is being used by the C API. Otherwise, the array\n # would be garbage collected and the memory freed. Creating it in this\n # context manager guarantees that the copy will be around until the\n # virtual file is closed. The conversion is implicit in\n # vectors_to_arrays.\n arrays = vectors_to_arrays(vectors)\n\n columns = len(arrays)\n # Find arrays that are of string dtype from column 3 onwards\n # Assumes that first 2 columns contains coordinates like longitude\n # latitude, or datetime string types.\n for col, array in enumerate(arrays[2:]):\n if pd.api.types.is_string_dtype(array.dtype):\n columns = col + 2\n break\n\n rows = len(arrays[0])\n if not all(len(i) == rows for i in arrays):\n raise GMTInvalidInput(\"All arrays must have same size.\")\n\n family = \"GMT_IS_DATASET|GMT_VIA_VECTOR\"\n geometry = \"GMT_IS_POINT\"\n\n dataset = self.create_data(\n family, geometry, mode=\"GMT_CONTAINER_ONLY\", dim=[columns, rows, 1, 0]\n )\n\n # Use put_vector for columns with numerical type data\n for col, array in enumerate(arrays[:columns]):\n self.put_vector(dataset, column=col, vector=array)\n\n # Use put_strings for last column(s) with string type data\n # Have to use modifier \"GMT_IS_DUPLICATE\" to duplicate the strings\n string_arrays = arrays[columns:]\n if string_arrays:\n if len(string_arrays) == 1:\n strings = string_arrays[0]\n elif len(string_arrays) > 1:\n strings = np.apply_along_axis(\n func1d=\" \".join, axis=0, arr=string_arrays\n )\n strings = np.asanyarray(a=strings, dtype=str)\n self.put_strings(\n dataset, family=\"GMT_IS_VECTOR|GMT_IS_DUPLICATE\", strings=strings\n )\n\n with self.open_virtual_file(\n family, geometry, \"GMT_IN|GMT_IS_REFERENCE\", dataset\n ) as vfile:\n yield vfile\n\n @contextmanager\n def virtualfile_from_matrix(self, matrix):\n \"\"\"\n Store a 2d array as a table inside a virtual file.\n\n Use the virtual file name to pass in the data in your matrix to a GMT\n module.\n\n Context manager (use in a ``with`` block). Yields the virtual file name\n that you can pass as an argument to a GMT module call. 
Closes the\n virtual file upon exit of the ``with`` block.\n\n The virtual file will contain the array as a ``GMT_MATRIX`` pretending\n to be a ``GMT_DATASET``.\n\n **Not meant for creating ``GMT_GRID``**. The grid requires more\n metadata than just the data matrix. Use\n :meth:`pygmt.clib.Session.virtualfile_from_grid` instead.\n\n Use this instead of creating the data container and virtual file by\n hand with :meth:`pygmt.clib.Session.create_data`,\n :meth:`pygmt.clib.Session.put_matrix`, and\n :meth:`pygmt.clib.Session.open_virtual_file`\n\n The matrix must be C contiguous in memory. If it is not (e.g., it is a\n slice of a larger array), the array will be copied to make sure it is.\n\n Parameters\n ----------\n matrix : 2d array\n The matrix that will be included in the GMT data container.\n\n Yields\n ------\n fname : str\n The name of virtual file. Pass this as a file name argument to a\n GMT module.\n\n Examples\n --------\n\n >>> from pygmt.helpers import GMTTempFile\n >>> import numpy as np\n >>> data = np.arange(12).reshape((4, 3))\n >>> print(data)\n [[ 0 1 2]\n [ 3 4 5]\n [ 6 7 8]\n [ 9 10 11]]\n >>> with Session() as ses:\n ... with ses.virtualfile_from_matrix(data) as fin:\n ... # Send the output to a file so that we can read it\n ... with GMTTempFile() as fout:\n ... ses.call_module(\"info\", f\"{fin} ->{fout.name}\")\n ... print(fout.read().strip())\n ...\n <matrix memory>: N = 4 <0/9> <1/10> <2/11>\n \"\"\"\n # Conversion to a C-contiguous array needs to be done here and not in\n # put_matrix because we need to maintain a reference to the copy while\n # it is being used by the C API. Otherwise, the array would be garbage\n # collected and the memory freed. Creating it in this context manager\n # guarantees that the copy will be around until the virtual file is\n # closed.\n matrix = as_c_contiguous(matrix)\n rows, columns = matrix.shape\n\n family = \"GMT_IS_DATASET|GMT_VIA_MATRIX\"\n geometry = \"GMT_IS_POINT\"\n\n dataset = self.create_data(\n family, geometry, mode=\"GMT_CONTAINER_ONLY\", dim=[columns, rows, 1, 0]\n )\n\n self.put_matrix(dataset, matrix)\n\n with self.open_virtual_file(\n family, geometry, \"GMT_IN|GMT_IS_REFERENCE\", dataset\n ) as vfile:\n yield vfile\n\n @contextmanager\n def virtualfile_from_grid(self, grid):\n \"\"\"\n Store a grid in a virtual file.\n\n Use the virtual file name to pass in the data in your grid to a GMT\n module. Grids must be :class:`xarray.DataArray` instances.\n\n Context manager (use in a ``with`` block). Yields the virtual file name\n that you can pass as an argument to a GMT module call. Closes the\n virtual file upon exit of the ``with`` block.\n\n The virtual file will contain the grid as a ``GMT_MATRIX`` with extra\n metadata.\n\n Use this instead of creating a data container and virtual file by hand\n with :meth:`pygmt.clib.Session.create_data`,\n :meth:`pygmt.clib.Session.put_matrix`, and\n :meth:`pygmt.clib.Session.open_virtual_file`\n\n The grid data matrix must be C contiguous in memory. If it is not\n (e.g., it is a slice of a larger array), the array will be copied to\n make sure it is.\n\n Parameters\n ----------\n grid : :class:`xarray.DataArray`\n The grid that will be included in the virtual file.\n\n Yields\n ------\n fname : str\n The name of virtual file. 
Pass this as a file name argument to a\n GMT module.\n\n Examples\n --------\n\n >>> from pygmt.datasets import load_earth_relief\n >>> from pygmt.helpers import GMTTempFile\n >>> data = load_earth_relief(resolution=\"01d\")\n >>> print(data.shape)\n (180, 360)\n >>> print(data.lon.values.min(), data.lon.values.max())\n -179.5 179.5\n >>> print(data.lat.values.min(), data.lat.values.max())\n -89.5 89.5\n >>> print(data.values.min(), data.values.max())\n -8182.0 5651.5\n >>> with Session() as ses:\n ... with ses.virtualfile_from_grid(data) as fin:\n ... # Send the output to a file so that we can read it\n ... with GMTTempFile() as fout:\n ... args = f\"{fin} -L0 -Cn ->{fout.name}\"\n ... ses.call_module(\"grdinfo\", args)\n ... print(fout.read().strip())\n ...\n -180 180 -90 90 -8182 5651.5 1 1 360 180 1 1\n >>> # The output is: w e s n z0 z1 dx dy n_columns n_rows reg gtype\n \"\"\"\n _gtype = {0: \"GMT_GRID_IS_CARTESIAN\", 1: \"GMT_GRID_IS_GEO\"}[grid.gmt.gtype]\n _reg = {0: \"GMT_GRID_NODE_REG\", 1: \"GMT_GRID_PIXEL_REG\"}[grid.gmt.registration]\n\n # Conversion to a C-contiguous array needs to be done here and not in\n # put_matrix because we need to maintain a reference to the copy while\n # it is being used by the C API. Otherwise, the array would be garbage\n # collected and the memory freed. Creating it in this context manager\n # guarantees that the copy will be around until the virtual file is\n # closed. The conversion is implicit in dataarray_to_matrix.\n matrix, region, inc = dataarray_to_matrix(grid)\n\n family = \"GMT_IS_GRID|GMT_VIA_MATRIX\"\n geometry = \"GMT_IS_SURFACE\"\n gmt_grid = self.create_data(\n family,\n geometry,\n mode=f\"GMT_CONTAINER_ONLY|{_gtype}\",\n ranges=region,\n inc=inc,\n registration=_reg,\n )\n self.put_matrix(gmt_grid, matrix)\n args = (family, geometry, \"GMT_IN|GMT_IS_REFERENCE\", gmt_grid)\n with self.open_virtual_file(*args) as vfile:\n yield vfile\n\n @fmt_docstring\n def virtualfile_from_data(\n self,\n check_kind=None,\n data=None,\n x=None,\n y=None,\n z=None,\n extra_arrays=None,\n required_z=False,\n ):\n \"\"\"\n Store any data inside a virtual file.\n\n This convenience function automatically detects the kind of data passed\n into it, and produces a virtualfile that can be passed into GMT later\n on.\n\n Parameters\n ----------\n check_kind : str\n Used to validate the type of data that can be passed in. Choose\n from 'raster', 'vector' or None. Default is None (no validation).\n data : str or pathlib.Path or xarray.DataArray or {table-like} or None\n Any raster or vector data format. This could be a file name or\n path, a raster grid, a vector matrix/arrays, or other supported\n data input.\n x/y/z : 1d arrays or None\n x, y and z columns as numpy arrays.\n extra_arrays : list of 1d arrays\n Optional. A list of numpy arrays in addition to x, y and z. All\n of these arrays must be of the same size as the x/y/z arrays.\n required_z : bool\n State whether the 'z' column is required.\n\n Returns\n -------\n file_context : contextlib._GeneratorContextManager\n The virtual file stored inside a context manager. Access the file\n name of this virtualfile using ``with file_context as fname: ...``.\n\n Examples\n --------\n >>> from pygmt.helpers import GMTTempFile\n >>> import xarray as xr\n >>> data = xr.Dataset(\n ... coords=dict(index=[0, 1, 2]),\n ... data_vars=dict(\n ... x=(\"index\", [9, 8, 7]),\n ... y=(\"index\", [6, 5, 4]),\n ... z=(\"index\", [3, 2, 1]),\n ... ),\n ... )\n >>> with Session() as ses:\n ... 
with ses.virtualfile_from_data(\n ... check_kind=\"vector\", data=data\n ... ) as fin:\n ... # Send the output to a file so that we can read it\n ... with GMTTempFile() as fout:\n ... ses.call_module(\"info\", fin + \" ->\" + fout.name)\n ... print(fout.read().strip())\n ...\n <vector memory>: N = 3 <7/9> <4/6> <1/3>\n \"\"\"\n kind = data_kind(data, x, y, z, required_z=required_z)\n\n if check_kind == \"raster\" and kind not in (\"file\", \"grid\"):\n raise GMTInvalidInput(f\"Unrecognized data type for grid: {type(data)}\")\n if check_kind == \"vector\" and kind not in (\n \"file\",\n \"matrix\",\n \"vectors\",\n \"geojson\",\n ):\n raise GMTInvalidInput(f\"Unrecognized data type for vector: {type(data)}\")\n\n # Decide which virtualfile_from_ function to use\n _virtualfile_from = {\n \"file\": dummy_context,\n \"geojson\": tempfile_from_geojson,\n \"grid\": self.virtualfile_from_grid,\n # Note: virtualfile_from_matrix is not used because a matrix can be\n # converted to vectors instead, and using vectors allows for better\n # handling of string type inputs (e.g. for datetime data types)\n \"matrix\": self.virtualfile_from_vectors,\n \"vectors\": self.virtualfile_from_vectors,\n }[kind]\n\n # Ensure the data is an iterable (Python list or tuple)\n if kind in (\"geojson\", \"grid\"):\n _data = (data,)\n elif kind == \"file\":\n # Useful to handle `pathlib.Path` and string file path alike\n _data = (str(data),)\n elif kind == \"vectors\":\n _data = [np.atleast_1d(x), np.atleast_1d(y)]\n if z is not None:\n _data.append(np.atleast_1d(z))\n if extra_arrays:\n _data.extend(extra_arrays)\n elif kind == \"matrix\": # turn 2D arrays into list of vectors\n try:\n # pandas.Series will be handled below like a 1d numpy ndarray\n assert not hasattr(data, \"to_frame\")\n # pandas.DataFrame and xarray.Dataset types\n _data = [array for _, array in data.items()]\n except (AttributeError, AssertionError):\n try:\n # Just use virtualfile_from_matrix for 2D numpy.ndarray\n # which are signed integer (i), unsigned integer (u) or\n # floating point (f) types\n assert data.ndim == 2 and data.dtype.kind in \"iuf\"\n _virtualfile_from = self.virtualfile_from_matrix\n _data = (data,)\n except (AssertionError, AttributeError):\n # Python list, tuple, numpy ndarray and pandas.Series types\n _data = np.atleast_2d(np.asanyarray(data).T)\n\n # Finally create the virtualfile from the data, to be passed into GMT\n file_context = _virtualfile_from(*_data)\n\n return file_context\n\n def extract_region(self):\n \"\"\"\n Extract the WESN bounding box of the currently active figure.\n\n Retrieves the information from the PostScript file, so it works for\n country codes as well.\n\n Returns\n -------\n * wesn : 1d array\n A 1D numpy array with the west, east, south, and north dimensions\n of the current figure.\n\n Examples\n --------\n\n >>> import pygmt\n >>> fig = pygmt.Figure()\n >>> fig.coast(\n ... region=[0, 10, -20, -10],\n ... projection=\"M6i\",\n ... frame=True,\n ... land=\"black\",\n ... )\n >>> with Session() as lib:\n ... wesn = lib.extract_region()\n ...\n >>> print(\", \".join([f\"{x:.2f}\" for x in wesn]))\n 0.00, 10.00, -20.00, -10.00\n\n Using ISO country codes for the regions (for example ``'US.HI'`` for\n Hawaii):\n\n >>> fig = pygmt.Figure()\n >>> fig.coast(\n ... region=\"US.HI\", projection=\"M6i\", frame=True, land=\"black\"\n ... )\n >>> with Session() as lib:\n ... 
wesn = lib.extract_region()\n ...\n >>> print(\", \".join([f\"{x:.2f}\" for x in wesn]))\n -164.71, -154.81, 18.91, 23.58\n\n The country codes can have an extra argument that rounds the region a\n multiple of the argument (for example, ``'US.HI+r5'`` will round the\n region to multiples of 5):\n\n >>> fig = pygmt.Figure()\n >>> fig.coast(\n ... region=\"US.HI+r5\", projection=\"M6i\", frame=True, land=\"black\"\n ... )\n >>> with Session() as lib:\n ... wesn = lib.extract_region()\n ...\n >>> print(\", \".join([f\"{x:.2f}\" for x in wesn]))\n -165.00, -150.00, 15.00, 25.00\n \"\"\"\n c_extract_region = self.get_libgmt_func(\n \"GMT_Extract_Region\",\n argtypes=[ctp.c_void_p, ctp.c_char_p, ctp.POINTER(ctp.c_double)],\n restype=ctp.c_int,\n )\n\n wesn = np.empty(4, dtype=np.float64)\n wesn_pointer = wesn.ctypes.data_as(ctp.POINTER(ctp.c_double))\n # The second argument to GMT_Extract_Region is a file pointer to a\n # PostScript file. It's only valid in classic mode. Use None to get a\n # NULL pointer instead.\n status = c_extract_region(self.session_pointer, None, wesn_pointer)\n if status != 0:\n raise GMTCLibError(\"Failed to extract region from current figure.\")\n return wesn\n" ]
[ [ "numpy.empty", "numpy.asarray", "numpy.apply_along_axis", "numpy.atleast_1d", "pandas.api.types.is_string_dtype", "numpy.char.encode", "numpy.asanyarray" ] ]
NemesiP/volatiltiy-forecasting
[ "a65899d70e64b3a884c96214b0f337e88c918169" ]
[ "Examples/Garch_example.py" ]
[ "import numpy as np\nfrom scipy.optimize import minimize\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv('C:/Users/peter/Desktop/volatility-forecasting/data/Stocks/AMD.csv')\ndf = df.iloc[-1500:, :]\ndf['Chg'] = np.log(df.close).diff().fillna(0)\nreturns = df.Chg[1:].values\ndf['Date'] = pd.to_datetime(df.iloc[:, 0])\n\n\ndef garch_filter(alpha0, alpha1, beta1, omega, eps):\n iT = len(eps)\n sigma_2 = np.zeros(iT)\n \n for i in range(iT):\n if i == 0:\n sigma_2[i] = alpha0/(1 - alpha1 - beta1)\n else:\n sigma_2[i] = alpha0 + alpha1*eps[i - 1]**2 + beta1*sigma_2[i - 1] + omega * eps[i - 1]**2 * (eps[i - 1] < 0)\n \n return sigma_2\n\ndef garch_loglike(vP, eps):\n alpha0 = vP[0]\n alpha1 = vP[1]\n beta1 = vP[2]\n omega = vP[3]\n \n sigma_2 = garch_filter(alpha0, alpha1, beta1, omega, eps)\n \n logL = -np.sum(-np.log(sigma_2) - eps**2/sigma_2)\n \n return logL\n\ncons = ({'type': 'ineq', 'func': lambda x: np.array(x)})\nvP0 = (0.1, 0.05, 0.92, 0.2)\n\nres = minimize(garch_loglike, vP0, args = (returns), \n bounds = ((0.0001, None), (0.0001, None), (0.0001, None), (0.0001, None)), \n options = {'disp': True})\n\nalpha0_est = res.x[0]\nalpha1_est = res.x[1]\nbeta1_est = res.x[2]\nomega_est = res.x[3]\nsigma2 = garch_filter(alpha0_est, alpha1_est, beta1_est, omega_est, returns)\n\nplt.plot(df.Date[1:], sigma2, label = 'GJR-GARCH')\nplt.legend(loc = 'best')\nplt.show()\n\ndef garch_filter2(alpha0, alpha1, beta1, eps):\n iT = len(eps)\n sigma_2 = np.zeros(iT)\n \n for i in range(iT):\n if i == 0:\n sigma_2[i] = alpha0/(1 - alpha1 - beta1)\n else:\n sigma_2[i] = alpha0 + alpha1*eps[i - 1]**2 + beta1*sigma_2[i - 1]\n \n return sigma_2\n\ndef garch_loglike2(vP, eps):\n alpha0 = vP[0]\n alpha1 = vP[1]\n beta1 = vP[2]\n \n sigma_2 = garch_filter2(alpha0, alpha1, beta1, eps)\n \n logL = -np.sum(-np.log(sigma_2) - eps**2/sigma_2)\n \n return logL\n\ncons = ({'type': 'ineq', 'func': lambda x: np.array(x)})\nvP0 = (0.1, 0.05, 0.92)\n\nres2 = minimize(garch_loglike2, vP0, args = (returns), \n bounds = ((0.0001, None), (0.0001, None), (0.0001, None)), \n options = {'disp': True})\n\nalpha0_est2 = res2.x[0]\nalpha1_est2 = res2.x[1]\nbeta1_est2 = res2.x[2]\nsigma22 = garch_filter2(alpha0_est2, alpha1_est2, beta1_est2, returns)\n\nplt.plot(df.Date[1:], sigma22, label = 'GARCH')\nplt.legend(loc = 'best')\nplt.show()\n\nplt.plot(df.Date[1:], sigma22, label = 'GARCH')\nplt.plot(df.Date[1:], sigma2, label = 'GJR-GARCH')\nplt.legend(loc = 'best')\nplt.show()\n\nplt.scatter(sigma2, sigma22)\nplt.show()" ]
[ [ "pandas.to_datetime", "numpy.array", "numpy.zeros", "numpy.log", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.scatter", "matplotlib.pyplot.show", "pandas.read_csv", "scipy.optimize.minimize" ] ]
omerholtzman/IML.HUJI
[ "2ac59cbd8cf27851af890f122a9ca87894bf8fdd" ]
[ "exercises/classifiers_evaluation.py" ]
[ "from IMLearn.learners.classifiers import Perceptron, LDA, GaussianNaiveBayes\nfrom IMLearn.learners.classifiers.perceptron import default_callback\nimport numpy as np\nfrom typing import Tuple\nimport plotly.graph_objects as go\nimport plotly.io as pio\nimport plotly.express as px\nimport pandas as pd\nfrom plotly.subplots import make_subplots\n\nimport matplotlib.pyplot as plt\npio.templates.default = \"simple_white\"\n\n\ndef load_dataset(filename: str) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Load dataset for comparing the Gaussian Naive Bayes and LDA classifiers. File is assumed to be an\n ndarray of shape (n_samples, 3) where the first 2 columns represent features and the third column the class\n\n Parameters\n ----------\n filename: str\n Path to .npy data file\n\n Returns\n -------\n X: ndarray of shape (n_samples, 2)\n Design matrix to be used\n\n y: ndarray of shape (n_samples,)\n Class vector specifying for each sample its class\n\n \"\"\"\n data = np.load(filename)\n X = data[:, [0, 1]]\n Y = data[: , [2]]\n return X, Y\n\ndef run_perceptron():\n \"\"\"\n Fit and plot fit progression of the Perceptron algorithm over both the linearly separable and inseparable datasets\n\n Create a line plot that shows the perceptron algorithm's training loss values (y-axis)\n as a function of the training iterations (x-axis).\n \"\"\"\n for n, f in [(\"Linearly Separable\", \"linearly_separable.npy\"), (\"Linearly Inseparable\", \"linearly_inseparable.npy\")]:\n # Load dataset\n X, Y = load_dataset(\"../datasets/\" + f)\n\n # Fit Perceptron and record loss in each fit iteration\n losses = []\n def our_callback(fit: Perceptron, x: np.ndarray, y: int):\n losses.append(fit._loss(X, Y))\n perceptron = Perceptron(callback=our_callback)\n\n perceptron._fit(X, Y)\n\n # Plot figure\n plt.plot(list(range(1, len(losses) + 1)), losses)\n plt.xlabel(\"Iteration number\")\n plt.ylabel(\"Misclassification Loss\")\n plt.title(n)\n plt.show()\n\ndef compare_gaussian_classifiers():\n \"\"\"\n Fit both Gaussian Naive Bayes and LDA classifiers on both gaussians1 and gaussians2 datasets\n \"\"\"\n for f in [\"gaussian1.npy\", \"gaussian2.npy\"]:\n # Load dataset\n X, Y = load_dataset(\"../datasets/\" + f)\n\n # Fit models and predict over training set\n\n from math import atan2, pi\n def get_ellipse(mu: np.ndarray, cov: np.ndarray):\n \"\"\"\n Draw an ellipse centered at given location and according to specified covariance matrix\n Parameters\n ----------\n mu : ndarray of shape (2,)\n Center of ellipse\n cov: ndarray of shape (2,2)\n Covariance of Gaussian\n Returns\n -------\n scatter: A plotly trace object of the ellipse\n \"\"\"\n l1, l2 = tuple(np.linalg.eigvalsh(cov)[::-1])\n theta = atan2(l1 - cov[0, 0], cov[0, 1]) if cov[0, 1] != 0 else (np.pi / 2 if cov[0, 0] < cov[1, 1] else 0)\n t = np.linspace(0, 2 * pi, 100)\n xs = (l1 * np.cos(theta) * np.cos(t)) - (l2 * np.sin(theta) * np.sin(t))\n ys = (l1 * np.sin(theta) * np.cos(t)) + (l2 * np.cos(theta) * np.sin(t))\n\n return go.Scatter(x=mu[0] + xs, y=mu[1] + ys, mode=\"lines\", marker_color=\"black\")\n\n gaussian = GaussianNaiveBayes()\n lda = LDA()\n gaussian._fit(X, Y)\n lda._fit(X, Y)\n\n GNB_y_pred = gaussian._predict(X)\n LDA_y_pred = lda.predict(X)\n\n from IMLearn.metrics import accuracy\n GNB_pred_accuracy = accuracy(GNB_y_pred, Y)\n LDA_pred_accuracy = accuracy(LDA_y_pred, Y)\n\n # Plot a figure with two suplots, showing the Gaussian Naive Bayes predictions on the left and LDA predictions\n # on the right. 
Plot title should specify dataset used and subplot titles should specify algorithm and accuracy\n\n fig = make_subplots(rows=1, cols=3, subplot_titles=\n (f\"GNB model. accuracy = {GNB_pred_accuracy}\", f\"LDA model. accuracy = {LDA_pred_accuracy}\", \"True Data\"))\n\n fig.add_trace(go.Scatter(x=X[:, 0], y=X[:, 1],marker=dict(color=GNB_y_pred), mode=\"markers\"),row=1, col=1)\n fig.add_trace(go.Scatter(x=X[:, 0], y=X[:, 1],marker=dict(color=LDA_y_pred), mode=\"markers\"),row=1, col=2)\n fig.add_trace(go.Scatter(x=X[:, 0], y=X[:, 1],marker=dict(color=np.reshape(Y, newshape=len(Y))), mode=\"markers\"),row=1, col=3)\n\n for i in range(len(gaussian.mu_)):\n fig.add_trace(get_ellipse(gaussian.mu_[i], np.diag(gaussian.vars_[i])), row=1, col=1)\n fig.add_trace(get_ellipse(lda.mu_[i], lda.cov_), row=1, col=2)\n\n fig.add_trace(go.Scatter(x=np.asarray([lda.mu_[i][0] for i in range(len(lda.mu_))]), y=np.asarray([lda.mu_[i][1] for i in range(len(lda.mu_))]), mode=\"markers\", marker = dict(color = \"black\", symbol='x', size=10)), row=1, col=2)\n fig.add_trace(go.Scatter(x=np.asarray([gaussian.mu_[i][0] for i in range(len(gaussian.mu_))]), y=np.asarray([gaussian.mu_[i][1] for i in range(len(gaussian.mu_))]), mode=\"markers\", marker = dict(color = \"black\", symbol='x', size=10)), row=1, col=1)\n fig.show()\n\n\nif __name__ == '__main__':\n np.random.seed(0)\n run_perceptron()\n compare_gaussian_classifiers()\n" ]
[ [ "numpy.sin", "numpy.random.seed", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "numpy.load", "numpy.linalg.eigvalsh", "matplotlib.pyplot.ylabel", "numpy.cos", "matplotlib.pyplot.show", "numpy.linspace", "numpy.diag" ] ]
esquije/caffe_jessedits
[ "5bf52f763828b31418eaee942cba3f91a7aca0a5" ]
[ "python/caffe/classifier.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nClassifier is an image classifier specialization of Net.\n\"\"\"\n\nimport numpy as np\n\nimport caffe\n\n\nclass Classifier(caffe.Net):\n \"\"\"\n Classifier extends Net for image class prediction\n by scaling, center cropping, or oversampling.\n Parameters\n ----------\n image_dims : dimensions to scale input for cropping/sampling.\n Default is to scale to net input size for whole-image crop.\n mean, input_scale, raw_scale, channel_swap: params for\n preprocessing options.\n \"\"\"\n def __init__(self, model_file, pretrained_file, image_dims=None,\n mean=None, input_scale=None, raw_scale=None,\n channel_swap=None):\n caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)\n\n # configure pre-processing\n in_ = self.inputs[0]\n self.transformer = caffe.io.Transformer(\n {in_: self.blobs[in_].data.shape})\n self.transformer.set_transpose(in_, (2, 0, 1))\n if mean is not None:\n self.transformer.set_mean(in_, mean)\n if input_scale is not None:\n self.transformer.set_input_scale(in_, input_scale)\n if raw_scale is not None:\n self.transformer.set_raw_scale(in_, raw_scale)\n if channel_swap is not None:\n self.transformer.set_channel_swap(in_, channel_swap)\n\n self.crop_dims = np.array(self.blobs[in_].data.shape[2:])\n if not image_dims:\n image_dims = self.crop_dims\n self.image_dims = image_dims\n\n def predict(self, inputs, oversample=True):\n \"\"\"\n Predict classification probabilities of inputs.\n Parameters\n ----------\n inputs : iterable of (H x W x K) input ndarrays.\n oversample : boolean\n average predictions across center, corners, and mirrors\n when True (default). Center-only prediction when False.\n Returns\n -------\n predictions: (N x C) ndarray of class probabilities for N images and C\n classes.\n \"\"\"\n # Scale to standardize input dimensions.\n input_ = np.zeros((len(inputs),\n self.image_dims[0],\n self.image_dims[1],\n inputs[0].shape[2]),\n dtype=np.float32)\n for ix, in_ in enumerate(inputs):\n input_[ix] = caffe.io.resize_image(in_, self.image_dims)\n\n if oversample:\n # Generate center, corner, and mirrored crops.\n input_ = caffe.io.oversample(input_, self.crop_dims)\n else:\n # Take center crop.\n center = np.array(self.image_dims) / 2.0\n crop = np.tile(center, (1, 2))[0] + np.concatenate([\n -self.crop_dims / 2.0,\n self.crop_dims / 2.0\n ])\n crop = crop.astype(int)\n input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :]\n\n # Classify\n caffe_in = np.zeros(np.array(input_.shape)[[0, 3, 1, 2]],\n dtype=np.float32)\n for ix, in_ in enumerate(input_):\n caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_)\n out = self.forward_all(**{self.inputs[0]: caffe_in})\n predictions = out[self.outputs[0]]\n\n # For oversampling, average predictions across crops.\n if oversample:\n predictions = predictions.reshape((len(predictions) / 10, 10, -1))\n predictions = predictions.mean(1)\n\n return predictions\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.tile" ] ]
FraLotito/evol-signal-comm
[ "fd06bdad06200a65a8910e8401f0de7632be3cf0" ]
[ "simple_signal/simple_signal_regression_limited/figura_zoom/backup3/plot_zoom.py" ]
[ "import graphviz\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib\n\nNUMBER_OF_GENERATIONS = 34\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\nmatplotlib.rcParams[\"legend.loc\"] = 'best'\nmatplotlib.rcParams.update({'font.size': 15})\n\nf = open(\"sender.mean\", 'r')\nstats = f.readlines()\nf.close()\n\ngen = range(NUMBER_OF_GENERATIONS)\ngen_stats = []\nfor i in gen:\n gen_stats.append([])\n\nfor run in stats:\n run = list(map(float, run.split()))\n for i in range(len(run)):\n gen_stats[i].append(run[i])\n\nmean_gen = []\nstddev_gen = []\nfor i in range(len(gen_stats)):\n mean_gen.append(np.mean(gen_stats[i]))\n stddev_gen.append(np.std(gen_stats[i]))\n\nf = open(\"sender.best\", 'r')\nstats = f.readlines()\nf.close()\n\ngen = range(NUMBER_OF_GENERATIONS)\ngen_stats = []\nfor i in gen:\n gen_stats.append([])\n\nfor run in stats:\n run = list(map(float, run.split()))\n for i in range(len(run)):\n gen_stats[i].append(run[i])\n\nmean_best_gen = []\nstddev_best_gen = []\nfor i in range(len(gen_stats)):\n mean_best_gen.append(np.mean(gen_stats[i]))\n stddev_best_gen.append(np.std(gen_stats[i]))\n\n\n#plt.errorbar(gen, mean_best_gen, yerr=stddev_best_gen, label=\"best genome\", fmt='.', capsize=2, barsabove=True, errorevery=10)\n#plt.errorbar(gen, mean_gen, yerr=stddev_gen, label=\"snd population's avg\", fmt='.', capsize=2, barsabove=True, errorevery=10)\nplt.plot(gen, mean_best_gen, label=\"Best pair\")\nplt.plot(gen, mean_gen, label=\"Sender pop. avg\")\n\n\nf = open(\"receiver.mean\", 'r')\nstats = f.readlines()\nf.close()\n\ngen = range(NUMBER_OF_GENERATIONS)\ngen_stats = []\nfor i in gen:\n gen_stats.append([])\n\nfor run in stats:\n run = list(map(float, run.split()))\n for i in range(len(run)):\n gen_stats[i].append(run[i])\n\nmean_gen = []\nstddev_gen = []\nfor i in range(len(gen_stats)):\n mean_gen.append(np.mean(gen_stats[i]))\n stddev_gen.append(np.std(gen_stats[i]))\n\nf = open(\"receiver.best\", 'r')\nstats = f.readlines()\nf.close()\n\ngen = range(NUMBER_OF_GENERATIONS)\ngen_stats = []\nfor i in gen:\n gen_stats.append([])\n\nfor run in stats:\n run = list(map(float, run.split()))\n for i in range(len(run)):\n gen_stats[i].append(run[i])\n\nmean_best_gen = []\nstddev_best_gen = []\nfor i in range(len(gen_stats)):\n mean_best_gen.append(np.mean(gen_stats[i]))\n stddev_best_gen.append(np.std(gen_stats[i]))\n\n\n#plt.errorbar(gen, mean_best_gen, yerr=stddev_best_gen, label=\"best genome\", fmt='.', capsize=2, barsabove=True, errorevery=2)\n#plt.errorbar(gen, mean_gen, yerr=stddev_gen, label=\"rcv population's avg\", fmt='.', capsize=1, barsabove=True, errorevery=10)\n\nplt.plot(gen, mean_gen, label=\"Receiver pop. avg\")\n\n\nplt.xlabel(\"Generations\")\nplt.xticks(range(NUMBER_OF_GENERATIONS))\nplt.ylabel(\"Fitness\")\nlocs, labs = plt.xticks()\nplt.xticks(locs[::5]) \n#plt.grid()\nplt.legend(loc=\"best\")\nplt.savefig('fitness_trend.pdf')\nplt.show()\n\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "numpy.mean", "numpy.std", "matplotlib.rcParams.update", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "matplotlib.pyplot.xticks" ] ]
fivebats/pensieve
[ "b6d8f914a0a3354149eb467e9bf2c517a5a63914" ]
[ "multi_video_sim/a3c.py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport tflearn\n\n\nGAMMA = 0.99\nENTROPY_WEIGHT = 0.1\nENTROPY_EPS = 1e-6\nEPS = 1e-6\nMAX_BR_LEVELS = 10\nMASK_DIM = 6\n\n\nclass ActorNetwork(object):\n \"\"\"\n Input to the network is the state, output is the distribution\n of all actions.\n \"\"\"\n def __init__(self, sess, state_dim, action_dim, learning_rate):\n self.sess = sess\n self.s_dim = state_dim\n self.a_dim = action_dim\n self.lr_rate = learning_rate\n\n assert self.a_dim == MAX_BR_LEVELS\n\n # Placeholder for masking invalid actions\n self.mask = tf.placeholder(tf.bool, self.a_dim)\n\n # Create the actor network\n self.inputs, self.out = self.create_actor_network()\n\n # Get all network parameters\n self.network_params = \\\n tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='actor')\n\n # Set all network parameters\n self.input_network_params = []\n for param in self.network_params:\n self.input_network_params.append(\n tf.placeholder(tf.float32, shape=param.get_shape()))\n self.set_network_params_op = []\n for idx, param in enumerate(self.input_network_params):\n self.set_network_params_op.append(self.network_params[idx].assign(param))\n\n # Selected action, 0-1 vector\n # the shape of acts are not determined (only upper-bounded by a_dim)\n self.acts = tf.placeholder(tf.float32, [None, None])\n\n # This gradient will be provided by the critic network\n self.act_grad_weights = tf.placeholder(tf.float32, [None, 1])\n\n # Compute the objective (log action_vector and entropy)\n self.obj = tf.reduce_sum(tf.multiply(\n tf.log(tf.reduce_sum(tf.multiply(self.out, self.acts),\n reduction_indices=1, keep_dims=True)),\n -self.act_grad_weights)) \\\n + ENTROPY_WEIGHT * tf.reduce_sum(tf.multiply(self.out,\n tf.log(self.out + ENTROPY_EPS)))\n\n # Combine the gradients here\n self.actor_gradients = tf.gradients(self.obj, self.network_params)\n\n # Optimization Op\n self.optimize = tf.train.RMSPropOptimizer(self.lr_rate).\\\n apply_gradients(zip(self.actor_gradients, self.network_params))\n\n def create_actor_network(self):\n with tf.variable_scope('actor'):\n inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])\n\n split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 64, activation='relu')\n split_1 = tflearn.fully_connected(inputs[:, 1:2, -1], 64, activation='relu')\n split_2 = tflearn.fully_connected(inputs[:, 4:5, -1], 64, activation='relu')\n\n reshape_0 = tflearn.reshape(inputs[:, 2:4, :], [-1, 2, self.s_dim[1], 1])\n split_3 = tflearn.conv_2d(reshape_0, 128, 3, activation='relu')\n\n split_4 = tflearn.conv_1d(inputs[:, 5:6, :], 128, 4, activation='relu')\n split_5 = tflearn.conv_1d(inputs[:, 6:7, :], 128, 4, activation='relu')\n\n flatten_0 = tflearn.flatten(split_3)\n flatten_1 = tflearn.flatten(split_4)\n flatten_2 = tflearn.flatten(split_5)\n\n merge_net = tflearn.merge([split_0, split_1, split_2, flatten_0, flatten_1, flatten_2], 'concat')\n\n dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu')\n\n # for multiplytiple video, mask out the invalid actions\n linear_out = tflearn.fully_connected(dense_net_0, self.a_dim, activation='linear')\n linear_out = tf.transpose(linear_out) # [None, a_dim] -> [a_dim, None]\n mask_out = tf.boolean_mask(linear_out, self.mask) # [a_dim, None] -> [masked, None]\n mask_out = tf.transpose(mask_out) # [masked, None] -> [None, masked]\n softmax_out = tf.nn.softmax(mask_out)\n\n return inputs, softmax_out\n\n def train(self, inputs, acts, act_grad_weights):\n # there can be only one kind of mask in a 
training epoch\n for i in xrange(inputs.shape[0]):\n assert np.all(inputs[0, MASK_DIM, -MAX_BR_LEVELS:] == \\\n inputs[i, MASK_DIM, -MAX_BR_LEVELS:])\n\n # action dimension matches with mask length\n assert acts.shape[1] == np.sum(inputs[0:1, MASK_DIM, -MAX_BR_LEVELS:])\n\n self.sess.run(self.optimize, feed_dict={\n self.inputs: inputs,\n self.mask: inputs[0, MASK_DIM, -MAX_BR_LEVELS:],\n self.acts: acts,\n self.act_grad_weights: act_grad_weights\n })\n\n def predict(self, inputs):\n for i in xrange(inputs.shape[0]):\n assert np.all(inputs[0, MASK_DIM, -MAX_BR_LEVELS:] == \\\n inputs[i, MASK_DIM, -MAX_BR_LEVELS:])\n\n return self.sess.run(self.out, feed_dict={\n self.inputs: inputs,\n self.mask: inputs[0, MASK_DIM, -MAX_BR_LEVELS:]\n })\n\n def get_gradients(self, inputs, acts, act_grad_weights):\n for i in xrange(inputs.shape[0]):\n assert np.all(inputs[0, MASK_DIM, -MAX_BR_LEVELS:] == \\\n inputs[i, MASK_DIM, -MAX_BR_LEVELS:])\n\n return self.sess.run(self.actor_gradients, feed_dict={\n self.inputs: inputs,\n self.mask: inputs[0, MASK_DIM, -MAX_BR_LEVELS:],\n self.acts: acts,\n self.act_grad_weights: act_grad_weights\n })\n\n def apply_gradients(self, actor_gradients):\n return self.sess.run(self.optimize, feed_dict={\n i: d for i, d in zip(self.actor_gradients, actor_gradients)\n })\n\n def get_network_params(self):\n return self.sess.run(self.network_params)\n\n def set_network_params(self, input_network_params):\n self.sess.run(self.set_network_params_op, feed_dict={\n i: d for i, d in zip(self.input_network_params, input_network_params)\n })\n\n\nclass CriticNetwork(object):\n \"\"\"\n Input to the network is the state and action, output is V(s).\n On policy: the action must be obtained from the output of the Actor network.\n \"\"\"\n def __init__(self, sess, state_dim, learning_rate):\n self.sess = sess\n self.s_dim = state_dim\n self.lr_rate = learning_rate\n\n # Create the critic network\n self.inputs, self.out = self.create_critic_network()\n\n # Get all network parameters\n self.network_params = \\\n tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='critic')\n\n # Set all network parameters\n self.input_network_params = []\n for param in self.network_params:\n self.input_network_params.append(\n tf.placeholder(tf.float32, shape=param.get_shape()))\n self.set_network_params_op = []\n for idx, param in enumerate(self.input_network_params):\n self.set_network_params_op.append(self.network_params[idx].assign(param))\n\n # Network target V(s)\n self.td_target = tf.placeholder(tf.float32, [None, 1])\n\n # Temporal Difference, will also be weights for actor_gradients\n self.td = tf.subtract(self.td_target, self.out)\n\n # Mean square error\n self.loss = tflearn.mean_square(self.td_target, self.out)\n\n # Compute critic gradient\n self.critic_gradients = tf.gradients(self.loss, self.network_params)\n\n # Optimization Op\n self.optimize = tf.train.RMSPropOptimizer(self.lr_rate).\\\n apply_gradients(zip(self.critic_gradients, self.network_params))\n\n def create_critic_network(self):\n with tf.variable_scope('critic'):\n inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])\n split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 64, activation='relu')\n split_1 = tflearn.fully_connected(inputs[:, 1:2, -1], 64, activation='relu')\n split_2 = tflearn.fully_connected(inputs[:, 4:5, -1], 64, activation='relu')\n\n reshape_0 = tflearn.reshape(inputs[:, 2:4, :], [-1, 2, self.s_dim[1], 1])\n split_3 = tflearn.conv_2d(reshape_0, 128, 3, activation='relu')\n\n split_4 = 
tflearn.conv_1d(inputs[:, 5:6, :], 128, 4, activation='relu')\n split_5 = tflearn.conv_1d(inputs[:, 6:7, :], 128, 4, activation='relu')\n\n flatten_0 = tflearn.flatten(split_3)\n flatten_1 = tflearn.flatten(split_4)\n flatten_2 = tflearn.flatten(split_5)\n\n merge_net = tflearn.merge([split_0, split_1, split_2, flatten_0, flatten_1, flatten_2], 'concat')\n\n dense_net_0 = tflearn.fully_connected(merge_net, 100, activation='relu')\n out = tflearn.fully_connected(dense_net_0, 1, activation='linear')\n\n return inputs, out\n\n def train(self, inputs, td_target):\n return self.sess.run([self.loss, self.optimize], feed_dict={\n self.inputs: inputs,\n self.td_target: td_target\n })\n\n def predict(self, inputs):\n return self.sess.run(self.out, feed_dict={\n self.inputs: inputs\n })\n\n def get_td(self, inputs, td_target):\n return self.sess.run(self.td, feed_dict={\n self.inputs: inputs,\n self.td_target: td_target\n })\n\n def get_gradients(self, inputs, td_target):\n return self.sess.run(self.critic_gradients, feed_dict={\n self.inputs: inputs,\n self.td_target: td_target\n })\n\n def apply_gradients(self, critic_gradients):\n return self.sess.run(self.optimize, feed_dict={\n i: d for i, d in zip(self.critic_gradients, critic_gradients)\n })\n\n def get_network_params(self):\n return self.sess.run(self.network_params)\n\n def set_network_params(self, input_network_params):\n self.sess.run(self.set_network_params_op, feed_dict={\n i: d for i, d in zip(self.input_network_params, input_network_params)\n })\n\n\ndef compute_gradients(s_batch, a_batch, r_batch, terminal, actor, critic):\n \"\"\"\n batch of s, a, r is from samples in a sequence\n the format is in np.array([batch_size, s/a/r_dim])\n terminal is True when sequence ends as a terminal state\n \"\"\"\n assert s_batch.shape[0] == a_batch.shape[0]\n assert s_batch.shape[0] == r_batch.shape[0]\n ba_size = s_batch.shape[0]\n\n v_batch = critic.predict(s_batch)\n\n R_batch = np.zeros(r_batch.shape)\n\n if terminal:\n R_batch[-1, 0] = 0 # terminal state\n else:\n R_batch[-1, 0] = v_batch[-1, 0] # boot strap from last state\n\n for t in reversed(xrange(ba_size - 1)):\n R_batch[t, 0] = r_batch[t] + GAMMA * R_batch[t + 1, 0]\n\n td_batch = R_batch - v_batch\n\n actor_gradients = actor.get_gradients(s_batch, a_batch, td_batch)\n critic_gradients = critic.get_gradients(s_batch, R_batch)\n\n return actor_gradients, critic_gradients, td_batch\n\n\ndef discount(x, gamma):\n \"\"\"\n Given vector x, computes a vector y such that\n y[i] = x[i] + gamma * x[i+1] + gamma^2 x[i+2] + ...\n \"\"\"\n out = np.zeros(len(x))\n out[-1] = x[-1]\n for i in reversed(xrange(len(x)-1)):\n out[i] = x[i] + gamma*out[i+1]\n assert x.ndim >= 1\n # More efficient version:\n # scipy.signal.lfilter([1],[1,-gamma],x[::-1], axis=0)[::-1]\n return out\n\n\ndef compute_entropy(x):\n \"\"\"\n Given vector x, computes the entropy\n H(x) = - sum( p * log(p))\n \"\"\"\n H = 0.0\n for i in xrange(len(x)):\n if 0 < x[i] < 1:\n H -= x[i] * np.log(x[i])\n return H\n\n\ndef build_summaries():\n td_loss = tf.Variable(0.)\n tf.summary.scalar(\"TD_loss\", td_loss)\n eps_total_reward = tf.Variable(0.)\n tf.summary.scalar(\"Eps_total_reward\", eps_total_reward)\n avg_entropy = tf.Variable(0.)\n tf.summary.scalar(\"Avg_entropy\", avg_entropy)\n\n summary_vars = [td_loss, eps_total_reward, avg_entropy]\n summary_ops = tf.summary.merge_all()\n\n return summary_ops, summary_vars\n" ]
[ [ "tensorflow.multiply", "numpy.zeros", "tensorflow.summary.scalar", "numpy.log", "tensorflow.subtract", "numpy.sum", "tensorflow.train.RMSPropOptimizer", "tensorflow.Variable", "tensorflow.gradients", "tensorflow.transpose", "tensorflow.variable_scope", "tensorflow.log", "tensorflow.placeholder", "tensorflow.nn.softmax", "numpy.all", "tensorflow.summary.merge_all", "tensorflow.boolean_mask", "tensorflow.get_collection" ] ]
ameya30/IMaX_pole_data_scripts
[ "815f9b4cf3f7c827901daa8c90bcacaaaead0e66" ]
[ "my_scripts/imax_remove_mean_box_mymod.py" ]
[ "import os\nimport glob\n\nimport numpy as np \nimport matplotlib.pyplot as plt\n\nfrom astropy.io import fits\nfrom matplotlib.ticker import FuncFormatter\n\n###################### BEGIN ###################### \n\n# Get data\n\nfiles = glob.glob('/home/prabhu/sunrise_holly/normalize_mu_output/*.fits')\n\nfor el in files:\n\n res = fits.open(el)\n res = res[0].data\n \n split_name = el.split('_')\n if len(split_name)==6:\n save_out = '/home/prabhu/sunrise_holly/mean_rem_output/mean_rem_output_' + split_name[-1]\n else:\n save_out = '/home/prabhu/sunrise_holly/mean_rem_output/mean_rem_output_nr_' + split_name[-1]\n print (save_out)\n\n # Find ave intensity of each continuum and remove from images\n \n for stok in range(1, 4):\n mean = np.mean(res[stok, 4, 240:640, 100:700])\n print (mean)\n for wvln in range (0, 5):\n res[stok, wvln, :, :] -= mean\n \n # Save corrected data\n \n hdu = fits.PrimaryHDU(res)\n \n hdu.writeto(save_out)\n" ]
[ [ "numpy.mean" ] ]
tkameyama/incubator-mxnet
[ "47b0bdd00e7c5e1c9a448809b02e68c0e4b72e96" ]
[ "tests/python/unittest/test_gluon_data.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport tarfile\nimport tempfile\nimport unittest\nimport mxnet as mx\nimport numpy as np\nimport random\nfrom mxnet import gluon\nimport platform\nfrom common import setup_module, with_seed, teardown_module\nfrom mxnet.gluon.data import DataLoader\nimport mxnet.ndarray as nd\nfrom mxnet import context\nfrom mxnet.gluon.data.dataset import Dataset\nfrom mxnet.gluon.data.dataset import ArrayDataset\nimport pytest\n\n@with_seed()\ndef test_array_dataset():\n X = np.random.uniform(size=(10, 20))\n Y = np.random.uniform(size=(10,))\n dataset = gluon.data.ArrayDataset(X, Y)\n loader = gluon.data.DataLoader(dataset, 2)\n for i, (x, y) in enumerate(loader):\n assert mx.test_utils.almost_equal(x.asnumpy(), X[i*2:(i+1)*2])\n assert mx.test_utils.almost_equal(y.asnumpy(), Y[i*2:(i+1)*2])\n\n dataset = gluon.data.ArrayDataset(X)\n loader = gluon.data.DataLoader(dataset, 2)\n\n for i, x in enumerate(loader):\n assert mx.test_utils.almost_equal(x.asnumpy(), X[i*2:(i+1)*2])\n\n\ndef prepare_record():\n if not os.path.isdir(\"data/test_images\"):\n os.makedirs('data/test_images')\n if not os.path.isdir(\"data/test_images/test_images\"):\n gluon.utils.download(\"http://data.mxnet.io/data/test_images.tar.gz\", \"data/test_images.tar.gz\")\n tarfile.open('data/test_images.tar.gz').extractall('data/test_images/')\n if not os.path.exists('data/test.rec') or not os.path.exists('data/test.idx'):\n imgs = os.listdir('data/test_images/test_images')\n record = mx.recordio.MXIndexedRecordIO('data/test.idx', 'data/test.rec', 'w')\n for i, img in enumerate(imgs):\n str_img = open('data/test_images/test_images/'+img, 'rb').read()\n s = mx.recordio.pack((0, i, i, 0), str_img)\n record.write_idx(i, s)\n return 'data/test.rec'\n\n\n@with_seed()\ndef test_recordimage_dataset():\n recfile = prepare_record()\n fn = lambda x, y : (x, y)\n dataset = gluon.data.vision.ImageRecordDataset(recfile).transform(fn)\n loader = gluon.data.DataLoader(dataset, 1)\n\n for i, (x, y) in enumerate(loader):\n assert x.shape[0] == 1 and x.shape[3] == 3\n assert y.asscalar() == i\n\n@with_seed()\ndef test_recordimage_dataset_handle():\n recfile = prepare_record()\n class TmpTransform(mx.gluon.HybridBlock):\n def hybrid_forward(self, F, x):\n return x\n fn = TmpTransform()\n dataset = gluon.data.vision.ImageRecordDataset(recfile).transform_first(fn).__mx_handle__()\n loader = gluon.data.DataLoader(dataset, 1)\n\n for i, (x, y) in enumerate(loader):\n assert x.shape[0] == 1 and x.shape[3] == 3\n assert y.asscalar() == i\n\ndef _dataset_transform_fn(x, y):\n \"\"\"Named transform function since lambda function cannot be pickled.\"\"\"\n return x, y\n\ndef _dataset_transform_first_fn(x):\n \"\"\"Named transform function since lambda function cannot 
be pickled.\"\"\"\n return x\n\n@with_seed()\ndef test_recordimage_dataset_with_data_loader_multiworker():\n recfile = prepare_record()\n dataset = gluon.data.vision.ImageRecordDataset(recfile)\n loader = gluon.data.DataLoader(dataset, 1, num_workers=5, try_nopython=False)\n\n for i, (x, y) in enumerate(loader):\n assert x.shape[0] == 1 and x.shape[3] == 3\n assert y.asscalar() == i\n\n # with transform\n dataset = gluon.data.vision.ImageRecordDataset(recfile).transform(_dataset_transform_fn)\n loader = gluon.data.DataLoader(dataset, 1, num_workers=5, try_nopython=None)\n\n for i, (x, y) in enumerate(loader):\n assert x.shape[0] == 1 and x.shape[3] == 3\n assert y.asscalar() == i\n\n # with transform_first\n dataset = gluon.data.vision.ImageRecordDataset(recfile).transform_first(_dataset_transform_first_fn)\n loader = gluon.data.DataLoader(dataset, 1, num_workers=5, try_nopython=None)\n\n for i, (x, y) in enumerate(loader):\n assert x.shape[0] == 1 and x.shape[3] == 3\n assert y.asscalar() == i\n\n@with_seed()\ndef test_sampler():\n seq_sampler = gluon.data.SequentialSampler(10)\n assert list(seq_sampler) == list(range(10))\n rand_sampler = gluon.data.RandomSampler(10)\n assert sorted(list(rand_sampler)) == list(range(10))\n seq_batch_keep = gluon.data.BatchSampler(seq_sampler, 3, 'keep')\n assert sum(list(seq_batch_keep), []) == list(range(10))\n seq_batch_discard = gluon.data.BatchSampler(seq_sampler, 3, 'discard')\n assert sum(list(seq_batch_discard), []) == list(range(9))\n rand_batch_keep = gluon.data.BatchSampler(rand_sampler, 3, 'keep')\n assert sorted(sum(list(rand_batch_keep), [])) == list(range(10))\n\n@with_seed()\ndef test_datasets():\n assert len(gluon.data.vision.MNIST(root='data/mnist')) == 60000\n assert len(gluon.data.vision.MNIST(root='data/mnist', train=False)) == 10000\n assert len(gluon.data.vision.FashionMNIST(root='data/fashion-mnist')) == 60000\n assert len(gluon.data.vision.FashionMNIST(root='data/fashion-mnist', train=False)) == 10000\n assert len(gluon.data.vision.CIFAR10(root='data/cifar10')) == 50000\n assert len(gluon.data.vision.CIFAR10(root='data/cifar10', train=False)) == 10000\n assert len(gluon.data.vision.CIFAR100(root='data/cifar100')) == 50000\n assert len(gluon.data.vision.CIFAR100(root='data/cifar100', fine_label=True)) == 50000\n assert len(gluon.data.vision.CIFAR100(root='data/cifar100', train=False)) == 10000\n\n@with_seed()\[email protected]\ndef test_datasets_handles():\n assert len(gluon.data.vision.MNIST(root='data/mnist').__mx_handle__()) == 60000\n assert len(gluon.data.vision.MNIST(root='data/mnist', train=False).__mx_handle__()) == 10000\n assert len(gluon.data.vision.FashionMNIST(root='data/fashion-mnist').__mx_handle__()) == 60000\n assert len(gluon.data.vision.FashionMNIST(root='data/fashion-mnist', train=False).__mx_handle__()) == 10000\n assert len(gluon.data.vision.CIFAR10(root='data/cifar10').__mx_handle__()) == 50000\n assert len(gluon.data.vision.CIFAR10(root='data/cifar10', train=False).__mx_handle__()) == 10000\n assert len(gluon.data.vision.CIFAR100(root='data/cifar100').__mx_handle__()) == 50000\n assert len(gluon.data.vision.CIFAR100(root='data/cifar100', fine_label=True).__mx_handle__()) == 50000\n assert len(gluon.data.vision.CIFAR100(root='data/cifar100', train=False).__mx_handle__()) == 10000\n\n@with_seed()\ndef test_image_folder_dataset():\n prepare_record()\n dataset = gluon.data.vision.ImageFolderDataset('data/test_images')\n assert dataset.synsets == ['test_images']\n assert len(dataset.items) == 
16\n\n@with_seed()\ndef test_image_folder_dataset_handle():\n prepare_record()\n dataset = gluon.data.vision.ImageFolderDataset('data/test_images')\n hd = dataset.__mx_handle__()\n assert len(hd) == 16\n assert (hd[1][0] == dataset[1][0]).asnumpy().all()\n assert hd[5][1] == dataset[5][1]\n\n@with_seed()\ndef test_image_list_dataset():\n prepare_record()\n imlist = os.listdir('data/test_images/test_images')\n imglist = [(0, path) for i, path in enumerate(imlist)]\n dataset = gluon.data.vision.ImageListDataset(root='data/test_images/test_images', imglist=imglist)\n assert len(dataset) == 16, len(dataset)\n img, label = dataset[0]\n assert len(img.shape) == 3\n assert label == 0\n\n # save to file as *.lst\n imglist = ['\\t'.join((str(i), '0', path)) for i, path in enumerate(imlist)]\n with tempfile.NamedTemporaryFile('wt', delete=False) as fp:\n for line in imglist:\n fp.write(line + '\\n')\n fp.close()\n\n dataset = gluon.data.vision.ImageListDataset(root='data/test_images/test_images', imglist=fp.name)\n assert len(dataset) == 16, len(dataset)\n img, label = dataset[0]\n assert len(img.shape) == 3\n assert label == 0\n\n@with_seed()\ndef test_image_list_dataset_handle():\n prepare_record()\n imlist = os.listdir('data/test_images/test_images')\n imglist = [(0, path) for i, path in enumerate(imlist)]\n dataset = gluon.data.vision.ImageListDataset(root='data/test_images/test_images', imglist=imglist).__mx_handle__()\n assert len(dataset) == 16, len(dataset)\n img, label = dataset[0]\n assert len(img.shape) == 3\n assert label == 0\n\n # save to file as *.lst\n imglist = ['\\t'.join((str(i), '0', path)) for i, path in enumerate(imlist)]\n with tempfile.NamedTemporaryFile('wt', delete=False) as fp:\n for line in imglist:\n fp.write(line + '\\n')\n fp.close()\n\n dataset = gluon.data.vision.ImageListDataset(root='data/test_images/test_images', imglist=fp.name).__mx_handle__()\n assert len(dataset) == 16\n img, label = dataset[0]\n assert len(img.shape) == 3\n assert label == 0\n\n@with_seed()\ndef test_list_dataset():\n for num_worker in range(0, 3):\n data = mx.gluon.data.DataLoader([([1,2], 0), ([3, 4], 1)], batch_size=1, num_workers=num_worker)\n for d, l in data:\n pass\n\n\nclass _Dataset(gluon.data.Dataset):\n def __len__(self):\n return 100\n def __getitem__(self, key):\n return mx.nd.full((10,), key)\n\n@with_seed()\ndef test_multi_worker():\n data = _Dataset()\n for thread_pool in [True, False]:\n loader = gluon.data.DataLoader(data, batch_size=1, num_workers=5, thread_pool=thread_pool)\n for i, batch in enumerate(loader):\n assert (batch.asnumpy() == i).all()\n\n\n@with_seed()\ndef test_multi_worker_shape():\n for thread_pool in [True, False]:\n batch_size = 1024\n shape = (batch_size+1, 11, 12)\n\n data = ArrayDataset(np.ones(shape))\n loader = gluon.data.DataLoader(\n data, batch_size=batch_size, num_workers=5, last_batch='keep', thread_pool=thread_pool)\n for batch in loader:\n if shape[0] > batch_size:\n assert batch.shape == (batch_size, shape[1], shape[2])\n shape = (shape[0] - batch_size, shape[1], shape[2])\n else:\n assert batch.shape == shape\n\nclass _Dummy(Dataset):\n \"\"\"Dummy dataset for randomized shape arrays.\"\"\"\n def __init__(self, random_shape):\n self.random_shape = random_shape\n\n def __getitem__(self, idx):\n key = idx\n if self.random_shape:\n out = np.random.uniform(size=(random.randint(1000, 1100), 40))\n labels = np.random.uniform(size=(random.randint(10, 15)))\n else:\n out = np.random.uniform(size=(1000, 40))\n labels = np.random.uniform(size=(10))\n 
return key, out, labels\n\n def __len__(self):\n return 50\n\ndef _batchify_list(data):\n \"\"\"\n return list of ndarray without stack/concat/pad\n \"\"\"\n if isinstance(data, (tuple, list)):\n return list(data)\n if isinstance(data, mx.nd.NDArray):\n return [data]\n return data\n\ndef _batchify(data):\n \"\"\"\n Collate data into batch. Use shared memory for stacking.\n :param data: a list of array, with layout of 'NTC'.\n :return either x and x's unpadded lengths, or x, x's unpadded lengths, y and y's unpadded lengths\n if labels are not supplied.\n \"\"\"\n\n # input layout is NTC\n keys, inputs, labels = [item[0] for item in data], [item[1] for item in data], \\\n [item[2] for item in data]\n\n if len(data) > 1:\n max_data_len = max([seq.shape[0] for seq in inputs])\n max_labels_len = 0 if not labels else max([seq.shape[0] for seq in labels])\n else:\n max_data_len = inputs[0].shape[0]\n max_labels_len = 0 if not labels else labels[0].shape[0]\n\n x_lens = [item.shape[0] for item in inputs]\n y_lens = [item.shape[0] for item in labels]\n\n for i, seq in enumerate(inputs):\n pad_len = max_data_len - seq.shape[0]\n inputs[i] = np.pad(seq, ((0, pad_len), (0, 0)), 'constant', constant_values=0)\n labels[i] = np.pad(labels[i], (0, max_labels_len - labels[i].shape[0]),\n 'constant', constant_values=-1)\n\n inputs = np.asarray(inputs, dtype=np.float32)\n if labels is not None:\n labels = np.asarray(labels, dtype=np.float32)\n inputs = inputs.transpose((1, 0, 2))\n labels = labels.transpose((1, 0))\n\n return (nd.array(inputs, dtype=inputs.dtype, ctx=context.Context('cpu_shared', 0)),\n nd.array(x_lens, ctx=context.Context('cpu_shared', 0))) \\\n if labels is None else (\n nd.array(inputs, dtype=inputs.dtype, ctx=context.Context('cpu_shared', 0)),\n nd.array(x_lens, ctx=context.Context('cpu_shared', 0)),\n nd.array(labels, dtype=labels.dtype, ctx=context.Context('cpu_shared', 0)),\n nd.array(y_lens, ctx=context.Context('cpu_shared', 0)))\n\n@with_seed()\ndef test_multi_worker_forked_data_loader():\n data = _Dummy(False)\n loader = DataLoader(data, batch_size=40, batchify_fn=_batchify, num_workers=2)\n for epoch in range(1):\n for i, data in enumerate(loader):\n pass\n\n data = _Dummy(True)\n loader = DataLoader(data, batch_size=40, batchify_fn=_batchify_list, num_workers=2)\n for epoch in range(1):\n for i, data in enumerate(loader):\n pass\n\n@with_seed()\ndef test_multi_worker_dataloader_release_pool():\n # will trigger too many open file if pool is not released properly\n if os.name == 'nt':\n print('Skip for windows since spawn on windows is too expensive.')\n return\n\n for _ in range(10):\n A = np.random.rand(999, 2000)\n D = mx.gluon.data.DataLoader(A, batch_size=8, num_workers=8)\n the_iter = iter(D)\n next(the_iter)\n del the_iter\n del D\n\n\ndef test_dataloader_context():\n X = np.random.uniform(size=(10, 20))\n dataset = gluon.data.ArrayDataset(X)\n default_dev_id = 0\n custom_dev_id = 1\n\n # use non-pinned memory\n loader1 = gluon.data.DataLoader(dataset, 8)\n for _, x in enumerate(loader1):\n assert x.context == context.cpu(default_dev_id)\n\n # use pinned memory with default device id\n loader2 = gluon.data.DataLoader(dataset, 8, pin_memory=True)\n for _, x in enumerate(loader2):\n assert x.context == context.cpu_pinned(default_dev_id)\n\n # use pinned memory with custom device id\n loader3 = gluon.data.DataLoader(dataset, 8, pin_memory=True,\n pin_device_id=custom_dev_id)\n for _, x in enumerate(loader3):\n assert x.context == context.cpu_pinned(custom_dev_id)\n\ndef 
batchify(a):\n return a\n\ndef test_dataset_filter():\n length = 100\n a = mx.gluon.data.SimpleDataset([i for i in range(length)])\n a_filtered = a.filter(lambda x: x % 10 == 0)\n assert(len(a_filtered) == 10)\n for idx, sample in enumerate(a_filtered):\n assert sample % 10 == 0\n a_xform_filtered = a.transform(lambda x: x + 1).filter(lambda x: x % 10 == 0)\n assert(len(a_xform_filtered) == 10)\n # the filtered data is already transformed\n for idx, sample in enumerate(a_xform_filtered):\n assert sample % 10 == 0\n\ndef test_dataset_filter_handle():\n length = 100\n a = mx.gluon.data.SimpleDataset(np.arange(length))\n a_filtered = a.filter(lambda x: x % 10 == 0).__mx_handle__()\n assert(len(a_filtered) == 10)\n for idx, sample in enumerate(a_filtered):\n assert sample % 10 == 0\n a_xform_filtered = a.transform(lambda x: x + 1).filter(lambda x: x % 10 == 0)\n assert(len(a_xform_filtered) == 10)\n # the filtered data is already transformed\n for idx, sample in enumerate(a_xform_filtered):\n assert sample % 10 == 0\n\ndef test_dataset_shard():\n length = 9\n a = mx.gluon.data.SimpleDataset([i for i in range(length)])\n shard_0 = a.shard(4, 0)\n shard_1 = a.shard(4, 1)\n shard_2 = a.shard(4, 2)\n shard_3 = a.shard(4, 3)\n assert len(shard_0) + len(shard_1) + len(shard_2) + len(shard_3) == length\n assert len(shard_0) == 3\n assert len(shard_1) == 2\n assert len(shard_2) == 2\n assert len(shard_3) == 2\n total = 0\n for shard in [shard_0, shard_1, shard_2, shard_3]:\n for idx, sample in enumerate(shard):\n total += sample\n assert total == sum(a)\n\ndef test_dataset_shard_handle():\n length = 9\n a = mx.gluon.data.SimpleDataset(np.arange(length))\n shard_0 = a.shard(4, 0).__mx_handle__()\n shard_1 = a.shard(4, 1).__mx_handle__()\n shard_2 = a.shard(4, 2).__mx_handle__()\n shard_3 = a.shard(4, 3).__mx_handle__()\n assert len(shard_0) + len(shard_1) + len(shard_2) + len(shard_3) == length\n assert len(shard_0) == 3\n assert len(shard_1) == 2\n assert len(shard_2) == 2\n assert len(shard_3) == 2\n total = 0\n for shard in [shard_0, shard_1, shard_2, shard_3]:\n for idx, sample in enumerate(shard):\n total += sample\n assert total == sum(a)\n\ndef test_dataset_take():\n length = 100\n a = mx.gluon.data.SimpleDataset([i for i in range(length)])\n a_take_full = a.take(1000)\n assert len(a_take_full) == length\n a_take_full = a.take(None)\n assert len(a_take_full) == length\n count = 10\n a_take_10 = a.take(count)\n assert len(a_take_10) == count\n expected_total = sum([i for i in range(count)])\n total = 0\n for idx, sample in enumerate(a_take_10):\n assert sample < count\n total += sample\n assert total == expected_total\n\n a_xform_take_10 = a.transform(lambda x: x * 10).take(count)\n assert len(a_xform_take_10) == count\n expected_total = sum([i * 10 for i in range(count)])\n total = 0\n for idx, sample in enumerate(a_xform_take_10):\n assert sample < count * 10\n total += sample\n assert total == expected_total\n\ndef test_dataset_take_handle():\n length = 100\n a = mx.gluon.data.SimpleDataset(np.arange(length))\n a_take_full = a.take(1000).__mx_handle__()\n assert len(a_take_full) == length\n a_take_full = a.take(None).__mx_handle__()\n assert len(a_take_full) == length\n count = 10\n a_take_10 = a.take(count).__mx_handle__()\n assert len(a_take_10) == count\n expected_total = sum([i for i in range(count)])\n total = 0\n for idx, sample in enumerate(a_take_10):\n assert sample < count\n total += sample\n assert total == expected_total\n\n a_xform_take_10 = a.take(count).__mx_handle__()\n assert 
len(a_xform_take_10) == count\n expected_total = sum([i for i in range(count)])\n total = 0\n for idx, sample in enumerate(a_xform_take_10):\n assert sample < count\n total += sample\n assert total == expected_total\n\ndef test_dataloader_scope():\n \"\"\"\n Bug: Gluon DataLoader terminates the process pool early while\n _MultiWorkerIter is operating on the pool.\n\n Tests that DataLoader is not garbage collected while the iterator is\n in use.\n \"\"\"\n args = {'num_workers': 1, 'batch_size': 2}\n dataset = nd.ones(5)\n iterator = iter(DataLoader(\n dataset,\n batchify_fn=batchify,\n **args\n )\n )\n\n item = next(iterator)\n\n assert item is not None\n\ndef test_mx_datasets_handle():\n # _DownloadedDataset\n mnist = mx.gluon.data.vision.MNIST(train=False).__mx_handle__()\n assert len(mnist) == 10000\n cifar10 = mx.gluon.data.vision.CIFAR10(train=False).__mx_handle__()\n assert len(cifar10) == 10000\n\n # _SampledDataset\n s_mnist = mnist.take(100).__mx_handle__()\n assert len(s_mnist) == 100\n assert np.all(s_mnist[0][0].asnumpy() == mnist[0][0].asnumpy())\n assert s_mnist[0][1] == mnist[0][1]\n\n # ArrayDataset\n mc = mx.gluon.data.ArrayDataset(mnist.take(100), cifar10.take(100)).__mx_handle__()\n assert len(mc) == 100\n assert len(mc[0]) == 4 # two from mnist, two from cifar10\n assert mc[0][1] == mnist[0][1]\n assert mc[0][3] == cifar10[0][1]\n\ndef test_mx_data_loader():\n from mxnet.gluon.data.dataloader import DataLoader\n\n dataset = mx.gluon.data.vision.MNIST(train=False)\n dl = DataLoader(num_workers=0, dataset=dataset, batch_size=32)\n for _ in dl:\n pass\n\ndef test_mx_data_loader_nopython():\n from mxnet.gluon.data.dataloader import DataLoader\n from mxnet.gluon.data.vision.transforms import ToTensor\n dataset = mx.gluon.data.vision.MNIST(train=False)\n dl1 = DataLoader(dataset=dataset.transform_first(ToTensor()), batch_size=32, try_nopython=True, shuffle=False)\n dl2 = DataLoader(dataset=dataset.transform_first(ToTensor()), batch_size=32, try_nopython=False, shuffle=False)\n assert len(dl1) == len(dl2)\n assert np.all(next(iter(dl1))[1].asnumpy() == next(iter(dl2))[1].asnumpy())\n for _ in dl1:\n pass\n\ndef test_batchify_stack():\n a = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])\n b = np.array([[5, 6, 7, 8], [1, 2, 3, 4]])\n bf = mx.gluon.data.batchify.Stack()\n bf_handle = bf.__mx_handle__()\n c = bf([a, b])\n d = bf_handle([a, b])\n assert c.shape == d.shape\n assert mx.test_utils.almost_equal(c.asnumpy(), d.asnumpy())\n assert mx.test_utils.almost_equal(c.asnumpy(), np.stack((a, b)))\n\ndef test_batchify_pad():\n a = np.array([[1, 2, 3, 4], [11, 12, 13, 14]])\n b = np.array([[4, 5, 6]])\n c = np.array([[9, 10]])\n bf = mx.gluon.data.batchify.Pad(val=-1)\n bf_handle = bf.__mx_handle__()\n d = bf([a, b, c])\n e = bf_handle([a, b, c])\n assert d.shape == e.shape\n assert mx.test_utils.almost_equal(d.asnumpy(), e.asnumpy())\n expected = np.array([[[ 1., 2., 3., 4.], [11., 12., 13., 14.]],\n [[ 4., 5., 6., -1.], [-1., -1., -1., -1.]],\n [[ 9., 10., -1., -1.], [-1., -1., -1., -1.]]])\n assert mx.test_utils.almost_equal(d.asnumpy(), expected)\n\ndef test_batchify_group():\n a = [np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), np.array([[1, 2, 3, 4], [11, 12, 13, 14]])]\n b = [np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), np.array([[4, 5, 6]])]\n c = [np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), np.array([[9, 10]])]\n bf = mx.gluon.data.batchify.Group(mx.gluon.data.batchify.Stack(), mx.gluon.data.batchify.Pad(val=-1))\n bf_handle = bf.__mx_handle__()\n d = bf([a, b, c])\n e = bf_handle([a, b, c])\n 
assert d[0].shape == e[0].shape\n assert d[1].shape == e[1].shape\n print(d[0].asnumpy(), ',', e[0].asnumpy(), ',', e[1].asnumpy())\n assert mx.test_utils.almost_equal(d[0].asnumpy(), e[0].asnumpy())\n assert mx.test_utils.almost_equal(d[1].asnumpy(), e[1].asnumpy())\n assert mx.test_utils.almost_equal(d[0].asnumpy(), np.stack((a[0], b[0], c[0])))\n expected = np.array([[[ 1., 2., 3., 4.], [11., 12., 13., 14.]],\n [[ 4., 5., 6., -1.], [-1., -1., -1., -1.]],\n [[ 9., 10., -1., -1.], [-1., -1., -1., -1.]]])\n assert mx.test_utils.almost_equal(d[1].asnumpy(), expected)\n" ]
[ [ "numpy.pad", "numpy.array", "numpy.random.rand", "numpy.asarray", "numpy.ones", "numpy.random.uniform", "numpy.arange", "numpy.stack" ] ]
mehdidc/keras-yolo3
[ "459b08438b13b6aacd1464960b1ad7d816a601d6" ]
[ "yolo3/model.py" ]
[ "\"\"\"YOLO_v3 Model Defined in Keras.\"\"\"\n\nfrom functools import wraps\n\nimport numpy as np\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\nfrom keras.regularizers import l2\n\nfrom yolo3.utils import compose\n\n\n@wraps(Conv2D)\ndef DarknetConv2D(*args, **kwargs):\n \"\"\"Wrapper to set Darknet parameters for Convolution2D.\"\"\"\n darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}\n darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'\n darknet_conv_kwargs.update(kwargs)\n return Conv2D(*args, **darknet_conv_kwargs)\n\ndef DarknetConv2D_BN_Leaky(*args, **kwargs):\n \"\"\"Darknet Convolution2D followed by BatchNormalization and LeakyReLU.\"\"\"\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(\n DarknetConv2D(*args, **no_bias_kwargs),\n BatchNormalization(),\n LeakyReLU(alpha=0.1))\n\ndef resblock_body(x, num_filters, num_blocks):\n '''A series of resblocks starting with a downsampling Convolution2D'''\n # Darknet uses left and top padding instead of 'same' mode\n x = ZeroPadding2D(((1,0),(1,0)))(x)\n x = DarknetConv2D_BN_Leaky(num_filters, (3,3), strides=(2,2))(x)\n for i in range(num_blocks):\n y = compose(\n DarknetConv2D_BN_Leaky(num_filters//2, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters, (3,3)))(x)\n x = Add()([x,y])\n return x\n\ndef darknet_body(x):\n '''Darknent body having 52 Convolution2D layers'''\n x = DarknetConv2D_BN_Leaky(32, (3,3))(x)\n x = resblock_body(x, 64, 1)\n x = resblock_body(x, 128, 2)\n x = resblock_body(x, 256, 8)\n x = resblock_body(x, 512, 8)\n x = resblock_body(x, 1024, 4)\n return x\n\ndef make_last_layers(x, num_filters, out_filters):\n '''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer'''\n x = compose(\n DarknetConv2D_BN_Leaky(num_filters, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D_BN_Leaky(num_filters, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)\n y = compose(\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D(out_filters, (1,1)))(x)\n return x, y\n\n\ndef yolo_body(inputs, num_anchors, num_classes):\n \"\"\"Create YOLO_V3 model CNN body in Keras.\"\"\"\n darknet = Model(inputs, darknet_body(inputs))\n x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5))\n\n x = compose(\n DarknetConv2D_BN_Leaky(256, (1,1)),\n UpSampling2D(2))(x)\n x = Concatenate()([x,darknet.layers[152].output])\n x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5))\n\n x = compose(\n DarknetConv2D_BN_Leaky(128, (1,1)),\n UpSampling2D(2))(x)\n x = Concatenate()([x,darknet.layers[92].output])\n x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5))\n\n return Model(inputs, [y1,y2,y3])\n\ndef tiny_yolo_body(inputs, num_anchors, num_classes):\n '''Create Tiny YOLO_v3 model CNN body in keras.'''\n x1 = compose(\n DarknetConv2D_BN_Leaky(16, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(32, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(64, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(128, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n 
DarknetConv2D_BN_Leaky(256, (3,3)))(inputs)\n x2 = compose(\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(512, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(1,1), padding='same'),\n DarknetConv2D_BN_Leaky(1024, (3,3)),\n DarknetConv2D_BN_Leaky(256, (1,1)))(x1)\n y1 = compose(\n DarknetConv2D_BN_Leaky(512, (3,3)),\n DarknetConv2D(num_anchors*(num_classes+5), (1,1)))(x2)\n\n x2 = compose(\n DarknetConv2D_BN_Leaky(128, (1,1)),\n UpSampling2D(2))(x2)\n y2 = compose(\n Concatenate(),\n DarknetConv2D_BN_Leaky(256, (3,3)),\n DarknetConv2D(num_anchors*(num_classes+5), (1,1)))([x2,x1])\n\n return Model(inputs, [y1,y2])\n\n\ndef yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):\n \"\"\"Convert final layer features to bounding box parameters.\"\"\"\n num_anchors = len(anchors)\n # Reshape to batch, height, width, num_anchors, box_params.\n anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])\n\n grid_shape = K.shape(feats)[1:3] # height, width\n grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),\n [1, grid_shape[1], 1, 1])\n grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),\n [grid_shape[0], 1, 1, 1])\n grid = K.concatenate([grid_x, grid_y])\n grid = K.cast(grid, K.dtype(feats))\n\n feats = K.reshape(\n feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])\n\n # Adjust preditions to each spatial grid point and anchor size.\n box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))\n box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))\n box_confidence = K.sigmoid(feats[..., 4:5])\n box_class_probs = K.sigmoid(feats[..., 5:])\n\n if calc_loss == True:\n return grid, feats, box_xy, box_wh\n return box_xy, box_wh, box_confidence, box_class_probs\n\n\ndef yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):\n '''Get corrected boxes'''\n box_yx = box_xy[..., ::-1]\n box_hw = box_wh[..., ::-1]\n input_shape = K.cast(input_shape, K.dtype(box_yx))\n image_shape = K.cast(image_shape, K.dtype(box_yx))\n new_shape = K.round(image_shape * K.min(input_shape/image_shape))\n offset = (input_shape-new_shape)/2./input_shape\n scale = input_shape/new_shape\n box_yx = (box_yx - offset) * scale\n box_hw *= scale\n\n box_mins = box_yx - (box_hw / 2.)\n box_maxes = box_yx + (box_hw / 2.)\n boxes = K.concatenate([\n box_mins[..., 0:1], # y_min\n box_mins[..., 1:2], # x_min\n box_maxes[..., 0:1], # y_max\n box_maxes[..., 1:2] # x_max\n ])\n\n # Scale boxes back to original image shape.\n boxes *= K.concatenate([image_shape, image_shape])\n return boxes\n\n\ndef yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):\n '''Process Conv layer output'''\n box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats,\n anchors, num_classes, input_shape)\n boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)\n boxes = K.reshape(boxes, [-1, 4])\n box_scores = box_confidence * box_class_probs\n box_scores = K.reshape(box_scores, [-1, num_classes])\n return boxes, box_scores\n\n\ndef yolo_eval(yolo_outputs,\n anchors,\n num_classes,\n image_shape,\n max_boxes=20,\n score_threshold=.6,\n iou_threshold=.5):\n \"\"\"Evaluate YOLO model on given input and return filtered boxes.\"\"\"\n num_layers = len(yolo_outputs)\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] # default setting\n input_shape = K.shape(yolo_outputs[0])[1:3] * 
32\n boxes = []\n box_scores = []\n for l in range(num_layers):\n _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],\n anchors[anchor_mask[l]], num_classes, input_shape, image_shape)\n boxes.append(_boxes)\n box_scores.append(_box_scores)\n boxes = K.concatenate(boxes, axis=0)\n box_scores = K.concatenate(box_scores, axis=0)\n\n mask = box_scores >= score_threshold\n if max_boxes is None:\n max_boxes = boxes.shape[0]\n max_boxes_tensor = K.constant(max_boxes, dtype='int32')\n boxes_ = []\n scores_ = []\n classes_ = []\n for c in range(num_classes):\n # TODO: use keras backend instead of tf.\n class_boxes = tf.boolean_mask(boxes, mask[:, c])\n class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])\n nms_index = tf.image.non_max_suppression(\n class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)\n class_boxes = K.gather(class_boxes, nms_index)\n class_box_scores = K.gather(class_box_scores, nms_index)\n classes = K.ones_like(class_box_scores, 'int32') * c\n boxes_.append(class_boxes)\n scores_.append(class_box_scores)\n classes_.append(classes)\n boxes_ = K.concatenate(boxes_, axis=0)\n scores_ = K.concatenate(scores_, axis=0)\n classes_ = K.concatenate(classes_, axis=0)\n\n return boxes_, scores_, classes_\n\n\ndef preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):\n '''Preprocess true boxes to training input format\n\n Parameters\n ----------\n true_boxes: array, shape=(m, T, 5)\n Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.\n input_shape: array-like, hw, multiples of 32\n anchors: array, shape=(N, 2), wh\n num_classes: integer\n\n Returns\n -------\n y_true: list of array, shape like yolo_outputs, xywh are reletive value\n\n '''\n assert (true_boxes[..., 4]<num_classes).all(), 'class id must be less than num_classes'\n num_layers = len(anchors)//3 # default setting\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]\n\n true_boxes = np.array(true_boxes, dtype='float32')\n input_shape = np.array(input_shape, dtype='int32')\n boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2\n boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]\n true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]\n true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]\n\n m = true_boxes.shape[0]\n grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]\n y_true = [np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes),\n dtype='float32') for l in range(num_layers)]\n\n # Expand dim to apply broadcasting.\n anchors = np.expand_dims(anchors, 0)\n anchor_maxes = anchors / 2.\n anchor_mins = -anchor_maxes\n valid_mask = boxes_wh[..., 0]>0\n\n for b in range(m):\n # Discard zero rows.\n wh = boxes_wh[b, valid_mask[b]]\n if len(wh)==0: continue\n # Expand dim to apply broadcasting.\n wh = np.expand_dims(wh, -2)\n box_maxes = wh / 2.\n box_mins = -box_maxes\n\n intersect_mins = np.maximum(box_mins, anchor_mins)\n intersect_maxes = np.minimum(box_maxes, anchor_maxes)\n intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n box_area = wh[..., 0] * wh[..., 1]\n anchor_area = anchors[..., 0] * anchors[..., 1]\n iou = intersect_area / (box_area + anchor_area - intersect_area)\n\n # Find best anchor for each true box\n best_anchor = np.argmax(iou, axis=-1)\n\n for t, n in enumerate(best_anchor):\n for l in range(num_layers):\n if n in anchor_mask[l]:\n i = 
np.floor(true_boxes[b,t,0]*grid_shapes[l][1]).astype('int32')\n j = np.floor(true_boxes[b,t,1]*grid_shapes[l][0]).astype('int32')\n k = anchor_mask[l].index(n)\n c = true_boxes[b,t, 4].astype('int32')\n y_true[l][b, j, i, k, 0:4] = true_boxes[b,t, 0:4]\n y_true[l][b, j, i, k, 4] = 1\n y_true[l][b, j, i, k, 5+c] = 1\n\n return y_true\n\n\ndef box_iou(b1, b2):\n '''Return iou tensor\n\n Parameters\n ----------\n b1: tensor, shape=(i1,...,iN, 4), xywh\n b2: tensor, shape=(j, 4), xywh\n\n Returns\n -------\n iou: tensor, shape=(i1,...,iN, j)\n\n '''\n\n # Expand dim to apply broadcasting.\n b1 = K.expand_dims(b1, -2)\n b1_xy = b1[..., :2]\n b1_wh = b1[..., 2:4]\n b1_wh_half = b1_wh/2.\n b1_mins = b1_xy - b1_wh_half\n b1_maxes = b1_xy + b1_wh_half\n\n # Expand dim to apply broadcasting.\n b2 = K.expand_dims(b2, 0)\n b2_xy = b2[..., :2]\n b2_wh = b2[..., 2:4]\n b2_wh_half = b2_wh/2.\n b2_mins = b2_xy - b2_wh_half\n b2_maxes = b2_xy + b2_wh_half\n\n intersect_mins = K.maximum(b1_mins, b2_mins)\n intersect_maxes = K.minimum(b1_maxes, b2_maxes)\n intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n b1_area = b1_wh[..., 0] * b1_wh[..., 1]\n b2_area = b2_wh[..., 0] * b2_wh[..., 1]\n iou = intersect_area / (b1_area + b2_area - intersect_area)\n\n return iou\n\n\ndef yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False):\n '''Return yolo_loss tensor\n\n Parameters\n ----------\n yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body\n y_true: list of array, the output of preprocess_true_boxes\n anchors: array, shape=(N, 2), wh\n num_classes: integer\n ignore_thresh: float, the iou threshold whether to ignore object confidence loss\n\n Returns\n -------\n loss: tensor, shape=(1,)\n\n '''\n num_layers = len(anchors)//3 # default setting\n yolo_outputs = args[:num_layers]\n y_true = args[num_layers:]\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]\n input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))\n grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)]\n loss = 0\n m = K.shape(yolo_outputs[0])[0] # batch size, tensor\n mf = K.cast(m, K.dtype(yolo_outputs[0]))\n\n for l in range(num_layers):\n object_mask = y_true[l][..., 4:5]\n true_class_probs = y_true[l][..., 5:]\n\n grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],\n anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True)\n pred_box = K.concatenate([pred_xy, pred_wh])\n\n # Darknet raw box to calculate loss.\n raw_true_xy = y_true[l][..., :2]*grid_shapes[l][::-1] - grid\n raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])\n raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf\n box_loss_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]\n\n # Find ignore mask, iterate over each of batch.\n ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)\n object_mask_bool = K.cast(object_mask, 'bool')\n def loop_body(b, ignore_mask):\n true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0])\n iou = box_iou(pred_box[b], true_box)\n best_iou = K.max(iou, axis=-1)\n ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))\n return b+1, ignore_mask\n _, ignore_mask = K.control_flow_ops.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])\n ignore_mask = 
ignore_mask.stack()\n ignore_mask = K.expand_dims(ignore_mask, -1)\n\n # K.binary_crossentropy is helpful to avoid exp overflow.\n xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[...,0:2], from_logits=True)\n wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh-raw_pred[...,2:4])\n confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True)+ \\\n (1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True) * ignore_mask\n class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[...,5:], from_logits=True)\n\n xy_loss = K.sum(xy_loss) / mf\n wh_loss = K.sum(wh_loss) / mf\n confidence_loss = K.sum(confidence_loss) / mf\n class_loss = K.sum(class_loss) / mf\n loss += xy_loss + wh_loss + confidence_loss + class_loss\n if print_loss:\n loss = tf.Print(loss, [loss, xy_loss, wh_loss, confidence_loss, class_loss, K.sum(ignore_mask)], message='loss: ')\n return loss\n" ]
[ [ "numpy.array", "tensorflow.image.non_max_suppression", "numpy.minimum", "numpy.argmax", "numpy.floor", "tensorflow.boolean_mask", "numpy.expand_dims", "numpy.maximum" ] ]
houdinii/Interactive-Dashboards-and-Data-Apps-with-Plotly-and-Dash
[ "10e04ed5ab6e891f85417ba223706cfa7144f94b" ]
[ "chapter_05/app_v5_3.py" ]
[ "import re\n\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Output, Input\nfrom dash.exceptions import PreventUpdate\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport pandas as pd\n\n\napp = dash.Dash(__name__, external_stylesheets=[dbc.themes.COSMO])\n\n\npoverty_data = pd.read_csv('../data/PovStatsData.csv')\npoverty = pd.read_csv('../data/poverty.csv', low_memory=False)\n\n\ngini = 'GINI index (World Bank estimate)'\ngini_df = poverty[poverty[gini].notna()]\n\n\nregions = ['East Asia & Pacific', 'Europe & Central Asia',\n 'Fragile and conflict affected situations', 'High income',\n 'IDA countries classified as fragile situations', 'IDA total',\n 'Latin America & Caribbean', 'Low & middle income', 'Low income',\n 'Lower middle income', 'Middle East & North Africa',\n 'Middle income', 'South Asia', 'Sub-Saharan Africa',\n 'Upper middle income', 'World']\n\npopulation_df = poverty_data[~poverty_data['Country Name'].isin(regions) &\n (poverty_data['Indicator Name']== 'Population, total')]\n\nincome_share_df = poverty.filter(regex='Country Name|^year$|Income share.*?20').dropna()\nincome_share_df = income_share_df.rename(columns={\n 'Income share held by lowest 20%': '1 Income share held by lowest 20%',\n 'Income share held by second 20%': '2 Income share held by second 20%',\n 'Income share held by third 20%': '3 Income share held by third 20%',\n 'Income share held by fourth 20%': '4 Income share held by fourth 20%',\n 'Income share held by highest 20%': '5 Income share held by highest 20%'\n}).sort_index(axis=1)\n\nincome_share_df.columns = [re.sub('\\d Income share held by ', '', col).title()\n for col in income_share_df.columns]\nincome_share_cols = income_share_df.columns[:-2]\n\ndef make_empty_fig():\n fig = go.Figure()\n fig.layout.paper_bgcolor = '#E5ECF6'\n fig.layout.plot_bgcolor = '#E5ECF6'\n return fig\n\napp.layout = html.Div([\n html.H1('Poverty And Equity Database'),\n html.H2('The World Bank'),\n # dcc.Dropdown(id='country',\n # options=[{'label': country, 'value': country}\n # for country in poverty_data['Country Name'].unique()]),\n # html.Br(),\n # html.Div(id='report'),\n html.Br(),\n dcc.Dropdown(id='year_dropdown',\n value='2010',\n options=[{'label': year, 'value': str(year)}\n for year in range(1974, 2019)]),\n dcc.Graph(id='population_chart'),\n html.Br(),\n html.H2('Gini Index - World Bank Data', style={'textAlign': 'center'}),\n html.Br(),\n dbc.Row([\n dbc.Col(lg=1),\n dbc.Col([\n dbc.Label('Year'),\n dcc.Dropdown(id='gini_year_dropdown',\n placeholder='Select a year',\n options=[{'label': year, 'value': year}\n for year in gini_df['year'].drop_duplicates().sort_values()]),\n html.Br(),\n dcc.Graph(id='gini_year_barchart',\n figure=make_empty_fig())\n ], md=12, lg=5),\n dbc.Col([\n dbc.Label('Countries'),\n dcc.Dropdown(id='gini_country_dropdown',\n placeholder='Select one or more countries',\n multi=True,\n options=[{'label': country, 'value': country}\n for country in gini_df['Country Name'].unique()]),\n html.Br(),\n dcc.Graph(id='gini_country_barchart',\n figure=make_empty_fig())\n ], md=12, lg=5),\n ]),\n dbc.Row([\n dbc.Col(lg=1),\n dbc.Col([\n html.Br(),\n html.H2('Income Share Distribution', style={'textAlign': 'center'}),\n html.Br(),\n dbc.Label('Country'),\n dcc.Dropdown(id='income_share_country_dropdown', \n placeholder='Select a country',\n options=[{'label': country, 'value': country}\n for country in 
income_share_df['Country Name'].unique()]),\n dcc.Graph(id='income_share_country_barchart',\n figure=make_empty_fig())\n ], lg=10)\n\n ]),\n\n dbc.Tabs([\n dbc.Tab([\n html.Ul([\n html.Br(),\n html.Li('Number of Economies: 170'),\n html.Li('Temporal Coverage: 1974 - 2019'),\n html.Li('Update Frequency: Quarterly'),\n html.Li('Last Updated: March 18, 2020'),\n html.Li([\n 'Source: ',\n html.A('https://datacatalog.worldbank.org/dataset/poverty-and-equity-database',\n href='https://datacatalog.worldbank.org/dataset/poverty-and-equity-database')\n ])\n ])\n\n ], label='Key Facts'),\n dbc.Tab([\n html.Ul([\n html.Br(),\n html.Li('Book title: Interactive Dashboards and Data Apps with Plotly and Dash'),\n html.Li(['GitHub repo: ',\n html.A('https://github.com/PacktPublishing/Interactive-Dashboards-and-Data-Apps-with-Plotly-and-Dash',\n href='https://github.com/PacktPublishing/Interactive-Dashboards-and-Data-Apps-with-Plotly-and-Dash')\n ])\n ])\n ], label='Project Info')\n ]),\n\n], style={'backgroundColor': '#E5ECF6'})\n\n\n# @app.callback(Output('report', 'children'),\n# Input('country', 'value'))\n# def display_country_report(country):\n# if country is None:\n# return ''\n\n# filtered_df = poverty_data[(poverty_data['Country Name']==country) &\n# (poverty_data['Indicator Name']=='Population, total')]\n# population = filtered_df.loc[:, '2010'].values[0]\n\n# return [html.H3(country),\n# f'The population of {country} in 2010 was {population:,.0f}.']\n\n\[email protected](Output('population_chart', 'figure'),\n Input('year_dropdown', 'value'))\ndef plot_countries_by_population(year):\n fig = go.Figure()\n year_df = population_df[['Country Name', year]].sort_values(year, ascending=False)[:20]\n fig.add_bar(x=year_df['Country Name'],\n y=year_df[year])\n fig.layout.title = f'Top twenty countries by population - {year}'\n fig.layout.paper_bgcolor = '#E5ECF6'\n return fig\n\n\[email protected](Output('gini_year_barchart', 'figure'),\n Input('gini_year_dropdown', 'value'))\ndef plot_gini_year_barchart(year):\n if not year:\n raise PreventUpdate\n df = gini_df[gini_df['year'].eq(year)].sort_values(gini).dropna(subset=[gini])\n n_countries = len(df['Country Name'])\n fig = px.bar(df,\n x=gini,\n y='Country Name', \n orientation='h',\n height=200 + (n_countries*20), \n width=650,\n title=gini + ' ' + str(year))\n fig.layout.paper_bgcolor = '#E5ECF6' \n return fig\n\n\[email protected](Output('gini_country_barchart', 'figure'), Input('gini_country_dropdown', 'value'))\ndef plot_gini_country_barchart(countries):\n if not countries:\n raise PreventUpdate\n df = gini_df[gini_df['Country Name'].isin(countries)].dropna(subset=[gini])\n fig = px.bar(df,\n x='year',\n y=gini,\n height=100 + (250*len(countries)),\n facet_row='Country Name',\n color='Country Name',\n labels={gini: 'Gini Index'},\n title=''.join([gini, '<br><b>', ', '.join(countries), '</b>']))\n fig.layout.paper_bgcolor = '#E5ECF6' \n return fig\n\n\[email protected](Output('income_share_country_barchart', 'figure'), Input('income_share_country_dropdown', 'value'))\ndef plot_income_share_barchart(country):\n if country is None:\n raise PreventUpdate\n fig = px.bar(income_share_df[income_share_df['Country Name']==country].dropna(), \n x=income_share_cols,\n y='Year',\n barmode='stack',\n height=600, \n hover_name='Country Name',\n title=f'Income Share Quintiles - {country}',\n orientation='h',\n )\n fig.layout.legend.title = None\n fig.layout.legend.orientation = 'h'\n fig.layout.legend.x = 0.2\n fig.layout.xaxis.title = 'Percent of Total Income'\n 
fig.layout.paper_bgcolor = '#E5ECF6'\n fig.layout.plot_bgcolor = '#E5ECF6'\n return fig\n\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n" ]
[ [ "pandas.read_csv" ] ]
arsenal9971/DeeMicrolocalReconstruction
[ "0d5bbee86789d2c3acc6e9c872d270f46190d857" ]
[ "Task_adapted_recon_edge/learned_primal_dual_edge_detect.py" ]
[ "\"\"\"Partially learned gradient descent scheme for ellipses.\"\"\"\n\nimport os\nimport adler\nadler.util.gpu.setup_one_gpu()\n\nfrom adler.tensorflow import prelu, cosine_decay, reference_unet\n\nimport tensorflow as tf\nimport numpy as np\nimport odl\nimport odl.contrib.tensorflow\nimport scipy.ndimage\nimport lcr_data\n\n\nconst = 1e-2\n\n\nnp.random.seed(0)\nname = os.path.splitext(os.path.basename(__file__))[0] + '/' + str(const)\n\nsess = tf.InteractiveSession()\n\n# Create ODL data structures\nsize = 128\nspace = odl.uniform_discr([-64, -64], [64, 64], [size, size],\n dtype='float32')\n\ngeometry = odl.tomo.parallel_beam_geometry(space, num_angles=30)\noperator = odl.tomo.RayTransform(space, geometry)\n\n# Ensure operator has fixed operator norm for scale invariance\nopnorm = odl.power_method_opnorm(operator)\noperator = (1 / opnorm) * operator\n\n# Create tensorflow layer from odl operator\nodl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer(operator,\n 'RayTransform')\nodl_op_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(operator.adjoint,\n 'RayTransformAdjoint')\n\n# edge detection\ngrad = odl.Gradient(space)\nedge_detector = odl.PointwiseNorm(grad.range) * grad\n\n# User selected paramters\nn_data = 5\nn_iter = 10\nn_primal = 5\nn_dual = 5\n\n# User selected paramters\nhead = lcr_data.davids_head_density()\nhead_true = lcr_data.davids_head_materials() == 4\n\ndef generate_data(validation=False):\n \"\"\"Generate a set of random data.\"\"\"\n n_generate = 1 if validation else n_data\n\n y_arr_rt = np.empty((n_generate, operator.range.shape[0], operator.range.shape[1], 1), dtype='float32')\n x_tomo_true_arr = np.empty((n_generate, space.shape[0], space.shape[1], 1), dtype='float32')\n x_segment_true_arr = np.empty((n_generate, space.shape[0], space.shape[1], 1), dtype='float32')\n\n for i in range(n_generate):\n if validation:\n phantom_rt = head[..., 33]\n true = head_true[..., 33]\n else:\n idx = np.random.randint(64)\n \n if idx>=33:\n j = idx + 1\n else:\n j = idx\n phantom_rt = head[..., j]\n true = head_true[..., j]\n\n # Data augumentation\n angle = 20 * (np.random.rand() - 0.5)\n phantom_rt = scipy.ndimage.interpolation.rotate(phantom_rt, angle, reshape=False, order=1)\n true = np.round(scipy.ndimage.interpolation.rotate(true.astype(float), angle, reshape=False, order=1))\n\n roll_x = int(40 * (np.random.rand() - 0.5))\n roll_y = int(40 * (np.random.rand() - 0.5))\n phantom_rt = np.roll(phantom_rt, roll_x, axis=0)\n phantom_rt = np.roll(phantom_rt, roll_y, axis=1)\n true = np.roll(true, roll_x, axis=0)\n true = np.roll(true, roll_y, axis=1)\n\n phantom_rt = phantom_rt[::4, ::4]\n true = true[::4, ::4]\n\n data_rt = operator(phantom_rt)\n noisy_data_rt = data_rt + odl.phantom.white_noise(operator.range) * np.mean(np.abs(data_rt)) * 0.001\n\n x_tomo_true_arr[i, ..., 0] = phantom_rt\n x_segment_true_arr[i, ..., 0] = true\n y_arr_rt[i, ..., 0] = noisy_data_rt\n\n\n return y_arr_rt, x_tomo_true_arr, x_segment_true_arr\n\n\nwith tf.name_scope('placeholders'):\n x_true = tf.placeholder(tf.float32, shape=[None, size, size, 1], name=\"x_true\")\n y_rt = tf.placeholder(tf.float32, shape=[None, operator.range.shape[0], operator.range.shape[1], 1], name=\"y_rt\")\n edge_true = tf.placeholder(tf.float32, shape=[None, size, size, 1], name=\"edge_true\")\n is_training = tf.placeholder(tf.bool, shape=(), name='is_training')\n\n\ndef apply_conv(x, filters=32):\n return tf.layers.conv2d(x, filters=filters, kernel_size=3, padding='SAME',\n 
kernel_initializer=tf.contrib.layers.xavier_initializer())\n\n\nwith tf.name_scope('tomography'):\n with tf.name_scope('initial_values'):\n primal = tf.concat([tf.zeros_like(x_true)] * n_primal, axis=-1)\n dual = tf.concat([tf.zeros_like(y_rt)] * n_dual, axis=-1)\n\n for i in range(n_iter):\n with tf.variable_scope('dual_iterate_{}'.format(i)):\n evalop = odl_op_layer(primal[..., 1:2])\n update = tf.concat([dual, evalop, y_rt], axis=-1)\n\n update = prelu(apply_conv(update), name='prelu_1')\n update = prelu(apply_conv(update), name='prelu_2')\n update = apply_conv(update, filters=n_dual)\n dual = dual + update\n\n with tf.variable_scope('primal_iterate_{}'.format(i)):\n evalop = odl_op_layer_adjoint(dual[..., 0:1])\n update = tf.concat([primal, evalop], axis=-1)\n\n update = prelu(apply_conv(update), name='prelu_1')\n update = prelu(apply_conv(update), name='prelu_2')\n update = apply_conv(update, filters=n_primal)\n primal = primal + update\n\n x_result = primal[..., 0:1]\n\n\nwith tf.name_scope('edge_detect'):\n edge_result = reference_unet(x_result, 1,\n ndim=2,\n features=32,\n keep_prob=0.7,\n use_batch_norm=False,\n activation='relu',\n is_training=is_training,\n name='edge_result')\n\n\nwith tf.name_scope('loss'):\n residual = x_result - x_true\n squared_error = residual ** 2\n loss_tomography = tf.reduce_mean(squared_error)\n\n seg_error = tf.nn.sigmoid_cross_entropy_with_logits(labels=edge_true,\n logits=edge_result)\n loss_seg = tf.reduce_mean(seg_error)\n\n loss = 1e-2 * loss_tomography + loss_seg\n\n\nwith tf.name_scope('optimizer'):\n # Learning rate\n global_step = tf.Variable(0, trainable=False)\n maximum_steps = 100001\n starter_learning_rate = 1e-3\n learning_rate = cosine_decay(starter_learning_rate,\n global_step,\n maximum_steps,\n name='learning_rate')\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n opt_func = tf.train.AdamOptimizer(learning_rate=learning_rate,\n beta2=0.99)\n\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), 1)\n optimizer = opt_func.apply_gradients(zip(grads, tvars),\n global_step=global_step)\n\n\n# Summaries\n# tensorboard --logdir=...\n\nwith tf.name_scope('summaries'):\n tf.summary.scalar('loss', loss)\n tf.summary.scalar('loss_tomography', loss_tomography)\n tf.summary.scalar('loss_seg', loss_seg)\n tf.summary.scalar('psnr', -10 * tf.log(loss_tomography) / tf.log(10.0))\n\n tf.summary.image('x_result', x_result)\n tf.summary.image('x_true', x_true)\n tf.summary.image('x_edge_result', tf.sigmoid(edge_result))\n tf.summary.image('x_edge_true', edge_true)\n tf.summary.image('squared_error', squared_error)\n tf.summary.image('residual', residual)\n\n merged_summary = tf.summary.merge_all()\n test_summary_writer, train_summary_writer = adler.tensorflow.util.summary_writers(name, cleanup=True)\n\n# Initialize all TF variables\nsess.run(tf.global_variables_initializer())\n\n# Add op to save and restore\nsaver = tf.train.Saver()\n\n# Generate validation data\ny_arr_validate, x_true_arr_validate, edge_arr_validate = generate_data(validation=True)\n\nif 0:\n saver.restore(sess,\n adler.tensorflow.util.default_checkpoint_path(name))\n\n# Train the network\nfor i in range(0, maximum_steps):\n if i%10 == 0:\n y_arr, x_true_arr, edge_arr = generate_data()\n\n _, merged_summary_result_train, global_step_result = sess.run([optimizer, merged_summary, global_step],\n feed_dict={x_true: x_true_arr,\n y_rt: y_arr,\n edge_true: edge_arr,\n is_training: 
True})\n\n if i>0 and i%10 == 0:\n loss_result, merged_summary_result, global_step_result = sess.run([loss, merged_summary, global_step],\n feed_dict={x_true: x_true_arr_validate,\n y_rt: y_arr_validate,\n edge_true: edge_arr_validate,\n is_training: False})\n\n train_summary_writer.add_summary(merged_summary_result_train, global_step_result)\n test_summary_writer.add_summary(merged_summary_result, global_step_result)\n\n print('iter={}, loss={}'.format(global_step_result, loss_result))\n\n if i>0 and i%1000 == 0:\n saver.save(sess,\n adler.tensorflow.util.default_checkpoint_path(name))\n" ]
[ [ "numpy.random.rand", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.gradients", "tensorflow.zeros_like", "tensorflow.control_dependencies", "tensorflow.global_variables_initializer", "tensorflow.InteractiveSession", "tensorflow.trainable_variables", "numpy.empty", "tensorflow.concat", "tensorflow.sigmoid", "tensorflow.train.Saver", "tensorflow.Variable", "numpy.random.randint", "tensorflow.get_collection", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "numpy.roll", "tensorflow.log", "tensorflow.placeholder", "tensorflow.name_scope", "tensorflow.summary.merge_all", "tensorflow.summary.image", "numpy.random.seed", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "numpy.abs", "tensorflow.reduce_mean" ] ]
jyun25/librosa
[ "a297cac125175afe57fccbf5eecb65b79d088181" ]
[ "tests/test_effects.py" ]
[ "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"Unit tests for the effects module\"\"\"\nimport warnings\n\n# Disable cache\nimport os\n\ntry:\n os.environ.pop(\"LIBROSA_CACHE_DIR\")\nexcept KeyError:\n pass\n\nfrom contextlib2 import nullcontext as dnr\nimport numpy as np\nimport pytest\n\nimport librosa\n\n__EXAMPLE_FILE = os.path.join(\"tests\", \"data\", \"test1_22050.wav\")\n\n\[email protected](scope=\"module\", params=[22050, 44100])\ndef ysr(request):\n return librosa.load(__EXAMPLE_FILE, sr=request.param)\n\n\[email protected](\n \"rate,ctx\",\n [\n (0.25, dnr()),\n (0.25, dnr()),\n (1.0, dnr()),\n (2.0, dnr()),\n (4.0, dnr()),\n (-1, pytest.raises(librosa.ParameterError)),\n (0, pytest.raises(librosa.ParameterError)),\n ],\n)\ndef test_time_stretch(ysr, rate, ctx):\n\n with ctx:\n y, sr = ysr\n ys = librosa.effects.time_stretch(y, rate)\n\n orig_duration = librosa.get_duration(y, sr=sr)\n new_duration = librosa.get_duration(ys, sr=sr)\n\n # We don't have to be too precise here, since this goes through an STFT\n assert np.allclose(orig_duration, rate * new_duration, rtol=1e-2, atol=1e-3)\n\n\[email protected](\"n_steps\", [-1.5, 1.5, 5])\[email protected](\n \"bins_per_octave,ctx\",\n [\n (12, dnr()),\n (24, dnr()),\n (-1, pytest.raises(librosa.ParameterError)),\n (0, pytest.raises(librosa.ParameterError)),\n ],\n)\ndef test_pitch_shift(ysr, n_steps, bins_per_octave, ctx):\n\n with ctx:\n y, sr = ysr\n ys = librosa.effects.pitch_shift(\n y, sr, n_steps, bins_per_octave=bins_per_octave\n )\n\n orig_duration = librosa.get_duration(y, sr=sr)\n new_duration = librosa.get_duration(ys, sr=sr)\n\n # We don't have to be too precise here, since this goes through an STFT\n assert orig_duration == new_duration\n\n\[email protected](\"align_zeros\", [False, True])\ndef test_remix_mono(align_zeros):\n\n # without zc alignment\n y = np.asarray([1, 1, -1, -1, 2, 2, -1, -1, 1, 1], dtype=np.float)\n y_t = np.asarray([-1, -1, -1, -1, 1, 1, 1, 1, 2, 2], dtype=np.float)\n intervals = np.asarray([[2, 4], [6, 8], [0, 2], [8, 10], [4, 6]])\n\n y_out = librosa.effects.remix(y, intervals, align_zeros=align_zeros)\n assert np.allclose(y_out, y_t)\n\n\[email protected](\"align_zeros\", [False, True])\ndef test_remix_stereo(align_zeros):\n\n # without zc alignment\n y = np.asarray([1, 1, -1, -1, 2, 2, -1, -1, 1, 1], dtype=np.float)\n y_t = np.asarray([-1, -1, -1, -1, 1, 1, 1, 1, 2, 2], dtype=np.float)\n y = np.vstack([y, y])\n y_t = np.vstack([y_t, y_t])\n\n intervals = np.asarray([[2, 4], [6, 8], [0, 2], [8, 10], [4, 6]])\n\n y_out = librosa.effects.remix(y, intervals, align_zeros=align_zeros)\n assert np.allclose(y_out, y_t)\n\n\ndef test_hpss(ysr):\n\n y, sr = ysr\n\n y_harm, y_perc = librosa.effects.hpss(y)\n\n # Make sure that the residual energy is generally small\n y_residual = y - y_harm - y_perc\n\n rms_orig = librosa.feature.rms(y=y)\n rms_res = librosa.feature.rms(y=y_residual)\n\n assert np.percentile(rms_orig, 0.01) > np.percentile(rms_res, 0.99)\n\n\ndef test_percussive(ysr):\n\n y, sr = ysr\n\n yh1, yp1 = librosa.effects.hpss(y)\n\n yp2 = librosa.effects.percussive(y)\n\n assert np.allclose(yp1, yp2)\n\n\ndef test_harmonic(ysr):\n\n y, sr = ysr\n\n yh1, yp1 = librosa.effects.hpss(y)\n\n yh2 = librosa.effects.harmonic(y)\n\n assert np.allclose(yh1, yh2)\n\n\[email protected](scope=\"module\", params=[False, True], ids=[\"mono\", \"stereo\"])\ndef y_trim(request):\n # construct 5 seconds of stereo silence\n # Stick a sine wave in the middle three seconds\n\n sr = float(22050)\n 
trim_duration = 3.0\n y = np.sin(2 * np.pi * 440.0 * np.arange(0, trim_duration * sr) / sr)\n y = librosa.util.pad_center(y, 5 * sr)\n\n if request.param:\n y = np.vstack([y, np.zeros_like(y)])\n return y\n\n\[email protected](\"top_db\", [60, 40, 20])\[email protected](\"ref\", [1, np.max])\[email protected](\"trim_duration\", [3.0])\ndef test_trim(y_trim, top_db, ref, trim_duration):\n\n yt, idx = librosa.effects.trim(y_trim, top_db=top_db, ref=ref)\n\n # Test for index position\n fidx = [slice(None)] * y_trim.ndim\n fidx[-1] = slice(*idx.tolist())\n assert np.allclose(yt, y_trim[tuple(fidx)])\n\n # Verify logamp\n rms = librosa.feature.rms(y=librosa.to_mono(yt), center=False)\n logamp = librosa.power_to_db(rms ** 2, ref=ref, top_db=None)\n assert np.all(logamp > -top_db)\n\n # Verify logamp\n rms_all = librosa.feature.rms(y=librosa.to_mono(y_trim)).squeeze()\n logamp_all = librosa.power_to_db(rms_all ** 2, ref=ref, top_db=None)\n\n start = int(librosa.samples_to_frames(idx[0]))\n stop = int(librosa.samples_to_frames(idx[1]))\n assert np.all(logamp_all[:start] <= -top_db)\n assert np.all(logamp_all[stop:] <= -top_db)\n\n # Verify duration\n duration = librosa.get_duration(yt)\n assert np.allclose(duration, trim_duration, atol=1e-1), duration\n\n\ndef test_trim_empty():\n\n y = np.zeros(1)\n\n yt, idx = librosa.effects.trim(y, ref=1)\n\n assert yt.size == 0\n assert idx[0] == 0\n assert idx[1] == 0\n\n\[email protected](\n scope=\"module\",\n params=[0, 1, 2, 3],\n ids=[\"constant\", \"end-silent\", \"full-signal\", \"gaps\"],\n)\ndef y_split_idx(request):\n\n sr = 8192\n y = np.ones(5 * sr)\n\n if request.param == 0:\n # Constant\n idx_true = np.asarray([[0, 5 * sr]])\n\n elif request.param == 1:\n # end-silent\n y[::2] *= -1\n y[4 * sr :] = 0\n idx_true = np.asarray([[0, 4 * sr]])\n\n elif request.param == 2:\n # begin-silent\n y[::2] *= -1\n idx_true = np.asarray([[0, 5 * sr]])\n else:\n # begin and end are silent\n y[::2] *= -1\n\n # Zero out all but two intervals\n y[:sr] = 0\n y[2 * sr : 3 * sr] = 0\n y[4 * sr :] = 0\n\n # The true non-silent intervals\n idx_true = np.asarray([[sr, 2 * sr], [3 * sr, 4 * sr]])\n\n return y, idx_true\n\n\[email protected](\"frame_length\", [1024, 2048, 4096])\[email protected](\"hop_length\", [256, 512, 1024])\[email protected](\"top_db\", [20, 60, 80])\ndef test_split(y_split_idx, frame_length, hop_length, top_db):\n\n y, idx_true = y_split_idx\n\n intervals = librosa.effects.split(\n y, top_db=top_db, frame_length=frame_length, hop_length=hop_length\n )\n\n assert np.all(intervals <= y.shape[-1])\n\n int_match = librosa.util.match_intervals(intervals, idx_true)\n\n for i in range(len(intervals)):\n i_true = idx_true[int_match[i]]\n\n assert np.all(np.abs(i_true - intervals[i]) <= frame_length), intervals[i]\n\n\[email protected](\"coef\", [0.5, 0.99])\[email protected](\"zi\", [None, 0, [0]])\[email protected](\"return_zf\", [False, True])\[email protected](\"dtype\", [np.float32, np.float64])\ndef test_preemphasis(coef, zi, return_zf, dtype):\n x = np.arange(10, dtype=dtype)\n\n y = librosa.effects.preemphasis(x, coef=coef, zi=zi, return_zf=return_zf)\n\n if return_zf:\n y, zf = y\n\n assert np.allclose(y[1:], x[1:] - coef * x[:-1])\n assert x.dtype == y.dtype\n\n\[email protected](\"dtype\", [np.float32, np.float64])\ndef test_preemphasis_continue(dtype):\n\n # Compare pre-emphasis computed in parts to that of the whole sequence in one go\n x = np.arange(64, dtype=dtype)\n\n y1, zf1 = librosa.effects.preemphasis(x[:32], return_zf=True)\n y2, zf2 = 
librosa.effects.preemphasis(x[32:], return_zf=True, zi=zf1)\n\n y_all, zf_all = librosa.effects.preemphasis(x, return_zf=True)\n\n assert np.allclose(y_all, np.concatenate([y1, y2]))\n assert np.allclose(zf2, zf_all)\n assert x.dtype == y_all.dtype\n\n\[email protected](\"coef\", [0.5, 0.99])\[email protected](\"zi\", [None, 0, [0]])\[email protected](\"return_zf\", [False, True])\[email protected](\"dtype\", [np.float32, np.float64])\ndef test_deemphasis(coef, zi, return_zf, dtype):\n x = np.arange(10, dtype=dtype)\n\n y = librosa.effects.preemphasis(x, coef=coef, zi=zi, return_zf=return_zf)\n\n if return_zf:\n y, zf = y\n\n y_deemph = librosa.effects.deemphasis(y, coef=coef, zi=zi)\n\n assert np.allclose(x, y_deemph)\n assert x.dtype == y_deemph.dtype\n" ]
[ [ "numpy.concatenate", "numpy.zeros_like", "numpy.asarray", "numpy.zeros", "numpy.percentile", "numpy.ones", "numpy.allclose", "numpy.arange", "numpy.abs", "numpy.all", "numpy.vstack" ] ]
magnetar-iiith/PRIL
[ "dea35169ca823115b96b8fe88ca7ef545b353b08" ]
[ "helper.py" ]
[ "import numpy as np\n\ndef get_state_rewards(env):\n rews = [0.] * env.nS\n for i in range(env.nS):\n dictn = env.P[i]\n for a in range (env.nA):\n li = dictn[a]\n for (p,s,r,d) in li:\n rews[s] += p * r\n return rews\n\ndef get_transition_prob_matrix(env):\n\n tns_prob = np.zeros((env.nS,env.nA,env.nS))\n for i in range(env.nS):\n dicn = env.P[i]\n for a in range(env.nA):\n li = dicn[a]\n for (p,s,r,d) in li:\n tns_prob[i][a][s] += p\n return tns_prob\n\ndef to_s(row, col, ncol):\n return row*ncol + col\n\ndef pretty(d, indent=0):\n for key, value in d.items():\n print('\\t' * indent + str(key))\n if isinstance(value, dict):\n pretty(value, indent+1)\n else:\n print('\\t' * (indent+1) + str(value))\n" ]
[ [ "numpy.zeros" ] ]
wohllab/milkyway_proteomics
[ "622969f7f4a5955ae2bff299ae7b08572d422814" ]
[ "galaxy_milkyway_files/tools/wohl-proteomics/MSGFcrux/tab_percolator_to_fido.py" ]
[ "import sys,os\nimport pandas\nfrom Bio import SeqIO\n\n\"\"\"\nConverter script for Percolator output to create Fido-readable files.\n\nBy Jose Fernandez Navarro,\nSLIGHTLY MODIFIED BY WILLIAM BARSHOP (LABORATORY OF JAMES A WOHLSCHLEGEL)\nThese modifications allow input of multiple tab delimited peptide-level percolator outputs. These outputs will\nbe merged into a pandas dataframe. Repeated peptides will have the best (1-PEP) used for Fido input.\n-- Also have added in support for All PSMs mode...\n2016-09-12\n\nusage (from galaxy):\ntab_percolator_to_fido.py percolator_output_parent_folder/ $target_fasta $decoy_fasta $graph_output $target_decoy_output <true/false for all_scores> <true/false for psm_level>\n\"\"\"\n\nimport sys\n\nfrom lxml import etree\n\nclass Hit(object):\n \"\"\"Wrapper class\"\"\"\n def __init__(self, isdecoy=False,score=0.0,pep=0.0,q=0.0,p=0.0,name=\"\",proteins=[]):\n self.isdecoy = isdecoy\n self.score = score\n self.pep = pep\n self.q = q\n self.p = p\n self.name = name\n self.proteins = proteins\n\nclass Perco2Fido(object):\n def __init__(self, args):\n in_folder = args[0]\n infiles=[]\n outfile = args[3]\n targetfasta = args[1]\n decoyfasta = args[2]\n tdoutput = args[4]\n all_scores=False\n if \"true\" in args[5] or \"True\" in args[5] or \"TRUE\" in args[5]:\n all_scores=True\n psm_level=False\n if \"psms\" in args[6] or \"PSMS\" in args[6] or \"PSM\" in args[6] or \"psm\" in args[6] or \"true\" in args[6] or \"TRUE\" in args[6]:\n psm_level=True\n\n\n\n targetfasta_seq = SeqIO.parse(open(targetfasta,'rb'),'fasta') \n decoyfasta_seq = SeqIO.parse(open(decoyfasta,'rb'),'fasta') \n\n \n targets=['{',' ']\n decoys=['{',' ']\n for each in targetfasta_seq:\n name, seq=each.id,str(each.seq)\n targets.append(each.id)\n targets.append(\" , \")\n targets=targets[:-1]\n targets.append(\" \")\n targets.append(\"}\")\n\n for each in decoyfasta_seq:\n name, seq=each.id,str(each.seq)\n decoys.append(each.id)\n decoys.append(\" , \")\n decoys=decoys[:-1]\n decoys.append(\" \")\n decoys.append(\"}\")\n\n with open(tdoutput,'w') as output:\n output.write(''.join(targets)+\"\\n\")\n output.write(''.join(decoys)+\"\\n\")\n \n\n\n\n\n\n for root, subFolders, files in os.walk(in_folder):\n for eachfile in files:\n if 'peptides.txt' in eachfile and not psm_level:\n infiles.append(str(os.path.join(root,eachfile)))\n elif 'psms.txt' in eachfile and psm_level:\n infiles.append(str(os.path.join(root,eachfile)))\n\n dataframe_vector=[]\n for eachfile in infiles:\n #newdf=pandas.DataFrame.from_csv(eachfile,sep='\\t',index_col=False)\n newdf=pandas.read_csv(eachfile,sep='\\t',index_col=False)\n if 'decoy' in eachfile:\n decoy=1\n else:\n decoy=0\n newdf.insert(0,'decoy',decoy)\n dataframe_vector.append(newdf)\n \n percolator_results=pandas.concat(dataframe_vector)\n #print dataframe_vector[0].columns.values\n del dataframe_vector\n peptides, pepunique = self.readPercolatorPeptides(percolator_results)\n if all_scores:\n self.writeFidoInputAllScores(peptides, outfile)\n else:\n self.writeFidoInput(pepunique, outfile)\n \n \n def writeFidoInput(self, peptides,fidoOutputFile):#,fidoOutputFile2): # current released fido only outputs one file\n # graph input looks like this:\n # e EEEMPEPK\n # r SW:TRP6_HUMAN\n # r GP:AJ271067_1\n # r GP:AJ271068_1\n # p 0.9849\n # e LLEIIQVR\n # r SW:TRP6_HUMAN\n # r GP:AJ271067_1\n # r GP:AJ271068_1\n # p 0.0\n # e FAFNNKPNLEWNWK\n # r gi|1574458|gb|AAC23247.1|\n # p 0.9750\n\n #{ target1 , target2 , target3 , ... }\n #{ decoy1 , decoy2 , decoy3 , ... 
}\n\n f = open(fidoOutputFile, \"w\")\n for peptide_key in peptides:\n peptide=peptides[peptide_key]\n #if(not peptide.isdecoy):\n prots = peptide.proteins\n pepname = peptide.name\n pepprob = 1 - peptide.pep\n f.write(\"e \" + pepname + \"\\n\")\n for prot in set(prots):\n f.write(\"r \" + prot + \"\\n\")\n f.write(\"p \" + str(pepprob) + \"\\n\") \n f.close()\n\n for peptide_key in peptides:\n peptide=peptides[peptide_key]\n #if(not peptide.isdecoy):\n prots = peptide.proteins\n\n\n '''\n for peptide in peptides:\n #if(not peptide.isdecoy):\n prots = peptide.proteins\n pepname = peptide.name\n pepprob = 1 - peptide.pep\n f.write(\"e \" + pepname + \"\\n\")\n for prot in set(prots):\n f.write(\"r \" + prot + \"\\n\")\n f.write(\"p \" + str(pepprob) + \"\\n\") \n f.close()\n '''\n def writeFidoInputAllScores(self, peptide_list,fidoOutputFile):#,fidoOutputFile2): # current released fido only outputs one file\n # graph input looks like this:\n # e EEEMPEPK\n # r SW:TRP6_HUMAN\n # r GP:AJ271067_1\n # r GP:AJ271068_1\n # p 0.9849\n # e LLEIIQVR\n # r SW:TRP6_HUMAN\n # r GP:AJ271067_1\n # r GP:AJ271068_1\n # p 0.0\n # e FAFNNKPNLEWNWK\n # r gi|1574458|gb|AAC23247.1|\n # p 0.9750\n\n #{ target1 , target2 , target3 , ... }\n #{ decoy1 , decoy2 , decoy3 , ... }\n \n\n f = open(fidoOutputFile, \"w\")\n for peptide in peptide_list:\n #peptide=peptides[peptide_key]\n #if(not peptide.isdecoy):\n prots = peptide.proteins\n pepname = peptide.name\n pepprob = 1 - peptide.pep\n f.write(\"e \" + pepname + \"\\n\")\n for prot in set(prots):\n f.write(\"r \" + prot + \"\\n\")\n f.write(\"p \" + str(pepprob) + \"\\n\") \n f.close()\n\n for peptide in peptide_list:\n prots = peptide.proteins\n\n\n '''\n for peptide in peptides:\n #if(not peptide.isdecoy):\n prots = peptide.proteins\n pepname = peptide.name\n pepprob = 1 - peptide.pep\n f.write(\"e \" + pepname + \"\\n\")\n for prot in set(prots):\n f.write(\"r \" + prot + \"\\n\")\n f.write(\"p \" + str(pepprob) + \"\\n\") \n f.close()\n '''\n######## implement this possibly with a newer fido version:\n# f = open(fidoOutputFile2, \"w\")\n# f.write(\"{ \")\n# for protein in [x for x in proteins if not x.isdecoy]:\n# f.write(printable(protein.name) + \" , \")\n# f.write(\"}\\n\")\n# f.write(\"{ \")\n# for protein in [x for x in proteins if x.isdecoy]:\n# f.write(printable(protein.name) + \" , \")\n# f.write(\"}\\n\")\n# f.close()\n\n\n\n def readPercolatorPSM(self, elems):\n ##process percolator Peptides,ptms and proteins\n percolatorPSMs = []\n percolatorPSMdict = dict()\n for elem in elems.iter(\"{http://per-colator.com/percolator_out/13}psm\"):\n decoy = True \n if (elem.get(\"{http://per-colator.com/percolator_out/13}decoy\") == \"false\"):\n decoy = False\n score = elem.findtext(\"{http://per-colator.com/percolator_out/13}svm_score\")\n pep = elem.findtext(\"{http://per-colator.com/percolator_out/13}pep\")\n q = elem.findtext(\"{http://per-colator.com/percolator_out/13}q_value\")\n p = elem.findtext(\"{http://per-colator.com/percolator_out/13}q_value\")\n name = elem.findall(\"{http://per-colator.com/percolator_out/13}peptide_seq\")[0].get(\"seq\")\n percolatorPSMs.append(Hit(decoy, float(score), float(pep), float(q), float(p), str(name), []))\n #if(not decoy): #REMOVED TO ALLOW REVERSE HITS FOR FIDO!\n if(not percolatorPSMdict.has_key(name)):\n percolatorPSMdict[name] = Hit(decoy, float(score), float(pep), float(q), float(p), str(name), [])\n elif(percolatorPSMdict[name].pep > float(pep)):\n percolatorPSMdict[name].pep = float(pep)\n 
percolatorPSMdict[name].score = float(score)\n percolatorPSMdict[name].q = float(q)\n percolatorPSMdict[name].p = float(p)\n elem.clear()\n while elem.getprevious() is not None:\n del elem.getparent()[0]\n \n return percolatorPSMs, percolatorPSMdict\n\n\n def readPercolatorPeptides(self, perco_dataframe):\n percolatorPeptides = []\n peptideUnique = dict()\n perco_dataframe['protein id']=perco_dataframe['protein id'].astype(str)\n for index,eachitem in perco_dataframe.iterrows():\n #print eachitem,type(eachitem),\"eachitem\"\n decoy = True\n if (eachitem['decoy']==0):\n decoy = False\n score = eachitem['percolator score']\n pep = eachitem['percolator PEP']\n q = eachitem['percolator q-value']\n p = eachitem['percolator q-value']\n name = eachitem['sequence']\n proteins = eachitem['protein id'].split(\",\")\n percolatorPeptides.append(Hit(decoy,float(score),float(pep),float(q),float(p),str(name),proteins))\n\n #if(not decoy): #CHANGED TO ALLOW DECOYS THROUGH THE ANALYSIS\n if(not peptideUnique.has_key(str(name)) ):\n peptideUnique[str(name)] = Hit(decoy,float(score),float(pep),float(q),float(p),str(name),proteins)\n else:\n #print \"Repeated peptide : \" + name + \" Found in Percolator files\"\n #print \"Checking PEP score... and decoy status\"\n if (peptideUnique[str(name)].isdecoy and not decoy) or (decoy and not peptideUnique[str(name)].isdecoy):\n prot_combined=peptideUnique[str(name)].proteins\n prot_combined.extend(proteins)\n #print prot_combined,\" after extension\"\n peptideUnique[str(name)].proteins=prot_combined\n peptideUnique[str(name)].isdecoy=False\n proteins=prot_combined\n\n if peptideUnique[str(name)].pep < pep:\n #print \"Replacing \"+str(name)+\"pep score with\",pep\n peptideUnique[str(name)].pep=float(pep)\n peptideUnique[str(name)].q=float(q)\n peptideUnique[str(name)].p=float(p)\n peptideUnique[str(name)].score=float(score)\n #c=set(peptideUnique[str(name)].proteins).union(set(proteins))\n #d=set(peptideUnique[str(name)].proteins).intersection(set(proteins))\n ##if len(list(set(peptideUnique[str(name)].proteins) - set(proteins)))>0:\n #if len(list(c-d))>0:\n # print \"THIS IS STRANGE-- PROTEIN LISTS WERE DIFFERENT IN LENGTH....\"\n # print list(c-d),\"differences\"\n # print peptideUnique[str(name)].proteins,\"CURRENT\"\n # print proteins,\"TO BE\"\n #peptideUnique[str(name)] = Hit(decoy,float(score),float(pep),float(q),float(p),str(name),proteins)\n else:\n pass \n\n \n return percolatorPeptides,peptideUnique\n\n def readPercolatorProteins(self, elems):\n percolatorProteins = []\n for elem in elems.iter(\"{http://per-colator.com/percolator_out/13}protein\"):\n decoy = True \n if (elem.get(\"{http://per-colator.com/percolator_out/13}decoy\") == \"false\"):\n decoy = False\n pep = elem.findtext(\"{http://per-colator.com/percolator_out/13}pep\")\n q = elem.findtext(\"{http://per-colator.com/percolator_out/13}q_value\")\n p = elem.findtext(\"{http://per-colator.com/percolator_out/13}p_value\")\n name = elem.get(\"{http://per-colator.com/percolator_out/13}protein_id\")\n qmp = elem.findtext(\"{http://per-colator.com/percolator_out/13}q_value_emp\")\n if (not qmp):\n qmp = 0\n percolatorProteins.append(Hit(decoy,float(qmp),float(pep),float(q),float(p),str(name),[]))\n elem.clear()\n while elem.getprevious() is not None:\n del elem.getparent()[0]\n \n return percolatorProteins\n\ndef main():\n convert = Perco2Fido(sys.argv[1:])\n \nif __name__ == '__main__':\n main()\n\n" ]
[ [ "pandas.read_csv", "pandas.concat" ] ]
zackers14/CudaTensorflow
[ "db30da95a71c5a94fc4a8511551d454537847f8d" ]
[ "tensorflow/contrib/tensorrt/test/tf_trt_integration_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Script to test TF-TensorRT integration.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import namedtuple\nimport itertools\nimport warnings\nimport numpy as np\nimport six\n\nfrom tensorflow.contrib import tensorrt as trt\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import importer\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.platform import test\n\nINPUT_NAME = \"input\"\nOUTPUT_NAME = \"output\"\nINPUT_DIMS = [100, 24, 24, 2]\nMODE_FP32 = \"FP32\"\nMODE_FP16 = \"FP16\"\nMODE_INT8 = \"INT8\"\n\nif six.PY2:\n to_bytes = lambda s: s\n to_string = lambda s: s\nelse:\n to_bytes = lambda s: s.encode(\"utf-8\", errors=\"surrogateescape\")\n to_string = lambda s: s.decode(\"utf-8\")\n\n\n# TODO(aaroey): test graph with different dtypes.\ndef GetSingleEngineGraphDef(dtype=dtypes.float32):\n \"\"\"Create a graph containing single segment.\"\"\"\n g = ops.Graph()\n with g.as_default():\n inp = array_ops.placeholder(\n dtype=dtype, shape=[None] + INPUT_DIMS[1:], name=INPUT_NAME)\n with g.device(\"/GPU:0\"):\n conv_filter = constant_op.constant(\n [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],\n name=\"weights\",\n dtype=dtype)\n conv = nn.conv2d(\n input=inp,\n filter=conv_filter,\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n name=\"conv\")\n bias = constant_op.constant(\n [4., 1.5, 2., 3., 5., 7.], name=\"bias\", dtype=dtype)\n added = nn.bias_add(conv, bias, name=\"bias_add\")\n relu = nn.relu(added, \"relu\")\n identity = array_ops.identity(relu, \"identity\")\n pool = nn_ops.max_pool(\n identity, [1, 2, 2, 1], [1, 2, 2, 1], \"VALID\", name=\"max_pool\")\n array_ops.squeeze(pool, name=OUTPUT_NAME)\n return g.as_graph_def()\n\n\n# TODO(aaroey): test graph with different dtypes.\ndef GetMultiEngineGraphDef(dtype=dtypes.float32):\n \"\"\"Create a graph containing multiple segment.\"\"\"\n g = ops.Graph()\n with g.as_default():\n inp = array_ops.placeholder(\n dtype=dtype, shape=[None] + INPUT_DIMS[1:], name=INPUT_NAME)\n with g.device(\"/GPU:0\"):\n conv_filter = constant_op.constant(\n [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],\n name=\"weights\",\n dtype=dtype)\n conv = nn.conv2d(\n input=inp,\n filter=conv_filter,\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n name=\"conv\")\n c1 = constant_op.constant(\n np.random.randn(INPUT_DIMS[0], 12, 
12, 6), dtype=dtype)\n p = conv * c1\n c2 = constant_op.constant(\n np.random.randn(INPUT_DIMS[0], 12, 12, 6), dtype=dtype)\n q = conv / c2\n\n edge = math_ops.sin(q)\n edge /= edge\n r = edge + edge\n\n p -= edge\n q *= edge\n s = p + q\n s -= r\n array_ops.squeeze(s, name=OUTPUT_NAME)\n return g.as_graph_def()\n\n\nTestGraph = namedtuple(\"TestGraph\",\n [\"gdef\", \"num_expected_engines\", \"expected_output_dims\"])\n\nTEST_GRAPHS = {\n \"SingleEngineGraph\":\n TestGraph(\n gdef=GetSingleEngineGraphDef(),\n num_expected_engines=1,\n expected_output_dims=(100, 6, 6, 6)),\n \"MultiEngineGraph\":\n TestGraph(\n gdef=GetMultiEngineGraphDef(),\n num_expected_engines=2,\n expected_output_dims=(100, 12, 12, 6)),\n # TODO(aaroey): add a large complex graph to test.\n}\n\n\nclass TfTrtIntegrationTest(test_util.TensorFlowTestCase):\n \"\"\"Class to test Tensorflow-TensorRT integration.\"\"\"\n\n def setUp(self):\n \"\"\"Setup method.\"\"\"\n super(TfTrtIntegrationTest, self).setUp()\n warnings.simplefilter(\"always\")\n self._input = np.random.random_sample(INPUT_DIMS)\n\n def _GetConfigProto(self,\n use_optimizer,\n precision_mode=None,\n is_dynamic_op=None):\n if use_optimizer:\n rewriter_cfg = rewriter_config_pb2.RewriterConfig()\n rewriter_cfg.optimizers.extend([\"constfold\", \"layout\"])\n custom_op = rewriter_cfg.custom_optimizers.add()\n custom_op.name = \"TensorRTOptimizer\"\n custom_op.parameter_map[\"minimum_segment_size\"].i = 3\n custom_op.parameter_map[\"max_batch_size\"].i = self._input.shape[0]\n custom_op.parameter_map[\"is_dynamic_op\"].b = is_dynamic_op\n custom_op.parameter_map[\"max_workspace_size_bytes\"].i = 1 << 25\n custom_op.parameter_map[\"precision_mode\"].s = to_bytes(precision_mode)\n graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_cfg)\n else:\n graph_options = config_pb2.GraphOptions()\n\n gpu_options = config_pb2.GPUOptions()\n if trt.trt_convert.get_linked_tensorrt_version()[0] == 3:\n gpu_options.per_process_gpu_memory_fraction = 0.50\n\n config = config_pb2.ConfigProto(\n gpu_options=gpu_options, graph_options=graph_options)\n return config\n\n def _RunGraph(self, graph_key, gdef, input_data, config, num_runs=2):\n \"\"\"Run given graphdef multiple times.\"\"\"\n g = ops.Graph()\n with g.as_default():\n inp, out = importer.import_graph_def(\n graph_def=gdef, return_elements=[INPUT_NAME, OUTPUT_NAME], name=\"\")\n inp = inp.outputs[0]\n out = out.outputs[0]\n with self.test_session(\n graph=g, config=config, use_gpu=True, force_gpu=True) as sess:\n val = None\n # Defaults to 2 runs to verify result across multiple runs is same.\n for _ in range(num_runs):\n new_val = sess.run(out, {inp: input_data})\n self.assertEqual(TEST_GRAPHS[graph_key].expected_output_dims,\n new_val.shape)\n if val is not None:\n self.assertAllEqual(new_val, val)\n val = new_val\n return val\n\n # Use real data that is representative of the inference dataset\n # for calibration. 
For this test script it is random data.\n def _RunCalibration(self, graph_key, gdef, input_data, config):\n \"\"\"Run calibration on given graph.\"\"\"\n return self._RunGraph(graph_key, gdef, input_data, config, 30)\n\n def _GetTrtGraph(self, gdef, precision_mode, is_dynamic_op):\n \"\"\"Return trt converted graph.\"\"\"\n return trt.create_inference_graph(\n input_graph_def=gdef,\n outputs=[OUTPUT_NAME],\n max_batch_size=self._input.shape[0],\n max_workspace_size_bytes=1 << 25,\n precision_mode=precision_mode,\n minimum_segment_size=2,\n is_dynamic_op=is_dynamic_op)\n\n def _VerifyGraphDef(self,\n graph_key,\n gdef,\n precision_mode=None,\n is_calibrated=None,\n dynamic_engine=None):\n num_engines = 0\n for n in gdef.node:\n if n.op == \"TRTEngineOp\":\n num_engines += 1\n self.assertNotEqual(to_bytes(\"\"), n.attr[\"serialized_segment\"].s)\n self.assertNotEqual(to_bytes(\"\"), n.attr[\"segment_funcdef_name\"].s)\n self.assertEqual(n.attr[\"precision_mode\"].s, to_bytes(precision_mode))\n self.assertEqual(n.attr[\"static_engine\"].b, not dynamic_engine)\n if precision_mode == MODE_INT8 and is_calibrated:\n self.assertNotEqual(to_bytes(\"\"), n.attr[\"calibration_data\"].s)\n else:\n self.assertEqual(to_bytes(\"\"), n.attr[\"calibration_data\"].s)\n if precision_mode is None:\n self.assertEqual(num_engines, 0)\n else:\n self.assertEqual(num_engines,\n TEST_GRAPHS[graph_key].num_expected_engines)\n\n def _RunTest(self, graph_key, use_optimizer, precision_mode,\n dynamic_infer_engine, dynamic_calib_engine):\n assert precision_mode in [MODE_FP32, MODE_FP16, MODE_INT8]\n input_gdef = TEST_GRAPHS[graph_key].gdef\n self._VerifyGraphDef(graph_key, input_gdef)\n\n # Get reference result without running trt.\n config_no_trt = self._GetConfigProto(False)\n print(\"Running original graph w/o trt, config:\\n%s\" % str(config_no_trt))\n ref_result = self._RunGraph(graph_key, input_gdef, self._input,\n config_no_trt)\n\n # Run calibration if necessary.\n if precision_mode == MODE_INT8:\n\n calib_config = self._GetConfigProto(use_optimizer, precision_mode,\n dynamic_calib_engine)\n print(\"Running calibration graph, config:\\n%s\" % str(calib_config))\n if use_optimizer:\n self.assertTrue(False)\n # TODO(aaroey): uncomment this and get infer_gdef when this mode is\n # supported.\n # result = self._RunCalibration(graph_key, input_gdef, self._input,\n # calib_config)\n else:\n calib_gdef = self._GetTrtGraph(input_gdef, precision_mode,\n dynamic_calib_engine)\n self._VerifyGraphDef(graph_key, calib_gdef, precision_mode, False,\n dynamic_calib_engine)\n result = self._RunCalibration(graph_key, calib_gdef, self._input,\n calib_config)\n infer_gdef = trt.calib_graph_to_infer_graph(calib_gdef)\n self._VerifyGraphDef(graph_key, infer_gdef, precision_mode, True,\n dynamic_calib_engine)\n self.assertAllClose(ref_result, result, rtol=1.e-03)\n else:\n infer_gdef = input_gdef\n\n # Run inference.\n infer_config = self._GetConfigProto(use_optimizer, precision_mode,\n dynamic_infer_engine)\n print(\"Running final inference graph, config:\\n%s\" % str(infer_config))\n if use_optimizer:\n result = self._RunGraph(graph_key, infer_gdef, self._input, infer_config)\n else:\n trt_infer_gdef = self._GetTrtGraph(infer_gdef, precision_mode,\n dynamic_infer_engine)\n self._VerifyGraphDef(graph_key, trt_infer_gdef, precision_mode, True,\n dynamic_infer_engine)\n result = self._RunGraph(graph_key, trt_infer_gdef, self._input,\n infer_config)\n self.assertAllClose(ref_result, result, rtol=1.e-03)\n\n def testIdempotence(self):\n 
# Test that applying tensorrt optimizer or offline conversion tools multiple\n # times to the same graph will result in same graph.\n #\n # TODO(aaroey): currently the conversion is not deterministic, this is\n # mainly because during tensorflow::ConvertGraphDefToGraph(), the graph uses\n # EdgeSet which use a map keyed by Edge*, so the order of input/output edges\n # of a node is nondeterministic, thus the order for segmenter to contract\n # edges is nondeterministic. Need to evaluate whether we should fix this.\n pass\n\n\ndef GetTests():\n\n def _GetTest(g, u, p, i, c):\n\n def _Test(self):\n print(\"Running test with parameters: graph_key=%s, use_optimizer=%s, \"\n \"precision_mode=%s, dynamic_infer_engine=%s, \"\n \"dynamic_calib_engine=%s\" % (g, u, p, i, c))\n self._RunTest(g, u, p, i, c)\n\n return _Test\n\n use_optimizer_options = [False, True]\n precision_mode_options = [MODE_FP32, MODE_FP16, MODE_INT8]\n dynamic_infer_engine_options = [False, True]\n dynamic_calib_engine_options = [False, True]\n for (graph_key, use_optimizer, precision_mode,\n dynamic_infer_engine, dynamic_calib_engine) in itertools.product(\n TEST_GRAPHS, use_optimizer_options, precision_mode_options,\n dynamic_infer_engine_options, dynamic_calib_engine_options):\n if precision_mode == MODE_INT8:\n if not dynamic_calib_engine and dynamic_infer_engine:\n # TODO(aaroey): test this case, the conversion from static calibration\n # engine to dynamic inference engine should be a noop.\n continue\n if use_optimizer:\n # TODO(aaroey): if use_optimizer is True we need to get the inference\n # graphdef using custom python wrapper class, which is not currently\n # supported yet.\n continue\n if not dynamic_calib_engine:\n # TODO(aaroey): construction of static calibration engine is not\n # supported yet.\n continue\n if dynamic_calib_engine and not dynamic_infer_engine:\n # TODO(aaroey): construction of static inference engine using dynamic\n # calibration engine is not supported yet.\n continue\n else: # In non int8 mode.\n if dynamic_calib_engine:\n # dynamic_calib_engine doesn't affect non-int8 modes, so just let\n # related tests run once on dynamic_calib_engine=False.\n continue\n yield _GetTest(graph_key, use_optimizer, precision_mode,\n dynamic_infer_engine, dynamic_calib_engine)\n\n\nif __name__ == \"__main__\":\n if trt.is_tensorrt_enabled():\n for index, t in enumerate(GetTests()):\n setattr(TfTrtIntegrationTest, \"testTfTRT_\" + str(index), t)\n test.main()\n" ]
[ [ "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.python.ops.array_ops.identity", "tensorflow.contrib.tensorrt.create_inference_graph", "tensorflow.contrib.tensorrt.trt_convert.get_linked_tensorrt_version", "tensorflow.python.platform.test.main", "tensorflow.python.ops.nn.relu", "tensorflow.python.ops.nn.bias_add", "tensorflow.contrib.tensorrt.calib_graph_to_infer_graph", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.core.protobuf.config_pb2.GraphOptions", "tensorflow.contrib.tensorrt.is_tensorrt_enabled", "tensorflow.python.framework.importer.import_graph_def", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.nn.conv2d", "tensorflow.python.ops.array_ops.squeeze", "numpy.random.randn", "numpy.random.random_sample", "tensorflow.core.protobuf.rewriter_config_pb2.RewriterConfig", "tensorflow.python.ops.math_ops.sin", "tensorflow.core.protobuf.config_pb2.GPUOptions", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.ops.nn_ops.max_pool" ] ]
machines-in-motion/dg_tools
[ "1c8d0c09da5185113e0dd69e6b59cd5c8270afa6" ]
[ "tests/test_memory_replay.py" ]
[ "import unittest\nimport numpy as np\n\nfrom dg_tools.dynamic_graph.dg_tools_entities import MemoryReplay\n\nclass TestMemoryReplay(unittest.TestCase):\n def test_basic(self):\n a = np.random.rand(2, 3)\n entity = MemoryReplay('')\n\n # Init from the matrix.\n entity.init(a)\n\n # Replay the memory.\n entity.sout.recompute(10)\n np.testing.assert_array_equal(entity.sout.value, a[0])\n\n entity.sout.recompute(11)\n np.testing.assert_array_equal(entity.sout.value, a[1])\n\n # The entity should keep repeating the last value.\n entity.sout.recompute(12)\n np.testing.assert_array_equal(entity.sout.value, a[1])\n\n # Check if rewinding\n entity.rewind()\n entity.sout.recompute(13)\n np.testing.assert_array_equal(entity.sout.value, a[0])\n" ]
[ [ "numpy.testing.assert_array_equal", "numpy.random.rand" ] ]
FarisHijazi/deep-learning-v2-pytorch
[ "543643095b2659ec97402a0309e0e5b90f8e003b" ]
[ "project-bikesharing/my_answers.py" ]
[ "import numpy as np\n\n\ndef sigmoid(x):\n return 1.0 / (1.0 + np.exp(-x))\n\n\ndef relu(x):\n return np.maximum(x, 0.0)\n\n\ndebug = True\n\n\nclass NeuralNetwork(object):\n def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Initialize weights\n self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes ** -0.5,\n (self.input_nodes + 1, self.hidden_nodes + 1))\n\n self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes ** -0.5,\n (self.hidden_nodes + 1, self.output_nodes))\n self.lr = learning_rate\n\n #### : Set self.activation_function to your implemented sigmoid function ####\n\n # this contains both the activation function and its derivative\n self.activation_function = sigmoid # Replace 0 with your sigmoid calculation.\n\n def train(self, features, targets):\n ''' Train the network on batch of features and targets.\n\n Arguments\n ---------\n\n features: 2D array, each row is one data record, each column is a feature\n targets: 1D array of target values\n '''\n\n if debug:\n print(\"features.shape=\", features.shape)\n print(\"weights_input_to_hidden.shape=\", self.weights_input_to_hidden.shape)\n print(\"weights_hidden_to_output.shape=\", self.weights_hidden_to_output.shape)\n\n features = np.hstack((np.ones((features.shape[0], 1)), features))\n n_records = features.shape[0]\n\n delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)\n delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)\n for X, y in zip(features, targets):\n final_outputs, hidden_outputs = self.forward_pass_train(X)\n # Implement the backproagation function below\n delta_weights_i_h, delta_weights_h_o = self.backpropagation(final_outputs, hidden_outputs, X, y,\n delta_weights_i_h, delta_weights_h_o)\n\n self.update_weights(delta_weights_i_h, delta_weights_h_o, n_records)\n\n def forward_pass_train(self, X):\n ''' Implement forward pass here\n\n Arguments\n ---------\n X: features batch\n '''\n ### Forward pass ###\n\n # : Hidden layer\n # signals into hidden layer\n hidden_inputs = np.dot(X, self.weights_input_to_hidden)\n hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer\n\n # : Output layer -\n # signals into final output layer\n final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)\n final_outputs = final_inputs # signals from final output layer\n\n return final_outputs, hidden_outputs\n\n def backpropagation(self, final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o):\n ''' Implement backpropagation\n\n Arguments\n ---------\n final_outputs: output from forward pass\n y: target (i.e. 
label) batch\n delta_weights_i_h: change in weights from input to hidden layers\n delta_weights_h_o: change in weights from hidden to output layers\n '''\n ## # this code is from the excercises\n ## output_error_term = error * sigmoid_prime(h)\n ## hidden_error = np.dot(output_error_term, weights_hidden_output)\n ## hidden_error_term = hidden_error * sigmoid_prime(hidden_input)\n #\n # >> output_error_term ()\n # >> hidden_error_term (2,)\n # >> correction_i_h: (6, 2)\n # >> correction_h_o: (2,)\n\n ### Backward pass ###\n\n # : Output error\n error = y - final_outputs # Output layer error is the difference between desired target and actual output.\n\n # in this line only, I had a look at this solution: https://github.com/absalomhr/Predicting-Bike-Sharing-Patterns/blob/master/my_answers.py\n hidden_error = np.dot(self.weights_hidden_to_output, error)\n\n # : Backpropagated error terms -\n # f_prime_final = (final_outputs * (1 - final_outputs))\n output_error_term = error\n\n # : Calculate the hidden layer's contribution to the error\n f_prime_hidden = (hidden_outputs * (1 - hidden_outputs))\n hidden_error_term = hidden_error * f_prime_hidden\n\n # Weight step (input to hidden)\n delta_weights_i_h += hidden_error_term * X[:, None]\n # Weight step (hidden to output)\n delta_weights_h_o += output_error_term * hidden_outputs[:, None]\n return delta_weights_i_h, delta_weights_h_o\n\n def update_weights(self, delta_weights_i_h, delta_weights_h_o, n_records):\n ''' Update weights on gradient descent step\n\n Arguments\n ---------\n delta_weights_i_h: change in weights from input to hidden layers\n delta_weights_h_o: change in weights from hidden to output layers\n n_records: number of records\n\n '''\n self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step\n self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update input-to-hidden weights with gradient descent step\n\n def run(self, features):\n ''' Run a forward pass through the network with input features\n\n Arguments\n ---------\n features: 1D array of feature values\n '''\n\n # #### Implement the forward pass here ####\n # exactly the same as the forward_pass_train()\n final_outputs, hidden_inputs = self.forward_pass_train(features)\n return final_outputs\n\n\n#########################################################\n# Set your hyperparameters here\n##########################################################\niterations = 1000\nlearning_rate = 0.5\nhidden_nodes = 25\noutput_nodes = 1\n" ]
[ [ "numpy.random.normal", "numpy.dot", "numpy.zeros", "numpy.ones", "numpy.exp", "numpy.maximum" ] ]
wizofe/urus-mri-recon
[ "eab8e48dca31d2b936ce69ccc251ec5a4a10facc" ]
[ "main.py" ]
[ "# models\nimport shutil\nimport sys\n\nfrom tensorboardX import SummaryWriter\nfrom torch import optim\n\nfrom utils.myloss import *\nimport torch.nn.modules.loss as Loss\nfrom unet import UNet\nfrom utils import *\nfrom utils.cmplxBatchNorm import magnitude, normalizeComplexBatch_byMagnitudeOnly\nfrom utils.dataset import *\n\n#\n# set seed points\nseed_num = 888\n\ntorch.manual_seed(seed_num)\ntorch.cuda.manual_seed_all(seed_num)\nrandom.seed(seed_num)\nnp.random.seed(seed_num)\nparams = Parameters()\n\n####################################\n#\n# Create Data Generators\n#\n####################################\n\ntraining_DG, validation_DG, params = getDatasetGenerators(params)\n\n####################################\n#\n# Create Model\n#\n####################################\n\nnet = UNet(params.n_channels, 1)\n\nif params.multi_GPU:\n net = torch.nn.DataParallel(net, device_ids=params.device_ids).cuda()\nelse:\n net.to(params.device)\n\n####################################\n#\n# initializations\n#\n####################################\n\nif not os.path.exists(params.model_save_dir):\n os.makedirs(params.model_save_dir)\n\nif not os.path.exists(params.tensorboard_dir):\n os.makedirs(params.tensorboard_dir)\n\nwriter = SummaryWriter(params.tensorboard_dir)\n\n\ndef train(net):\n ###########################################\n #\n # INITIALIZATIONS\n #\n ############################################\n optimizer = optim.Adam(net.parameters(), lr=params.args.lr)\n # optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9)\n\n LOSS = list()\n SSIMLOSS = list()\n ssimCriterion = SSIM()\n mseCriterion = Loss.MSELoss()\n kscCriterion = KspaceConsistency()\n tvCriterion = TotalVariations()\n\n lossCrit = mseCriterion\n vld_MSE_LOSS = list()\n vld_SSIM_LOSS = list()\n\n vld_MSE_LOSS_in = list()\n vld_SSIM_LOSS_in = list()\n vi = 0\n i = 0\n bt = 0\n\n ###########################################\n #\n # LOAD LATEST (or SPECIFIC) MODEL\n #\n ############################################\n models = os.listdir(params.model_save_dir);\n s_epoch = 0\n\n def load_model(epoch):\n print('loading model at epoch ' + str(epoch))\n net.load_state_dict(torch.load(params.model_save_dir + models[0][0:11] + str(epoch) + '.pth')['state_dict'])\n optimizer.load_state_dict(\n torch.load(params.model_save_dir + models[0][0:11] + str(epoch) + '.pth')['optimizer'])\n LOSS = torch.load(params.model_save_dir + models[0][0:11] + str(epoch) + '.pth')['loss']\n\n print(len(models))\n if len(models) > 0:\n if s_epoch == -1:\n s_epoch = max([int(epo[11:-4]) for epo in models[:]])\n print(\"Loading model ...\")\n load_model(s_epoch)\n s_epoch = s_epoch - 1\n print(\"Model loaded !\")\n\n for epoch in range(s_epoch, params.epochs):\n print('epoch {}/{}...'.format(epoch + 1, params.epochs))\n\n adjust_learning_rate(optimizer, epoch)\n\n ###########################################\n #\n # Training\n #\n ############################################\n l = 0\n itt = 0\n TAG = 'Training'\n MAX = list()\n if not params.Validation_Only:\n for local_batch, local_labels, sliceID, orig_size, usr in training_DG:\n\n X = Variable(torch.FloatTensor(local_batch.float())).to(params.device)\n y = Variable(torch.FloatTensor(local_labels.float())).to(params.device)\n\n input_mag = normalizeBatch_torch(get_magnitude(X))\n LOST_mag = normalizeBatch_torch(y[:, :, :, :, 0])\n\n if params.complex_net:\n X = normalizeComplexBatch_byMagnitudeOnly(X, False)\n y = normalizeComplexBatch_byMagnitudeOnly(y, True)\n else:\n X = get_magnitude(X)\n y = y[:, 
:, :, :, 0]\n X = normalizeBatch_torch(X)\n y = normalizeBatch_torch(y)\n\n y_pred = net(X)\n\n if params.complex_net:\n loss = lossCrit(magnitude(y_pred).squeeze(1), y[:, :, :, :, 0].squeeze(1))\n else:\n loss = lossCrit(y_pred, y)\n simloss = ssimCriterion(y_pred, y)\n\n if params.complex_net:\n loss = lossCrit(magnitude(y_pred).squeeze(1), y[:, :, :, :, 0].squeeze(1))\n simloss = ssimCriterion(magnitude(y_pred), y[:, :, :, :, 0])\n else:\n loss = lossCrit(y_pred, y)\n simloss = ssimCriterion(y_pred, y)\n\n LOSS.append(loss.cpu().data.numpy())\n SSIMLOSS.append(simloss.cpu().data.numpy())\n\n l += loss.data[0]\n\n optimizer.zero_grad()\n loss.backward()\n i += 1\n optimizer.step()\n\n inloss = mseCriterion(input_mag, LOST_mag)\n\n print('Epoch: {0} - {1:.3f}%'.format(epoch + 1, 100 * (itt * params.batch_size) / len(\n training_DG.dataset.input_IDs))\n + ' \\tIter: ' + str(i)\n + '\\tLoss: {0:.6f}'.format(loss.data[0])\n + '\\tInputLoss: {0:.6f}'.format(inloss.data[0]))\n itt += 1\n\n if itt % 100 == 0:\n is_best = 0\n save_checkpoint({\n 'epoch': epoch + 1,\n 'loss': LOSS,\n 'arch': 'recoNet_Model1',\n 'state_dict': net.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }, is_best, filename=params.model_save_dir + 'MODEL_EPOCH{}.pth'.format(epoch + 1))\n\n print('Model Saved!')\n avg_loss = params.batch_size * l / len(training_DG.dataset.input_IDs)\n print('Total Loss : {0:.6f} \\t Avg. Loss {1:.6f}'.format(l, avg_loss))\n\n else:\n load_model(epoch + 1)\n #####################################\n #\n # Validation\n #\n #####################################\n\n vl = 0\n vitt = 0\n vld_mse = 0\n vld_ssim = 0\n vld_psnr = 0\n vld_mse_in = 0\n vld_ssim_in = 0\n vld_psnr_in = 0\n\n TAG = 'Validation'\n with torch.no_grad():\n for local_batch, local_labels, sliceID, orig_size, usr in validation_DG:\n X = Variable(torch.FloatTensor(local_batch.float())).to(params.device)\n y = Variable(torch.FloatTensor(local_labels.float())).to(params.device)\n\n input_mag = normalizeBatch_torch(get_magnitude(X))\n LOST_mag = normalizeBatch_torch(y[:, :, :, :, 0])\n\n if params.complex_net:\n X = normalizeComplexBatch_byMagnitudeOnly(X, False)\n y = normalizeComplexBatch_byMagnitudeOnly(y, True)\n else:\n X = get_magnitude(X)\n y = y[:, :, :, :, 0]\n X = normalizeBatch_torch(X)\n y = normalizeBatch_torch(y)\n\n y_pred = net(X)\n\n if params.complex_net:\n mseloss = mseCriterion(magnitude(y_pred).squeeze(1), y[:, :, :, :, 0].squeeze(1))\n ssimloss = ssimCriterion(magnitude(y_pred), y[:, :, :, :, 0])\n\n else:\n mseloss = mseCriterion(y_pred, y)\n ssimloss = ssimCriterion(y_pred, y)\n\n\n mseloss_in = mseCriterion(input_mag, LOST_mag)\n ssimloss_in = ssimCriterion(input_mag, LOST_mag)\n\n\n vld_MSE_LOSS.append(mseloss.cpu().data.numpy())\n vld_SSIM_LOSS.append(ssimloss.cpu().data.numpy())\n\n vld_MSE_LOSS_in.append(mseloss_in.cpu().data.numpy())\n vld_SSIM_LOSS_in.append(ssimloss_in.cpu().data.numpy())\n\n vld_mse += mseloss.data[0]\n vld_ssim += ssimloss.data[0]\n\n vld_mse_in += mseloss_in.data[0]\n vld_ssim_in += ssimloss_in.data[0]\n\n vi += 1\n vitt += 1\n\n if params.complex_net:\n inloss = mseCriterion(magnitude(X).squeeze(1), y[:, :, :, :, 0].squeeze(1))\n else:\n inloss = mseCriterion(X, y)\n\n print('Epoch: {0} - {1:.3f}%'.format(epoch + 1, 100 * (vitt * params.batch_size) / len(\n validation_DG.dataset.input_IDs))\n + ' \\tIter: ' + str(vi)\n + '\\tSME: {0:.6f}'.format(mseloss.data[0])\n + '\\tSSIM: {0:.6f}'.format(ssimloss.data[0])\n + '\\tInputLoss: {0:.6f}'.format(inloss.data[0]))\n\n avg_factor 
= params.batch_size / len(validation_DG.dataset.input_IDs)\n print('Avg. MSE : {0:.6f}'.format(vld_mse * avg_factor)\n + '\\tAvg. SSIM : {0:.6f}'.format(vld_ssim * avg_factor)\n + '\\tAvg. PSNR : {0:.6f}'.format(vld_psnr * avg_factor)\n + 'Avg. Input_MSE : {0:.6f}'.format(vld_mse_in * avg_factor)\n + '\\tAvg. Input_SSIM : {0:.6f}'.format(vld_ssim_in * avg_factor)\n + '\\tAvg. Input_PSNR : {0:.6f}'.format(vld_psnr_in * avg_factor)\n )\n\n writer.close()\n\n\n\ndef get_magnitude(input):\n return (input[:, :, :, :, 0] ** 2 + input[:, :, :, :, 1] ** 2) ** 0.5\n\ndef normalizeBatch_torch(p, dims=[2, 3]):\n ''' normalize each slice alone'''\n if torch.std(p) == 0:\n raise ZeroDivisionError\n shape = p.shape\n if p.ndimension() == 4:\n pv = p.reshape([shape[0], shape[1], shape[2] * shape[3]])\n else:\n raise NotImplementedError\n\n mean = pv.mean(dim=2, keepdim=True).unsqueeze(p.ndimension() - 1)\n std = pv.std(dim=2, keepdim=True).unsqueeze(p.ndimension() - 1)\n\n return (p - mean) / std\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'model_best.pth.tar')\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 20 epochs\"\"\"\n lr = params.args.lr * (0.1 ** (epoch // 20))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ntry:\n train(net)\n\nexcept KeyboardInterrupt:\n print('Interrupted')\n torch.save(net.state_dict(), 'MODEL_INTERRUPTED.pth')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n" ]
[ [ "torch.nn.modules.loss.MSELoss" ] ]
feizy/deeplab
[ "64bb77dd256802306d86841889bba67820f226de" ]
[ "tools/train_voc.py" ]
[ "# -*- coding: utf-8 -*-\n# @Time : 2018/9/26 15:48\n# @Author : HLin\n# @Email : [email protected]\n# @File : train_voc.py\n# @Software: PyCharm\n\nimport os\nimport pprint\nimport logging\nimport argparse\nimport torch\nimport torch.nn as nn\nfrom tqdm import tqdm\nimport numpy as np\nfrom math import ceil\nfrom distutils.version import LooseVersion\nfrom tensorboardX import SummaryWriter\n\nimport sys\nsys.path.append(os.path.abspath('..'))\nfrom graphs.models.sync_batchnorm.replicate import patch_replication_callback\nfrom utils.data_utils import calculate_weigths_labels\nfrom utils import Eval\nfrom graphs.models.decoder import DeepLab\nfrom datasets.NYU_Dataset import Nyud2_DataLoader\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Unsupported value encountered.')\n\nclass Trainer():\n def __init__(self, args, cuda=None):\n self.args = args\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = self.args.gpu\n self.cuda = cuda and torch.cuda.is_available()\n self.device = torch.device('cuda' if self.cuda else 'cpu')\n\n self.current_MIoU = 0\n self.best_MIou = 0\n self.current_epoch = 0\n self.current_iter = 0\n\n # set TensorboardX\n self.writer = SummaryWriter()\n\n # Metric definition\n self.Eval = Eval(self.args.num_classes)\n\n # loss definition\n if self.args.loss_weight_file is not None:\n classes_weights_path = os.path.join(self.args.loss_weights_dir, self.args.loss_weight_file)\n print(classes_weights_path)\n if not os.path.isfile(classes_weights_path):\n logger.info('calculating class weights...')\n calculate_weigths_labels(self.args)\n class_weights = np.load(classes_weights_path)\n pprint.pprint(class_weights)\n weight = torch.from_numpy(class_weights.astype(np.float32))\n logger.info('loading class weights successfully!')\n else:\n weight = None\n\n self.loss = nn.CrossEntropyLoss(weight=weight, ignore_index=255)\n self.loss.to(self.device)\n\n # model\n self.model = DeepLab(output_stride=self.args.output_stride,\n class_num=self.args.num_classes,\n pretrained=self.args.imagenet_pretrained and self.args.pretrained_ckpt_file==None,\n bn_momentum=self.args.bn_momentum,\n freeze_bn=self.args.freeze_bn)\n self.model = nn.DataParallel(self.model, device_ids=range(ceil(len(self.args.gpu)/2)))\n patch_replication_callback(self.model)\n self.model.to(self.device)\n\n self.optimizer = torch.optim.SGD(\n params=[\n {\n \"params\": self.get_params(self.model.module, key=\"1x\"),\n \"lr\": self.args.lr,\n },\n {\n \"params\": self.get_params(self.model.module, key=\"10x\"),\n \"lr\": 10 * self.args.lr,\n },\n ],\n momentum=self.args.momentum,\n # dampening=self.args.dampening,\n weight_decay=self.args.weight_decay,\n # nesterov=self.args.nesterov\n )\n # dataloader\n self.dataloader = Nyud2_DataLoader(self.args)\n self.epoch_num = ceil(self.args.iter_max / self.dataloader.train_iterations)\n\n def main(self):\n # display args details\n logger.info(\"Global configuration as follows:\")\n for key, val in vars(self.args).items():\n logger.info(\"{:16} {}\".format(key, val))\n\n # choose cuda\n if self.cuda:\n # torch.cuda.set_device(4)\n current_device = torch.cuda.current_device()\n logger.info(\"This model will run on {}\".format(torch.cuda.get_device_name(current_device)))\n else:\n logger.info(\"This model will run on CPU\")\n\n # load pretrained checkpoint\n if self.args.pretrained_ckpt_file is not None:\n 
self.load_checkpoint(self.args.pretrained_ckpt_file)\n\n # train\n self.train()\n\n self.writer.close()\n\n def train(self):\n for epoch in tqdm(range(self.current_epoch, self.epoch_num),\n desc=\"Total {} epochs\".format(self.epoch_num)):\n self.current_epoch = epoch\n # self.scheduler.step(epoch)\n self.train_one_epoch()\n\n # validate\n if self.args.validation == True:\n PA, MPA, MIoU, FWIoU = self.validate()\n self.writer.add_scalar('PA', PA, self.current_epoch)\n self.writer.add_scalar('MPA', MPA, self.current_epoch)\n self.writer.add_scalar('MIoU', MIoU, self.current_epoch)\n self.writer.add_scalar('FWIoU', FWIoU, self.current_epoch)\n\n self.current_MIoU = MIoU\n is_best = MIoU > self.best_MIou\n if is_best:\n self.best_MIou = MIoU\n self.save_checkpoint(is_best, train_id+'best.pth')\n\n state = {\n 'epoch': self.current_epoch + 1,\n 'iteration': self.current_iter,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'best_MIou': self.current_MIoU\n }\n logger.info(\"=>saving the final checkpoint...\")\n torch.save(state, train_id + 'final.pth')\n\n def train_one_epoch(self):\n tqdm_epoch = tqdm(self.dataloader.train_loader, total=self.dataloader.train_iterations,\n desc=\"Train Epoch-{}-\".format(self.current_epoch+1))\n logger.info(\"Training one epoch...\")\n self.Eval.reset()\n # Set the model to be in training mode (for batchnorm and dropout)\n\n train_loss = []\n self.model.train()\n # Initialize your average meters\n\n batch_idx = 0\n for x, y, depth in tqdm_epoch:\n self.poly_lr_scheduler(\n optimizer=self.optimizer,\n init_lr=self.args.lr,\n iter=self.current_iter,\n max_iter=self.args.iter_max,\n power=self.args.poly_power,\n )\n if self.current_iter >= self.args.iter_max:\n logger.info(\"iteration arrive {}!\".format(self.args.iter_max))\n break\n self.writer.add_scalar('learning_rate', self.optimizer.param_groups[0][\"lr\"], self.current_iter)\n self.writer.add_scalar('learning_rate_10x', self.optimizer.param_groups[1][\"lr\"], self.current_iter)\n\n # y.to(torch.long)\n if self.cuda:\n x, y, depth = x.to(self.device), y.to(device=self.device, dtype=torch.long), depth.to(self.device)\n\n self.optimizer.zero_grad()\n\n # model\n pred = self.model(x,depth)\n # logger.info(\"pre:{}\".format(pred.data.cpu().numpy()))\n y = torch.squeeze(y, 1)\n # logger.info(\"y:{}\".format(y.cpu().numpy()))\n # pred_s = F.softmax(pred, dim=1)\n # loss\n cur_loss = self.loss(pred, y)\n\n # optimizer\n cur_loss.backward()\n self.optimizer.step()\n\n train_loss.append(cur_loss.item())\n\n if batch_idx % 50 == 0:\n logger.info(\"The train loss of epoch{}-batch-{}:{}\".format(self.current_epoch,\n batch_idx, cur_loss.item()))\n batch_idx += 1\n\n self.current_iter += 1\n\n # print(cur_loss)\n if np.isnan(float(cur_loss.item())):\n raise ValueError('Loss is nan during training...')\n\n pred = pred.data.cpu().numpy()\n label = y.cpu().numpy()\n argpred = np.argmax(pred, axis=1)\n self.Eval.add_batch(label, argpred)\n\n PA = self.Eval.Pixel_Accuracy()\n MPA = self.Eval.Mean_Pixel_Accuracy()\n MIoU = self.Eval.Mean_Intersection_over_Union()\n FWIoU = self.Eval.Frequency_Weighted_Intersection_over_Union()\n\n logger.info('Epoch:{}, train PA1:{}, MPA1:{}, MIoU1:{}, FWIoU1:{}'.format(self.current_epoch, PA, MPA,\n MIoU, FWIoU))\n\n\n tr_loss = sum(train_loss)/len(train_loss)\n self.writer.add_scalar('train_loss', tr_loss, self.current_epoch)\n tqdm.write(\"The average loss of train epoch-{}-:{}\".format(self.current_epoch, tr_loss))\n tqdm_epoch.close()\n\n def 
validate(self):\n logger.info('validating one epoch...')\n self.Eval.reset()\n with torch.no_grad():\n tqdm_batch = tqdm(self.dataloader.valid_loader, total=self.dataloader.valid_iterations,\n desc=\"Val Epoch-{}-\".format(self.current_epoch + 1))\n val_loss = []\n preds = []\n lab = []\n self.model.eval()\n\n for x, y, id in tqdm_batch:\n # y.to(torch.long)\n if self.cuda:\n x, y = x.to(self.device), y.to(device=self.device, dtype=torch.long)\n\n # model\n pred = self.model(x)\n y = torch.squeeze(y, 1)\n\n cur_loss = self.loss(pred, y)\n if np.isnan(float(cur_loss.item())):\n raise ValueError('Loss is nan during validating...')\n val_loss.append(cur_loss.item())\n\n # if self.args.store_result == True and self.current_epoch == 20:\n # for i in range(len(id)):\n # result = Image.fromarray(np.asarray(argpred, dtype=np.uint8)[i], mode='P')\n # # logger.info(\"before:{}\".format(result.mode))\n # result = result.convert(\"RGB\")\n # # logger.info(\"after:{}\".format(result.mode))\n # # logger.info(\"shape:{}\".format(result.getpixel((1,1))))\n # result.save(self.args.result_filepath + id[i] + '.png')\n\n pred = pred.data.cpu().numpy()\n label = y.cpu().numpy()\n argpred = np.argmax(pred, axis=1)\n\n self.Eval.add_batch(label, argpred)\n\n PA = self.Eval.Pixel_Accuracy()\n MPA = self.Eval.Mean_Pixel_Accuracy()\n MIoU = self.Eval.Mean_Intersection_over_Union()\n FWIoU = self.Eval.Frequency_Weighted_Intersection_over_Union()\n\n logger.info('Epoch:{}, validation PA1:{}, MPA1:{}, MIoU1:{}, FWIoU1:{}'.format(self.current_epoch, PA, MPA,\n MIoU, FWIoU))\n v_loss = sum(val_loss) / len(val_loss)\n logger.info(\"The average loss of val loss:{}\".format(v_loss))\n self.writer.add_scalar('val_loss', v_loss, self.current_epoch)\n\n # logger.info(score)\n tqdm_batch.close()\n\n return PA, MPA, MIoU, FWIoU\n\n def save_checkpoint(self, is_best, filename=None):\n \"\"\"\n Save checkpoint if a new best is achieved\n :param state:\n :param is_best:\n :param filepath:\n :return:\n \"\"\"\n filename = os.path.join(self.args.checkpoint_dir, filename)\n state = {\n 'epoch': self.current_epoch + 1,\n 'iteration': self.current_iter,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'best_MIou':self.best_MIou\n }\n if is_best:\n logger.info(\"=>saving a new best checkpoint...\")\n torch.save(state, filename)\n else:\n logger.info(\"=> The MIoU of val does't improve.\")\n\n def load_checkpoint(self, filename):\n filename = os.path.join(self.args.checkpoint_dir, filename)\n try:\n logger.info(\"Loading checkpoint '{}'\".format(filename))\n checkpoint = torch.load(filename)\n\n # self.current_epoch = checkpoint['epoch']\n # self.current_iter = checkpoint['iteration']\n self.model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.best_MIou = checkpoint['best_MIou']\n\n logger.info(\"Checkpoint loaded successfully from '{}' at (epoch {}) at (iteration {},MIoU:{})\\n\"\n .format(self.args.checkpoint_dir, checkpoint['epoch'], checkpoint['iteration'],\n checkpoint['best_MIou']))\n except OSError as e:\n logger.info(\"No checkpoint exists from '{}'. 
Skipping...\".format(self.args.checkpoint_dir))\n logger.info(\"**First time to train**\")\n\n def get_params(self, model, key):\n # For Dilated CNN\n if key == \"1x\":\n for m in model.named_modules():\n if \"Resnet101\" in m[0]:\n for p in m[1].parameters():\n yield p\n #\n if key == \"10x\":\n for m in model.named_modules():\n if \"encoder\" in m[0] or \"decoder\" in m[0]:\n for p in m[1].parameters():\n yield p\n\n\n def poly_lr_scheduler(self, optimizer, init_lr, iter, max_iter, power):\n new_lr = init_lr * (1 - float(iter) / max_iter) ** power\n optimizer.param_groups[0][\"lr\"] = new_lr\n optimizer.param_groups[1][\"lr\"] = 10 * new_lr\n\n\n\n\n\nif __name__ == '__main__':\n assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), 'PyTorch>=0.4.0 is required'\n\n arg_parser = argparse.ArgumentParser()\n\n # Path related arguments\n arg_parser.add_argument('--data_root_path', type=str, default=\"/home/feizy/datasets/nyuv2/\",\n help=\"the root path of dataset\")\n arg_parser.add_argument('--checkpoint_dir', default=os.path.abspath('..') + \"/checkpoints/\",\n help=\"the path of ckpt file\")\n arg_parser.add_argument('--result_filepath', default=\"/home/feizy/PycharmProjects/Deeplab-v3plus/data/NYUDV2/Results/\",\n help=\"the filepath where mask store\")\n arg_parser.add_argument('--loss_weights_dir', default=\"/data/linhua/VOCdevkit/pretrained_weights/\")\n\n # Model related arguments\n arg_parser.add_argument('--backbone', default='resnet101',\n help=\"backbone of encoder\")\n arg_parser.add_argument('--output_stride', type=int, default=16, choices=[8, 16],\n help=\"choose from 8 or 16\")\n arg_parser.add_argument('--bn_momentum', type=float, default=0.1,\n help=\"batch normalization momentum\")\n arg_parser.add_argument('--imagenet_pretrained', type=str2bool, default=False,\n help=\"whether apply iamgenet pretrained weights\")\n arg_parser.add_argument('--pretrained_ckpt_file', type=str, default=None,\n help=\"whether apply pretrained checkpoint\")\n arg_parser.add_argument('--save_ckpt_file', type=str2bool, default=True,\n help=\"whether to save trained checkpoint file \")\n arg_parser.add_argument('--store_result_mask', type=str2bool, default=True,\n help=\"whether store mask after val or test\")\n arg_parser.add_argument('--loss_weight_file', type=str, default=None,\n help=\"the filename of weights for loss function\")\n arg_parser.add_argument('--validation', type=str2bool, default=True,\n help=\"whether to val after each train epoch\")\n\n # train related arguments\n arg_parser.add_argument('--gpu', type=str, default=\"0\",\n help=\" the num of gpu\")\n arg_parser.add_argument('--batch_size_per_gpu', default=2, type=int,\n help='input batch size')\n\n # dataset related arguments\n arg_parser.add_argument('--dataset', default='nyudv2', type=str,\n help='dataset choice')\n arg_parser.add_argument('--base_size', default=(640,480), type=int,\n help='crop size of image')\n arg_parser.add_argument('--crop_size', default=(640,480), type=int,\n help='base size of image')\n arg_parser.add_argument('--num_classes', default=21, type=int,\n help='num class of mask')\n arg_parser.add_argument('--data_loader_workers', default=16, type=int,\n help='num_workers of Dataloader')\n arg_parser.add_argument('--pin_memory', default=2, type=int,\n help='pin_memory of Dataloader')\n arg_parser.add_argument('--split', type=str, default='train',\n help=\"choose from train/val/test/trainval\")\n\n # optimization related arguments\n\n arg_parser.add_argument('--freeze_bn', type=str2bool, 
default=False,\n help=\"whether freeze BatchNormalization\")\n\n arg_parser.add_argument('--momentum', type=float, default=0.9)\n arg_parser.add_argument('--dampening', type=float, default=0)\n arg_parser.add_argument('--nesterov', type=str2bool, default=True)\n arg_parser.add_argument('--weight_decay', type=float, default=4e-5)\n\n arg_parser.add_argument('--lr', type=float, default=0.007,\n help=\"init learning rate \")\n arg_parser.add_argument('--iter_max', type=int, default=30000,\n help=\"the maxinum of iteration\")\n arg_parser.add_argument('--poly_power', type=float, default=0.9,\n help=\"poly_power\")\n arg_parser.add_argument('--batch_size', type=int, default=4)\n args = arg_parser.parse_args()\n\n\n\n train_id = str(args.backbone) + '_' + str(args.output_stride)\n train_id += '_iamgenet_pre-' + str(args.imagenet_pretrained)\n train_id += '_ckpt_file-' + str(args.pretrained_ckpt_file)\n train_id += '_loss_weight_file-' + str(args.loss_weight_file)\n train_id += '_batch_size-' + str(args.batch_size)\n train_id += '_base_size-' + str(args.base_size)\n train_id += '_crop_size-' + str(args.crop_size)\n train_id += '_split-' + str(args.split)\n train_id += '_lr-' + str(args.lr)\n train_id += '_iter_max-' + str(args.iter_max)\n\n # logger configure\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n fh = logging.FileHandler(train_id+'.txt')\n ch = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(ch)\n\n agent = Trainer(args=args, cuda=True)\n agent.main()\n" ]
[ [ "torch.device", "torch.save", "torch.no_grad", "numpy.load", "torch.cuda.get_device_name", "torch.cuda.current_device", "torch.squeeze", "torch.cuda.is_available", "numpy.argmax", "torch.load", "torch.nn.CrossEntropyLoss" ] ]
lvbu12/DL_sequence_models_homework
[ "6742c1ffdf2f6e35c3c88dcfd8a126ea070ae0dd" ]
[ "Week_02/Emojify/train_01.py" ]
[ "#_*_ coding:utf-8 _*_\r\nimport numpy as np\r\nfrom emo_utils import *\r\nimport emoji\r\nimport matplotlib.pyplot as plt\r\n\r\nX_train, Y_train = read_csv('data/train_emoji.csv')\r\nX_test, Y_test = read_csv('data/tesss.csv')\r\n\r\nmaxLen = len(max(X_train, key=len).split())\r\n\r\nindex = 1\r\nprint(X_train[index], label_to_emoji(Y_train[index]))\r\n\r\nY_oh_train = convert_to_one_hot(Y_train, C=5)\r\nY_oh_test = convert_to_one_hot(Y_test, C=5)\r\n\r\nindex = 50\r\nprint(Y_train[index], \"is converted into one hot\", Y_oh_train[index])\r\n\r\nword_to_index, index_to_word, word_to_vec_map = read_glove_vecs('data/glove.6B.50d.txt')\r\n\r\nword = \"cucumber\"\r\nindex = 289846\r\nprint(\"the index of\", word, \"in the vocabulary is\", word_to_index[word])\r\nprint(\"the\", str(index) + \"th word in the vocabulary is\", index_to_word[index])\r\n\r\n\r\ndef sentence_to_avg(sentence, word_to_vec_map):\r\n \"\"\"\r\n Converts a sentence (string) into a list of words (strings). Extracts the GloVe representation of each word\r\n and averages its value into a single vector encoding the meaning of the sentence.\r\n\r\n Arguments:\r\n sentence -- string, one training example from X\r\n word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation\r\n\r\n Returns:\r\n avg -- average vector encoding information about the sentence, numpy-array of shape (50,)\r\n \"\"\"\r\n\r\n ### START CODE HERE ###\r\n # Step 1: Split sentence into list of lower case words (≈ 1 line)\r\n words = [word.lower() for word in sentence.split()]\r\n\r\n # Initialize the average word vector, should have the same shape as your word vectors.\r\n avg = np.zeros((50,))\r\n\r\n # Step 2: average the word vectors. You can loop over the words in the list \"words\".\r\n for w in words:\r\n avg += word_to_vec_map[w]\r\n avg = avg / len(words)\r\n\r\n ### END CODE HERE ###\r\n\r\n return avg\r\n\r\navg = sentence_to_avg(\"Morrocan couscous is my favorite dish\", word_to_vec_map)\r\nprint(\"avg = \", avg)\r\n\r\n\r\ndef model(X, Y, word_to_vec_map, learning_rate=0.01, num_iterations=400):\r\n \"\"\"\r\n Model to train word vector representations in numpy.\r\n\r\n Arguments:\r\n X -- input data, numpy array of sentences as strings, of shape (m, 1)\r\n Y -- labels, numpy array of integers between 0 and 7, numpy-array of shape (m, 1)\r\n word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation\r\n learning_rate -- learning_rate for the stochastic gradient descent algorithm\r\n num_iterations -- number of iterations\r\n\r\n Returns:\r\n pred -- vector of predictions, numpy-array of shape (m, 1)\r\n W -- weight matrix of the softmax layer, of shape (n_y, n_h)\r\n b -- bias of the softmax layer, of shape (n_y,)\r\n \"\"\"\r\n\r\n np.random.seed(1)\r\n\r\n # Define number of training examples\r\n m = Y.shape[0] # number of training examples\r\n n_y = 5 # number of classes\r\n n_h = 50 # dimensions of the GloVe vectors\r\n\r\n # Initialize parameters using Xavier initialization\r\n W = np.random.randn(n_y, n_h) / np.sqrt(n_h)\r\n b = np.zeros((n_y,))\r\n\r\n # Convert Y to Y_onehot with n_y classes\r\n Y_oh = convert_to_one_hot(Y, C=n_y)\r\n\r\n # Optimization loop\r\n for t in range(num_iterations): # Loop over the number of iterations\r\n for i in range(m): # Loop over the training examples\r\n\r\n ### START CODE HERE ### (≈ 4 lines of code)\r\n # Average the word vectors of the words from the i'th training example\r\n avg = 
sentence_to_avg(X[i], word_to_vec_map)\r\n\r\n # Forward propagate the avg through the softmax layer\r\n z = np.dot(W, avg) + b\r\n a = softmax(z)\r\n\r\n # Compute cost using the i'th training label's one hot representation and \"A\" (the output of the softmax)\r\n cost = - np.dot(Y_oh[i].T, np.log(a))\r\n ### END CODE HERE ###\r\n\r\n # Compute gradients\r\n dz = a - Y_oh[i]\r\n\r\n dW = np.dot(dz.reshape(n_y, 1), avg.reshape(1, n_h))\r\n db = dz\r\n\r\n # Update parameters with Stochastic Gradient Descent\r\n W = W - learning_rate * dW\r\n b = b - learning_rate * db\r\n\r\n if t % 100 == 0:\r\n print(\"Epoch: \" + str(t) + \" --- cost = \" + str(cost))\r\n pred = predict(X, Y, W, b, word_to_vec_map)\r\n\r\n return pred, W, b\r\n\r\nprint(X_train.shape)\r\nprint(Y_train.shape)\r\nprint(np.eye(5)[Y_train.reshape(-1)].shape)\r\nprint(X_train[0])\r\nprint(type(X_train))\r\nY = np.asarray([5,0,0,5, 4, 4, 4, 6, 6, 4, 1, 1, 5, 6, 6, 3, 6, 3, 4, 4])\r\nprint(Y.shape)\r\n\r\nX = np.asarray(['I am going to the bar tonight', 'I love you', 'miss you my dear',\r\n 'Lets go party and drinks','Congrats on the new job','Congratulations',\r\n 'I am so happy for you', 'Why are you feeling bad', 'What is wrong with you',\r\n 'You totally deserve this prize', 'Let us go play football',\r\n 'Are you down for football this afternoon', 'Work hard play harder',\r\n 'It is suprising how people can be dumb sometimes',\r\n 'I am very disappointed','It is the best day in my life',\r\n 'I think I will end up alone','My life is so boring','Good job',\r\n 'Great so awesome'])\r\n\r\nprint(X.shape)\r\nprint(np.eye(5)[Y_train.reshape(-1)].shape)\r\nprint(type(X_train))\r\n\r\npred, W, b = model(X_train, Y_train, word_to_vec_map)\r\nprint(pred)\r\n\r\nprint(\"Training set:\")\r\npred_train = predict(X_train, Y_train, W, b, word_to_vec_map)\r\nprint('Test set:')\r\npred_test = predict(X_test, Y_test, W, b, word_to_vec_map)\r\n\r\nX_my_sentences = np.array([\"i adore you\", \"i love you\", \"funny lol\", \"lets play with a ball\", \"food is ready\", \"not feeling happy\"])\r\nY_my_labels = np.array([[0], [0], [2], [1], [4],[3]])\r\n\r\npred = predict(X_my_sentences, Y_my_labels , W, b, word_to_vec_map)\r\nprint_predictions(X_my_sentences, pred)\r\n\r\nprint(Y_test.shape)\r\nprint(' '+ label_to_emoji(0)+ ' ' + label_to_emoji(1) + ' ' + label_to_emoji(2)+ ' ' + label_to_emoji(3)+' ' + label_to_emoji(4))\r\nprint(pd.crosstab(Y_test, pred_test.reshape(56,), rownames=['Actual'], colnames=['Predicted'], margins=True))\r\nplot_confusion_matrix(Y_test, pred_test)\r\n\r\n\"\"\"\r\nEmojifier-V2: Using LSTMs in Keras.\r\n\"\"\"\r\nimport numpy as np\r\nnp.random.seed(0)\r\nfrom keras.models import Model\r\nfrom keras.layers import Dense, Input, Dropout, LSTM, Activation\r\nfrom keras.layers.embeddings import Embedding\r\nfrom keras.preprocessing import sequence\r\nfrom keras.initializers import glorot_uniform\r\nnp.random.seed(1)\r\n\r\n\r\ndef sentences_to_indices(X, word_to_index, max_len):\r\n \"\"\"\r\n Converts an array of sentences (strings) into an array of indices corresponding to words in the sentences.\r\n The output shape should be such that it can be given to `Embedding()` (described in Figure 4).\r\n\r\n Arguments:\r\n X -- array of sentences (strings), of shape (m, 1)\r\n word_to_index -- a dictionary containing the each word mapped to its index\r\n max_len -- maximum number of words in a sentence. 
You can assume every sentence in X is no longer than this.\r\n\r\n Returns:\r\n X_indices -- array of indices corresponding to words in the sentences from X, of shape (m, max_len)\r\n \"\"\"\r\n\r\n m = X.shape[0] # number of training examples\r\n\r\n # Initialize X_indices as a numpy matrix of zeros and the correct shape (≈ 1 line)\r\n X_indices = np.zeros((m, max_len))\r\n\r\n for i in range(m): # loop over training examples\r\n\r\n # Convert the ith training sentence in lower case and split is into words. You should get a list of words.\r\n sentence_words = [word.lower() for word in X[i].split()]\r\n\r\n # Initialize j to 0\r\n j = 0\r\n\r\n # Loop over the words of sentence_words\r\n for w in sentence_words:\r\n # Set the (i,j)th entry of X_indices to the index of the correct word.\r\n X_indices[i, j] = word_to_index[w]\r\n # Increment j to j + 1\r\n j = j + 1\r\n\r\n return X_indices\r\n\r\nX1 = np.array([\"funny lol\", \"lets play baseball\", \"food is ready for you\"])\r\nX1_indices = sentences_to_indices(X1,word_to_index, max_len = 5)\r\nprint(\"X1 =\", X1)\r\nprint(\"X1_indices =\", X1_indices)\r\n\r\n\r\ndef pretrained_embedding_layer(word_to_vec_map, word_to_index):\r\n \"\"\"\r\n Creates a Keras Embedding() layer and loads in pre-trained GloVe 50-dimensional vectors.\r\n\r\n Arguments:\r\n word_to_vec_map -- dictionary mapping words to their GloVe vector representation.\r\n word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)\r\n\r\n Returns:\r\n embedding_layer -- pretrained layer Keras instance\r\n \"\"\"\r\n\r\n vocab_len = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\r\n emb_dim = word_to_vec_map[\"cucumber\"].shape[0] # define dimensionality of your GloVe word vectors (= 50)\r\n\r\n # Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors = emb_dim)\r\n emb_matrix = np.zeros((vocab_len, emb_dim))\r\n\r\n # Set each row \"index\" of the embedding matrix to be the word vector representation of the \"index\"th word of the vocabulary\r\n for word, index in word_to_index.items():\r\n emb_matrix[index, :] = word_to_vec_map[word]\r\n\r\n # Define Keras embedding layer with the correct output/input sizes, make it trainable. Use Embedding(...). Make sure to set trainable=False.\r\n embedding_layer = Embedding(input_dim=vocab_len, output_dim=emb_dim, trainable=False)\r\n\r\n # Build the embedding layer, it is required before setting the weights of the embedding layer. Do not modify the \"None\".\r\n embedding_layer.build((None,))\r\n\r\n # Set the weights of the embedding layer to the embedding matrix. 
Your layer is now pretrained.\r\n embedding_layer.set_weights([emb_matrix])\r\n\r\n return embedding_layer\r\n\r\nembedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)\r\nprint(\"weights[0][1][3] =\", embedding_layer.get_weights()[0][1][3])\r\n\r\n\r\ndef Emojify_V2(input_shape, word_to_vec_map, word_to_index):\r\n \"\"\"\r\n Function creating the Emojify-v2 model's graph.\r\n\r\n Arguments:\r\n input_shape -- shape of the input, usually (max_len,)\r\n word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation\r\n word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)\r\n\r\n Returns:\r\n model -- a model instance in Keras\r\n \"\"\"\r\n\r\n ### START CODE HERE ###\r\n # Define sentence_indices as the input of the graph, it should be of shape input_shape and dtype 'int32' (as it contains indices).\r\n sentence_indices = Input(shape=input_shape, dtype='int32')\r\n\r\n # Create the embedding layer pretrained with GloVe Vectors (≈1 line)\r\n embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)\r\n\r\n # Propagate sentence_indices through your embedding layer, you get back the embeddings\r\n embeddings = embedding_layer(sentence_indices)\r\n\r\n # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state\r\n # Be careful, the returned output should be a batch of sequences.\r\n X = LSTM(units=128, return_sequences=True)(embeddings)\r\n # Add dropout with a probability of 0.5\r\n X = Dropout(rate=0.5)(X)\r\n # Propagate X trough another LSTM layer with 128-dimensional hidden state\r\n # Be careful, the returned output should be a single hidden state, not a batch of sequences.\r\n X = LSTM(units=128, return_sequences=False)(X)\r\n # Add dropout with a probability of 0.5\r\n X = Dropout(rate=0.5)(X)\r\n # Propagate X through a Dense layer with softmax activation to get back a batch of 5-dimensional vectors.\r\n X = Dense(units=5)(X)\r\n # Add a softmax activation\r\n X = Activation('softmax')(X)\r\n\r\n # Create Model instance which converts sentence_indices into X.\r\n model = Model(inputs=sentence_indices, outputs=X)\r\n\r\n ### END CODE HERE ###\r\n\r\n return model\r\n\r\nmodel = Emojify_V2((maxLen,), word_to_vec_map, word_to_index)\r\nmodel.summary()\r\n\r\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n\r\nX_train_indices = sentences_to_indices(X_train, word_to_index, maxLen)\r\nY_train_oh = convert_to_one_hot(Y_train, C = 5)\r\n\r\nmodel.fit(X_train_indices, Y_train_oh, epochs = 50, batch_size = 32, shuffle=True)\r\n\r\nX_test_indices = sentences_to_indices(X_test, word_to_index, max_len = maxLen)\r\nY_test_oh = convert_to_one_hot(Y_test, C = 5)\r\nloss, acc = model.evaluate(X_test_indices, Y_test_oh)\r\nprint()\r\nprint(\"Test accuracy = \", acc)\r\n\r\n# This code allows you to see the mislabelled examples\r\nC = 5\r\ny_test_oh = np.eye(C)[Y_test.reshape(-1)]\r\nX_test_indices = sentences_to_indices(X_test, word_to_index, maxLen)\r\npred = model.predict(X_test_indices)\r\nfor i in range(len(X_test)):\r\n x = X_test_indices\r\n num = np.argmax(pred[i])\r\n if(num != Y_test[i]):\r\n print('Expected emoji:'+ label_to_emoji(Y_test[i]) + ' prediction: '+ X_test[i] + label_to_emoji(num).strip())\r\n\r\n# Change the sentence below to see your prediction. 
Make sure all the words are in the Glove embeddings.\r\nx_test = np.array(['not feeling happy'])\r\nX_test_indices = sentences_to_indices(x_test, word_to_index, maxLen)\r\nprint(x_test[0] +' '+ label_to_emoji(np.argmax(model.predict(X_test_indices))))\r\n\r\n\r\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.asarray", "numpy.zeros", "numpy.log", "numpy.random.seed", "numpy.random.randn", "numpy.eye", "numpy.argmax", "numpy.sqrt" ] ]
OnionMoeCat/Project-Behavioral-Cloning
[ "42a7e66e576e71736e942ce6e6ffad83244fa815" ]
[ "drive.py" ]
[ "import argparse\nimport base64\nfrom datetime import datetime\nimport os\nimport shutil\n\nimport numpy as np\nimport socketio\nimport eventlet\nimport eventlet.wsgi\nfrom PIL import Image\nfrom flask import Flask\nfrom io import BytesIO\n\nfrom keras.models import load_model\nimport h5py\nfrom keras import __version__ as keras_version\n\nimport utils\n\nsio = socketio.Server()\napp = Flask(__name__)\nmodel = None\nprev_image_array = None\n\n\nclass SimplePIController:\n def __init__(self, Kp, Ki):\n self.Kp = Kp\n self.Ki = Ki\n self.set_point = 0.\n self.error = 0.\n self.integral = 0.\n\n def set_desired(self, desired):\n self.set_point = desired\n\n def update(self, measurement):\n # proportional error\n self.error = self.set_point - measurement\n\n # integral error\n self.integral += self.error\n\n return self.Kp * self.error + self.Ki * self.integral\n\n\ncontroller = SimplePIController(0.1, 0.002)\nset_speed = 9\ncontroller.set_desired(set_speed)\n\n\[email protected]('telemetry')\ndef telemetry(sid, data):\n if data:\n # The current steering angle of the car\n steering_angle = data[\"steering_angle\"]\n # The current throttle of the car\n throttle = data[\"throttle\"]\n # The current speed of the car\n speed = data[\"speed\"]\n # The current image from the center camera of the car\n imgString = data[\"image\"]\n image = Image.open(BytesIO(base64.b64decode(imgString)))\n image_array = np.asarray(image)\n # preprocess the image \n image_array = utils.preprocess(image_array) # apply the preprocessing\n steering_angle = float(model.predict(image_array[None, :, :, :], batch_size=1))\n\n throttle = controller.update(float(speed))\n\n send_control(steering_angle, throttle)\n\n # save frame\n if args.image_folder != '':\n timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]\n image_filename = os.path.join(args.image_folder, timestamp)\n image.save('{}.jpg'.format(image_filename))\n else:\n # NOTE: DON'T EDIT THIS.\n sio.emit('manual', data={}, skip_sid=True)\n\n\[email protected]('connect')\ndef connect(sid, environ):\n print(\"connect \", sid)\n send_control(0, 0)\n\n\ndef send_control(steering_angle, throttle):\n print(\"sending control\", steering_angle, \":\", throttle)\n sio.emit(\n \"steer\",\n data={\n 'steering_angle': steering_angle.__str__(),\n 'throttle': throttle.__str__()\n },\n skip_sid=True)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Remote Driving')\n parser.add_argument(\n 'model',\n type=str,\n help='Path to model h5 file. Model should be on the same path.'\n )\n parser.add_argument(\n 'image_folder',\n type=str,\n nargs='?',\n default='',\n help='Path to image folder. 
This is where the images from the run will be saved.'\n )\n args = parser.parse_args()\n\n # check that model Keras version is same as local Keras version\n f = h5py.File(args.model, mode='r')\n model_version = f.attrs.get('keras_version')\n keras_version = str(keras_version).encode('utf8')\n\n if model_version != keras_version:\n print('You are using Keras version ', keras_version,\n ', but the model was built using ', model_version)\n\n model = load_model(args.model)\n\n if args.image_folder != '':\n print(\"Creating image folder at {}\".format(args.image_folder))\n if not os.path.exists(args.image_folder):\n os.makedirs(args.image_folder)\n else:\n shutil.rmtree(args.image_folder)\n os.makedirs(args.image_folder)\n print(\"RECORDING THIS RUN ...\")\n else:\n print(\"NOT RECORDING THIS RUN ...\")\n\n # wrap Flask application with engineio's middleware\n app = socketio.Middleware(sio, app)\n\n # deploy as an eventlet WSGI server\n eventlet.wsgi.server(eventlet.listen(('', 4567)), app)\n" ]
[ [ "numpy.asarray" ] ]
kenliufang/showandtell
[ "76c9adab967e58c652ab5cdf8de447d2b6e775af" ]
[ "im2txt/im2txt/inference_client.py" ]
[ "r\"\"\"Generate captions for images using default beam search parameters.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom concurrent import futures\nimport time\n\n\nimport math\nimport os\n\n\nimport tensorflow as tf\n\nfrom im2txt import server_pb2\n\nimport grpc\n\nFLAGS = tf.flags.FLAGS\n\ntf.flags.DEFINE_string(\"input_file\", \"\",\n \"input file\"\n \"input\")\n#\n\ndef run():\n channel = grpc.insecure_channel('localhost:50051')\n stub = server_pb2.ShowAndTellServiceStub(channel)\n with tf.gfile.GFile(FLAGS.input_file, \"r\") as f:\n image = f.read()\n response = stub.ShowAndTell(\n server_pb2.ShowAndTellRequest(image_data=image))\n for caption in response.captions:\n print(\"%s (p=%f)\" % (caption.caption, caption.score))\n #print(\"Greeter client received: \" + response.message)\n\nif __name__ == '__main__':\n run()\n" ]
[ [ "tensorflow.flags.DEFINE_string", "tensorflow.gfile.GFile" ] ]
certik/pandas
[ "758ca05e2eb04532b5d78331ba87c291038e2c61" ]
[ "pandas/tests/test_tseries.py" ]
[ "# -*- coding: utf-8 -*-\nimport nose\nfrom numpy import nan\nimport numpy as np\nfrom pandas import Index, isnull, Timestamp\nfrom pandas.util.testing import assert_almost_equal\nimport pandas.util.testing as tm\nfrom pandas.compat import range, lrange, zip\nimport pandas.lib as lib\nimport pandas._period as period\nimport pandas.algos as algos\nfrom pandas.tseries.holiday import Holiday, SA, next_monday\nfrom pandas import DateOffset\n\n\nclass TestTseriesUtil(tm.TestCase):\n _multiprocess_can_split_ = True\n\n def test_combineFunc(self):\n pass\n\n def test_reindex(self):\n pass\n\n def test_isnull(self):\n pass\n\n def test_groupby(self):\n pass\n\n def test_groupby_withnull(self):\n pass\n\n def test_backfill(self):\n old = Index([1, 5, 10])\n new = Index(lrange(12))\n\n filler = algos.backfill_int64(old.values, new.values)\n\n expect_filler = [0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1]\n self.assert_numpy_array_equal(filler, expect_filler)\n\n # corner case\n old = Index([1, 4])\n new = Index(lrange(5, 10))\n filler = algos.backfill_int64(old.values, new.values)\n\n expect_filler = [-1, -1, -1, -1, -1]\n self.assert_numpy_array_equal(filler, expect_filler)\n\n def test_pad(self):\n old = Index([1, 5, 10])\n new = Index(lrange(12))\n\n filler = algos.pad_int64(old.values, new.values)\n\n expect_filler = [-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2]\n self.assert_numpy_array_equal(filler, expect_filler)\n\n # corner case\n old = Index([5, 10])\n new = Index(lrange(5))\n filler = algos.pad_int64(old.values, new.values)\n expect_filler = [-1, -1, -1, -1, -1]\n self.assert_numpy_array_equal(filler, expect_filler)\n\n\ndef test_left_join_indexer_unique():\n a = np.array([1, 2, 3, 4, 5], dtype=np.int64)\n b = np.array([2, 2, 3, 4, 4], dtype=np.int64)\n\n result = algos.left_join_indexer_unique_int64(b, a)\n expected = np.array([1, 1, 2, 3, 3], dtype=np.int64)\n assert(np.array_equal(result, expected))\n\n\ndef test_left_outer_join_bug():\n left = np.array([0, 1, 0, 1, 1, 2, 3, 1, 0, 2, 1, 2, 0, 1, 1, 2, 3, 2, 3,\n 2, 1, 1, 3, 0, 3, 2, 3, 0, 0, 2, 3, 2, 0, 3, 1, 3, 0, 1,\n 3, 0, 0, 1, 0, 3, 1, 0, 1, 0, 1, 1, 0, 2, 2, 2, 2, 2, 0,\n 3, 1, 2, 0, 0, 3, 1, 3, 2, 2, 0, 1, 3, 0, 2, 3, 2, 3, 3,\n 2, 3, 3, 1, 3, 2, 0, 0, 3, 1, 1, 1, 0, 2, 3, 3, 1, 2, 0,\n 3, 1, 2, 0, 2], dtype=np.int64)\n\n right = np.array([3, 1], dtype=np.int64)\n max_groups = 4\n\n lidx, ridx = algos.left_outer_join(left, right, max_groups, sort=False)\n\n exp_lidx = np.arange(len(left))\n exp_ridx = -np.ones(len(left))\n exp_ridx[left == 1] = 1\n exp_ridx[left == 3] = 0\n\n assert(np.array_equal(lidx, exp_lidx))\n assert(np.array_equal(ridx, exp_ridx))\n\n\ndef test_inner_join_indexer():\n a = np.array([1, 2, 3, 4, 5], dtype=np.int64)\n b = np.array([0, 3, 5, 7, 9], dtype=np.int64)\n\n index, ares, bres = algos.inner_join_indexer_int64(a, b)\n\n index_exp = np.array([3, 5], dtype=np.int64)\n assert_almost_equal(index, index_exp)\n\n aexp = np.array([2, 4])\n bexp = np.array([1, 2])\n assert_almost_equal(ares, aexp)\n assert_almost_equal(bres, bexp)\n\n a = np.array([5], dtype=np.int64)\n b = np.array([5], dtype=np.int64)\n\n index, ares, bres = algos.inner_join_indexer_int64(a, b)\n assert_almost_equal(index, [5])\n assert_almost_equal(ares, [0])\n assert_almost_equal(bres, [0])\n\n\ndef test_outer_join_indexer():\n a = np.array([1, 2, 3, 4, 5], dtype=np.int64)\n b = np.array([0, 3, 5, 7, 9], dtype=np.int64)\n\n index, ares, bres = algos.outer_join_indexer_int64(a, b)\n\n index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64)\n 
assert_almost_equal(index, index_exp)\n\n aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.int64)\n bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4])\n assert_almost_equal(ares, aexp)\n assert_almost_equal(bres, bexp)\n\n a = np.array([5], dtype=np.int64)\n b = np.array([5], dtype=np.int64)\n\n index, ares, bres = algos.outer_join_indexer_int64(a, b)\n assert_almost_equal(index, [5])\n assert_almost_equal(ares, [0])\n assert_almost_equal(bres, [0])\n\n\ndef test_left_join_indexer():\n a = np.array([1, 2, 3, 4, 5], dtype=np.int64)\n b = np.array([0, 3, 5, 7, 9], dtype=np.int64)\n\n index, ares, bres = algos.left_join_indexer_int64(a, b)\n\n assert_almost_equal(index, a)\n\n aexp = np.array([0, 1, 2, 3, 4], dtype=np.int64)\n bexp = np.array([-1, -1, 1, -1, 2], dtype=np.int64)\n assert_almost_equal(ares, aexp)\n assert_almost_equal(bres, bexp)\n\n a = np.array([5], dtype=np.int64)\n b = np.array([5], dtype=np.int64)\n\n index, ares, bres = algos.left_join_indexer_int64(a, b)\n assert_almost_equal(index, [5])\n assert_almost_equal(ares, [0])\n assert_almost_equal(bres, [0])\n\n\ndef test_left_join_indexer2():\n idx = Index([1, 1, 2, 5])\n idx2 = Index([1, 2, 5, 7, 9])\n\n res, lidx, ridx = algos.left_join_indexer_int64(idx2.values, idx.values)\n\n exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)\n assert_almost_equal(res, exp_res)\n\n exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.int64)\n assert_almost_equal(lidx, exp_lidx)\n\n exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.int64)\n assert_almost_equal(ridx, exp_ridx)\n\n\ndef test_outer_join_indexer2():\n idx = Index([1, 1, 2, 5])\n idx2 = Index([1, 2, 5, 7, 9])\n\n res, lidx, ridx = algos.outer_join_indexer_int64(idx2.values, idx.values)\n\n exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)\n assert_almost_equal(res, exp_res)\n\n exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.int64)\n assert_almost_equal(lidx, exp_lidx)\n\n exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.int64)\n assert_almost_equal(ridx, exp_ridx)\n\n\ndef test_inner_join_indexer2():\n idx = Index([1, 1, 2, 5])\n idx2 = Index([1, 2, 5, 7, 9])\n\n res, lidx, ridx = algos.inner_join_indexer_int64(idx2.values, idx.values)\n\n exp_res = np.array([1, 1, 2, 5], dtype=np.int64)\n assert_almost_equal(res, exp_res)\n\n exp_lidx = np.array([0, 0, 1, 2], dtype=np.int64)\n assert_almost_equal(lidx, exp_lidx)\n\n exp_ridx = np.array([0, 1, 2, 3], dtype=np.int64)\n assert_almost_equal(ridx, exp_ridx)\n\n\ndef test_is_lexsorted():\n failure = [\n np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n 3, 3,\n 3, 3,\n 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0]),\n np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,\n 15, 14,\n 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28,\n 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11,\n 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25,\n 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8,\n 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22,\n 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5,\n 4, 3, 2, 1, 0])]\n\n assert(not algos.is_lexsorted(failure))\n\n# def test_get_group_index():\n# a = np.array([0, 1, 2, 0, 2, 1, 0, 0], dtype=np.int64)\n# b = np.array([1, 
0, 3, 2, 0, 2, 3, 0], dtype=np.int64)\n# expected = np.array([1, 4, 11, 2, 8, 6, 3, 0], dtype=np.int64)\n\n# result = lib.get_group_index([a, b], (3, 4))\n\n# assert(np.array_equal(result, expected))\n\n\ndef test_groupsort_indexer():\n a = np.random.randint(0, 1000, 100).astype(np.int64)\n b = np.random.randint(0, 1000, 100).astype(np.int64)\n\n result = algos.groupsort_indexer(a, 1000)[0]\n\n # need to use a stable sort\n expected = np.argsort(a, kind='mergesort')\n assert(np.array_equal(result, expected))\n\n # compare with lexsort\n key = a * 1000 + b\n result = algos.groupsort_indexer(key, 1000000)[0]\n expected = np.lexsort((b, a))\n assert(np.array_equal(result, expected))\n\n\ndef test_ensure_platform_int():\n arr = np.arange(100)\n\n result = algos.ensure_platform_int(arr)\n assert(result is arr)\n\n\ndef test_duplicated_with_nas():\n keys = np.array([0, 1, nan, 0, 2, nan], dtype=object)\n\n result = lib.duplicated(keys)\n expected = [False, False, False, True, False, True]\n assert(np.array_equal(result, expected))\n\n result = lib.duplicated(keys, take_last=True)\n expected = [True, False, True, False, False, False]\n assert(np.array_equal(result, expected))\n\n keys = np.empty(8, dtype=object)\n for i, t in enumerate(zip([0, 0, nan, nan] * 2, [0, nan, 0, nan] * 2)):\n keys[i] = t\n\n result = lib.duplicated(keys)\n falses = [False] * 4\n trues = [True] * 4\n expected = falses + trues\n assert(np.array_equal(result, expected))\n\n result = lib.duplicated(keys, take_last=True)\n expected = trues + falses\n assert(np.array_equal(result, expected))\n\n\ndef test_maybe_booleans_to_slice():\n arr = np.array([0, 0, 1, 1, 1, 0, 1], dtype=np.uint8)\n result = lib.maybe_booleans_to_slice(arr)\n assert(result.dtype == np.bool_)\n\n result = lib.maybe_booleans_to_slice(arr[:0])\n assert(result == slice(0, 0))\n\n\ndef test_convert_objects():\n arr = np.array(['a', 'b', nan, nan, 'd', 'e', 'f'], dtype='O')\n result = lib.maybe_convert_objects(arr)\n assert(result.dtype == np.object_)\n\n\ndef test_convert_infs():\n arr = np.array(['inf', 'inf', 'inf'], dtype='O')\n result = lib.maybe_convert_numeric(arr, set(), False)\n assert(result.dtype == np.float64)\n\n arr = np.array(['-inf', '-inf', '-inf'], dtype='O')\n result = lib.maybe_convert_numeric(arr, set(), False)\n assert(result.dtype == np.float64)\n\n\ndef test_convert_objects_ints():\n # test that we can detect many kinds of integers\n dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']\n\n for dtype_str in dtypes:\n arr = np.array(list(np.arange(20, dtype=dtype_str)), dtype='O')\n assert(arr[0].dtype == np.dtype(dtype_str))\n result = lib.maybe_convert_objects(arr)\n assert(issubclass(result.dtype.type, np.integer))\n\n\ndef test_convert_objects_complex_number():\n for dtype in np.sctypes['complex']:\n arr = np.array(list(1j * np.arange(20, dtype=dtype)), dtype='O')\n assert(arr[0].dtype == np.dtype(dtype))\n result = lib.maybe_convert_objects(arr)\n assert(issubclass(result.dtype.type, np.complexfloating))\n\n\ndef test_rank():\n tm._skip_if_no_scipy()\n from scipy.stats import rankdata\n\n def _check(arr):\n mask = ~np.isfinite(arr)\n arr = arr.copy()\n result = algos.rank_1d_float64(arr)\n arr[mask] = np.inf\n exp = rankdata(arr)\n exp[mask] = nan\n assert_almost_equal(result, exp)\n\n _check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan]))\n _check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan]))\n\n\ndef test_get_reverse_indexer():\n indexer = np.array([-1, -1, 1, 2, 0, -1, 3, 4], dtype=np.int64)\n result = 
lib.get_reverse_indexer(indexer, 5)\n expected = np.array([4, 2, 3, 6, 7], dtype=np.int64)\n assert(np.array_equal(result, expected))\n\n\ndef test_pad_backfill_object_segfault():\n from datetime import datetime\n old = np.array([], dtype='O')\n new = np.array([datetime(2010, 12, 31)], dtype='O')\n\n result = algos.pad_object(old, new)\n expected = np.array([-1], dtype=np.int64)\n assert(np.array_equal(result, expected))\n\n result = algos.pad_object(new, old)\n expected = np.array([], dtype=np.int64)\n assert(np.array_equal(result, expected))\n\n result = algos.backfill_object(old, new)\n expected = np.array([-1], dtype=np.int64)\n assert(np.array_equal(result, expected))\n\n result = algos.backfill_object(new, old)\n expected = np.array([], dtype=np.int64)\n assert(np.array_equal(result, expected))\n\n\ndef test_arrmap():\n values = np.array(['foo', 'foo', 'bar', 'bar', 'baz', 'qux'], dtype='O')\n result = algos.arrmap_object(values, lambda x: x in ['foo', 'bar'])\n assert(result.dtype == np.bool_)\n\n\ndef test_series_grouper():\n from pandas import Series\n obj = Series(np.random.randn(10))\n dummy = obj[:0]\n\n labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)\n\n grouper = lib.SeriesGrouper(obj, np.mean, labels, 2, dummy)\n result, counts = grouper.get_result()\n\n expected = np.array([obj[3:6].mean(), obj[6:].mean()])\n assert_almost_equal(result, expected)\n\n exp_counts = np.array([3, 4], dtype=np.int64)\n assert_almost_equal(counts, exp_counts)\n\n\ndef test_series_bin_grouper():\n from pandas import Series\n obj = Series(np.random.randn(10))\n dummy = obj[:0]\n\n bins = np.array([3, 6])\n\n grouper = lib.SeriesBinGrouper(obj, np.mean, bins, dummy)\n result, counts = grouper.get_result()\n\n expected = np.array([obj[:3].mean(), obj[3:6].mean(), obj[6:].mean()])\n assert_almost_equal(result, expected)\n\n exp_counts = np.array([3, 3, 4], dtype=np.int64)\n assert_almost_equal(counts, exp_counts)\n\n\nclass TestBinGroupers(tm.TestCase):\n _multiprocess_can_split_ = True\n\n def setUp(self):\n self.obj = np.random.randn(10, 1)\n self.labels = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2], dtype=np.int64)\n self.bins = np.array([3, 6], dtype=np.int64)\n\n def test_generate_bins(self):\n from pandas.core.groupby import generate_bins_generic\n values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)\n binner = np.array([0, 3, 6, 9], dtype=np.int64)\n\n for func in [lib.generate_bins_dt64, generate_bins_generic]:\n bins = func(values, binner, closed='left')\n assert((bins == np.array([2, 5, 6])).all())\n\n bins = func(values, binner, closed='right')\n assert((bins == np.array([3, 6, 6])).all())\n\n for func in [lib.generate_bins_dt64, generate_bins_generic]:\n values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)\n binner = np.array([0, 3, 6], dtype=np.int64)\n\n bins = func(values, binner, closed='right')\n assert((bins == np.array([3, 6])).all())\n\n self.assertRaises(ValueError, generate_bins_generic, values, [],\n 'right')\n self.assertRaises(ValueError, generate_bins_generic, values[:0],\n binner, 'right')\n\n self.assertRaises(ValueError, generate_bins_generic,\n values, [4], 'right')\n self.assertRaises(ValueError, generate_bins_generic,\n values, [-3, -1], 'right')\n\n def test_group_bin_functions(self):\n\n dtypes = ['float32','float64']\n funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']\n\n np_funcs = {\n 'add': np.sum,\n 'mean': np.mean,\n 'prod': np.prod,\n 'min': np.min,\n 'max': np.max,\n 'var': lambda x: x.var(ddof=1) if len(x) >= 2 else np.nan\n }\n\n for 
fname in funcs:\n for d in dtypes:\n check_less_precise = False\n if d == 'float32':\n check_less_precise = True\n args = [getattr(algos, 'group_%s_%s' % (fname,d)),\n getattr(algos, 'group_%s_bin_%s' % (fname,d)),\n np_funcs[fname],\n d,\n check_less_precise]\n self._check_versions(*args)\n\n def _check_versions(self, irr_func, bin_func, np_func, dtype, check_less_precise):\n obj = self.obj.astype(dtype)\n\n cts = np.zeros(3, dtype=np.int64)\n exp = np.zeros((3, 1), dtype)\n irr_func(exp, cts, obj, self.labels)\n\n # bin-based version\n bins = np.array([3, 6], dtype=np.int64)\n out = np.zeros((3, 1), dtype)\n counts = np.zeros(len(out), dtype=np.int64)\n bin_func(out, counts, obj, bins)\n\n assert_almost_equal(out, exp, check_less_precise=check_less_precise)\n\n bins = np.array([3, 9, 10], dtype=np.int64)\n out = np.zeros((3, 1), dtype)\n counts = np.zeros(len(out), dtype=np.int64)\n bin_func(out, counts, obj, bins)\n exp = np.array([np_func(obj[:3]), np_func(obj[3:9]),\n np_func(obj[9:])],\n dtype=dtype)\n assert_almost_equal(out.squeeze(), exp, check_less_precise=check_less_precise)\n\n # duplicate bins\n bins = np.array([3, 6, 10, 10], dtype=np.int64)\n out = np.zeros((4, 1), dtype)\n counts = np.zeros(len(out), dtype=np.int64)\n bin_func(out, counts, obj, bins)\n exp = np.array([np_func(obj[:3]), np_func(obj[3:6]),\n np_func(obj[6:10]), np.nan],\n dtype=dtype)\n assert_almost_equal(out.squeeze(), exp, check_less_precise=check_less_precise)\n\n\ndef test_group_ohlc():\n\n def _check(dtype):\n obj = np.array(np.random.randn(20),dtype=dtype)\n\n bins = np.array([6, 12], dtype=np.int64)\n out = np.zeros((3, 4), dtype)\n counts = np.zeros(len(out), dtype=np.int64)\n\n func = getattr(algos,'group_ohlc_%s' % dtype)\n func(out, counts, obj[:, None], bins)\n\n def _ohlc(group):\n if isnull(group).all():\n return np.repeat(nan, 4)\n return [group[0], group.max(), group.min(), group[-1]]\n\n expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]),\n _ohlc(obj[12:])])\n\n assert_almost_equal(out, expected)\n assert_almost_equal(counts, [6, 6, 8])\n\n obj[:6] = nan\n func(out, counts, obj[:, None], bins)\n expected[0] = nan\n assert_almost_equal(out, expected)\n\n _check('float32')\n _check('float64')\n\ndef test_try_parse_dates():\n from dateutil.parser import parse\n\n arr = np.array(['5/1/2000', '6/1/2000', '7/1/2000'], dtype=object)\n\n result = lib.try_parse_dates(arr, dayfirst=True)\n expected = [parse(d, dayfirst=True) for d in arr]\n assert(np.array_equal(result, expected))\n\n\nclass TestTypeInference(tm.TestCase):\n _multiprocess_can_split_ = True\n\n def test_length_zero(self):\n result = lib.infer_dtype(np.array([], dtype='i4'))\n self.assertEqual(result, 'integer')\n\n result = lib.infer_dtype([])\n self.assertEqual(result, 'empty')\n\n def test_integers(self):\n arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'integer')\n\n arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'],\n dtype='O')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'mixed-integer')\n\n arr = np.array([1, 2, 3, 4, 5], dtype='i4')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'integer')\n\n def test_bools(self):\n arr = np.array([True, False, True, True, True], dtype='O')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'boolean')\n\n arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'boolean')\n\n arr = np.array([True, False, True, 
'foo'], dtype='O')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'mixed')\n\n arr = np.array([True, False, True], dtype=bool)\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'boolean')\n\n def test_floats(self):\n arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'floating')\n\n arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],\n dtype='O')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'mixed-integer')\n\n arr = np.array([1, 2, 3, 4, 5], dtype='f4')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'floating')\n\n arr = np.array([1, 2, 3, 4, 5], dtype='f8')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'floating')\n\n def test_string(self):\n pass\n\n def test_unicode(self):\n pass\n\n def test_datetime(self):\n import datetime\n dates = [datetime.datetime(2012, 1, x) for x in range(1, 20)]\n index = Index(dates)\n self.assertEqual(index.inferred_type, 'datetime64')\n\n def test_date(self):\n import datetime\n dates = [datetime.date(2012, 1, x) for x in range(1, 20)]\n index = Index(dates)\n self.assertEqual(index.inferred_type, 'date')\n\n def test_to_object_array_tuples(self):\n r = (5, 6)\n values = [r]\n result = lib.to_object_array_tuples(values)\n\n try:\n # make sure record array works\n from collections import namedtuple\n record = namedtuple('record', 'x y')\n r = record(5, 6)\n values = [r]\n result = lib.to_object_array_tuples(values)\n except ImportError:\n pass\n\n def test_object(self):\n\n # GH 7431\n # cannot infer more than this as only a single element\n arr = np.array([None],dtype='O')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'mixed')\n\n def test_categorical(self):\n\n # GH 8974\n from pandas import Categorical, Series\n arr = Categorical(list('abc'))\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'categorical')\n\n result = lib.infer_dtype(Series(arr))\n self.assertEqual(result, 'categorical')\n\n arr = Categorical(list('abc'),categories=['cegfab'],ordered=True)\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'categorical')\n\n result = lib.infer_dtype(Series(arr))\n self.assertEqual(result, 'categorical')\n\nclass TestMoments(tm.TestCase):\n pass\n\n\nclass TestReducer(tm.TestCase):\n\n def test_int_index(self):\n from pandas.core.series import Series\n\n arr = np.random.randn(100, 4)\n result = lib.reduce(arr, np.sum, labels=Index(np.arange(4)))\n expected = arr.sum(0)\n assert_almost_equal(result, expected)\n\n result = lib.reduce(arr, np.sum, axis=1, labels=Index(np.arange(100)))\n expected = arr.sum(1)\n assert_almost_equal(result, expected)\n\n dummy = Series(0., index=np.arange(100))\n result = lib.reduce(\n arr, np.sum, dummy=dummy, labels=Index(np.arange(4)))\n expected = arr.sum(0)\n assert_almost_equal(result, expected)\n\n dummy = Series(0., index=np.arange(4))\n result = lib.reduce(arr, np.sum, axis=1,\n dummy=dummy, labels=Index(np.arange(100)))\n expected = arr.sum(1)\n assert_almost_equal(result, expected)\n\n result = lib.reduce(arr, np.sum, axis=1,\n dummy=dummy, labels=Index(np.arange(100)))\n assert_almost_equal(result, expected)\n\n\nclass TestTsUtil(tm.TestCase):\n def test_min_valid(self):\n # Ensure that Timestamp.min is a valid Timestamp\n Timestamp(Timestamp.min)\n\n def test_max_valid(self):\n # Ensure that Timestamp.max is a valid Timestamp\n Timestamp(Timestamp.max)\n\n def test_to_datetime_bijective(self):\n # Ensure that converting to datetime and 
back only loses precision\n # by going from nanoseconds to microseconds.\n self.assertEqual(Timestamp(Timestamp.max.to_pydatetime()).value/1000, Timestamp.max.value/1000)\n self.assertEqual(Timestamp(Timestamp.min.to_pydatetime()).value/1000, Timestamp.min.value/1000)\n\nclass TestPeriodField(tm.TestCase):\n\n def test_get_period_field_raises_on_out_of_range(self):\n self.assertRaises(ValueError, period.get_period_field, -1, 0, 0)\n\n def test_get_period_field_array_raises_on_out_of_range(self):\n self.assertRaises(ValueError, period.get_period_field_arr, -1, np.empty(1), 0)\n\n\nclass TestHolidayConflictingArguments(tm.TestCase):\n\n # GH 10217\n\n def test_both_offset_observance_raises(self):\n\n with self.assertRaises(NotImplementedError) as cm:\n h = Holiday(\"Cyber Monday\", month=11, day=1,\n offset=[DateOffset(weekday=SA(4))], observance=next_monday)\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n" ]
[ [ "numpy.array_equal", "pandas.lib.SeriesGrouper", "pandas.algos.left_join_indexer_int64", "pandas.algos.ensure_platform_int", "pandas.Timestamp", "pandas.util.testing._skip_if_no_scipy", "pandas.compat.range", "scipy.stats.rankdata", "pandas.Timestamp.min.to_pydatetime", "numpy.dtype", "pandas.algos.inner_join_indexer_int64", "pandas.algos.outer_join_indexer_int64", "numpy.empty", "pandas.algos.rank_1d_float64", "pandas.algos.is_lexsorted", "pandas.lib.to_object_array_tuples", "pandas.algos.backfill_int64", "numpy.arange", "numpy.random.randint", "numpy.isfinite", "pandas.lib.try_parse_dates", "pandas.tseries.holiday.SA", "numpy.int32", "pandas.algos.groupsort_indexer", "numpy.array", "pandas.algos.pad_object", "pandas.lib.get_reverse_indexer", "numpy.lexsort", "numpy.zeros", "pandas.util.testing.assert_almost_equal", "numpy.random.randn", "pandas.lib.infer_dtype", "numpy.float64", "pandas.compat.lrange", "numpy.float32", "pandas.lib.maybe_booleans_to_slice", "numpy.bool_", "numpy.argsort", "pandas.algos.left_outer_join", "pandas.isnull", "pandas.Index", "pandas.lib.SeriesBinGrouper", "pandas.lib.maybe_convert_objects", "pandas.algos.arrmap_object", "pandas.compat.zip", "pandas.algos.backfill_object", "pandas.core.series.Series", "pandas.lib.duplicated", "pandas.algos.left_join_indexer_unique_int64", "pandas.Timestamp.max.to_pydatetime", "numpy.repeat", "numpy.int64", "pandas.algos.pad_int64" ] ]
sdjespersen/pyzinc
[ "54baeec6d1b2b36cef98428b3888558004ba43d3" ]
[ "tests/test_zinc_parser.py" ]
[ "# coding: utf-8\nimport io\nimport numpy as np # type: ignore\nimport pandas as pd # type: ignore\nimport pytest # type: ignore\nimport zincio\n\nfrom pandas.api.types import CategoricalDtype # type: ignore\nfrom pathlib import Path\n\n\ndef get_abspath(relpath):\n return Path(__file__).parent / relpath\n\n\nFULL_GRID_FILE = get_abspath(\"full_grid.zinc\")\nSINGLE_SERIES_FILE = get_abspath(\"single_series_grid.zinc\")\nHISREAD_SERIES_FILE = get_abspath(\"hisread_series.zinc\")\nMINIMAL_COLINFO_FILE = get_abspath(\"minimal_colinfo.zinc\")\n\n\ndef assert_grid_equal(a, b):\n assert a.grid_info == b.grid_info\n assert a.column_info == b.column_info\n pd.testing.assert_frame_equal(a.to_pandas(squeeze=False), b.data)\n\n\ndef test_parse_zinc_grid_same_as_read_from_file():\n with open(FULL_GRID_FILE) as f:\n actual = zincio.parse(f.read())\n expected = zincio.read(FULL_GRID_FILE)\n assert_grid_equal(actual, expected)\n\n\ndef test_read_zinc_grid():\n expected_grid_info = dict(\n view=zincio.String(\"chart\"),\n hisStart=zincio.Datetime(\n pd.Timestamp(\"2020-05-18T00:00:00-07:00\"), tz=\"Los_Angeles\"),\n hisEnd=zincio.Datetime(\n pd.Timestamp(\"2020-05-18T01:15:00-07:00\"), tz=\"Los_Angeles\"),\n hisLimit=zincio.Number(10000),\n dis=zincio.String(\"Mon 18-May-2020\"))\n expected_column_info = dict(\n ts=dict(\n disKey=zincio.String('ui::timestamp'),\n tz=zincio.String('Los_Angeles'),\n chartFormat=zincio.String('ka'),\n ),\n v0=dict(\n id=zincio.Ref('p:q01b001:r:0197767d-c51944e4',\n 'Building One VAV1-01 Eff Heat SP'),\n navName=zincio.String('Eff Heat SP'),\n point=zincio.MARKER,\n his=zincio.MARKER,\n siteRef=zincio.Ref(\n 'p:q01b001:r:8fc116f8-72c5320c', 'Building One'),\n equipRef=zincio.Ref('p:q01b001:r:b78a8dcc-828caa1b', None),\n curVal=zincio.Number(65.972, '°F'),\n curStatus=zincio.String('ok'),\n kind=zincio.String('Number'),\n unit=zincio.String('°F'),\n tz=zincio.String('Los_Angeles'),\n sp=zincio.MARKER,\n temp=zincio.MARKER,\n cur=zincio.MARKER,\n haystackPoint=zincio.MARKER,\n air=zincio.MARKER,\n effective=zincio.MARKER,\n heating=zincio.MARKER\n ),\n v1=dict(\n id=zincio.Ref('p:q01b001:r:e69a7401-f4b340ff',\n 'Building One VAV1-01 Eff Occupancy'),\n navName=zincio.String('Eff Occupancy'),\n point=zincio.MARKER,\n his=zincio.MARKER,\n siteRef=zincio.Ref(\n 'p:q01b001:r:8fc116f8-72c5320c', 'Building One'),\n equipRef=zincio.Ref(\n 'p:q01b001:r:b78a8dcc-828caa1b', 'Building One VAV1-01'),\n curVal=zincio.String('Occupied'),\n curStatus=zincio.String('ok'),\n kind=zincio.String('Str'),\n tz=zincio.String('Los_Angeles'),\n sensor=zincio.MARKER,\n cur=zincio.MARKER,\n haystackPoint=zincio.MARKER,\n hisCollectCov=zincio.MARKER,\n enum=zincio.String('Nul,Occupied,Unoccupied,Bypass,Standby'),\n effective=zincio.MARKER,\n occupied=zincio.MARKER,\n ),\n v2=dict(\n id=zincio.Ref('p:q01b001:r:dcfe87d9-cd034388',\n 'Building One VAV1-01 Damper Pos'),\n navName=zincio.String('Damper Pos'),\n point=zincio.MARKER,\n his=zincio.MARKER,\n siteRef=zincio.Ref(\n 'p:q01b001:r:8fc116f8-72c5320c', 'Building One'),\n equipRef=zincio.Ref(\n 'p:q01b001:r:b78a8dcc-828caa1b', 'Building One VAV1-01'),\n curVal=zincio.Number(41.5, '%'),\n curStatus=zincio.String('ok'),\n kind=zincio.String('Number'),\n unit=zincio.String('%'),\n tz=zincio.String('Los_Angeles'),\n sensor=zincio.MARKER,\n cur=zincio.MARKER,\n damper=zincio.MARKER,\n precision=zincio.Number(1.0),\n haystackPoint=zincio.MARKER,\n air=zincio.MARKER\n ),\n v3=dict(\n id=zincio.Ref('p:q01b001:r:8fab195e-58ffca99',\n 'Building One VAV1-01 Occ 
Heat SP Offset'),\n navName=zincio.String('Occ Heat SP Offset'),\n point=zincio.MARKER,\n his=zincio.MARKER,\n siteRef=zincio.Ref(\n 'p:q01b001:r:8fc116f8-72c5320c', 'Building One'),\n equipRef=zincio.Ref(\n 'p:q01b001:r:b78a8dcc-828caa1b', 'Building One VAV1-01'),\n curVal=zincio.Number(-2.394, '°C'),\n curStatus=zincio.String('ok'),\n kind=zincio.String('Number'),\n unit=zincio.String('°C'),\n tz=zincio.String('Los_Angeles'),\n sp=zincio.MARKER,\n temp=zincio.MARKER,\n cur=zincio.MARKER,\n air=zincio.MARKER,\n occ=zincio.MARKER,\n writable=zincio.MARKER,\n writeStatus=zincio.String('unknown'),\n zone=zincio.MARKER,\n hisCollectInterval=zincio.Number(5.0, 'min'),\n heating=zincio.MARKER,\n offset=zincio.MARKER,\n writeLevel=zincio.Number(8.0, None),\n haystackPoint=zincio.MARKER,\n writeVal=zincio.Number(-10.0),\n actions=zincio.String(\n 'ver:\\\\\"3.0\\\\\"\\\\ndis,expr\\\\n\\\\\"Override\\\\\",'\n '\\\\\"pointOverride(\\\\$self, \\\\$val, \\\\$duration)\\\\\"\\\\n'\n '\\\\\"Auto\\\\\",\\\\\"pointAuto(\\\\$self)\\\\\"\\\\n')\n ),\n v4=dict(\n id=zincio.Ref('p:q01b001:r:260ce2bb-2ef5065f',\n 'Building One VAV1-01 Air Flow'),\n navName=zincio.String('Air Flow'),\n point=zincio.MARKER,\n his=zincio.MARKER,\n siteRef=zincio.Ref(\n 'p:q01b001:r:8fc116f8-72c5320c', 'Building One'),\n equipRef=zincio.Ref(\n 'p:q01b001:r:b78a8dcc-828caa1b', 'Building One VAV1-01'),\n curVal=zincio.Number(117.6611, 'cfm'),\n curStatus=zincio.String('ok'),\n kind=zincio.String('Number'),\n unit=zincio.String('cfm'),\n tz=zincio.String('Los_Angeles'),\n sensor=zincio.MARKER,\n cur=zincio.MARKER,\n )\n )\n expected_index = pd.DatetimeIndex(\n [\n pd.to_datetime('2020-05-17T23:47:08-07:00'),\n pd.to_datetime('2020-05-17T23:55:00-07:00'),\n pd.to_datetime('2020-05-18T00:00:00-07:00'),\n pd.to_datetime('2020-05-18T00:05:00-07:00'),\n pd.to_datetime('2020-05-18T01:13:09-07:00'),\n ],\n name='ts')\n expected_dataframe = pd.DataFrame(\n index=expected_index,\n data={\n ('@p:q01b001:r:0197767d-c51944e4 '\n '\"Building One VAV1-01 Eff Heat SP\"'): [\n np.nan, 68.553, 68.554, 69.723, np.nan,\n ],\n ('@p:q01b001:r:e69a7401-f4b340ff '\n '\"Building One VAV1-01 Eff Occupancy\"'): pd.Series(\n ['Occupied', '', '', '', 'Unoccupied'],\n index=expected_index,\n dtype=CategoricalDtype(categories=[\n 'Nul', 'Occupied', 'Unoccupied', 'Bypass', 'Standby'])\n ),\n ('@p:q01b001:r:dcfe87d9-cd034388 '\n '\"Building One VAV1-01 Damper Pos\"'): [np.nan, 3, 7, 18, np.nan],\n ('@p:q01b001:r:8fab195e-58ffca99 '\n '\"Building One VAV1-01 Occ Heat SP Offset\"'): [\n np.nan, -1.984, -2.203, 5.471, np.nan,\n ],\n '@p:q01b001:r:260ce2bb-2ef5065f \"Building One VAV1-01 Air Flow\"': [\n np.nan, 118.65, 62.0, np.nan, np.nan,\n ],\n })\n actual = zincio.read(FULL_GRID_FILE)\n expected = zincio.Grid(\n version=3,\n grid_info=expected_grid_info,\n column_info=expected_column_info,\n data=expected_dataframe)\n assert_grid_equal(actual, expected)\n\n\ndef test_read_zinc_single_series():\n expected_grid_info = dict(\n view=zincio.String(\"chart\"),\n hisStart=zincio.Datetime(\n pd.Timestamp(\"2020-05-18T00:00:00-07:00\"), tz=\"Los_Angeles\"),\n hisEnd=zincio.Datetime(\n pd.Timestamp(\"2020-05-18T01:15:00-07:00\"), tz=\"Los_Angeles\"),\n hisLimit=zincio.Number(10000),\n dis=zincio.String(\"Mon 18-May-2020\"))\n expected_column_info = dict(\n ts=dict(\n disKey=zincio.String('ui::timestamp'),\n tz=zincio.String('Los_Angeles'),\n chartFormat=zincio.String('ka'),\n ),\n v0=dict(\n id=zincio.Ref('p:q01b001:r:0197767d-c51944e4',\n 'Building One VAV1-01 Eff Heat 
SP'),\n navName=zincio.String('Eff Heat SP'),\n point=zincio.MARKER,\n his=zincio.MARKER,\n siteRef=zincio.Ref(\n 'p:q01b001:r:8fc116f8-72c5320c', 'Building One'),\n equipRef=zincio.Ref(\n 'p:q01b001:r:b78a8dcc-828caa1b', 'Building One VAV1-01'),\n curVal=zincio.Number(65.972, '°F'),\n curStatus=zincio.String('ok'),\n kind=zincio.String('Number'),\n unit=zincio.String('°F'),\n tz=zincio.String('Los_Angeles'),\n sp=zincio.MARKER,\n temp=zincio.MARKER,\n cur=zincio.MARKER,\n haystackPoint=zincio.MARKER,\n air=zincio.MARKER,\n effective=zincio.MARKER,\n heating=zincio.MARKER\n ),\n )\n dname = '@p:q01b001:r:0197767d-c51944e4 \"Building One VAV1-01 Eff Heat SP\"'\n expected_data = pd.DataFrame(\n data={dname: [np.nan, 68.553, 68.554, 69.723, np.nan]},\n index=pd.DatetimeIndex(\n [\n pd.to_datetime('2020-05-17T23:47:08-07:00'),\n pd.to_datetime('2020-05-17T23:55:00-07:00'),\n pd.to_datetime('2020-05-18T00:00:00-07:00'),\n pd.to_datetime('2020-05-18T00:05:00-07:00'),\n pd.to_datetime('2020-05-18T01:13:09-07:00'),\n ],\n name='ts',\n ))\n expected = zincio.Grid(\n version=3,\n grid_info=expected_grid_info,\n column_info=expected_column_info,\n data=expected_data)\n actual = zincio.read(SINGLE_SERIES_FILE)\n assert_grid_equal(actual, expected)\n pd.testing.assert_series_equal(actual.to_pandas(), expected.data[dname])\n\n\ndef test_read_zinc_deficient_column_info():\n expected_grid_info = dict(\n id=zincio.Ref(\"p:q01b001:r:20aad139-beff4e8c\",\n \"Building One VAV1-01 DA Temp\"),\n hisStart=zincio.Datetime(\n pd.Timestamp(\"2020-04-01T00:00:00-07:00\"), tz=\"Los_Angeles\"),\n hisEnd=zincio.Datetime(\n pd.Timestamp(\"2020-04-02T00:00:00-07:00\"), tz=\"Los_Angeles\"))\n expected_column_info = dict(ts={}, val={})\n expected_data = pd.DataFrame(\n data={'val': [66.092, 66.002, 65.930]},\n index=pd.DatetimeIndex(\n [\n pd.to_datetime('2020-04-01T00:00:00-07:00'),\n pd.to_datetime('2020-04-01T00:05:00-07:00'),\n pd.to_datetime('2020-04-01T00:10:00-07:00'),\n ],\n name='ts',\n ))\n expected = zincio.Grid(\n version=3,\n grid_info=expected_grid_info,\n column_info=expected_column_info,\n data=expected_data)\n actual = zincio.read(HISREAD_SERIES_FILE)\n assert_grid_equal(actual, expected)\n pd.testing.assert_series_equal(actual.to_pandas(), expected.data['val'])\n\n\ndef test_read_zinc_minimal_colinfo():\n expected_grid_info = dict(hisEnd=zincio.MARKER, hisStart=zincio.MARKER)\n expected_column_info = dict(\n ts=dict(),\n v0=dict(id=zincio.Ref(\"vrt.x02.motion_state\", None)),\n v1=dict(id=zincio.Ref(\"vrt.x02.temperature\", None)),\n v2=dict(id=zincio.Ref(\"vrt.x18.humidity\", None)),\n v3=dict(id=zincio.Ref(\"vrt.x18.illuminance\", None)),\n v4=dict(id=zincio.Ref(\"vrt.x18.motion_count\", None)),\n v5=dict(id=zincio.Ref(\"vrt.x18.motion_state\", None)),\n v6=dict(id=zincio.Ref(\"vrt.x18.temperature\", None)),\n v7=dict(id=zincio.Ref(\"vrt.x19.humidity\", None)),\n v8=dict(id=zincio.Ref(\"vrt.x19.illuminance\", None)),\n v9=dict(id=zincio.Ref(\"vrt.x19.motion_count\", None)))\n expected_data = pd.DataFrame(\n data={\n \"@vrt.x02.motion_state\": [False, None, None],\n \"@vrt.x02.temperature\": [25.5586, np.nan, np.nan],\n \"@vrt.x18.humidity\": [np.nan, 62.3369, np.nan],\n \"@vrt.x18.illuminance\": [np.nan, 927, np.nan],\n \"@vrt.x18.motion_count\": [np.nan, 1, np.nan],\n \"@vrt.x18.motion_state\": [None, True, None],\n \"@vrt.x18.temperature\": [np.nan, 26.1035, np.nan],\n \"@vrt.x19.humidity\": [np.nan, np.nan, 63.5195],\n \"@vrt.x19.illuminance\": [np.nan, np.nan, 945],\n \"@vrt.x19.motion_count\": 
[np.nan, np.nan, 11],\n },\n index=pd.Series(\n data=[\n pd.to_datetime(\"2018-03-21T15:45:00+10:00\"),\n pd.to_datetime(\"2018-03-21T14:30:00+10:00\"),\n pd.to_datetime(\"2018-03-21T14:45:00+10:00\"),\n ],\n name='ts'\n )\n )\n expected = zincio.Grid(\n version=2,\n grid_info=expected_grid_info,\n column_info=expected_column_info,\n data=expected_data)\n actual = zincio.read(MINIMAL_COLINFO_FILE)\n assert_grid_equal(actual, expected)\n\n\ndef test_parse_grid_with_sentinels():\n s = ('ver:\"3.0\" hisEnd:M hisStart:M\\n'\n 'ts,v0 id:@vrt.x02.motion_state,v1 id:@vrt.x03.motion_amount\\n'\n '2018-03-21T15:45:00+10:00 GMT-10,F,INF\\n'\n '2018-03-21T15:50:00+10:00 GMT-10,N,NA\\n'\n '2018-03-21T15:55:00+10:00 GMT-10,T,NaN\\n\\n')\n expected_grid_info = dict(hisEnd=zincio.MARKER, hisStart=zincio.MARKER)\n expected_column_info = dict(\n ts=dict(),\n v0=dict(id=zincio.Ref(\"vrt.x02.motion_state\", None)),\n v1=dict(id=zincio.Ref(\"vrt.x03.motion_amount\", None)),\n )\n expected_data = pd.DataFrame(\n data={\n '@vrt.x02.motion_state': [False, None, True],\n '@vrt.x03.motion_amount': [np.inf, np.nan, np.nan],\n },\n index=pd.Series(\n data=[\n pd.to_datetime('2018-03-21T15:45:00+10:00'),\n pd.to_datetime('2018-03-21T15:50:00+10:00'),\n pd.to_datetime('2018-03-21T15:55:00+10:00'),\n ],\n name='ts',\n ),\n )\n expected = zincio.Grid(\n version=3,\n grid_info=expected_grid_info,\n column_info=expected_column_info,\n data=expected_data)\n actual_from_string = zincio.parse(s)\n assert_grid_equal(actual_from_string, expected)\n actual_from_bytes = zincio.parse(s.encode())\n assert_grid_equal(actual_from_bytes, expected)\n\n\ndef test_read_zinc_malformed_grid():\n grid = 'this is not a legal grid'\n with pytest.raises(zincio.ZincParseException):\n zincio.read(io.StringIO(grid))\n\n\ndef test_read_zinc_error_grid():\n err_grid = (\n 'ver:\"3.0\" errType:\"sys::NullErr\" err '\n 'errTrace:\"sys::NullErr: java.lang.NullPointerException\\n\" '\n 'dis:\"sys::NullErr: java.lang.NullPointerException\"\\n'\n 'empty')\n with pytest.raises(zincio.ZincErrorGridException):\n zincio.read(io.StringIO(err_grid))\n\n\ndef test_read_zinc_stringio_same_as_file():\n expected = zincio.read(FULL_GRID_FILE)\n with open(FULL_GRID_FILE, encoding='utf-8') as f:\n raw = f.read()\n actual = zincio.read(io.StringIO(raw))\n assert_grid_equal(actual, expected)\n\n\ndef test_parse_kitchen_sink_data():\n raw_coord = 'C(37.427539, -122.170244)'\n long_action_str = (\n '\"ver:\\\\\"3.0\\\\\"\\\\ndis,expr\\\\n\\\\\"Override\\\\\",\\\\\"pointOverride(\\\\$self,'\n ' \\\\$val, \\\\$duration)\\\\\"\\\\n\\\\\"Auto\\\\\",\\\\\"pointAuto(\\\\$self)\\\\\"\\\\n\"')\n s = (\n # grid meta\n 'ver:\"3.0\" '\n 'hisStart:2020-05-18T03:00:00-07:00 GMT-8 '\n 'hisEnd:2020-05-18T04:00:00-07:00 GMT-8 '\n 'mod:2020-03-23T23:36:40.343Z\\n'\n # column info\n 'ts,v0 id:@point.location \"LatLng\",'\n 'v1 id:@point.temp unit:\"°F\" link:`http://www.example.com/`,'\n f'v2 id:@point.boolean actions:{long_action_str},'\n 'v3 id:@point.sometimes_inf_nan\\n'\n # rows\n f'2020-05-18T03:00:00-07:00 GMT-8,{raw_coord},65.972°F,T,2.34E-3\\n'\n f'2020-05-18T03:05:00-07:00 GMT-8,{raw_coord},-13.232°F,F,INF\\n'\n f'2020-05-18T03:10:00-07:00 GMT-8,{raw_coord},85.103°F,N,NA\\n'\n f'2020-05-18T03:15:00-07:00 GMT-8,{raw_coord},44.072°F,T,NaN\\n\\n')\n expected_grid_info = dict(\n hisStart=zincio.Datetime(\n pd.Timestamp(\"2020-05-18T03:00:00-07:00\"), tz=\"GMT-8\"),\n hisEnd=zincio.Datetime(\n pd.Timestamp(\"2020-05-18T04:00:00-07:00\"), tz=\"GMT-8\"),\n 
mod=zincio.Datetime(pd.Timestamp('2020-03-23T23:36:40.343Z')),\n )\n expected_column_info = dict(\n ts={},\n v0=dict(id=zincio.Ref('point.location', 'LatLng')),\n v1=dict(\n id=zincio.Ref('point.temp'),\n unit=zincio.String('°F'),\n link=zincio.Uri('http://www.example.com/'),\n ),\n v2=dict(\n id=zincio.Ref('point.boolean'),\n actions=zincio.String(long_action_str.strip('\"')),\n ),\n v3=dict(id=zincio.Ref('point.sometimes_inf_nan'))\n )\n coord = zincio.Coord(37.427539, -122.170244)\n expected_data = pd.DataFrame(\n data={\n '@point.location \"LatLng\"': [coord, coord, coord, coord],\n '@point.temp': [65.972, -13.232, 85.103, 44.072],\n '@point.boolean': [True, False, None, True],\n '@point.sometimes_inf_nan': [\n 0.00234, float(\"inf\"), np.nan, np.nan,\n ],\n },\n index=pd.Series(\n data=[\n pd.to_datetime('2020-05-18T03:00:00-07:00'),\n pd.to_datetime('2020-05-18T03:05:00-07:00'),\n pd.to_datetime('2020-05-18T03:10:00-07:00'),\n pd.to_datetime('2020-05-18T03:15:00-07:00'),\n ],\n name='ts',\n ),\n )\n expected = zincio.Grid(\n version=3,\n grid_info=expected_grid_info,\n column_info=expected_column_info,\n data=expected_data)\n actual = zincio.parse(s)\n assert_grid_equal(actual, expected)\n" ]
[ [ "pandas.to_datetime", "pandas.Timestamp", "pandas.api.types.CategoricalDtype" ] ]
smsharma/gamma-gp
[ "573a9a7ecbf71a1e6c0d20e3d6ef189538c8b4e5" ]
[ "utils/psf_compute.py" ]
[ "###############################################################################\n# psf_compute.py\n###############################################################################\n#\n# The statistics of non-poissonian templates is modified by the non-zero point\n# spread functions associated with real instruments. Here we calculate that\n# correction for an arbitrary user specified PSF.\n#\n###############################################################################\n\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport healpy as hp\nfrom . import pdf_sampler\n\n\ndef psf_corr(nside, num_f_bins, n_psf, n_pts_per_psf, f_trunc, psf_r_func, sample_psf_max, psf_samples):\n # PSF can't extend beyond 180 degrees, so check hasn't been asked for\n assert sample_psf_max <= np.pi, \"PSF on a sphere cannot extend more than 180 degrees\"\n\n # Setup pdf of the psf\n # On a sphere the PSF correction as a function of r is sin(r)*PSF(r)\n radial_pdf = lambda r: np.sin(r) * psf_r_func(r)\n rvals = np.linspace(0, sample_psf_max, psf_samples)\n pofr = radial_pdf(rvals)\n dist = pdf_sampler.PDFSampler(rvals, pofr)\n\n # Create an array of n_psf points to put down psfs\n # Establish an array of n_ps unit vectors\n # By sampling vals from a normal, end up with uniform normed vectors\n xyz = np.random.normal(size=(n_psf, 3))\n xyz_unit = np.divide(xyz, np.linalg.norm(xyz, axis=1)[:, None])\n\n # Convert to array of theta and phi values\n # theta = arccos(z/r), and here r=1. Similar expression for phi\n theta_c = np.arccos(xyz_unit[:, 2])\n phi_c = np.arctan2(xyz_unit[:, 1], xyz_unit[:, 0])\n\n # Now put a point source down at each of these locations\n outlist = []\n for ps_i in range(n_psf):\n # For each point source put down n_pts_per_psf counts\n # Determine where they are placed on the map as determine by the psf\n dr = dist(n_pts_per_psf)\n dangle = np.random.uniform(0, 2 * np.pi, n_pts_per_psf)\n dtheta = dr * np.sin(dangle)\n dphi = dr * np.cos(dangle) / (np.sin(theta_c[ps_i] + dtheta / 2))\n\n # Now combine with position of point source to get the exact location\n theta_base = theta_c[ps_i] + dtheta\n phi_base = phi_c[ps_i] + dphi\n\n # Want 0 <= theta < pi; 0 <= phi < 2pi\n # Carefully map to ensure this is true\n theta_remap_north = np.where(theta_base > np.pi)[0]\n theta_base[theta_remap_north] = 2 * np.pi - theta_base[theta_remap_north]\n theta_remap_south = np.where(theta_base < 0)[0]\n theta_base[theta_remap_south] = -theta_base[theta_remap_south]\n\n phi_base[theta_remap_north] += np.pi\n phi_base[theta_remap_south] += np.pi\n phi_base = np.mod(phi_base, 2 * np.pi)\n\n # As the PSF extends to infinity, if draw a value a long way from the\n # centre can occasionally still have a theta value outside the default\n # range above. For any sensible PSF (much smaller than the size of the\n # sky) this happens rarely. 
As such we just cut these values out.\n good_val = np.where((theta_base <= np.pi) & (theta_base >= 0))[0]\n theta = theta_base[good_val]\n phi = phi_base[good_val]\n\n # Convert these values back to a healpix pixel\n pixel = hp.ang2pix(nside, theta, phi)\n\n # From this information determine the flux fraction per pixel\n mn = np.min(pixel)\n mx = np.max(pixel) + 1\n pixel_hist = np.histogram(pixel, bins=mx - mn, range=(mn, mx), density=1)[0]\n outlist.append(pixel_hist)\n\n f_values = np.concatenate(outlist)\n # f_values is now the full list of flux fractions from all psfs\n # Ignore values which fall below the cutoff f_trunc\n f_values_trunc = f_values[f_values >= f_trunc]\n\n # Rebin into the user defined number of bins\n rho_ary, f_bin_edges = np.histogram(f_values_trunc, bins=num_f_bins, range=(0.0, 1.0))\n\n # Convert to output format\n df = f_bin_edges[1] - f_bin_edges[0]\n f_ary = (f_bin_edges[:-1] + f_bin_edges[1:]) / 2.0\n rho_ary = rho_ary / (df * n_psf)\n rho_ary /= np.sum(df * f_ary * rho_ary)\n df_rho_div_f_ary = df * rho_ary / f_ary\n\n return f_ary, df_rho_div_f_ary\n" ]
[ [ "numpy.concatenate", "numpy.random.normal", "numpy.histogram", "numpy.arccos", "numpy.sin", "numpy.linalg.norm", "numpy.max", "numpy.sum", "numpy.min", "numpy.where", "numpy.random.uniform", "numpy.arctan2", "numpy.cos", "numpy.linspace", "numpy.mod" ] ]
zlin7/experiments_dnn
[ "24a2a1617a75fb74254301abc75682b5f90fce77" ]
[ "scripts/utility/load_cifar.py" ]
[ "from keras.datasets import cifar10\nimport numpy as np\n\ndef load_cifar_n_classes(classes = [1,3,5]): # Default car, cat, dog\n \n (x_train, y_train), (x_test, y_test) = cifar10.load_data() # Load in CIFAR-10 dataset\n \n indices_tr = [idx for idx in range(len(y_train)) if y_train[idx] in classes] # Get indices of certain samples\n indices_te = [idx for idx in range(len(y_test)) if y_test[idx] in classes] # Get indices of certain samples\n \n y_train = y_train[indices_tr]\n y_test = y_test[indices_te]\n \n # Shift class labels to 0,1,2\n for i, y in enumerate(y_train):\n y_train[i] = np.where(classes == y)[0]\n \n for i, y in enumerate(y_test):\n y_test[i] = np.where(classes == y)[0]\n\n \n return (x_train[indices_tr], y_train), (x_test[indices_te], y_test) # Return samples from correct classes" ]
[ [ "numpy.where" ] ]
nyck33/reinforcement-learning
[ "07de5dcab22e893072acf98ee84cc7ceb65ae1cf", "07de5dcab22e893072acf98ee84cc7ceb65ae1cf", "07de5dcab22e893072acf98ee84cc7ceb65ae1cf" ]
[ "2-cartpole/1-dqn/cartpole_only_per.py", "1-grid-world/4-sarsa/environment.py", "1-grid-world/4-sarsa/sarsa_agent.py" ]
[ "import sys\r\nimport gym\r\nimport pylab\r\nimport random\r\nimport numpy as np\r\nfrom SumTree import SumTree\r\nfrom collections import deque\r\nfrom keras.layers import Dense\r\nfrom keras.optimizers import Adam\r\nfrom keras.models import Sequential\r\n\r\nEPISODES = 300\r\n\r\n\r\n# 카트폴 예제에서의 DQN 에이전트\r\nclass DQNAgent:\r\n def __init__(self, state_size, action_size):\r\n self.render = False\r\n self.load_model = False\r\n\r\n # 상태와 행동의 크기 정의\r\n self.state_size = state_size\r\n self.action_size = action_size\r\n\r\n # DQN 하이퍼파라미터\r\n self.discount_factor = 0.99\r\n self.learning_rate = 0.001\r\n self.epsilon = 1.0\r\n self.epsilon_decay = 0.999\r\n self.epsilon_min = 0.01\r\n self.batch_size = 64\r\n self.train_start = 2000\r\n self.memory_size = 2000\r\n\r\n # 리플레이 메모리, 최대 크기 2000\r\n self.memory = Memory(self.memory_size)\r\n\r\n # 모델과 타깃 모델 생성\r\n self.model = self.build_model()\r\n self.target_model = self.build_model()\r\n\r\n # 타깃 모델 초기화\r\n self.update_target_model()\r\n\r\n if self.load_model:\r\n self.model.load_weights(\"./save_model/cartpole_dqn_trained.h5\")\r\n\r\n # 상태가 입력, 큐함수가 출력인 인공신경망 생성\r\n def build_model(self):\r\n model = Sequential()\r\n model.add(Dense(24, input_dim=self.state_size, activation='relu',\r\n kernel_initializer='he_uniform'))\r\n model.add(Dense(24, activation='relu',\r\n kernel_initializer='he_uniform'))\r\n model.add(Dense(self.action_size, activation='linear',\r\n kernel_initializer='he_uniform'))\r\n model.summary()\r\n model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\r\n return model\r\n\r\n # 타깃 모델을 모델의 가중치로 업데이트\r\n def update_target_model(self):\r\n self.target_model.set_weights(self.model.get_weights())\r\n\r\n # 입실론 탐욕 정책으로 행동 선택\r\n def get_action(self, state):\r\n if np.random.rand() <= self.epsilon:\r\n return random.randrange(self.action_size)\r\n else:\r\n q_value = self.model.predict(state)\r\n return np.argmax(q_value[0])\r\n\r\n # 샘플 <s, a, r, s'>을 리플레이 메모리에 저장\r\n def append_sample(self, state, action, reward, next_state, done):\r\n if self.epsilon == 1:\r\n done = True\r\n\r\n # TD-error 를 구해서 같이 메모리에 저장\r\n target = self.model.predict([state])\r\n old_val = target[0][action]\r\n target_val = self.target_model.predict([next_state])\r\n if done:\r\n target[0][action] = reward\r\n else:\r\n target[0][action] = reward + self.discount_factor * (\r\n np.amax(target_val[0]))\r\n error = abs(old_val - target[0][action])\r\n\r\n self.memory.add(error, (state, action, reward, next_state, done))\r\n\r\n # 리플레이 메모리에서 무작위로 추출한 배치로 모델 학습\r\n def train_model(self):\r\n if self.epsilon > self.epsilon_min:\r\n self.epsilon *= self.epsilon_decay\r\n\r\n # 메모리에서 배치 크기만큼 무작위로 샘플 추출\r\n mini_batch = self.memory.sample(self.batch_size)\r\n\r\n errors = np.zeros(self.batch_size)\r\n states = np.zeros((self.batch_size, self.state_size))\r\n next_states = np.zeros((self.batch_size, self.state_size))\r\n actions, rewards, dones = [], [], []\r\n\r\n for i in range(self.batch_size):\r\n states[i] = mini_batch[i][1][0]\r\n actions.append(mini_batch[i][1][1])\r\n rewards.append(mini_batch[i][1][2])\r\n next_states[i] = mini_batch[i][1][3]\r\n dones.append(mini_batch[i][1][4])\r\n\r\n # 현재 상태에 대한 모델의 큐함수\r\n # 다음 상태에 대한 타깃 모델의 큐함수\r\n target = self.model.predict(states)\r\n target_val = self.target_model.predict(next_states)\r\n\r\n # 벨만 최적 방정식을 이용한 업데이트 타깃\r\n for i in range(self.batch_size):\r\n old_val = target[i][actions[i]]\r\n if dones[i]:\r\n target[i][actions[i]] = rewards[i]\r\n else:\r\n target[i][actions[i]] = rewards[i] + 
self.discount_factor * (\r\n np.amax(target_val[i]))\r\n # TD-error를 저장\r\n errors[i] = abs(old_val - target[i][actions[i]])\r\n\r\n # TD-error로 priority 업데이트\r\n for i in range(self.batch_size):\r\n idx = mini_batch[i][0]\r\n self.memory.update(idx, errors[i])\r\n\r\n self.model.fit(states, target, batch_size=self.batch_size,\r\n epochs=1, verbose=0)\r\n\r\n\r\nclass Memory: # stored as ( s, a, r, s_ ) in SumTree\r\n e = 0.01\r\n a = 0.6\r\n\r\n def __init__(self, capacity):\r\n self.tree = SumTree(capacity)\r\n\r\n def _getPriority(self, error):\r\n return (error + self.e) ** self.a\r\n\r\n def add(self, error, sample):\r\n p = self._getPriority(error)\r\n self.tree.add(p, sample)\r\n\r\n def sample(self, n):\r\n batch = []\r\n segment = self.tree.total() / n\r\n\r\n for i in range(n):\r\n a = segment * i\r\n b = segment * (i + 1)\r\n\r\n s = random.uniform(a, b)\r\n (idx, p, data) = self.tree.get(s)\r\n batch.append((idx, data))\r\n\r\n return batch\r\n\r\n def update(self, idx, error):\r\n p = self._getPriority(error)\r\n self.tree.update(idx, p)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # CartPole-v1 환경, 최대 타임스텝 수가 500\r\n env = gym.make('CartPole-v1')\r\n state_size = env.observation_space.shape[0]\r\n action_size = env.action_space.n\r\n\r\n # DQN 에이전트 생성\r\n agent = DQNAgent(state_size, action_size)\r\n\r\n scores, episodes = [], []\r\n\r\n step = 0\r\n for e in range(EPISODES):\r\n done = False\r\n score = 0\r\n # env 초기화\r\n state = env.reset()\r\n state = np.reshape(state, [1, state_size])\r\n\r\n while not done:\r\n if agent.render:\r\n env.render()\r\n step += 1\r\n # 현재 상태로 행동을 선택\r\n action = agent.get_action(state)\r\n # 선택한 행동으로 환경에서 한 타임스텝 진행\r\n next_state, reward, done, info = env.step(action)\r\n next_state = np.reshape(next_state, [1, state_size])\r\n # 에피소드가 중간에 끝나면 -100 보상\r\n r = reward if not done or score+reward == 500 else -10\r\n # 리플레이 메모리에 샘플 <s, a, r, s'> 저장\r\n agent.append_sample(state, action, r, next_state, done)\r\n # 매 타임스텝마다 학습\r\n if step >= agent.train_start:\r\n agent.train_model()\r\n\r\n score += reward\r\n state = next_state\r\n\r\n if done:\r\n # 각 에피소드마다 타깃 모델을 모델의 가중치로 업데이트\r\n agent.update_target_model()\r\n\r\n# score = score if score == 500 else score + 100\r\n # 에피소드마다 학습 결과 출력\r\n scores.append(score)\r\n episodes.append(e)\r\n pylab.plot(episodes, scores, 'b')\r\n pylab.savefig(\"./save_graph/cartpole_dqn.png\")\r\n print(\"episode:\", e, \" score:\", score, \" memory length:\",\r\n step if step <= agent.memory_size else agent.memory_size, \" epsilon:\", agent.epsilon)\r\n\r\n # 이전 10개 에피소드의 점수 평균이 490보다 크면 학습 중단\r\n if np.mean(scores[-min(10, len(scores)):]) > 490:\r\n agent.model.save_weights(\"./save_model/cartpole_dqn.h5\")\r\n sys.exit()\r\n", "import time\r\nimport numpy as np\r\nimport tkinter as tk\r\nfrom PIL import ImageTk, Image\r\n\r\nnp.random.seed(1)\r\nPhotoImage = ImageTk.PhotoImage\r\nUNIT = 100 # pixels\r\nHEIGHT = 5 # grid height\r\nWIDTH = 5 # grid width\r\n\r\n\r\nclass Env(tk.Tk):\r\n def __init__(self):\r\n super(Env, self).__init__()\r\n self.action_space = ['u', 'd', 'l', 'r']\r\n self.n_actions = len(self.action_space)\r\n self.title('SARSA')\r\n self.geometry('{0}x{1}'.format(HEIGHT * UNIT, HEIGHT * UNIT))\r\n self.shapes = self.load_images()\r\n self.canvas = self._build_canvas()\r\n self.texts = []\r\n\r\n def _build_canvas(self):\r\n canvas = tk.Canvas(self, bg='white',\r\n height=HEIGHT * UNIT,\r\n width=WIDTH * UNIT)\r\n # create grids\r\n for c in range(0, WIDTH * UNIT, UNIT): # 0~400 by 80\r\n x0, y0, 
x1, y1 = c, 0, c, HEIGHT * UNIT\r\n canvas.create_line(x0, y0, x1, y1)\r\n for r in range(0, HEIGHT * UNIT, UNIT): # 0~400 by 80\r\n x0, y0, x1, y1 = 0, r, HEIGHT * UNIT, r\r\n canvas.create_line(x0, y0, x1, y1)\r\n\r\n # add img to canvas\r\n self.rectangle = canvas.create_image(50, 50, image=self.shapes[0])\r\n self.triangle1 = canvas.create_image(250, 150, image=self.shapes[1])\r\n self.triangle2 = canvas.create_image(150, 250, image=self.shapes[1])\r\n self.circle = canvas.create_image(250, 250, image=self.shapes[2])\r\n\r\n # pack all\r\n canvas.pack()\r\n\r\n return canvas\r\n\r\n def load_images(self):\r\n rectangle = PhotoImage(\r\n Image.open(\"../img/rectangle.png\").resize((65, 65)))\r\n triangle = PhotoImage(\r\n Image.open(\"../img/triangle.png\").resize((65, 65)))\r\n circle = PhotoImage(\r\n Image.open(\"../img/circle.png\").resize((65, 65)))\r\n\r\n return rectangle, triangle, circle\r\n\r\n def text_value(self, row, col, contents, action, font='Helvetica', size=10,\r\n style='normal', anchor=\"nw\"):\r\n if action == 0:\r\n origin_x, origin_y = 7, 42\r\n elif action == 1:\r\n origin_x, origin_y = 85, 42\r\n elif action == 2:\r\n origin_x, origin_y = 42, 5\r\n else:\r\n origin_x, origin_y = 42, 77\r\n\r\n x, y = origin_y + (UNIT * col), origin_x + (UNIT * row)\r\n font = (font, str(size), style)\r\n text = self.canvas.create_text(x, y, fill=\"black\", text=contents,\r\n font=font, anchor=anchor)\r\n return self.texts.append(text)\r\n\r\n def print_value_all(self, q_table):\r\n for i in self.texts:\r\n self.canvas.delete(i)\r\n self.texts.clear()\r\n for x in range(HEIGHT):\r\n for y in range(WIDTH):\r\n for action in range(0, 4):\r\n state = [x, y]\r\n if str(state) in q_table.keys():\r\n temp = q_table[str(state)][action]\r\n self.text_value(y, x, round(temp, 2), action)\r\n\r\n def coords_to_state(self, coords):\r\n x = int((coords[0] - 50) / 100)\r\n y = int((coords[1] - 50) / 100)\r\n return [x, y]\r\n\r\n def reset(self):\r\n self.update()\r\n time.sleep(0.5)\r\n x, y = self.canvas.coords(self.rectangle)\r\n self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)\r\n self.render()\r\n # return observation\r\n return self.coords_to_state(self.canvas.coords(self.rectangle))\r\n\r\n def step(self, action):\r\n state = self.canvas.coords(self.rectangle)\r\n base_action = np.array([0, 0])\r\n self.render()\r\n\r\n if action == 0: # up\r\n if state[1] > UNIT:\r\n base_action[1] -= UNIT\r\n elif action == 1: # down\r\n if state[1] < (HEIGHT - 1) * UNIT:\r\n base_action[1] += UNIT\r\n elif action == 2: # left\r\n if state[0] > UNIT:\r\n base_action[0] -= UNIT\r\n elif action == 3: # right\r\n if state[0] < (WIDTH - 1) * UNIT:\r\n base_action[0] += UNIT\r\n\r\n # move agent\r\n self.canvas.move(self.rectangle, base_action[0], base_action[1])\r\n # move rectangle to top level of canvas\r\n self.canvas.tag_raise(self.rectangle)\r\n next_state = self.canvas.coords(self.rectangle)\r\n\r\n # reward function\r\n if next_state == self.canvas.coords(self.circle):\r\n reward = 100\r\n done = True\r\n elif next_state in [self.canvas.coords(self.triangle1),\r\n self.canvas.coords(self.triangle2)]:\r\n reward = -100\r\n done = True\r\n else:\r\n reward = 0\r\n done = False\r\n\r\n next_state = self.coords_to_state(next_state)\r\n\r\n return next_state, reward, done\r\n\r\n def render(self):\r\n time.sleep(0.03)\r\n self.update()\r\n", "import numpy as np\r\nimport random\r\nfrom collections import defaultdict\r\nfrom environment import Env\r\n\r\n\r\n# SARSA agent learns every time 
step from the sample <s, a, r, s', a'>\r\nclass SARSAgent:\r\n def __init__(self, actions):\r\n self.actions = actions\r\n self.learning_rate = 0.01\r\n self.discount_factor = 0.9\r\n self.epsilon = 0.1\r\n self.q_table = defaultdict(lambda: [0.0, 0.0, 0.0, 0.0])\r\n\r\n # with sample <s, a, r, s', a'>, learns new q function\r\n def learn(self, state, action, reward, next_state, next_action):\r\n current_q = self.q_table[state][action]\r\n next_state_q = self.q_table[next_state][next_action]\r\n new_q = (current_q + self.learning_rate *\r\n (reward + self.discount_factor * next_state_q - current_q))\r\n self.q_table[state][action] = new_q\r\n\r\n # get action for the state according to the q function table\r\n # agent pick action of epsilon-greedy policy\r\n def get_action(self, state):\r\n if np.random.rand() < self.epsilon:\r\n # take random action\r\n action = np.random.choice(self.actions)\r\n else:\r\n # take action according to the q function table\r\n state_action = self.q_table[state]\r\n action = self.arg_max(state_action)\r\n return action\r\n\r\n @staticmethod\r\n def arg_max(state_action):\r\n max_index_list = []\r\n max_value = state_action[0]\r\n for index, value in enumerate(state_action):\r\n if value > max_value:\r\n max_index_list.clear()\r\n max_value = value\r\n max_index_list.append(index)\r\n elif value == max_value:\r\n max_index_list.append(index)\r\n return random.choice(max_index_list)\r\n\r\nif __name__ == \"__main__\":\r\n env = Env()\r\n agent = SARSAgent(actions=list(range(env.n_actions)))\r\n\r\n for episode in range(1000):\r\n # reset environment and initialize state\r\n\r\n state = env.reset()\r\n # get action of state from agent\r\n action = agent.get_action(str(state))\r\n\r\n while True:\r\n env.render()\r\n\r\n # take action and proceed one step in the environment\r\n next_state, reward, done = env.step(action)\r\n next_action = agent.get_action(str(next_state))\r\n\r\n # with sample <s,a,r,s',a'>, agent learns new q function\r\n agent.learn(str(state), action, reward, str(next_state), next_action)\r\n\r\n state = next_state\r\n action = next_action\r\n\r\n # print q function of all states at screen\r\n env.print_value_all(agent.q_table)\r\n\r\n # if episode ends, then break\r\n if done:\r\n break\r\n\r\n" ]
[ [ "numpy.random.rand", "numpy.reshape", "numpy.zeros", "numpy.argmax", "numpy.amax" ], [ "numpy.random.seed", "numpy.array" ], [ "numpy.random.rand", "numpy.random.choice" ] ]
pjh4993/AdelaiDet
[ "8b622b185eb66205a2341cd9fbef94a42019148b" ]
[ "adet/modeling/fcos/fcos.py" ]
[ "import math\nfrom typing import List, Dict\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom detectron2.layers import ShapeSpec, NaiveSyncBatchNorm\nfrom detectron2.modeling.proposal_generator.build import PROPOSAL_GENERATOR_REGISTRY\n\nfrom adet.layers import DFConv2d, NaiveGroupNorm\nfrom adet.utils.comm import compute_locations\nfrom .fcos_outputs import FCOSOutputs\n\n\n__all__ = [\"FCOS\"]\n\nINF = 100000000\n\n\nclass Scale(nn.Module):\n def __init__(self, init_value=1.0):\n super(Scale, self).__init__()\n self.scale = nn.Parameter(torch.FloatTensor([init_value]))\n\n def forward(self, input):\n return input * self.scale\n\n\nclass ModuleListDial(nn.ModuleList):\n def __init__(self, modules=None):\n super(ModuleListDial, self).__init__(modules)\n self.cur_position = 0\n\n def forward(self, x):\n result = self[self.cur_position](x)\n self.cur_position += 1\n if self.cur_position >= len(self):\n self.cur_position = 0\n return result\n\n\n@PROPOSAL_GENERATOR_REGISTRY.register()\nclass FCOS(nn.Module):\n \"\"\"\n Implement FCOS (https://arxiv.org/abs/1904.01355).\n \"\"\"\n def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):\n super().__init__()\n self.in_features = cfg.MODEL.FCOS.IN_FEATURES\n self.fpn_strides = cfg.MODEL.FCOS.FPN_STRIDES\n self.yield_proposal = cfg.MODEL.FCOS.YIELD_PROPOSAL\n\n self.fcos_head = FCOSHead(cfg, [input_shape[f] for f in self.in_features])\n self.in_channels_to_top_module = self.fcos_head.in_channels_to_top_module\n\n self.fcos_outputs = FCOSOutputs(cfg)\n\n def forward_head(self, features, top_module=None):\n features = [features[f] for f in self.in_features]\n pred_class_logits, pred_deltas, pred_centerness, top_feats, bbox_towers, identity = self.fcos_head(\n features, top_module, self.yield_proposal)\n return pred_class_logits, pred_deltas, pred_centerness, top_feats, bbox_towers, identity\n\n def forward(self, images, features, gt_instances=None, top_module=None):\n \"\"\"\n Arguments:\n images (list[Tensor] or ImageList): images to be processed\n targets (list[BoxList]): ground-truth boxes present in the image (optional)\n\n Returns:\n result (list[BoxList] or dict[Tensor]): the output from the model.\n During training, it returns a dict[Tensor] which contains the losses.\n During testing, it returns list[BoxList] contains additional fields\n like `scores`, `labels` and `mask` (for Mask R-CNN models).\n\n \"\"\"\n features = [features[f] for f in self.in_features]\n locations = self.compute_locations(features)\n logits_pred, reg_pred, ctrness_pred, identity, top_feats, bbox_towers = self.fcos_head(\n features, top_module, self.yield_proposal\n )\n\n results = {}\n if self.yield_proposal:\n results[\"features\"] = {\n f: b for f, b in zip(self.in_features, bbox_towers)\n }\n\n if self.training:\n results, losses = self.fcos_outputs.losses(\n logits_pred, reg_pred, ctrness_pred, identity,\n locations, gt_instances, top_feats,\n )\n \n if self.yield_proposal:\n with torch.no_grad():\n results[\"proposals\"] = self.fcos_outputs.predict_proposals(\n logits_pred, reg_pred, ctrness_pred,\n locations, images.image_sizes, top_feats\n )\n return results, losses\n else:\n results = self.fcos_outputs.predict_proposals(\n logits_pred, reg_pred, ctrness_pred,\n locations, images.image_sizes, top_feats\n )\n\n return results, {}\n\n def compute_locations(self, features):\n locations = []\n for level, feature in enumerate(features):\n h, w = feature.size()[-2:]\n locations_per_level = compute_locations(\n h, w, 
self.fpn_strides[level],\n feature.device\n )\n locations.append(locations_per_level)\n return locations\n\n\nclass FCOSHead(nn.Module):\n def __init__(self, cfg, input_shape: List[ShapeSpec]):\n \"\"\"\n Arguments:\n in_channels (int): number of channels of the input feature\n \"\"\"\n super().__init__()\n # TODO: Implement the sigmoid version first.\n self.num_classes = cfg.MODEL.FCOS.NUM_CLASSES\n self.fpn_strides = cfg.MODEL.FCOS.FPN_STRIDES\n self.ctrness_on_bbox = cfg.MODEL.FCOS.CTRNESS_ON_BBOX\n head_configs = {\"cls\": (cfg.MODEL.FCOS.NUM_CLS_CONVS,\n cfg.MODEL.FCOS.USE_DEFORMABLE),\n \"bbox\": (cfg.MODEL.FCOS.NUM_BOX_CONVS,\n cfg.MODEL.FCOS.USE_DEFORMABLE),\n \"share\": (cfg.MODEL.FCOS.NUM_SHARE_CONVS,\n False),\n \"identity\": (cfg.MODEL.FCOS.NUM_ID_CONVS,\n False),\n }\n norm = None if cfg.MODEL.FCOS.NORM == \"none\" else cfg.MODEL.FCOS.NORM\n self.num_levels = len(input_shape)\n self.id_dim = cfg.MODEL.FCOS.ID_DIM\n\n in_channels = [s.channels for s in input_shape]\n assert len(set(in_channels)) == 1, \"Each level must have the same channel!\"\n in_channels = in_channels[0]\n\n self.in_channels_to_top_module = in_channels\n\n for head in head_configs:\n tower = []\n num_convs, use_deformable = head_configs[head]\n for i in range(num_convs):\n if use_deformable and i == num_convs - 1:\n conv_func = DFConv2d\n else:\n conv_func = nn.Conv2d\n tower.append(conv_func(\n in_channels, in_channels,\n kernel_size=3, stride=1,\n padding=1, bias=True\n ))\n if norm == \"GN\":\n tower.append(nn.GroupNorm(32, in_channels))\n elif norm == \"NaiveGN\":\n tower.append(NaiveGroupNorm(32, in_channels))\n elif norm == \"BN\":\n tower.append(ModuleListDial([\n nn.BatchNorm2d(in_channels) for _ in range(self.num_levels)\n ]))\n elif norm == \"SyncBN\":\n tower.append(ModuleListDial([\n NaiveSyncBatchNorm(in_channels) for _ in range(self.num_levels)\n ]))\n tower.append(nn.ReLU())\n self.add_module('{}_tower'.format(head),\n nn.Sequential(*tower))\n\n self.cls_logits = nn.Conv2d(\n in_channels, self.num_classes,\n kernel_size=3, stride=1,\n padding=1\n )\n self.bbox_pred = nn.Conv2d(\n in_channels, 4, kernel_size=3,\n stride=1, padding=1\n )\n self.ctrness = nn.Conv2d(\n in_channels, 1, kernel_size=3,\n stride=1, padding=1\n )\n self.identity = nn.Conv2d(\n in_channels, self.id_dim, kernel_size=3,\n stride=1, padding=1\n )\n\n if cfg.MODEL.FCOS.USE_SCALE:\n self.scales = nn.ModuleList([Scale(init_value=1.0) for _ in range(self.num_levels)])\n else:\n self.scales = None\n\n for modules in [\n self.cls_tower, self.bbox_tower,\n self.share_tower, self.identity_tower,\n self.cls_logits, self.bbox_pred, self.ctrness, self.identity,\n ]:\n for l in modules.modules():\n if isinstance(l, nn.Conv2d):\n torch.nn.init.normal_(l.weight, std=0.01)\n torch.nn.init.constant_(l.bias, 0)\n\n # initialize the bias for focal loss\n prior_prob = cfg.MODEL.FCOS.PRIOR_PROB\n bias_value = -math.log((1 - prior_prob) / prior_prob)\n torch.nn.init.constant_(self.cls_logits.bias, bias_value)\n # initialize the bias for centerness\n torch.nn.init.constant_(self.ctrness.bias, bias_value)\n\n def forward(self, x, top_module=None, yield_bbox_towers=False):\n logits = []\n bbox_reg = []\n ctrness = []\n identity = []\n top_feats = []\n bbox_towers = []\n for l, feature in enumerate(x):\n feature = self.share_tower(feature)\n cls_tower = self.cls_tower(feature)\n bbox_tower = self.bbox_tower(feature)\n identity_tower = self.identity_tower(feature)\n if yield_bbox_towers:\n bbox_towers.append(bbox_tower)\n\n 
logits.append(self.cls_logits(cls_tower))\n identity.append(self.identity(identity_tower).sigmoid())\n\n if self.ctrness_on_bbox:\n ctrness.append(self.ctrness(bbox_tower))\n else:\n ctrness.append(self.ctrness(cls_tower))\n\n reg = self.bbox_pred(bbox_tower)\n if self.scales is not None:\n reg = self.scales[l](reg)\n # Note that we use relu, as in the improved FCOS, instead of exp.\n bbox_reg.append(F.relu(reg))\n if top_module is not None:\n top_feats.append(top_module(bbox_tower))\n \n return logits, bbox_reg, ctrness, identity, top_feats, bbox_towers, " ]
[ [ "torch.nn.init.constant_", "torch.nn.Sequential", "torch.FloatTensor", "torch.no_grad", "torch.nn.BatchNorm2d", "torch.nn.GroupNorm", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.init.normal_", "torch.nn.functional.relu" ] ]
WaveBlocks/WaveBlocks
[ "2af3730dcf27e54006ec602e696b4d4df25459d8", "2af3730dcf27e54006ec602e696b4d4df25459d8" ]
[ "src/scripts_spawn_na/AposterioriSpawningNAKF.py", "src/scripts_spawn_na/PlotInnerproducts.py" ]
[ "\"\"\"The WaveBlocks Project\n\nScript to spawn new wavepackets aposteriori to an already completed simulation.\nThis can be used to evaluate spawning errors and test criteria for finding the\nbest spawning time.\n\n@author: R. Bourquin\n@copyright: Copyright (C) 2011 R. Bourquin\n@license: Modified BSD License\n\"\"\"\n\nimport sys\n\nimport numpy as np\nfrom scipy import linalg as spla\n\nfrom WaveBlocks import ParameterLoader\nfrom WaveBlocks import ParameterProvider\nfrom WaveBlocks import IOManager\nfrom WaveBlocks import PotentialFactory\nfrom WaveBlocks import HagedornWavepacket\nfrom WaveBlocks import InhomogeneousQuadrature\n\nfrom NonAdiabaticSpawnerKF import NonAdiabaticSpawnerKF\n\n\ndef aposteriori_spawning(fin, fout, pin, pout, save_canonical=False):\n \"\"\"\n :param f: An ``IOManager`` instance providing the simulation data.\n :param datablock: The data block where the results are.\n \"\"\"\n # Number of time steps we saved\n timesteps = fin.load_wavepacket_timegrid()\n nrtimesteps = timesteps.shape[0]\n\n params = fin.load_wavepacket_parameters()\n coeffs = fin.load_wavepacket_coefficients()\n\n # A data transformation needed by API specification\n coeffs = [ [ coeffs[i,j,:] for j in xrange(pin[\"ncomponents\"]) ] for i in xrange(nrtimesteps) ]\n\n # The potential\n Potential = PotentialFactory().create_potential(pin)\n\n # Initialize a mother Hagedorn wavepacket with the data from another simulation\n HAWP = HagedornWavepacket(pin)\n HAWP.set_quadrature(None)\n\n # Initialize an empty wavepacket for spawning\n SWP = HagedornWavepacket(pout)\n SWP.set_quadrature(None)\n\n # Initialize a Spawner\n NAS = NonAdiabaticSpawnerKF(pout)\n\n # Try spawning for these components, if none is given, try it for all.\n if not \"spawn_components\" in parametersout:\n components = range(pin[\"ncomponents\"])\n else:\n components = parametersout[\"spawn_components\"]\n\n # Iterate over all timesteps and spawn\n for i, step in enumerate(timesteps):\n print(\" Try spawning at timestep \"+str(step))\n\n # Configure the wave packet and project to the eigenbasis.\n HAWP.set_parameters(params[i])\n HAWP.set_coefficients(coeffs[i])\n\n # Project to the eigenbasis as the parameter estimation\n # has to happen there because of coupling.\n T = HAWP.clone()\n T.project_to_eigen(Potential)\n\n # Try spawning a new packet for each component\n estps = [ NAS.estimate_parameters(T, component=acomp) for acomp in components ]\n\n # The quadrature\n quadrature = InhomogeneousQuadrature()\n\n # Quadrature, assume same quadrature order for both packets\n # Assure the \"right\" quadrature is choosen if mother and child have\n # different basis sizes\n if max(HAWP.get_basis_size()) > max(SWP.get_basis_size()):\n quadrature.set_qr(HAWP.get_quadrature().get_qr())\n else:\n quadrature.set_qr(SWP.get_quadrature().get_qr())\n\n for index, ps in enumerate(estps):\n if ps is not None:\n # One choice of the sign\n U = SWP.clone()\n U.set_parameters(ps)\n # Project the coefficients to the spawned packet\n tmp = T.clone()\n NAS.project_coefficients(tmp, U, component=components[index])\n\n # Other choice of the sign\n V = SWP.clone()\n # Transform parameters\n psm = list(ps)\n B = ps[0]\n Bm = -np.real(B)+1.0j*np.imag(B)\n psm[0] = Bm\n V.set_parameters(psm)\n # Project the coefficients to the spawned packet\n tmp = T.clone()\n NAS.project_coefficients(tmp, V, component=components[index])\n\n # Compute some inner products to finally determine which parameter set we use\n ou = abs(quadrature.quadrature(T,U, 
component=components[index]))\n ov = abs(quadrature.quadrature(T,V, component=components[index]))\n\n # Choose the packet which maximizes the inner product. This is the main point!\n if ou >= ov:\n U = U\n else:\n U = V\n\n # Finally do the spawning, this is essentially to get the remainder T right\n # The packet U is already ok by now.\n NAS.project_coefficients(T, U, component=components[index])\n\n # Transform back\n if save_canonical is True:\n T.project_to_canonical(Potential)\n U.project_to_canonical(Potential)\n\n # Save the mother packet rest\n fout.save_wavepacket_parameters(T.get_parameters(), timestep=step, blockid=2*index)\n fout.save_wavepacket_coefficients(T.get_coefficients(), timestep=step, blockid=2*index)\n\n # Save the spawned packet\n fout.save_wavepacket_parameters(U.get_parameters(), timestep=step, blockid=2*index+1)\n fout.save_wavepacket_coefficients(U.get_coefficients(), timestep=step, blockid=2*index+1)\n\n\n\n\nif __name__ == \"__main__\":\n # Input data manager\n iomin = IOManager()\n\n # Read file with simulation data\n try:\n iomin.open_file(filename=sys.argv[1])\n except IndexError:\n iomin.open_file()\n\n # Read a configuration file with the spawn parameters\n try:\n parametersspawn = ParameterLoader().load_from_file(sys.argv[2])\n except IndexError:\n raise IOError(\"No spawn configuration given!\")\n\n parametersin = iomin.load_parameters()\n\n # Check if we can start a spawning simulation\n if parametersin[\"algorithm\"] != \"hagedorn\":\n iomin.finalize()\n raise ValueError(\"Unknown propagator algorithm.\")\n\n # Parameters for spawning simulation\n parametersout = ParameterProvider()\n\n # Transfer the simulation parameters\n parametersout.set_parameters(parametersin)\n\n # And add spawning related configurations variables\n parametersout.update_parameters(parametersspawn)\n\n # How much time slots do we need\n tm = parametersout.get_timemanager()\n slots = tm.compute_number_saves()\n\n # Second IOM for output data of the spawning simulation\n iomout = IOManager()\n iomout.create_file(parametersout, filename=\"simulation_results_spawn.hdf5\")\n\n # Some data in the global data block\n iomout.add_grid(parametersout, blockid=\"global\")\n iomout.save_grid(iomin.load_grid(blockid=\"global\"), blockid=\"global\")\n\n # Allocate all the data blocks\n for i in xrange(len(parametersout[\"spawn_components\"])):\n gid = iomout.create_group()\n bid1 = iomout.create_block(groupid=gid)\n bid2 = iomout.create_block(groupid=gid)\n # Block for remainder / mother after spawning\n iomout.add_wavepacket(parametersin, blockid=bid1)\n # Block for spawned packet\n iomout.add_wavepacket(parametersout, blockid=bid2)\n\n # Really do the aposteriori spawning simulation\n aposteriori_spawning(iomin, iomout, parametersin, parametersout)\n\n # Close the inpout/output files\n iomin.finalize()\n iomout.finalize()\n", "\"\"\"The WaveBlocks Project\n\nPlot some interesting values of the original and estimated\nparameters sets Pi_m=(P,Q,S,p,q) and Pi_s=(B,A,S,b,a).\nPlot the inner products of spawned and original packets.\n\n@author: R. Bourquin\n@copyright: Copyright (C) 2011 R. 
Bourquin\n@license: Modified BSD License\n\"\"\"\n\nimport sys\nfrom numpy import real, imag, abs, angle\nfrom matplotlib.pyplot import *\n\nfrom WaveBlocks import ComplexMath\nfrom WaveBlocks import IOManager\nfrom WaveBlocks import TrapezoidalQR\nfrom WaveBlocks import HagedornWavepacket\nfrom WaveBlocks import InhomogeneousQuadrature\n\nimport GraphicsDefaults as GD\n\n\ndef read_data_spawn(fo, fs):\n parameters_fo = fo.load_parameters()\n parameters_fs = fs.load_parameters()\n\n timegrids = []\n AllPA = []\n AllC = []\n\n\n timegrids.append(parameters_fo[\"dt\"] * fo.load_wavepacket_timegrid(blockid=0))\n\n Pi = fo.load_wavepacket_parameters(blockid=0)\n Phist = Pi[:,0]\n Qhist = Pi[:,1]\n Shist = Pi[:,2]\n phist = Pi[:,3]\n qhist = Pi[:,4]\n AllPA.append([Phist, Qhist, Shist, phist, qhist])\n\n Ci = fo.load_wavepacket_coefficients(blockid=0)\n AllC.append(Ci)\n\n\n timegrids.append(parameters_fs[\"dt\"] * fs.load_wavepacket_timegrid(blockid=1))\n\n Pi = fs.load_wavepacket_parameters(blockid=1)\n Phist = Pi[:,0]\n Qhist = Pi[:,1]\n Shist = Pi[:,2]\n phist = Pi[:,3]\n qhist = Pi[:,4]\n AllPA.append([Phist, Qhist, Shist, phist, qhist])\n\n Ci = fs.load_wavepacket_coefficients(blockid=1)\n AllC.append(Ci)\n\n return parameters_fo, parameters_fs, timegrids, AllPA, AllC\n\n\ndef compute(parameters_fo, parameters_fs, timegrids, AllPA, AllC):\n # Grid of mother and first spawned packet\n grid_m = timegrids[0]\n grid_s = timegrids[1]\n\n # Parameters of the original packet\n P, Q, S, p, q = AllPA[0]\n\n # Parameter of the spawned packet, first try\n B, A, S, b, a = AllPA[1]\n\n # Parameter of the spawned packet, second try\n A2, S2, b2, a2 = A, S, b, a\n B2 = -real(B)+1.0j*imag(B)\n\n C0 = AllC[0]\n C1 = AllC[1]\n\n # Construct the packets from the data\n OWP = HagedornWavepacket(parameters_fo)\n OWP.set_quadrature(None)\n\n S1WP = HagedornWavepacket(parameters_fs)\n S1WP.set_quadrature(None)\n\n S2WP = HagedornWavepacket(parameters_fs)\n S2WP.set_quadrature(None)\n\n nrtimesteps = grid_m.shape[0]\n\n # The quadrature\n quadrature = InhomogeneousQuadrature()\n\n # Quadrature, assume same quadrature order for both packets\n # Assure the \"right\" quadrature is choosen if OWP and S*WP have\n # different basis sizes\n if max(OWP.get_basis_size()) > max(S1WP.get_basis_size()):\n quadrature.set_qr(OWP.get_quadrature().get_qr())\n else:\n quadrature.set_qr(S1WP.get_quadrature().get_qr())\n\n ip_oo = []\n ip_os1 = []\n ip_os2 = []\n ip_s1s1 = []\n ip_s2s2 = []\n\n # Inner products\n for step in xrange(nrtimesteps):\n print(\"Timestep \"+str(step))\n\n # Put the data from the current timestep into the packets\n OWP.set_parameters((P[step], Q[step], S[step], p[step], q[step]))\n OWP.set_coefficients(C0[step,...])\n\n S1WP.set_parameters((B[step], A[step], S[step], b[step], a[step]))\n S1WP.set_coefficients(C1[step,...])\n\n S2WP.set_parameters((B2[step], A2[step], S2[step], b2[step], a2[step]))\n S2WP.set_coefficients(C1[step,...])\n\n # Compute the inner products\n ip_oo.append(quadrature.quadrature(OWP, OWP, summed=True))\n ip_os1.append(quadrature.quadrature(OWP, S1WP, summed=True))\n ip_os2.append(quadrature.quadrature(OWP, S2WP, summed=True))\n ip_s1s1.append(quadrature.quadrature(S1WP, S1WP, summed=True))\n ip_s2s2.append(quadrature.quadrature(S2WP, S2WP, summed=True))\n\n # Plot\n figure()\n plot(grid_m, abs(ip_oo), label=r\"$\\langle O|O\\rangle $\")\n plot(grid_m, abs(ip_os1), \"-*\", label=r\"$\\langle O|S1\\rangle $\")\n plot(grid_m, abs(ip_os2), \"-\", label=r\"$\\langle O|S2\\rangle $\")\n 
plot(grid_m, abs(ip_s1s1), label=r\"$\\langle S1|S1\\rangle $\")\n plot(grid_m, abs(ip_s2s2), label=r\"$\\langle S2|S2\\rangle $\")\n legend()\n grid(True)\n savefig(\"inner_products\"+GD.output_format)\n close()\n\n\n\n\nif __name__ == \"__main__\":\n iom_s = IOManager()\n iom_o = IOManager()\n\n # NOTE\n #\n # first cmd-line data file is spawning data\n # second cmd-line data file is reference data\n\n # Read file with new simulation data\n try:\n iom_s.open_file(filename=sys.argv[1])\n except IndexError:\n iom_s.open_file()\n\n # Read file with original reference simulation data\n try:\n iom_o.open_file(filename=sys.argv[2])\n except IndexError:\n iom_o.open_file()\n\n compute(*read_data_spawn(iom_o, iom_s))\n\n iom_s.finalize()\n iom_o.finalize()\n" ]
[ [ "numpy.imag", "numpy.real" ], [ "numpy.imag", "numpy.real", "numpy.abs" ] ]
mtn/ml
[ "2cd2c447c15baa41e9626fa453c2fdc872e73cd6" ]
[ "robotics/ekf_slam_and_pf_localization/code/ekf-slam/Laser.py" ]
[ "import numpy as np\nfrom Gridmap import Gridmap\n\n\nclass Laser(object):\n # Construct an Laser instance with the following set of variables,\n # which are described in Section 6.3.1 of Probabilistic Robotics\n # numBeams: Number of beams that comprise the scan\n def __init__(self, numBeams=41):\n self.pHit = 0.9800\n self.pShort = 0.01\n self.pMax = 0.005\n self.pRand = 0.005\n self.sigmaHit = 0.02\n self.lambdaShort = 1\n self.zMax = 20\n self.zMaxEps = 0.02\n self.Angles = np.linspace(-np.pi, np.pi, numBeams) # array of angles\n\n # The following computes the likelihood of a given LIDAR scan from\n # a given pose in the provided map according to the algorithm given\n # in Table 6.1 of Probabilistic Robotics\n #\n # Ranges: An array of ranges (the angles are defined by self.Angles)\n # x, y, thta: The robot's position (x,y) and heading\n # gridmap: An instance of the Gridmap class that specifies\n # an occupancy grid representation of the map\n # where 1: occupied and 0: free\n #\n # Returns:\n # likelihood: Scan likelihood\n def scanProbability(self, z, x, gridmap):\n\n # Your code goes here\n # Implement the algorithm given in Table 6.1\n # You are provided with an implementation (albeit slow) of ray tracing below\n\n print(\"Please add code\")\n\n # Function to convert range and bearing to (x,y) in LIDAR frame\n # range: 1xn array of range measurements\n # bearing: 1xn array of bearings\n #\n # Returns:\n # XY: 2xn array, where each column is an (x,y) pair\n def getXY(self, range, bearing):\n\n CosSin = np.vstack((np.cos(bearing[:]), np.sin(bearing[:])))\n XY = np.tile(range, (2, 1)) * CosSin\n\n return XY\n\n # An implementation of ray tracing\n # (xr, yr, thetar): The robot's pose\n # lAngle: The LIDAR angle (in the LIDAR reference frame)\n # gridmap: An instance of the Gridmap class that specifies\n # an occupancy grid representation of the map\n # where 1: occupied and 0: free\n #\n # Returns:\n # d: Range\n # coords: Array of (x,y) coordinates\n def rayTracing(self, xr, yr, thetar, lAngle, gridmap):\n\n angle = thetar + lAngle\n x0 = xr / gridmap.xres\n y0 = yr / gridmap.yres\n\n (m, n) = gridmap.getShape()\n if gridmap.inCollision(int(np.floor(x0)), int(np.floor(y0)), True):\n d = 0\n coords = np.array([[x0, y0]]).transpose()\n return (d, coords)\n\n if x0 == np.floor(x0):\n x0 = x0 + 0.001\n\n if y0 == np.floor(y0):\n y0 = y0 + 0.001\n\n eps = 0.0001\n\n # Intersection with horizontal lines\n x = x0\n y = y0\n found = False\n\n if np.mod(angle, np.pi) != np.pi / 2:\n tanangle = np.tan(angle)\n if np.cos(angle) >= 0:\n while (x <= (n - 1)) and (found == False):\n x = np.floor(x + 1)\n y = y0 + tanangle * (x - x0)\n\n if (y > ((m - 1))) or (y < 0):\n break\n\n if (\n gridmap.inCollision(\n int(np.floor(x + eps)), int(np.floor(y)), True\n )\n == 1\n ):\n xfound_hor = x\n yfound_hor = y\n found = True\n else:\n while (x >= 1) and (found == False):\n x = int(np.ceil(x - 1))\n y = y0 + tanangle * (x - x0)\n\n if (y > ((m - 1))) or (y < 0):\n break\n\n if gridmap.inCollision(\n int(np.floor(x - eps)), int(np.floor(y)), True\n ):\n xfound_hor = x\n yfound_hor = y\n found = True\n\n found_hor = found\n\n # Intersection with vertical lines\n x = x0\n y = y0\n found = False\n\n if np.mod(angle, np.pi) != 0:\n cotangle = 1 / np.tan(angle)\n if np.sin(angle) >= 0:\n while (y <= (m - 1)) and (found == False):\n y = np.floor(y + 1)\n x = x0 + cotangle * (y - y0)\n\n if (x > ((n - 1))) or (x < 0):\n break\n\n if gridmap.inCollision(\n int(np.floor(x)), int(np.floor(y + eps)), True\n ):\n 
xfound_ver = x\n yfound_ver = y\n found = True\n else:\n while (y >= 1) and (found == False):\n y = int(np.floor(y - 1))\n x = x0 + cotangle * (y - y0)\n\n if (x > ((n - 1))) or (x < 0):\n break\n\n if gridmap.inCollision(\n int(np.floor(x)), int(np.floor(y - eps)), True\n ):\n xfound_ver = x\n yfound_ver = y\n found = True\n\n found_ver = found\n\n if (found_hor == False) and (found_ver == False):\n print(\"rayTracing: Error finding return\")\n\n # Check which one was first\n if (found_ver == True) and (found_hor == False):\n d_ver = np.sqrt(np.square(xfound_ver - x0) + np.square(yfound_ver - y0))\n d = d_ver\n coords = np.array([[xfound_ver, yfound_ver]]).transpose()\n elif (found_hor == True) and (found_ver == False):\n d_hor = np.sqrt(np.square(xfound_hor - x0) + np.square(yfound_hor - y0))\n d = d_hor\n coords = np.array([[xfound_hor, yfound_hor]]).transpose()\n else:\n d_ver = np.sqrt(np.square(xfound_ver - x0) + np.square(yfound_ver - y0))\n d_hor = np.sqrt(np.square(xfound_hor - x0) + np.square(yfound_hor - y0))\n\n if d_hor <= d_ver:\n coords = np.array([[xfound_hor, yfound_hor]]).transpose()\n d = d_hor\n else:\n coords = np.array([[xfound_ver, yfound_ver]]).transpose()\n d = d_ver\n\n return (d, coords)\n" ]
[ [ "numpy.square", "numpy.sin", "numpy.array", "numpy.ceil", "numpy.tan", "numpy.tile", "numpy.cos", "numpy.linspace", "numpy.mod", "numpy.floor" ] ]
skylarkgit/face-verification
[ "75751ba2fc23994f1574a3c03d1323f30900adf1" ]
[ "server.py" ]
[ "from flask import Flask, request\nfrom flask_restful import Resource, Api, reqparse\nimport werkzeug\nimport face\nimport pprint\nfrom matplotlib import pyplot\nimport facenet.src.facenet as facenet\nfrom keras.models import load_model\nimport os\nimport tensorflow as tf\nfrom flask_cors import CORS, cross_origin\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n\napp = Flask(__name__)\napi = Api(app)\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\nmodel = load_model('facenet_keras.h5')\nmodel._make_predict_function()\ngraph = tf.get_default_graph()\n\nclass Face(Resource):\n def post(self):\n parse = reqparse.RequestParser()\n parse.add_argument('source', type=werkzeug.datastructures.FileStorage, location='files')\n parse.add_argument('target', type=werkzeug.datastructures.FileStorage, location='files')\n args = parse.parse_args()\n sourceImage = face.extract_face(args['source'])\n targetImage = face.extract_face(args['target'])\n distance = None\n with graph.as_default():\n sourceEmbedding = face.get_embedding(model, sourceImage)\n targetEmbedding = face.get_embedding(model, targetImage)\n distance = facenet.distance(sourceEmbedding, targetEmbedding)\n return {'distance': distance.tolist()}\n\napi.add_resource(Face, '/verify')\n\napp.run(port='5002')" ]
[ [ "tensorflow.get_default_graph" ] ]
sidneydong/QUANTAXIS
[ "92815e2d50936b812b39619f9090aa761473d46a" ]
[ "QUANTAXIS/QAApplication/QAAnalysis.py" ]
[ "# Encoding:UTF-8\n#\n# The MIT License (MIT)\n#\n# Copyright (c) 2016-2021 yutiansut/QUANTAXIS\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\n\"\"\"\nAnalysis Center for Backtest\nwe will give some function\n\"\"\"\nimport math\nimport sys\n\nimport numpy\nimport pandas as pd\n\nfrom QUANTAXIS.QAFetch.QAQuery import QA_fetch_stock_day\nfrom QUANTAXIS.QAUtil import QA_util_log_info, trade_date_sse\n\n\ndef QA_backtest_analysis_backtest(client, code_list, assets_d, account_days, message, total_date, benchmark_data):\n\n # 主要要从message_history分析\n # 1.收益率\n # 2.胜率\n # 3.回撤\n \"\"\"\n Annualized Returns: 策略年化收益率。表示投资期限为一年的预期收益率。\n 具体计算方式为 (策略最终价值 / 策略初始价值)^(250 / 回测交易日数量) - 1\n\n Alpha:阿尔法\n 具体计算方式为 (策略年化收益 - 无风险收益) - beta × (参考标准年化收益 - 无风险收益),这里的无风险收益指的是中国固定利率国债收益率曲线上10年期国债的年化到期收益率。\n\n Beta:贝塔\n 具体计算方法为 策略每日收益与参考标准每日收益的协方差 / 参考标准每日收益的方差 。\n\n Sharpe Ratio:夏普比率。表示每承受一单位总风险,会产生多少的超额报酬。\n 具体计算方法为 (策略年化收益率 - 回测起始交易日的无风险利率) / 策略收益波动率 。\n\n Volatility:策略收益波动率。用来测量资产的风险性。\n 具体计算方法为 策略每日收益的年化标准差 。\n\n Information Ratio:信息比率。衡量超额风险带来的超额收益。\n 具体计算方法为 (策略每日收益 - 参考标准每日收益)的年化均值 / 年化标准差 。\n\n Max Drawdown:最大回撤。描述策略可能出现的最糟糕的情况。\n 具体计算方法为 max(1 - 策略当日价值 / 当日之前虚拟账户最高价值)\n\n\n 单次交易收益\n 收益/次数的频次直方图\n 单日最大持仓\n \"\"\"\n # 数据检查\n if (len(benchmark_data)) < 1:\n QA_util_log_info('Wrong with benchmark data ! 
')\n sys.exit()\n\n # 计算一个benchmark\n # 这个benchmark 是在开始的那天 市价买入和策略所选标的一致的所有股票,然后一直持仓\n data = pd.concat([pd.DataFrame(message['body']['account']['history'],\n columns=['time', 'code', 'price', 'towards', 'amount', 'order_id', 'trade_id', 'commission']),\n pd.DataFrame(message['body']['account']['assets'], columns=['assets'])], axis=1)\n data['time'] = pd.to_datetime(data['time']).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai')\n data.set_index('time', drop=False, inplace=True)\n\n trade_history = message['body']['account']['history']\n cash = message['body']['account']['cash']\n assets = message['body']['account']['assets']\n\n #assets_= data.resample('D').last().dropna()\n # 计算交易日\n trade_date = account_days\n # benchmark资产\n benchmark_assets = QA_backtest_calc_benchmark(\n benchmark_data, assets[0])\n # d2=pd.concat([data.resample('D').last(),pd.DataFrame(benchmark_assets,columns=['benchmark'])])\n # benchmark年化收益\n benchmark_annualized_returns = QA_backtest_calc_profit_per_year(\n benchmark_assets, len(total_date))\n # 计算账户的收益\n\n # days=len(assest_history)-1\n # 策略年化收益\n annualized_returns = QA_backtest_calc_profit_per_year(\n assets_d, len(total_date))\n\n # 收益矩阵\n assest_profit = QA_backtest_calc_profit_matrix(assets)\n benchmark_profit = QA_backtest_calc_profit_matrix(benchmark_assets)\n\n # 策略日收益\n profit_day = QA_backtest_calc_profit_matrix(assets_d)\n # 胜率\n win_rate = QA_backtest_calc_win_rate(assest_profit)\n # 日胜率\n win_rate_day = QA_backtest_calc_win_rate(profit_day)\n # 年化波动率\n volatility_year = QA_backtest_calc_volatility(profit_day)\n benchmark_volatility_year = QA_backtest_calc_volatility(benchmark_profit)\n # 夏普比率\n sharpe = QA_backtest_calc_sharpe(\n annualized_returns, 0.05, volatility_year)\n\n # 最大回撤\n max_drop = QA_backtest_calc_dropback_max(assets_d)\n\n # 计算beta\n beta = QA_backtest_calc_beta(profit_day, benchmark_profit)\n # 计算Alpha\n alpha = QA_backtest_calc_alpha(\n annualized_returns, benchmark_annualized_returns, beta, 0.05)\n message = {\n 'code': code_list,\n 'annualized_returns': annualized_returns,\n 'benchmark_annualized_returns': benchmark_annualized_returns,\n 'assets': assets_d[1:],\n 'benchmark_assets': benchmark_assets[1:],\n 'vol': volatility_year,\n 'benchmark_vol': benchmark_volatility_year,\n 'sharpe': sharpe,\n 'alpha': alpha,\n 'beta': beta,\n 'total_date': total_date,\n 'trade_date': trade_date,\n 'max_drop': max_drop,\n 'win_rate': win_rate}\n return message\n\n\ndef QA_backtest_calc_assets(trade_history, assets):\n assets_d = []\n trade_date = []\n for i in range(0, len(trade_history), 1):\n if trade_history[i][0] not in trade_date:\n trade_date.append(trade_history[i][0])\n assets_d.append(assets[i])\n else:\n assets_d.pop(-1)\n assets_d.append(assets[i])\n\n return assets_d\n\n\ndef QA_backtest_calc_benchmark(benchmark_data, init_assets):\n\n return list(benchmark_data['close'] / float(benchmark_data['open'][0]) * float(init_assets))\n\n\ndef QA_backtest_calc_alpha(annualized_returns, benchmark_annualized_returns, beta, r):\n\n alpha = (annualized_returns - r) - (beta) * \\\n (benchmark_annualized_returns - r)\n return alpha\n\n\ndef QA_backtest_calc_beta(assest_profit, benchmark_profit):\n if len(assest_profit) < len(benchmark_profit):\n for i in range(0, len(benchmark_profit) - len(assest_profit), 1):\n assest_profit.append(0)\n elif len(assest_profit) > len(benchmark_profit):\n for i in range(0, len(assest_profit) - len(benchmark_profit), 1):\n benchmark_profit.append(0)\n calc_cov = numpy.cov(assest_profit, benchmark_profit)\n 
beta = calc_cov[0, 1] / calc_cov[1, 1]\n return beta\n\n\ndef QA_backtest_calc_profit(assest_history):\n return (assest_history[-1] / assest_history[1]) - 1\n\n\ndef QA_backtest_calc_profit_per_year(assest_history, days):\n return math.pow(float(assest_history[-1]) / float(assest_history[0]), 250.0 / float(days)) - 1.0\n\n\ndef QA_backtest_calc_profit_matrix(assest_history):\n assest_profit = []\n if len(assest_history) > 1:\n assest_profit = [assest_history[i + 1] / assest_history[i] -\n 1.0 for i in range(len(assest_history) - 1)]\n return assest_profit\n\n\ndef QA_backtest_calc_volatility(assest_profit_matrix):\n # 策略每日收益的年化标准差\n assest_profit = assest_profit_matrix\n\n volatility_day = numpy.std(assest_profit)\n volatility_year = volatility_day * math.sqrt(250)\n return volatility_year\n\n\ndef QA_backtest_calc_dropback_max(history):\n drops = []\n for i in range(1, len(history), 1):\n maxs = max(history[:i])\n cur = history[i - 1]\n drop = 1 - cur / maxs\n drops.append(drop)\n max_drop = max(drops)\n return max_drop\n\n\ndef QA_backtest_calc_sharpe(annualized_returns, r, volatility_year):\n '计算夏普比率'\n return (annualized_returns - r) / volatility_year\n\n\ndef QA_backtest_calc_trade_date(history):\n '计算交易日期'\n trade_date = []\n\n # trade_date_sse.index(history[-1][0])-trade_date_sse.index(history[0][0])\n for i in range(0, len(history), 1):\n if history[i][0] not in trade_date:\n trade_date.append(history[i][0])\n return trade_date\n\n\ndef calc_trade_time(history):\n return len(history)\n\n\ndef calc_every_pnl(detail):\n pass\n\n\ndef QA_backtest_calc_win_rate(profit_day):\n # 大于0的次数\n abovez = 0\n belowz = 0\n for i in range(0, len(profit_day) - 1, 1):\n if profit_day[i] > 0:\n abovez = abovez + 1\n elif profit_day[i] < 0:\n belowz = belowz + 1\n if belowz == 0:\n belowz = 1\n if abovez == 0:\n abovez = 1\n win_rate = abovez / (abovez + belowz)\n return win_rate\n" ]
[ [ "pandas.to_datetime", "numpy.std", "numpy.cov", "pandas.DataFrame" ] ]
molimat/YOLOv4-Counter-in-TF
[ "2c112424336fa03e82de67cf6b8487e8cb99a54a" ]
[ "core/yolov4.py" ]
[ "#! /usr/bin/env python\n# coding=utf-8\n\nimport numpy as np\nimport tensorflow as tf\nimport core.utils as utils\nimport core.common as common\nimport core.backbone as backbone\nfrom core.config import cfg\n\n# NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))\n# STRIDES = np.array(cfg.YOLO.STRIDES)\n# IOU_LOSS_THRESH = cfg.YOLO.IOU_LOSS_THRESH\n# XYSCALE = cfg.YOLO.XYSCALE\n# ANCHORS = utils.get_anchors(cfg.YOLO.ANCHORS)\n\ndef YOLO(input_layer, NUM_CLASS, model='yolov4', is_tiny=False):\n if is_tiny:\n if model == 'yolov4':\n return YOLOv4_tiny(input_layer, NUM_CLASS)\n elif model == 'yolov3':\n return YOLOv3_tiny(input_layer, NUM_CLASS)\n else:\n if model == 'yolov4':\n return YOLOv4(input_layer, NUM_CLASS)\n elif model == 'yolov3':\n return YOLOv3(input_layer, NUM_CLASS)\n\ndef YOLOv3(input_layer, NUM_CLASS):\n route_1, route_2, conv = backbone.darknet53(input_layer)\n\n conv = common.convolutional(conv, (1, 1, 1024, 512))\n conv = common.convolutional(conv, (3, 3, 512, 1024))\n conv = common.convolutional(conv, (1, 1, 1024, 512))\n conv = common.convolutional(conv, (3, 3, 512, 1024))\n conv = common.convolutional(conv, (1, 1, 1024, 512))\n\n conv_lobj_branch = common.convolutional(conv, (3, 3, 512, 1024))\n conv_lbbox = common.convolutional(conv_lobj_branch, (1, 1, 1024, 3 * (NUM_CLASS + 5)), activate=False, bn=False)\n\n conv = common.convolutional(conv, (1, 1, 512, 256))\n conv = common.upsample(conv)\n\n conv = tf.concat([conv, route_2], axis=-1)\n\n conv = common.convolutional(conv, (1, 1, 768, 256))\n conv = common.convolutional(conv, (3, 3, 256, 512))\n conv = common.convolutional(conv, (1, 1, 512, 256))\n conv = common.convolutional(conv, (3, 3, 256, 512))\n conv = common.convolutional(conv, (1, 1, 512, 256))\n\n conv_mobj_branch = common.convolutional(conv, (3, 3, 256, 512))\n conv_mbbox = common.convolutional(conv_mobj_branch, (1, 1, 512, 3 * (NUM_CLASS + 5)), activate=False, bn=False)\n\n conv = common.convolutional(conv, (1, 1, 256, 128))\n conv = common.upsample(conv)\n\n conv = tf.concat([conv, route_1], axis=-1)\n\n conv = common.convolutional(conv, (1, 1, 384, 128))\n conv = common.convolutional(conv, (3, 3, 128, 256))\n conv = common.convolutional(conv, (1, 1, 256, 128))\n conv = common.convolutional(conv, (3, 3, 128, 256))\n conv = common.convolutional(conv, (1, 1, 256, 128))\n\n conv_sobj_branch = common.convolutional(conv, (3, 3, 128, 256))\n conv_sbbox = common.convolutional(conv_sobj_branch, (1, 1, 256, 3 * (NUM_CLASS + 5)), activate=False, bn=False)\n\n return [conv_sbbox, conv_mbbox, conv_lbbox]\n\ndef YOLOv4(input_layer, NUM_CLASS):\n route_1, route_2, conv = backbone.cspdarknet53(input_layer)\n\n route = conv\n conv = common.convolutional(conv, (1, 1, 512, 256))\n conv = common.upsample(conv)\n route_2 = common.convolutional(route_2, (1, 1, 512, 256))\n conv = tf.concat([route_2, conv], axis=-1)\n\n conv = common.convolutional(conv, (1, 1, 512, 256))\n conv = common.convolutional(conv, (3, 3, 256, 512))\n conv = common.convolutional(conv, (1, 1, 512, 256))\n conv = common.convolutional(conv, (3, 3, 256, 512))\n conv = common.convolutional(conv, (1, 1, 512, 256))\n\n route_2 = conv\n conv = common.convolutional(conv, (1, 1, 256, 128))\n conv = common.upsample(conv)\n route_1 = common.convolutional(route_1, (1, 1, 256, 128))\n conv = tf.concat([route_1, conv], axis=-1)\n\n conv = common.convolutional(conv, (1, 1, 256, 128))\n conv = common.convolutional(conv, (3, 3, 128, 256))\n conv = common.convolutional(conv, (1, 1, 256, 128))\n conv = 
common.convolutional(conv, (3, 3, 128, 256))\n conv = common.convolutional(conv, (1, 1, 256, 128))\n\n route_1 = conv\n conv = common.convolutional(conv, (3, 3, 128, 256))\n conv_sbbox = common.convolutional(conv, (1, 1, 256, 3 * (NUM_CLASS + 5)), activate=False, bn=False)\n\n conv = common.convolutional(route_1, (3, 3, 128, 256), downsample=True)\n conv = tf.concat([conv, route_2], axis=-1)\n\n conv = common.convolutional(conv, (1, 1, 512, 256))\n conv = common.convolutional(conv, (3, 3, 256, 512))\n conv = common.convolutional(conv, (1, 1, 512, 256))\n conv = common.convolutional(conv, (3, 3, 256, 512))\n conv = common.convolutional(conv, (1, 1, 512, 256))\n\n route_2 = conv\n conv = common.convolutional(conv, (3, 3, 256, 512))\n conv_mbbox = common.convolutional(conv, (1, 1, 512, 3 * (NUM_CLASS + 5)), activate=False, bn=False)\n\n conv = common.convolutional(route_2, (3, 3, 256, 512), downsample=True)\n conv = tf.concat([conv, route], axis=-1)\n\n conv = common.convolutional(conv, (1, 1, 1024, 512))\n conv = common.convolutional(conv, (3, 3, 512, 1024))\n conv = common.convolutional(conv, (1, 1, 1024, 512))\n conv = common.convolutional(conv, (3, 3, 512, 1024))\n conv = common.convolutional(conv, (1, 1, 1024, 512))\n\n conv = common.convolutional(conv, (3, 3, 512, 1024))\n conv_lbbox = common.convolutional(conv, (1, 1, 1024, 3 * (NUM_CLASS + 5)), activate=False, bn=False)\n\n return [conv_sbbox, conv_mbbox, conv_lbbox]\n\ndef YOLOv4_tiny(input_layer, NUM_CLASS):\n route_1, conv = backbone.cspdarknet53_tiny(input_layer)\n\n conv = common.convolutional(conv, (1, 1, 512, 256))\n\n conv_lobj_branch = common.convolutional(conv, (3, 3, 256, 512))\n conv_lbbox = common.convolutional(conv_lobj_branch, (1, 1, 512, 3 * (NUM_CLASS + 5)), activate=False, bn=False)\n\n conv = common.convolutional(conv, (1, 1, 256, 128))\n conv = common.upsample(conv)\n conv = tf.concat([conv, route_1], axis=-1)\n\n conv_mobj_branch = common.convolutional(conv, (3, 3, 128, 256))\n conv_mbbox = common.convolutional(conv_mobj_branch, (1, 1, 256, 3 * (NUM_CLASS + 5)), activate=False, bn=False)\n\n return [conv_mbbox, conv_lbbox]\n\ndef YOLOv3_tiny(input_layer, NUM_CLASS):\n route_1, conv = backbone.darknet53_tiny(input_layer)\n\n conv = common.convolutional(conv, (1, 1, 1024, 256))\n\n conv_lobj_branch = common.convolutional(conv, (3, 3, 256, 512))\n conv_lbbox = common.convolutional(conv_lobj_branch, (1, 1, 512, 3 * (NUM_CLASS + 5)), activate=False, bn=False)\n\n conv = common.convolutional(conv, (1, 1, 256, 128))\n conv = common.upsample(conv)\n conv = tf.concat([conv, route_1], axis=-1)\n\n conv_mobj_branch = common.convolutional(conv, (3, 3, 128, 256))\n conv_mbbox = common.convolutional(conv_mobj_branch, (1, 1, 256, 3 * (NUM_CLASS + 5)), activate=False, bn=False)\n\n return [conv_mbbox, conv_lbbox]\n\ndef decode(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE=[1,1,1], FRAMEWORK='tf'):\n if FRAMEWORK == 'trt':\n return decode_trt(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=i, XYSCALE=XYSCALE)\n elif FRAMEWORK == 'tflite':\n return decode_tflite(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=i, XYSCALE=XYSCALE)\n else:\n return decode_tf(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=i, XYSCALE=XYSCALE)\n\ndef decode_train(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=0, XYSCALE=[1, 1, 1]):\n conv_output = tf.reshape(conv_output,\n (tf.shape(conv_output)[0], output_size, output_size, 3, 5 + NUM_CLASS))\n\n conv_raw_dxdy, conv_raw_dwdh, conv_raw_conf, 
conv_raw_prob = tf.split(conv_output, (2, 2, 1, NUM_CLASS),\n axis=-1)\n\n xy_grid = tf.meshgrid(tf.range(output_size), tf.range(output_size))\n xy_grid = tf.expand_dims(tf.stack(xy_grid, axis=-1), axis=2) # [gx, gy, 1, 2]\n xy_grid = tf.tile(tf.expand_dims(xy_grid, axis=0), [tf.shape(conv_output)[0], 1, 1, 3, 1])\n\n xy_grid = tf.cast(xy_grid, tf.float32)\n\n pred_xy = ((tf.sigmoid(conv_raw_dxdy) * XYSCALE[i]) - 0.5 * (XYSCALE[i] - 1) + xy_grid) * \\\n STRIDES[i]\n pred_wh = (tf.exp(conv_raw_dwdh) * ANCHORS[i])\n pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1)\n\n pred_conf = tf.sigmoid(conv_raw_conf)\n pred_prob = tf.sigmoid(conv_raw_prob)\n\n return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1)\n\ndef decode_tf(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=0, XYSCALE=[1, 1, 1]):\n conv_output = tf.reshape(conv_output,\n (tf.shape(conv_output)[0], output_size, output_size, 3, 5 + NUM_CLASS))\n\n conv_raw_dxdy, conv_raw_dwdh, conv_raw_conf, conv_raw_prob = tf.split(conv_output, (2, 2, 1, NUM_CLASS),\n axis=-1)\n\n xy_grid = tf.meshgrid(tf.range(output_size), tf.range(output_size))\n xy_grid = tf.expand_dims(tf.stack(xy_grid, axis=-1), axis=2) # [gx, gy, 1, 2]\n xy_grid = tf.tile(tf.expand_dims(xy_grid, axis=0), [tf.shape(conv_output)[0], 1, 1, 3, 1])\n\n xy_grid = tf.cast(xy_grid, tf.float32)\n\n pred_xy = ((tf.sigmoid(conv_raw_dxdy) * XYSCALE[i]) - 0.5 * (XYSCALE[i] - 1) + xy_grid) * \\\n STRIDES[i]\n pred_wh = (tf.exp(conv_raw_dwdh) * ANCHORS[i])\n pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1)\n\n pred_conf = tf.sigmoid(conv_raw_conf)\n pred_prob = tf.sigmoid(conv_raw_prob)\n\n pred_prob = pred_conf * pred_prob\n return pred_xywh, pred_prob\n # return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1)\n\ndef decode_tflite(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=0, XYSCALE=[1,1,1]):\n conv_output = tf.reshape(conv_output, (1, output_size, output_size, 3, 5 + NUM_CLASS))\n\n conv_raw_dxdy, conv_raw_dwdh, conv_raw_conf, conv_raw_prob = tf.split(conv_output, (2, 2, 1, NUM_CLASS), axis=-1)\n\n xy_grid = tf.meshgrid(tf.range(output_size), tf.range(output_size))\n xy_grid = tf.expand_dims(tf.stack(xy_grid, axis=-1), axis=2) # [gx, gy, 1, 2]\n xy_grid = tf.tile(tf.expand_dims(xy_grid, axis=0), [1, 1, 1, 3, 1])\n\n xy_grid = tf.cast(xy_grid, tf.float32)\n\n pred_xy = ((tf.sigmoid(conv_raw_dxdy) * XYSCALE[i]) - 0.5 * (XYSCALE[i] - 1) + xy_grid) * \\\n STRIDES[i]\n pred_wh = (tf.exp(conv_raw_dwdh) * ANCHORS[i])\n pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1)\n\n pred_conf = tf.sigmoid(conv_raw_conf)\n pred_prob = tf.sigmoid(conv_raw_prob)\n\n pred_prob = pred_conf * pred_prob\n return pred_xywh, pred_prob\n # return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1)\n\ndef decode_trt(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=0, XYSCALE=[1,1,1]):\n conv_output = tf.reshape(conv_output, (tf.shape(conv_output)[0], output_size, output_size, 3, 5 + NUM_CLASS))\n\n conv_raw_dxdy, conv_raw_dwdh, conv_raw_conf, conv_raw_prob = tf.split(conv_output, (2, 2, 1, NUM_CLASS), axis=-1)\n\n xy_grid = tf.meshgrid(tf.range(output_size), tf.range(output_size))\n xy_grid = tf.expand_dims(tf.stack(xy_grid, axis=-1), axis=2) # [gx, gy, 1, 2]\n xy_grid = tf.tile(tf.expand_dims(xy_grid, axis=0), [tf.shape(conv_output)[0], 1, 1, 3, 1])\n\n # x = tf.tile(tf.expand_dims(tf.range(output_size, dtype=tf.float32), axis=0), [output_size, 1])\n # y = tf.tile(tf.expand_dims(tf.range(output_size, dtype=tf.float32), axis=1), [1, output_size])\n # xy_grid = 
tf.expand_dims(tf.stack([x, y], axis=-1), axis=2) # [gx, gy, 1, 2]\n # xy_grid = tf.tile(tf.expand_dims(xy_grid, axis=0), [tf.shape(conv_output)[0], 1, 1, 3, 1])\n\n xy_grid = tf.cast(xy_grid, tf.float32)\n\n # pred_xy = ((tf.sigmoid(conv_raw_dxdy) * XYSCALE[i]) - 0.5 * (XYSCALE[i] - 1) + xy_grid) * \\\n # STRIDES[i]\n pred_xy = (tf.reshape(tf.sigmoid(conv_raw_dxdy), (-1, 2)) * XYSCALE[i] - 0.5 * (XYSCALE[i] - 1) + tf.reshape(xy_grid, (-1, 2))) * STRIDES[i]\n pred_xy = tf.reshape(pred_xy, (tf.shape(conv_output)[0], output_size, output_size, 3, 2))\n pred_wh = (tf.exp(conv_raw_dwdh) * ANCHORS[i])\n pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1)\n\n pred_conf = tf.sigmoid(conv_raw_conf)\n pred_prob = tf.sigmoid(conv_raw_prob)\n\n pred_prob = pred_conf * pred_prob\n return pred_xywh, pred_prob\n # return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1)\n\n\ndef filter_boxes(box_xywh, scores, score_threshold=0.4, input_shape = tf.constant([416,416])):\n scores_max = tf.math.reduce_max(scores, axis=-1)\n\n mask = scores_max >= score_threshold\n class_boxes = tf.boolean_mask(box_xywh, mask)\n pred_conf = tf.boolean_mask(scores, mask)\n class_boxes = tf.reshape(class_boxes, [tf.shape(scores)[0], -1, tf.shape(class_boxes)[-1]])\n pred_conf = tf.reshape(pred_conf, [tf.shape(scores)[0], -1, tf.shape(pred_conf)[-1]])\n\n box_xy, box_wh = tf.split(class_boxes, (2, 2), axis=-1)\n\n input_shape = tf.cast(input_shape, dtype=tf.float32)\n\n box_yx = box_xy[..., ::-1]\n box_hw = box_wh[..., ::-1]\n\n box_mins = (box_yx - (box_hw / 2.)) / input_shape\n box_maxes = (box_yx + (box_hw / 2.)) / input_shape\n boxes = tf.concat([\n box_mins[..., 0:1], # y_min\n box_mins[..., 1:2], # x_min\n box_maxes[..., 0:1], # y_max\n box_maxes[..., 1:2] # x_max\n ], axis=-1)\n # return tf.concat([boxes, pred_conf], axis=-1)\n return (boxes, pred_conf)\n\n\ndef compute_loss(pred, conv, label, bboxes, STRIDES, NUM_CLASS, IOU_LOSS_THRESH, i=0):\n conv_shape = tf.shape(conv)\n batch_size = conv_shape[0]\n output_size = conv_shape[1]\n input_size = STRIDES[i] * output_size\n conv = tf.reshape(conv, (batch_size, output_size, output_size, 3, 5 + NUM_CLASS))\n\n conv_raw_conf = conv[:, :, :, :, 4:5]\n conv_raw_prob = conv[:, :, :, :, 5:]\n\n pred_xywh = pred[:, :, :, :, 0:4]\n pred_conf = pred[:, :, :, :, 4:5]\n\n label_xywh = label[:, :, :, :, 0:4]\n respond_bbox = label[:, :, :, :, 4:5]\n label_prob = label[:, :, :, :, 5:]\n\n giou = tf.expand_dims(utils.bbox_giou(pred_xywh, label_xywh), axis=-1)\n input_size = tf.cast(input_size, tf.float32)\n\n bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (input_size ** 2)\n giou_loss = respond_bbox * bbox_loss_scale * (1- giou)\n\n iou = utils.bbox_iou(pred_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :])\n max_iou = tf.expand_dims(tf.reduce_max(iou, axis=-1), axis=-1)\n\n respond_bgd = (1.0 - respond_bbox) * tf.cast( max_iou < IOU_LOSS_THRESH, tf.float32 )\n\n conf_focal = tf.pow(respond_bbox - pred_conf, 2)\n\n conf_loss = conf_focal * (\n respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf)\n +\n respond_bgd * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf)\n )\n\n prob_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_prob, logits=conv_raw_prob)\n\n giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis=[1,2,3,4]))\n conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis=[1,2,3,4]))\n prob_loss 
= tf.reduce_mean(tf.reduce_sum(prob_loss, axis=[1,2,3,4]))\n\n return giou_loss, conf_loss, prob_loss\n\n\n\n\n\n" ]
[ [ "tensorflow.exp", "tensorflow.shape", "tensorflow.range", "tensorflow.concat", "tensorflow.sigmoid", "tensorflow.expand_dims", "tensorflow.reshape", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.constant", "tensorflow.reduce_max", "tensorflow.math.reduce_max", "tensorflow.reduce_sum", "tensorflow.stack", "tensorflow.split", "tensorflow.pow", "tensorflow.boolean_mask", "tensorflow.cast" ] ]
ashim95/parser
[ "61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199" ]
[ "supar/parsers/semantic_dependency.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport os\n\nimport torch\nimport torch.nn as nn\nfrom supar.models import (BiaffineSemanticDependencyModel,\n VISemanticDependencyModel)\nfrom supar.parsers.parser import Parser\nfrom supar.utils import Config, Dataset, Embedding\nfrom supar.utils.common import bos, pad, unk\nfrom supar.utils.field import ChartField, Field, SubwordField\nfrom supar.utils.logging import get_logger, progress_bar\nfrom supar.utils.metric import ChartMetric\nfrom supar.utils.transform import CoNLL\nfrom torch.optim import Adam\nfrom torch.optim.lr_scheduler import ExponentialLR\n\nlogger = get_logger(__name__)\n\n\nclass BiaffineSemanticDependencyParser(Parser):\n r\"\"\"\n The implementation of Biaffine Semantic Dependency Parser.\n\n References:\n - Timothy Dozat and Christopher D. Manning. 20178.\n `Simpler but More Accurate Semantic Dependency Parsing`_.\n\n .. _Simpler but More Accurate Semantic Dependency Parsing:\n https://www.aclweb.org/anthology/P18-2077/\n \"\"\"\n\n NAME = 'biaffine-semantic-dependency'\n MODEL = BiaffineSemanticDependencyModel\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.WORD, self.CHAR, self.BERT = self.transform.FORM\n self.LEMMA = self.transform.LEMMA\n self.TAG = self.transform.POS\n self.EDGE, self.LABEL = self.transform.PHEAD\n\n def train(self, train, dev, test, buckets=32, batch_size=5000, verbose=True, **kwargs):\n r\"\"\"\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding the unconsumed arguments that can be used to update the configurations for training.\n \"\"\"\n\n return super().train(**Config().update(locals()))\n\n def evaluate(self, data, buckets=8, batch_size=5000, verbose=True, **kwargs):\n r\"\"\"\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding the unconsumed arguments that can be used to update the configurations for evaluation.\n\n Returns:\n The loss scalar and evaluation results.\n \"\"\"\n\n return super().evaluate(**Config().update(locals()))\n\n def predict(self, data, pred=None, buckets=8, batch_size=5000, verbose=True, **kwargs):\n r\"\"\"\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):\n If specified, the predicted results will be saved to the file. Default: ``None``.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n prob (bool):\n If ``True``, outputs the probabilities. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. 
Default: ``True``.\n kwargs (dict):\n A dict holding the unconsumed arguments that can be used to update the configurations for prediction.\n\n Returns:\n A :class:`~supar.utils.Dataset` object that stores the predicted results.\n \"\"\"\n\n return super().predict(**Config().update(locals()))\n\n def _train(self, loader):\n self.model.train()\n\n bar, metric = progress_bar(loader), ChartMetric()\n\n for words, *feats, edges, labels in bar:\n self.optimizer.zero_grad()\n\n mask = words.ne(self.WORD.pad_index)\n mask = mask.unsqueeze(1) & mask.unsqueeze(2)\n mask[:, 0] = 0\n s_edge, s_label = self.model(words, feats)\n loss = self.model.loss(s_edge, s_label, edges, labels, mask)\n loss.backward()\n nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)\n self.optimizer.step()\n self.scheduler.step()\n\n edge_preds, label_preds = self.model.decode(s_edge, s_label)\n metric(label_preds.masked_fill(~(edge_preds.gt(0) & mask), -1),\n labels.masked_fill(~(edges.gt(0) & mask), -1))\n bar.set_postfix_str(f\"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}\")\n\n @torch.no_grad()\n def _evaluate(self, loader):\n self.model.eval()\n\n total_loss, metric = 0, ChartMetric()\n\n for words, *feats, edges, labels in loader:\n mask = words.ne(self.WORD.pad_index)\n mask = mask.unsqueeze(1) & mask.unsqueeze(2)\n mask[:, 0] = 0\n s_edge, s_label = self.model(words, feats)\n loss = self.model.loss(s_edge, s_label, edges, labels, mask)\n total_loss += loss.item()\n\n edge_preds, label_preds = self.model.decode(s_edge, s_label)\n metric(label_preds.masked_fill(~(edge_preds.gt(0) & mask), -1),\n labels.masked_fill(~(edges.gt(0) & mask), -1))\n total_loss /= len(loader)\n\n return total_loss, metric\n\n @torch.no_grad()\n def _predict(self, loader):\n self.model.eval()\n\n preds = {}\n charts, probs = [], []\n for words, *feats in progress_bar(loader):\n mask = words.ne(self.WORD.pad_index)\n mask = mask.unsqueeze(1) & mask.unsqueeze(2)\n mask[:, 0] = 0\n lens = mask[:, 1].sum(-1).tolist()\n s_edge, s_label = self.model(words, feats)\n edge_preds, label_preds = self.model.decode(s_edge, s_label)\n chart_preds = label_preds.masked_fill(~(edge_preds.gt(0) & mask), -1)\n charts.extend(chart[1:i, :i].tolist() for i, chart in zip(lens, chart_preds.unbind()))\n if self.args.prob:\n probs.extend([prob[1:i, :i].cpu() for i, prob in zip(lens, s_edge.softmax(-1).unbind())])\n charts = [CoNLL.build_relations([[self.LABEL.vocab[i] if i >= 0 else None for i in row] for row in chart])\n for chart in charts]\n preds = {'labels': charts}\n if self.args.prob:\n preds['probs'] = probs\n\n return preds\n\n @classmethod\n def build(cls,\n path,\n optimizer_args={'lr': 1e-3, 'betas': (.0, .95), 'eps': 1e-12, 'weight_decay': 3e-9},\n scheduler_args={'gamma': .75**(1/5000)},\n min_freq=7,\n fix_len=20,\n **kwargs):\n r\"\"\"\n Build a brand-new Parser, including initialization of all data fields and model parameters.\n\n Args:\n path (str):\n The path of the model to be saved.\n optimizer_args (dict):\n Arguments for creating an optimizer.\n scheduler_args (dict):\n Arguments for creating a scheduler.\n min_freq (str):\n The minimum frequency needed to include a token in the vocabulary. Default:7.\n fix_len (int):\n The max length of all subword pieces. 
The excess part of each piece will be truncated.\n Required if using CharLSTM/BERT.\n Default: 20.\n kwargs (dict):\n A dict holding the unconsumed arguments.\n \"\"\"\n\n args = Config(**locals())\n args.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n os.makedirs(os.path.dirname(path), exist_ok=True)\n if os.path.exists(path) and not args.build:\n parser = cls.load(**args)\n parser.model = cls.MODEL(**parser.args)\n parser.model.load_pretrained(parser.WORD.embed).to(args.device)\n return parser\n\n logger.info(\"Building the fields\")\n WORD = Field('words', pad=pad, unk=unk, bos=bos, lower=True)\n TAG, CHAR, LEMMA, BERT = None, None, None, None\n if 'tag' in args.feat:\n TAG = Field('tags', bos=bos)\n if 'char' in args.feat:\n CHAR = SubwordField('chars', pad=pad, unk=unk, bos=bos, fix_len=args.fix_len)\n if 'lemma' in args.feat:\n LEMMA = Field('lemmas', pad=pad, unk=unk, bos=bos, lower=True)\n if 'bert' in args.feat:\n from transformers import AutoTokenizer\n tokenizer = AutoTokenizer.from_pretrained(args.bert)\n BERT = SubwordField('bert',\n pad=tokenizer.pad_token,\n unk=tokenizer.unk_token,\n bos=tokenizer.bos_token or tokenizer.cls_token,\n fix_len=args.fix_len,\n tokenize=tokenizer.tokenize)\n BERT.vocab = tokenizer.get_vocab()\n EDGE = ChartField('edges', use_vocab=False, fn=CoNLL.get_edges)\n LABEL = ChartField('labels', fn=CoNLL.get_labels)\n transform = CoNLL(FORM=(WORD, CHAR, BERT), LEMMA=LEMMA, POS=TAG, PHEAD=(EDGE, LABEL))\n\n train = Dataset(transform, args.train)\n WORD.build(train, args.min_freq, (Embedding.load(args.embed, args.unk) if args.embed else None))\n if TAG is not None:\n TAG.build(train)\n if CHAR is not None:\n CHAR.build(train)\n if LEMMA is not None:\n LEMMA.build(train)\n LABEL.build(train)\n args.update({\n 'n_words': WORD.vocab.n_init,\n 'n_labels': len(LABEL.vocab),\n 'n_tags': len(TAG.vocab) if TAG is not None else None,\n 'n_chars': len(CHAR.vocab) if CHAR is not None else None,\n 'char_pad_index': CHAR.pad_index if CHAR is not None else None,\n 'n_lemmas': len(LEMMA.vocab) if LEMMA is not None else None,\n 'bert_pad_index': BERT.pad_index if BERT is not None else None,\n 'pad_index': WORD.pad_index,\n 'unk_index': WORD.unk_index\n })\n logger.info(f\"{transform}\")\n\n logger.info(\"Building the model\")\n model = cls.MODEL(**args).load_pretrained(WORD.embed).to(args.device)\n logger.info(f\"{model}\\n\")\n\n optimizer = Adam(model.parameters(), **optimizer_args)\n scheduler = ExponentialLR(optimizer, **scheduler_args)\n\n return cls(args, model, transform, optimizer, scheduler)\n\n\nclass VISemanticDependencyParser(BiaffineSemanticDependencyParser):\n r\"\"\"\n The implementation of Semantic Dependency Parser using Variational Inference.\n\n References:\n - Xinyu Wang, Jingxian Huang and Kewei Tu. 2019.\n `Second-Order Semantic Dependency Parsing with End-to-End Neural Networks`_.\n\n .. 
_Second-Order Semantic Dependency Parsing with End-to-End Neural Networks:\n https://www.aclweb.org/anthology/P19-1454/\n \"\"\"\n\n NAME = 'vi-semantic-dependency'\n MODEL = VISemanticDependencyModel\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.WORD, self.CHAR, self.BERT = self.transform.FORM\n self.LEMMA = self.transform.LEMMA\n self.TAG = self.transform.POS\n self.EDGE, self.LABEL = self.transform.PHEAD\n\n def train(self, train, dev, test, buckets=32, batch_size=5000, verbose=True, **kwargs):\n r\"\"\"\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding the unconsumed arguments that can be used to update the configurations for training.\n \"\"\"\n\n return super().train(**Config().update(locals()))\n\n def evaluate(self, data, buckets=8, batch_size=5000, verbose=True, **kwargs):\n r\"\"\"\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding the unconsumed arguments that can be used to update the configurations for evaluation.\n\n Returns:\n The loss scalar and evaluation results.\n \"\"\"\n\n return super().evaluate(**Config().update(locals()))\n\n def predict(self, data, pred=None, buckets=8, batch_size=5000, verbose=True, **kwargs):\n r\"\"\"\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):\n If specified, the predicted results will be saved to the file. Default: ``None``.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n prob (bool):\n If ``True``, outputs the probabilities. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. 
Default: ``True``.\n kwargs (dict):\n A dict holding the unconsumed arguments that can be used to update the configurations for prediction.\n\n Returns:\n A :class:`~supar.utils.Dataset` object that stores the predicted results.\n \"\"\"\n\n return super().predict(**Config().update(locals()))\n\n def _train(self, loader):\n self.model.train()\n\n bar, metric = progress_bar(loader), ChartMetric()\n\n for words, *feats, edges, labels in bar:\n self.optimizer.zero_grad()\n\n mask = words.ne(self.WORD.pad_index)\n mask = mask.unsqueeze(1) & mask.unsqueeze(2)\n mask[:, 0] = 0\n s_edge, s_sib, s_cop, s_grd, s_label = self.model(words, feats)\n loss, s_edge = self.model.loss(s_edge, s_sib, s_cop, s_grd, s_label, edges, labels, mask)\n loss.backward()\n nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)\n self.optimizer.step()\n self.scheduler.step()\n\n edge_preds, label_preds = self.model.decode(s_edge, s_label)\n metric(label_preds.masked_fill(~(edge_preds.gt(0) & mask), -1),\n labels.masked_fill(~(edges.gt(0) & mask), -1))\n bar.set_postfix_str(f\"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}\")\n\n @torch.no_grad()\n def _evaluate(self, loader):\n self.model.eval()\n\n total_loss, metric = 0, ChartMetric()\n\n for words, *feats, edges, labels in loader:\n mask = words.ne(self.WORD.pad_index)\n mask = mask.unsqueeze(1) & mask.unsqueeze(2)\n mask[:, 0] = 0\n s_edge, s_sib, s_cop, s_grd, s_label = self.model(words, feats)\n loss, s_edge = self.model.loss(s_edge, s_sib, s_cop, s_grd, s_label, edges, labels, mask)\n total_loss += loss.item()\n\n edge_preds, label_preds = self.model.decode(s_edge, s_label)\n metric(label_preds.masked_fill(~(edge_preds.gt(0) & mask), -1),\n labels.masked_fill(~(edges.gt(0) & mask), -1))\n total_loss /= len(loader)\n\n return total_loss, metric\n\n @torch.no_grad()\n def _predict(self, loader):\n self.model.eval()\n\n preds = {}\n charts, probs = [], []\n for words, *feats in progress_bar(loader):\n mask = words.ne(self.WORD.pad_index)\n mask = mask.unsqueeze(1) & mask.unsqueeze(2)\n mask[:, 0] = 0\n lens = mask[:, 1].sum(-1).tolist()\n s_edge, s_sib, s_cop, s_grd, s_label = self.model(words, feats)\n s_edge = self.model.lbp((s_edge, s_sib, s_cop, s_grd), mask)\n edge_preds, label_preds = self.model.decode(s_edge, s_label)\n chart_preds = label_preds.masked_fill(~(edge_preds.gt(0) & mask), -1)\n charts.extend(chart[1:i, :i].tolist() for i, chart in zip(lens, chart_preds.unbind()))\n if self.args.prob:\n probs.extend([prob[1:i, :i].cpu() for i, prob in zip(lens, s_edge.softmax(-1).unbind())])\n charts = [CoNLL.build_relations([[self.LABEL.vocab[i] if i >= 0 else None for i in row] for row in chart])\n for chart in charts]\n preds = {'labels': charts}\n if self.args.prob:\n preds['probs'] = probs\n\n return preds\n\n @classmethod\n def build(cls,\n path,\n optimizer_args={'lr': 1e-3, 'betas': (.0, .95), 'eps': 1e-12},\n scheduler_args={'gamma': .75**(1/5000)},\n min_freq=7,\n fix_len=20,\n **kwargs):\n r\"\"\"\n Build a brand-new Parser, including initialization of all data fields and model parameters.\n\n Args:\n path (str):\n The path of the model to be saved.\n optimizer_args (dict):\n Arguments for creating an optimizer.\n scheduler_args (dict):\n Arguments for creating a scheduler.\n min_freq (str):\n The minimum frequency needed to include a token in the vocabulary. Default:7.\n fix_len (int):\n The max length of all subword pieces. 
The excess part of each piece will be truncated.\n Required if using CharLSTM/BERT.\n Default: 20.\n kwargs (dict):\n A dict holding the unconsumed arguments.\n \"\"\"\n\n args = Config(**locals())\n args.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n os.makedirs(os.path.dirname(path), exist_ok=True)\n if os.path.exists(path) and not args.build:\n parser = cls.load(**args)\n parser.model = cls.MODEL(**parser.args)\n parser.model.load_pretrained(parser.WORD.embed).to(args.device)\n return parser\n\n logger.info(\"Building the fields\")\n WORD = Field('words', pad=pad, unk=unk, bos=bos, lower=True)\n TAG, CHAR, LEMMA, BERT = None, None, None, None\n if 'tag' in args.feat:\n TAG = Field('tags', bos=bos)\n if 'char' in args.feat:\n CHAR = SubwordField('chars', pad=pad, unk=unk, bos=bos, fix_len=args.fix_len)\n if 'lemma' in args.feat:\n LEMMA = Field('lemmas', pad=pad, unk=unk, bos=bos, lower=True)\n if 'bert' in args.feat:\n from transformers import AutoTokenizer\n tokenizer = AutoTokenizer.from_pretrained(args.bert)\n BERT = SubwordField('bert',\n pad=tokenizer.pad_token,\n unk=tokenizer.unk_token,\n bos=tokenizer.bos_token or tokenizer.cls_token,\n fix_len=args.fix_len,\n tokenize=tokenizer.tokenize)\n BERT.vocab = tokenizer.get_vocab()\n EDGE = ChartField('edges', use_vocab=False, fn=CoNLL.get_edges)\n LABEL = ChartField('labels', fn=CoNLL.get_labels)\n transform = CoNLL(FORM=(WORD, CHAR, BERT), LEMMA=LEMMA, POS=TAG, PHEAD=(EDGE, LABEL))\n\n train = Dataset(transform, args.train)\n WORD.build(train, args.min_freq, (Embedding.load(args.embed, args.unk) if args.embed else None))\n if TAG is not None:\n TAG.build(train)\n if CHAR is not None:\n CHAR.build(train)\n if LEMMA is not None:\n LEMMA.build(train)\n LABEL.build(train)\n args.update({\n 'n_words': WORD.vocab.n_init,\n 'n_labels': len(LABEL.vocab),\n 'n_tags': len(TAG.vocab) if TAG is not None else None,\n 'n_chars': len(CHAR.vocab) if CHAR is not None else None,\n 'char_pad_index': CHAR.pad_index if CHAR is not None else None,\n 'n_lemmas': len(LEMMA.vocab) if LEMMA is not None else None,\n 'bert_pad_index': BERT.pad_index if BERT is not None else None,\n 'pad_index': WORD.pad_index,\n 'unk_index': WORD.unk_index\n })\n logger.info(f\"{transform}\")\n\n logger.info(\"Building the model\")\n model = cls.MODEL(**args).load_pretrained(WORD.embed).to(args.device)\n logger.info(f\"{model}\\n\")\n\n optimizer = Adam(model.parameters(), **optimizer_args)\n scheduler = ExponentialLR(optimizer, **scheduler_args)\n\n return cls(args, model, transform, optimizer, scheduler)\n" ]
[ [ "torch.no_grad", "torch.cuda.is_available", "torch.optim.lr_scheduler.ExponentialLR" ] ]
philtgun/compare-embeddings
[ "72c1cfffe17d6d3049f5e41ad6bef1e8a353f6a0" ]
[ "src/visualize.py" ]
[ "# Copyright 2022 Philip Tovstogan, Music Technology Group\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport argparse\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib import rcParams\n\n\ndef visualize(input_file: Path, output_file: Path, scale: int) -> None:\n df_all = pd.read_csv(input_file)\n n_at_list = sorted(df_all['at'].unique())\n n_rows = int(np.ceil(len(n_at_list) / 2))\n fig, axes = plt.subplots(n_rows, 2, figsize=(scale * 2, scale * n_rows), sharex='all', sharey='all')\n\n # string conversion function: don't print 1.0 and remove leading 0s\n to_str = np.vectorize(lambda x: f'{x:.2f}'.removeprefix('0') if x < 1.0 else ' ')\n\n for n_at, ax in zip(n_at_list, axes.flat):\n df = df_all[df_all['at'] == n_at]\n names = sorted(set(df['src']) | set(df['dst']))\n names_indices = {name: i for i, name in enumerate(names)}\n n = len(names)\n matrix = np.zeros((n, n))\n for _, row in df.iterrows():\n i, j = names_indices[row['src']], names_indices[row['dst']]\n matrix[i, j] = row['similarity']\n matrix[j, i] = row['similarity']\n\n np.fill_diagonal(matrix, 1)\n annot = to_str(matrix)\n\n ax.set_title(f'@{n_at}')\n sns.heatmap(matrix, annot=annot, fmt='s', ax=ax, cmap='mako_r', xticklabels=names, yticklabels=names,\n square=True, vmin=0, vmax=1, cbar=False)\n ax.tick_params(left=False, bottom=False)\n\n if output_file is not None:\n plt.savefig(output_file, bbox_inches='tight')\n else:\n plt.show()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description='Create figure to visualize the data in NN-similarity .csv file')\n parser.add_argument('input_file', type=Path, help='input similarity.csv file')\n parser.add_argument('output_file', type=Path, help='output .png or .pdf plot file')\n parser.add_argument('--scale', type=float, default=6, help='increase this number if the figure is too cluttered')\n parser.add_argument('--serif', action='store_true', help='use serif font for figures (e.g. for thesis)')\n args = parser.parse_args()\n\n if args.serif:\n rcParams['font.family'] = 'serif'\n rcParams['font.serif'] = 'Times New Roman'\n\n visualize(args.input_file, args.output_file, args.scale)\n" ]
[ [ "numpy.fill_diagonal", "numpy.zeros", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "pandas.read_csv" ] ]
pvgladkov/abstraction-and-reasoning-challenge
[ "0dfe16b5044f5aba0d5f53397dc615400e61aa69" ]
[ "arc_run.py" ]
[ "import pickle\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom arc.colors import TaskSolverColor\nfrom arc.sophisticated_trees import StackedTaskSolver\nfrom arc.trees import TaskSolverTree\nfrom arc.utils import load_data, flattener, get_logger\n\nBASE_PATH = '/data/arc'\n\n# BASE_PATH = '../input/abstraction-and-reasoning-challenge/'\n\nDEBUG = True\n\nlogger = get_logger()\n\n\ndef make_prediction(tasks, solver):\n result = pd.Series()\n for idx, task in tqdm(tasks.items()):\n task_result = solver.train(task['train'])\n if task_result:\n pred = solver.predict(task['test'])\n else:\n pred = [el['input'] for el in task['test']]\n\n for i, p in enumerate(pred):\n result[f'{idx}_{i}'] = flattener(np.array(p).tolist())\n\n return result\n\n\ndef calc_score(task_test, predict):\n def comp(out, pred):\n try:\n return int(np.equal(out, pred).all())\n except:\n return 0\n\n return [comp(sample['output'], pred) for sample, pred in zip(task_test, predict)]\n\n\ndef evaluate(tasks, solver):\n result = []\n predictions = []\n for i, task in enumerate(tqdm(tasks)):\n task_result = solver.train(task['train'])\n if task_result:\n pred = solver.predict(task['test'])\n score = calc_score(task['test'], pred)\n else:\n pred = [el['input'] for el in task['test']]\n score = [0] * len(task['test'])\n\n predictions.append(pred)\n result.append(score)\n\n return result, predictions\n\n\nif __name__ == '__main__':\n\n task_solver_1 = StackedTaskSolver(logger)\n task_solver_2 = TaskSolverTree(logger)\n task_solver_3 = TaskSolverColor(logger)\n\n solvers = [task_solver_2]\n\n train_tasks = load_data(BASE_PATH + '/training')\n evaluation_tasks = load_data(BASE_PATH + '/evaluation')\n test_tasks = load_data(BASE_PATH + '/test')\n\n submissions = []\n\n for i, task_solver in enumerate(solvers):\n\n logger.info(f'task solver {i}')\n\n if DEBUG:\n train_result, train_predictions = evaluate(train_tasks, task_solver)\n train_solved = [any(score) for score in train_result]\n\n total = sum([len(score) for score in train_result])\n logger.info(f\"train solved : {sum(train_solved)} from {total} ({sum(train_solved) / total})\")\n\n evaluation_result, evaluation_predictions = evaluate(evaluation_tasks, task_solver)\n evaluation_solved = [any(score) for score in evaluation_result]\n\n total = sum([len(score) for score in evaluation_result])\n logger.info(f\"evaluation solved : {sum(evaluation_solved)} from {total} ({sum(evaluation_solved) / total})\")\n\n with open('pkl/evaluation_tasks.pkl', 'w+b') as f:\n pickle.dump(evaluation_tasks, f)\n with open('pkl/evaluation_result.pkl', 'w+b') as f:\n pickle.dump(evaluation_result, f)\n with open('pkl/evaluation_predictions.pkl', 'w+b') as f:\n pickle.dump(evaluation_predictions, f)\n with open('pkl/evaluation_solved.pkl', 'w+b') as f:\n pickle.dump(evaluation_solved, f)\n\n submission = make_prediction(test_tasks, task_solver)\n submission = submission.reset_index()\n submission.columns = ['output_id', f'output_{i}']\n submission = submission.sort_values(by=\"output_id\")\n submissions.append(submission)\n\n submission = pd.merge(submissions[0], submissions[1], on='output_id')\n submission = pd.merge(submission, submissions[2], on='output_id')\n\n def merge_cols(row):\n c1 = row[1].strip().split(\" \")[:1]\n c2 = row[2].strip().split(\" \")[:1]\n c3 = row[3].strip().split(\" \")[:1]\n return ' '.join(c1 + c2 + c3)\n\n submission['output'] = submission.apply(merge_cols, axis=1)\n\n submission[['output_id', 'output']].to_csv('submission.csv', index=False)\n" 
]
[ [ "numpy.equal", "numpy.array", "pandas.merge", "pandas.Series" ] ]
tomicapretto/pymc3
[ "692a09f816acb573ba35927f930a214989b1c519" ]
[ "pymc3/tests/test_variational_inference.py" ]
[ "# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport functools\nimport io\nimport operator\nimport numpy as np\nfrom theano import theano, tensor as tt\n\n\nimport pymc3 as pm\nimport pymc3.memoize\nimport pymc3.util\nfrom pymc3.theanof import (\n change_flags,\n intX,\n)\nfrom pymc3.variational.approximations import (\n MeanFieldGroup, FullRankGroup,\n NormalizingFlowGroup, EmpiricalGroup,\n MeanField, FullRank, NormalizingFlow, Empirical\n)\nfrom pymc3.variational.inference import (\n ADVI, FullRankADVI, SVGD, NFVI, ASVGD,\n fit\n)\nfrom pymc3.variational import flows\nfrom pymc3.variational.opvi import Approximation, Group\nfrom pymc3.variational import opvi\nfrom . import models\nfrom .helpers import not_raises\n\npytestmark = pytest.mark.usefixtures(\n 'strict_float32',\n 'seeded_test'\n)\n\n\[email protected](\n 'diff',\n [\n 'relative',\n 'absolute'\n ]\n)\[email protected](\n 'ord',\n [1, 2, np.inf]\n)\ndef test_callbacks_convergence(diff, ord):\n cb = pm.variational.callbacks.CheckParametersConvergence(every=1, diff=diff, ord=ord)\n\n class _approx:\n params = (theano.shared(np.asarray([1, 2, 3])), )\n\n approx = _approx()\n\n with pytest.raises(StopIteration):\n cb(approx, None, 1)\n cb(approx, None, 10)\n\n\ndef test_tracker_callback():\n import time\n tracker = pm.callbacks.Tracker(\n ints=lambda *t: t[-1],\n ints2=lambda ap, h, j: j,\n time=time.time,\n )\n for i in range(10):\n tracker(None, None, i)\n assert 'time' in tracker.hist\n assert 'ints' in tracker.hist\n assert 'ints2' in tracker.hist\n assert (len(tracker['ints'])\n == len(tracker['ints2'])\n == len(tracker['time'])\n == 10)\n assert tracker['ints'] == tracker['ints2'] == list(range(10))\n tracker = pm.callbacks.Tracker(\n bad=lambda t: t # bad signature\n )\n with pytest.raises(TypeError):\n tracker(None, None, 1)\n\n\[email protected]('module')\ndef three_var_model():\n with pm.Model() as model:\n pm.HalfNormal('one', shape=(10, 2), total_size=100)\n pm.Normal('two', shape=(10, ))\n pm.Normal('three', shape=(10, 1, 2))\n return model\n\n\[email protected](\n ['raises', 'grouping'],\n [\n (not_raises(), {MeanFieldGroup: None}),\n (not_raises(), {FullRankGroup: None, MeanFieldGroup: ['one']}),\n (not_raises(), {MeanFieldGroup: ['one'], FullRankGroup: ['two'], NormalizingFlowGroup: ['three']}),\n (pytest.raises(TypeError, match='Found duplicates'),\n {MeanFieldGroup: ['one'], FullRankGroup: ['two', 'one'], NormalizingFlowGroup: ['three']}),\n (pytest.raises(TypeError, match='No approximation is specified'), {MeanFieldGroup: ['one', 'two']}),\n (not_raises(), {MeanFieldGroup: ['one'], FullRankGroup: ['two', 'three']}),\n ]\n)\ndef test_init_groups(three_var_model, raises, grouping):\n with raises, three_var_model:\n approxes, groups = zip(*grouping.items())\n groups = [list(map(functools.partial(getattr, three_var_model), g))\n if g is not None else None\n for g in groups]\n inited_groups = [a(group=g) for a, g in zip(approxes, groups)]\n approx = 
Approximation(inited_groups)\n for ig, g in zip(inited_groups, groups):\n if g is None:\n pass\n else:\n assert set(pm.util.get_transformed(z) for z in g) == set(ig.group)\n else:\n assert approx.ndim == three_var_model.ndim\n\n\[email protected](params=[\n ({}, {MeanFieldGroup: (None, {})}),\n ({}, {FullRankGroup: (None, {}), MeanFieldGroup: (['one'], {})}),\n ({}, {MeanFieldGroup: (['one'], {}), FullRankGroup: (['two'], {}),\n NormalizingFlowGroup: (['three'], {'flow': 'scale-hh*2-planar-radial-loc'})}),\n ({}, {MeanFieldGroup: (['one'], {}), FullRankGroup: (['two', 'three'], {})}),\n ({}, {MeanFieldGroup: (['one'], {}), EmpiricalGroup: (['two', 'three'], {'size': 100})})\n],\n ids=lambda t: ', '.join('%s: %s' % (k.__name__, v[0]) for k, v in t[1].items())\n)\ndef three_var_groups(request, three_var_model):\n kw, grouping = request.param\n approxes, groups = zip(*grouping.items())\n groups, gkwargs = zip(*groups)\n groups = [list(map(functools.partial(getattr, three_var_model), g))\n if g is not None else None\n for g in groups]\n inited_groups = [a(group=g, model=three_var_model, **gk) for a, g, gk in zip(approxes, groups, gkwargs)]\n return inited_groups\n\n\[email protected]\ndef three_var_approx(three_var_model, three_var_groups):\n approx = Approximation(three_var_groups, model=three_var_model)\n return approx\n\n\[email protected]\ndef three_var_approx_single_group_mf(three_var_model):\n return MeanField(model=three_var_model)\n\n\[email protected](\n params = [\n ('ndarray', None),\n ('text', 'test'),\n ('sqlite', 'test.sqlite'),\n ('hdf5', 'test.h5')\n ]\n)\ndef test_sample_simple(three_var_approx, request):\n backend, name = request.param\n trace = three_var_approx.sample(100, backend=backend, name=name)\n assert set(trace.varnames) == {'one', 'one_log__', 'three', 'two'}\n assert len(trace) == 100\n assert trace[0]['one'].shape == (10, 2)\n assert trace[0]['two'].shape == (10, )\n assert trace[0]['three'].shape == (10, 1, 2)\n\n\[email protected]\ndef aevb_initial():\n return theano.shared(np.random.rand(3, 7).astype('float32'))\n\n\[email protected](\n params=[\n (MeanFieldGroup, {}),\n (FullRankGroup, {}),\n (NormalizingFlowGroup, {'flow': 'scale'}),\n (NormalizingFlowGroup, {'flow': 'loc'}),\n (NormalizingFlowGroup, {'flow': 'hh'}),\n (NormalizingFlowGroup, {'flow': 'planar'}),\n (NormalizingFlowGroup, {'flow': 'radial'}),\n (NormalizingFlowGroup, {'flow': 'radial-loc'})\n ],\n ids=lambda t: '{c}: {d}'.format(c=t[0].__name__, d=t[1])\n)\ndef parametric_grouped_approxes(request):\n return request.param\n\n\[email protected]\ndef three_var_aevb_groups(parametric_grouped_approxes, three_var_model, aevb_initial):\n dsize = np.prod(pymc3.util.get_transformed(three_var_model.one).dshape[1:])\n cls, kw = parametric_grouped_approxes\n spec = cls.get_param_spec_for(d=dsize, **kw)\n params = dict()\n for k, v in spec.items():\n if isinstance(k, int):\n params[k] = dict()\n for k_i, v_i in v.items():\n params[k][k_i] = aevb_initial.dot(np.random.rand(7, *v_i).astype('float32'))\n else:\n params[k] = aevb_initial.dot(np.random.rand(7, *v).astype('float32'))\n aevb_g = cls([three_var_model.one], params=params, model=three_var_model, local=True)\n return [aevb_g, MeanFieldGroup(None, model=three_var_model)]\n\n\[email protected]\ndef three_var_aevb_approx(three_var_model, three_var_aevb_groups):\n approx = Approximation(three_var_aevb_groups, model=three_var_model)\n return approx\n\n\ndef test_sample_aevb(three_var_aevb_approx, aevb_initial):\n pm.KLqp(three_var_aevb_approx).fit(1, 
more_replacements={\n aevb_initial: np.zeros_like(aevb_initial.get_value())[:1]\n })\n aevb_initial.set_value(np.random.rand(7, 7).astype('float32'))\n trace = three_var_aevb_approx.sample(500)\n assert set(trace.varnames) == {'one', 'one_log__', 'two', 'three'}\n assert len(trace) == 500\n assert trace[0]['one'].shape == (7, 2)\n assert trace[0]['two'].shape == (10, )\n assert trace[0]['three'].shape == (10, 1, 2)\n\n aevb_initial.set_value(np.random.rand(13, 7).astype('float32'))\n trace = three_var_aevb_approx.sample(500)\n assert set(trace.varnames) == {'one', 'one_log__', 'two', 'three'}\n assert len(trace) == 500\n assert trace[0]['one'].shape == (13, 2)\n assert trace[0]['two'].shape == (10,)\n assert trace[0]['three'].shape == (10, 1, 2)\n\n\ndef test_replacements_in_sample_node_aevb(three_var_aevb_approx, aevb_initial):\n inp = tt.matrix(dtype='float32')\n three_var_aevb_approx.sample_node(\n three_var_aevb_approx.model.one, 2,\n more_replacements={aevb_initial: inp}).eval({inp: np.random.rand(7, 7).astype('float32')})\n\n three_var_aevb_approx.sample_node(\n three_var_aevb_approx.model.one, None,\n more_replacements={aevb_initial: inp}).eval({inp: np.random.rand(7, 7).astype('float32')})\n\n\ndef test_vae():\n minibatch_size = 10\n data = pm.floatX(np.random.rand(100))\n x_mini = pm.Minibatch(data, minibatch_size)\n x_inp = tt.vector()\n x_inp.tag.test_value = data[:minibatch_size]\n\n ae = theano.shared(pm.floatX([.1, .1]))\n be = theano.shared(pm.floatX(1.))\n\n ad = theano.shared(pm.floatX(1.))\n bd = theano.shared(pm.floatX(1.))\n\n enc = x_inp.dimshuffle(0, 'x') * ae.dimshuffle('x', 0) + be\n mu, rho = enc[:, 0], enc[:, 1]\n\n with pm.Model():\n # Hidden variables\n zs = pm.Normal('zs', mu=0, sigma=1, shape=minibatch_size)\n dec = zs * ad + bd\n # Observation model\n pm.Normal('xs_', mu=dec, sigma=0.1, observed=x_inp)\n\n pm.fit(1, local_rv={zs: dict(mu=mu, rho=rho)},\n more_replacements={x_inp: x_mini}, more_obj_params=[ae, be, ad, bd])\n\n\ndef test_logq_mini_1_sample_1_var(parametric_grouped_approxes, three_var_model):\n cls, kw = parametric_grouped_approxes\n approx = cls([three_var_model.one], model=three_var_model, **kw)\n logq = approx.logq\n logq = approx.set_size_and_deterministic(logq, 1, 0)\n logq.eval()\n\n\ndef test_logq_mini_2_sample_2_var(parametric_grouped_approxes, three_var_model):\n cls, kw = parametric_grouped_approxes\n approx = cls([three_var_model.one, three_var_model.two], model=three_var_model, **kw)\n logq = approx.logq\n logq = approx.set_size_and_deterministic(logq, 2, 0)\n logq.eval()\n\n\ndef test_logq_mini_sample_aevb(three_var_aevb_groups):\n approx = three_var_aevb_groups[0]\n logq, symbolic_logq = approx.set_size_and_deterministic([approx.logq, approx.symbolic_logq], 3, 0)\n e = logq.eval()\n es = symbolic_logq.eval()\n assert e.shape == ()\n assert es.shape == (3,)\n\n\ndef test_logq_aevb(three_var_aevb_approx):\n approx = three_var_aevb_approx\n logq, symbolic_logq = approx.set_size_and_deterministic([approx.logq, approx.symbolic_logq], 1, 0)\n e = logq.eval()\n es = symbolic_logq.eval()\n assert e.shape == ()\n assert es.shape == (1,)\n\n logq, symbolic_logq = approx.set_size_and_deterministic([approx.logq, approx.symbolic_logq], 2, 0)\n e = logq.eval()\n es = symbolic_logq.eval()\n assert e.shape == ()\n assert es.shape == (2,)\n\n\ndef test_logq_globals(three_var_approx):\n if not three_var_approx.has_logq:\n pytest.skip('%s does not implement logq' % three_var_approx)\n approx = three_var_approx\n logq, symbolic_logq = 
approx.set_size_and_deterministic([approx.logq, approx.symbolic_logq], 1, 0)\n e = logq.eval()\n es = symbolic_logq.eval()\n assert e.shape == ()\n assert es.shape == (1,)\n\n logq, symbolic_logq = approx.set_size_and_deterministic([approx.logq, approx.symbolic_logq], 2, 0)\n e = logq.eval()\n es = symbolic_logq.eval()\n assert e.shape == ()\n assert es.shape == (2,)\n\n\[email protected](\n 'raises, vfam, type_, kw',\n [\n (not_raises(), 'mean_field', MeanFieldGroup, {}),\n (not_raises(), 'mf', MeanFieldGroup, {}),\n (not_raises(), 'full_rank', FullRankGroup, {}),\n (not_raises(), 'fr', FullRankGroup, {}),\n (not_raises(), 'FR', FullRankGroup, {}),\n (not_raises(), 'loc', NormalizingFlowGroup, {}),\n (not_raises(), 'scale', NormalizingFlowGroup, {}),\n (not_raises(), 'hh', NormalizingFlowGroup, {}),\n (not_raises(), 'planar', NormalizingFlowGroup, {}),\n (not_raises(), 'radial', NormalizingFlowGroup, {}),\n (not_raises(), 'scale-loc', NormalizingFlowGroup, {}),\n (pytest.raises(ValueError, match='Need `trace` or `size`'), 'empirical', EmpiricalGroup, {}),\n (not_raises(), 'empirical', EmpiricalGroup, {'size': 100}),\n ]\n)\ndef test_group_api_vfam(three_var_model, raises, vfam, type_, kw):\n with three_var_model, raises:\n g = Group([three_var_model.one], vfam, **kw)\n assert isinstance(g, type_)\n assert not hasattr(g, '_kwargs')\n if isinstance(g, NormalizingFlowGroup):\n assert isinstance(g.flow, pm.flows.AbstractFlow)\n assert g.flow.formula == vfam\n\n\[email protected](\n 'raises, params, type_, kw, formula',\n [\n (not_raises(),\n dict(mu=np.ones((10, 2), 'float32'), rho=np.ones((10, 2), 'float32')),\n MeanFieldGroup, {}, None),\n\n (not_raises(),\n dict(mu=np.ones((10, 2), 'float32'),\n L_tril=np.ones(\n FullRankGroup.get_param_spec_for(d=np.prod((10, 2)))['L_tril'],\n 'float32'\n )),\n FullRankGroup, {}, None),\n\n (not_raises(),\n {0: dict(loc=np.ones((10, 2), 'float32'))},\n NormalizingFlowGroup, {}, 'loc'),\n\n (not_raises(),\n {0: dict(rho=np.ones((10, 2), 'float32'))},\n NormalizingFlowGroup, {}, 'scale'),\n\n (not_raises(),\n {0: dict(v=np.ones((10, 2), 'float32'),)},\n NormalizingFlowGroup, {}, 'hh'),\n\n (not_raises(),\n {0: dict(u=np.ones((10, 2), 'float32'),\n w=np.ones((10, 2), 'float32'),\n b=1.)},\n NormalizingFlowGroup, {}, 'planar'),\n\n (not_raises(),\n {0: dict(z_ref=np.ones((10, 2), 'float32'),\n a=1.,\n b=1.)},\n NormalizingFlowGroup, {}, 'radial'),\n\n (not_raises(),\n {0: dict(rho=np.ones((10, 2), 'float32')),\n 1: dict(loc=np.ones((10, 2), 'float32'))},\n NormalizingFlowGroup, {}, 'scale-loc'),\n\n (not_raises(), dict(histogram=np.ones((20, 10, 2), 'float32')), EmpiricalGroup, {}, None),\n ]\n)\ndef test_group_api_params(three_var_model, raises, params, type_, kw, formula):\n with three_var_model, raises:\n g = Group([three_var_model.one], params=params, **kw)\n assert isinstance(g, type_)\n if isinstance(g, NormalizingFlowGroup):\n assert g.flow.formula == formula\n if g.has_logq:\n # should work as well\n logq = g.logq\n logq = g.set_size_and_deterministic(logq, 1, 0)\n logq.eval()\n\n\[email protected](\n 'gcls, approx, kw',\n [\n (MeanFieldGroup, MeanField, {}),\n (FullRankGroup, FullRank, {}),\n (EmpiricalGroup, Empirical, {'size': 100}),\n (NormalizingFlowGroup, NormalizingFlow, {'flow': 'loc'}),\n (NormalizingFlowGroup, NormalizingFlow, {'flow': 'scale-loc-scale'}),\n (NormalizingFlowGroup, NormalizingFlow, {})\n ]\n)\ndef test_single_group_shortcuts(three_var_model, approx, kw, gcls):\n with three_var_model:\n a = approx(**kw)\n assert isinstance(a, 
Approximation)\n        assert len(a.groups) == 1\n        assert isinstance(a.groups[0], gcls)\n        if isinstance(a, NormalizingFlow):\n            assert a.flow.formula == kw.get('flow', NormalizingFlowGroup.default_flow)\n\n\ndef test_elbo():\n    mu0 = 1.5\n    sigma = 1.0\n    y_obs = np.array([1.6, 1.4])\n\n    post_mu = np.array([1.88], dtype=theano.config.floatX)\n    post_sigma = np.array([1], dtype=theano.config.floatX)\n    # Create a model for test\n    with pm.Model() as model:\n        mu = pm.Normal('mu', mu=mu0, sigma=sigma)\n        pm.Normal('y', mu=mu, sigma=1, observed=y_obs)\n\n    # Create variational gradient tensor\n    mean_field = MeanField(model=model)\n    with pm.theanof.change_flags(compute_test_value='off'):\n        elbo = -pm.operators.KL(mean_field)()(10000)\n\n    mean_field.shared_params['mu'].set_value(post_mu)\n    mean_field.shared_params['rho'].set_value(np.log(np.exp(post_sigma) - 1))\n\n    f = theano.function([], elbo)\n    elbo_mc = f()\n\n    # Exact value\n    elbo_true = (-0.5 * (\n        3 + 3 * post_mu ** 2 - 2 * (y_obs[0] + y_obs[1] + mu0) * post_mu +\n        y_obs[0] ** 2 + y_obs[1] ** 2 + mu0 ** 2 + 3 * np.log(2 * np.pi)) +\n        0.5 * (np.log(2 * np.pi) + 1))\n    np.testing.assert_allclose(elbo_mc, elbo_true, rtol=0, atol=1e-1)\n\n\n@pytest.mark.parametrize(\n    'aux_total_size',\n    range(2, 10, 3)\n)\ndef test_scale_cost_to_minibatch_works(aux_total_size):\n    mu0 = 1.5\n    sigma = 1.0\n    y_obs = np.array([1.6, 1.4])\n    beta = len(y_obs)/float(aux_total_size)\n    post_mu = np.array([1.88], dtype=theano.config.floatX)\n    post_sigma = np.array([1], dtype=theano.config.floatX)\n\n    # TODO: theano_config\n    # with pm.Model(theano_config=dict(floatX='float64')):\n    # did not not work as expected\n    # there were some numeric problems, so float64 is forced\n    with pm.theanof.change_flags(floatX='float64', warn_float64='ignore'):\n        with pm.Model():\n            assert theano.config.floatX == 'float64'\n            assert theano.config.warn_float64 == 'ignore'\n            mu = pm.Normal('mu', mu=mu0, sigma=sigma)\n            pm.Normal('y', mu=mu, sigma=1, observed=y_obs, total_size=aux_total_size)\n            # Create variational gradient tensor\n            mean_field_1 = MeanField()\n            assert mean_field_1.scale_cost_to_minibatch\n            mean_field_1.shared_params['mu'].set_value(post_mu)\n            mean_field_1.shared_params['rho'].set_value(np.log(np.exp(post_sigma) - 1))\n\n            with pm.theanof.change_flags(compute_test_value='off'):\n                elbo_via_total_size_scaled = -pm.operators.KL(mean_field_1)()(10000)\n\n        with pm.Model():\n            mu = pm.Normal('mu', mu=mu0, sigma=sigma)\n            pm.Normal('y', mu=mu, sigma=1, observed=y_obs, total_size=aux_total_size)\n            # Create variational gradient tensor\n            mean_field_2 = MeanField()\n            assert mean_field_1.scale_cost_to_minibatch\n            mean_field_2.scale_cost_to_minibatch = False\n            assert not mean_field_2.scale_cost_to_minibatch\n            mean_field_2.shared_params['mu'].set_value(post_mu)\n            mean_field_2.shared_params['rho'].set_value(np.log(np.exp(post_sigma) - 1))\n\n            with pm.theanof.change_flags(compute_test_value='off'):\n                elbo_via_total_size_unscaled = -pm.operators.KL(mean_field_2)()(10000)\n\n    np.testing.assert_allclose(elbo_via_total_size_unscaled.eval(),\n                               elbo_via_total_size_scaled.eval() * pm.floatX(1 / beta), rtol=0.02, atol=1e-1)\n\n\n@pytest.mark.parametrize(\n    'aux_total_size',\n    range(2, 10, 3)\n)\ndef test_elbo_beta_kl(aux_total_size):\n    mu0 = 1.5\n    sigma = 1.0\n    y_obs = np.array([1.6, 1.4])\n    beta = len(y_obs)/float(aux_total_size)\n    post_mu = np.array([1.88], dtype=theano.config.floatX)\n    post_sigma = np.array([1], dtype=theano.config.floatX)\n    with pm.theanof.change_flags(floatX='float64', warn_float64='ignore'):\n        with pm.Model():\n            mu 
= pm.Normal('mu', mu=mu0, sigma=sigma)\n            pm.Normal('y', mu=mu, sigma=1, observed=y_obs, total_size=aux_total_size)\n            # Create variational gradient tensor\n            mean_field_1 = MeanField()\n            mean_field_1.scale_cost_to_minibatch = True\n            mean_field_1.shared_params['mu'].set_value(post_mu)\n            mean_field_1.shared_params['rho'].set_value(np.log(np.exp(post_sigma) - 1))\n\n            with pm.theanof.change_flags(compute_test_value='off'):\n                elbo_via_total_size_scaled = -pm.operators.KL(mean_field_1)()(10000)\n\n        with pm.Model():\n            mu = pm.Normal('mu', mu=mu0, sigma=sigma)\n            pm.Normal('y', mu=mu, sigma=1, observed=y_obs)\n            # Create variational gradient tensor\n            mean_field_3 = MeanField()\n            mean_field_3.shared_params['mu'].set_value(post_mu)\n            mean_field_3.shared_params['rho'].set_value(np.log(np.exp(post_sigma) - 1))\n\n            with pm.theanof.change_flags(compute_test_value='off'):\n                elbo_via_beta_kl = -pm.operators.KL(mean_field_3, beta=beta)()(10000)\n\n    np.testing.assert_allclose(elbo_via_total_size_scaled.eval(), elbo_via_beta_kl.eval(), rtol=0, atol=1e-1)\n\n\n@pytest.fixture(\n    'module',\n    params=[True, False],\n    ids=['mini', 'full']\n)\ndef use_minibatch(request):\n    return request.param\n\n\n@pytest.fixture\ndef simple_model_data(use_minibatch):\n    n = 1000\n    sigma0 = 2.\n    mu0 = 4.\n    sigma = 3.\n    mu = -5.\n\n    data = sigma * np.random.randn(n) + mu\n    d = n / sigma ** 2 + 1 / sigma0 ** 2\n    mu_post = (n * np.mean(data) / sigma ** 2 + mu0 / sigma0 ** 2) / d\n    if use_minibatch:\n        data = pm.Minibatch(data)\n    return dict(\n        n=n,\n        data=data,\n        mu_post=mu_post,\n        d=d,\n        mu0=mu0,\n        sigma0=sigma0,\n        sigma=sigma,\n    )\n\n\n@pytest.fixture\ndef simple_model(simple_model_data):\n    with pm.Model() as model:\n        mu_ = pm.Normal(\n            'mu', mu=simple_model_data['mu0'],\n            sigma=simple_model_data['sigma0'], testval=0)\n        pm.Normal('x', mu=mu_, sigma=simple_model_data['sigma'],\n                  observed=simple_model_data['data'],\n                  total_size=simple_model_data['n'])\n    return model\n\n\n@pytest.fixture('module', params=[\n    dict(cls=NFVI, init=dict(flow='scale-loc')),\n    dict(cls=ADVI, init=dict()),\n    dict(cls=FullRankADVI, init=dict()),\n    dict(cls=SVGD, init=dict(n_particles=500, jitter=1)),\n    dict(cls=ASVGD, init=dict(temperature=1.)),\n    ], ids=[\n    'NFVI=scale-loc',\n    'ADVI',\n    'FullRankADVI',\n    'SVGD',\n    'ASVGD'\n    ])\ndef inference_spec(request):\n    cls = request.param['cls']\n    init = request.param['init']\n\n    def init_(**kw):\n        k = init.copy()\n        k.update(kw)\n        return cls(**k)\n    init_.cls = cls\n    return init_\n\n\n@pytest.fixture('function')\ndef inference(inference_spec, simple_model):\n    with simple_model:\n        return inference_spec()\n\n\n@pytest.fixture('function')\ndef fit_kwargs(inference, use_minibatch):\n    _select = {\n        (ADVI, 'full'): dict(\n            obj_optimizer=pm.adagrad_window(learning_rate=0.02, n_win=50),\n            n=5000\n        ),\n        (ADVI, 'mini'): dict(\n            obj_optimizer=pm.adagrad_window(learning_rate=0.01, n_win=50),\n            n=12000\n        ),\n        (NFVI, 'full'): dict(\n            obj_optimizer=pm.adagrad_window(learning_rate=0.01, n_win=50),\n            n=12000\n        ),\n        (NFVI, 'mini'): dict(\n            obj_optimizer=pm.adagrad_window(learning_rate=0.01, n_win=50),\n            n=12000\n        ),\n        (FullRankADVI, 'full'): dict(\n            obj_optimizer=pm.adagrad_window(learning_rate=0.007, n_win=50),\n            n=6000\n        ),\n        (FullRankADVI, 'mini'): dict(\n            obj_optimizer=pm.adagrad_window(learning_rate=0.007, n_win=50),\n            n=12000\n        ),\n        (SVGD, 'full'): dict(\n            obj_optimizer=pm.adagrad_window(learning_rate=0.075, n_win=7),\n            n=300\n        ),\n        (SVGD, 'mini'): dict(\n            obj_optimizer=pm.adagrad_window(learning_rate=0.075, n_win=7),\n            n=300\n        
),\n        (ASVGD, 'full'): dict(\n            obj_optimizer=pm.adagrad_window(learning_rate=0.07, n_win=10),\n            n=500, obj_n_mc=300\n        ),\n        (ASVGD, 'mini'): dict(\n            obj_optimizer=pm.adagrad_window(learning_rate=0.07, n_win=10),\n            n=500, obj_n_mc=300\n        )\n    }\n    if use_minibatch:\n        key = 'mini'\n        # backward compat for PR#3071\n        inference.approx.scale_cost_to_minibatch = False\n    else:\n        key = 'full'\n    return _select[(type(inference), key)]\n\n\n@pytest.mark.run('first')\ndef test_fit_oo(inference,\n                fit_kwargs,\n                simple_model_data):\n    trace = inference.fit(**fit_kwargs).sample(10000)\n    mu_post = simple_model_data['mu_post']\n    d = simple_model_data['d']\n    np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.05)\n    np.testing.assert_allclose(np.std(trace['mu']), np.sqrt(1. / d), rtol=0.1)\n\n\ndef test_profile(inference):\n    inference.run_profiling(n=100).summary()\n\n\ndef test_remove_scan_op():\n    with pm.Model():\n        pm.Normal('n', 0, 1)\n        inference = ADVI()\n        buff = io.StringIO()\n        inference.run_profiling(n=10).summary(buff)\n        assert 'theano.scan_module.scan_op.Scan' not in buff.getvalue()\n        buff.close()\n\n\ndef test_clear_cache():\n    import pickle\n    pymc3.memoize.clear_cache()\n    assert all(len(c) == 0 for c in pymc3.memoize.CACHE_REGISTRY)\n    with pm.Model():\n        pm.Normal('n', 0, 1)\n        inference = ADVI()\n        inference.fit(n=10)\n        assert any(len(c) != 0 for c in inference.approx._cache.values())\n        pymc3.memoize.clear_cache(inference.approx)\n        # should not be cleared at this call\n        assert all(len(c) == 0 for c in inference.approx._cache.values())\n        new_a = pickle.loads(pickle.dumps(inference.approx))\n        assert not hasattr(new_a, '_cache')\n        inference_new = pm.KLqp(new_a)\n        inference_new.fit(n=10)\n        assert any(len(c) != 0 for c in inference_new.approx._cache.values())\n        pymc3.memoize.clear_cache(inference_new.approx)\n        assert all(len(c) == 0 for c in inference_new.approx._cache.values())\n\n\n@pytest.fixture('module')\ndef another_simple_model():\n    _model = models.simple_model()[1]\n    with _model:\n        pm.Potential('pot', tt.ones((10, 10)))\n    return _model\n\n\n@pytest.fixture(params=[\n    dict(name='advi', kw=dict(start={})),\n    dict(name='fullrank_advi', kw=dict(start={})),\n    dict(name='svgd', kw=dict(start={}))],\n    ids=lambda d: d['name']\n)\ndef fit_method_with_object(request, another_simple_model):\n    _select = dict(\n        advi=ADVI,\n        fullrank_advi=FullRankADVI,\n        svgd=SVGD\n    )\n    with another_simple_model:\n        return _select[request.param['name']](\n            **request.param['kw'])\n\n\n@pytest.mark.parametrize(\n    ['method', 'kwargs', 'error'],\n    [\n        ('undefined', dict(), KeyError),\n        (1, dict(), TypeError),\n        ('advi', dict(total_grad_norm_constraint=10), None),\n        ('fullrank_advi', dict(), None),\n        ('svgd', dict(total_grad_norm_constraint=10), None),\n        ('svgd', dict(start={}), None),\n        # start argument is not allowed for ASVGD\n        ('asvgd', dict(start={}, total_grad_norm_constraint=10), TypeError),\n        ('asvgd', dict(total_grad_norm_constraint=10), None),\n        ('nfvi', dict(start={}), None),\n        ('nfvi=scale-loc', dict(start={}), None),\n        ('nfvi=bad-formula', dict(start={}), KeyError),\n    ],\n)\ndef test_fit_fn_text(method, kwargs, error, another_simple_model):\n    with another_simple_model:\n        if error is not None:\n            with pytest.raises(error):\n                fit(10, method=method, **kwargs)\n        else:\n            fit(10, method=method, **kwargs)\n\n\n@pytest.fixture('module')\ndef aevb_model():\n    with pm.Model() as model:\n        pm.HalfNormal('x', shape=(2,), total_size=5)\n        pm.Normal('y', shape=(2,))\n    x = model.x\n    y = model.y\n    mu = theano.shared(x.init_value)\n    rho = 
theano.shared(np.zeros_like(x.init_value))\n    return {\n        'model': model,\n        'y': y,\n        'x': x,\n        'replace': dict(mu=mu, rho=rho)\n    }\n\n\ndef test_aevb(inference_spec, aevb_model):\n    # add to inference that supports aevb\n    x = aevb_model['x']\n    y = aevb_model['y']\n    model = aevb_model['model']\n    replace = aevb_model['replace']\n    with model:\n        try:\n            inference = inference_spec(local_rv={x: {'mu': replace['mu']*5, 'rho': replace['rho']}})\n            approx = inference.fit(3, obj_n_mc=2, more_obj_params=list(replace.values()))\n            approx.sample(10)\n            approx.sample_node(\n                y,\n                more_replacements={x: np.asarray([1, 1], dtype=x.dtype)}\n            ).eval()\n        except pm.opvi.AEVBInferenceError:\n            pytest.skip('Does not support AEVB')\n\n\ndef test_rowwise_approx(three_var_model, parametric_grouped_approxes):\n    # add to inference that supports aevb\n    cls, kw = parametric_grouped_approxes\n    with three_var_model:\n        try:\n            approx = Approximation([cls([three_var_model.one], rowwise=True, **kw), Group(None, vfam='mf')])\n            inference = pm.KLqp(approx)\n            approx = inference.fit(3, obj_n_mc=2)\n            approx.sample(10)\n            approx.sample_node(\n                three_var_model.one\n            ).eval()\n        except pm.opvi.BatchedGroupError:\n            pytest.skip('Does not support rowwise grouping')\n\n\ndef test_pickle_approx(three_var_approx):\n    import pickle\n    dump = pickle.dumps(three_var_approx)\n    new = pickle.loads(dump)\n    assert new.sample(1)\n\n\ndef test_pickle_single_group(three_var_approx_single_group_mf):\n    import pickle\n    dump = pickle.dumps(three_var_approx_single_group_mf)\n    new = pickle.loads(dump)\n    assert new.sample(1)\n\n\ndef test_pickle_approx_aevb(three_var_aevb_approx):\n    import pickle\n    dump = pickle.dumps(three_var_aevb_approx)\n    new = pickle.loads(dump)\n    assert new.sample(1000)\n\n\n@pytest.fixture('module')\ndef binomial_model():\n    n_samples = 100\n    xs = intX(np.random.binomial(n=1, p=0.2, size=n_samples))\n    with pm.Model() as model:\n        p = pm.Beta('p', alpha=1, beta=1)\n        pm.Binomial('xs', n=1, p=p, observed=xs)\n    return model\n\n\n@pytest.fixture('module')\ndef binomial_model_inference(binomial_model, inference_spec):\n    with binomial_model:\n        return inference_spec()\n\n\n@pytest.mark.run(after='test_sample_replacements')\ndef test_replacements(binomial_model_inference):\n    d = tt.bscalar()\n    d.tag.test_value = 1\n    approx = binomial_model_inference.approx\n    p = approx.model.p\n    p_t = p ** 3\n    p_s = approx.sample_node(p_t)\n    if theano.config.compute_test_value != 'off':\n        assert p_s.tag.test_value.shape == p_t.tag.test_value.shape\n    sampled = [p_s.eval() for _ in range(100)]\n    assert any(map(\n        operator.ne,\n        sampled[1:], sampled[:-1])\n    ) # stochastic\n\n    p_d = approx.sample_node(p_t, deterministic=True)\n    sampled = [p_d.eval() for _ in range(100)]\n    assert all(map(\n        operator.eq,\n        sampled[1:], sampled[:-1])\n    ) # deterministic\n\n    p_r = approx.sample_node(p_t, deterministic=d)\n    sampled = [p_r.eval({d: 1}) for _ in range(100)]\n    assert all(map(\n        operator.eq,\n        sampled[1:], sampled[:-1])\n    ) # deterministic\n    sampled = [p_r.eval({d: 0}) for _ in range(100)]\n    assert any(map(\n        operator.ne,\n        sampled[1:], sampled[:-1])\n    ) # stochastic\n\n\ndef test_sample_replacements(binomial_model_inference):\n    i = tt.iscalar()\n    i.tag.test_value = 1\n    approx = binomial_model_inference.approx\n    p = approx.model.p\n    p_t = p ** 3\n    p_s = approx.sample_node(p_t, size=100)\n    if theano.config.compute_test_value != 'off':\n        assert p_s.tag.test_value.shape == (100, ) + p_t.tag.test_value.shape\n    sampled = p_s.eval()\n    assert any(map(\n        operator.ne,\n        sampled[1:], 
sampled[:-1])\n    ) # stochastic\n    assert sampled.shape[0] == 100\n\n    p_d = approx.sample_node(p_t, size=i)\n    sampled = p_d.eval({i: 100})\n    assert any(map(\n        operator.ne,\n        sampled[1:], sampled[:-1])\n    ) # stochastic\n    assert sampled.shape[0] == 100\n    sampled = p_d.eval({i: 101})\n    assert sampled.shape[0] == 101\n\n\ndef test_discrete_not_allowed():\n    mu_true = np.array([-2, 0, 2])\n    z_true = np.random.randint(len(mu_true), size=100)\n    y = np.random.normal(mu_true[z_true], np.ones_like(z_true))\n\n    with pm.Model():\n        mu = pm.Normal('mu', mu=0, sigma=10, shape=3)\n        z = pm.Categorical('z', p=tt.ones(3) / 3, shape=len(y))\n        pm.Normal('y_obs', mu=mu[z], sigma=1., observed=y)\n        with pytest.raises(opvi.ParametrizationError):\n            pm.fit(n=1) # fails\n\n\ndef test_var_replacement():\n    X_mean = pm.floatX(np.linspace(0, 10, 10))\n    y = pm.floatX(np.random.normal(X_mean*4, .05))\n    with pm.Model():\n        inp = pm.Normal('X', X_mean, shape=X_mean.shape)\n        coef = pm.Normal('b', 4.)\n        mean = inp * coef\n        pm.Normal('y', mean, .1, observed=y)\n        advi = pm.fit(100)\n        assert advi.sample_node(mean).eval().shape == (10, )\n        x_new = pm.floatX(np.linspace(0, 10, 11))\n        assert advi.sample_node(mean, more_replacements={inp: x_new}).eval().shape == (11, )\n\n\ndef test_empirical_from_trace(another_simple_model):\n    with another_simple_model:\n        step = pm.Metropolis()\n        trace = pm.sample(100, step=step, chains=1)\n        emp = Empirical(trace)\n        assert emp.histogram.shape[0].eval() == 100\n        trace = pm.sample(100, step=step, chains=4)\n        emp = Empirical(trace)\n        assert emp.histogram.shape[0].eval() == 400\n\n\n@pytest.fixture(\n    params=[\n        dict(cls=flows.PlanarFlow, init=dict(jitter=.1)),\n        dict(cls=flows.RadialFlow, init=dict(jitter=.1)),\n        dict(cls=flows.ScaleFlow, init=dict(jitter=.1)),\n        dict(cls=flows.LocFlow, init=dict(jitter=.1)),\n        dict(cls=flows.HouseholderFlow, init=dict(jitter=.1)),\n    ],\n    ids=lambda d: d['cls'].__name__\n)\ndef flow_spec(request):\n    cls = request.param['cls']\n    init = request.param['init']\n\n    def init_(**kw):\n        k = init.copy()\n        k.update(kw)\n        return cls(**k)\n    init_.cls = cls\n    return init_\n\n\ndef test_flow_det(flow_spec):\n    z0 = tt.arange(0, 20).astype('float32')\n    flow = flow_spec(dim=20, z0=z0.dimshuffle('x', 0))\n    with change_flags(compute_test_value='off'):\n        z1 = flow.forward.flatten()\n        J = tt.jacobian(z1, z0)\n        logJdet = tt.log(tt.abs_(tt.nlinalg.det(J)))\n        det = flow.logdet[0]\n    np.testing.assert_allclose(logJdet.eval(), det.eval(), atol=0.0001)\n\n\ndef test_flow_det_local(flow_spec):\n    z0 = tt.arange(0, 12).astype('float32')\n    spec = flow_spec.cls.get_param_spec_for(d=12)\n    params = dict()\n    for k, shp in spec.items():\n        params[k] = np.random.randn(1, *shp).astype('float32')\n    flow = flow_spec(dim=12, z0=z0.reshape((1, 1, 12)), **params)\n    assert flow.batched\n    with change_flags(compute_test_value='off'):\n        z1 = flow.forward.flatten()\n        J = tt.jacobian(z1, z0)\n        logJdet = tt.log(tt.abs_(tt.nlinalg.det(J)))\n        det = flow.logdet[0]\n    np.testing.assert_allclose(logJdet.eval(), det.eval(), atol=0.0001)\n\n\ndef test_flows_collect_chain():\n    initial = tt.ones((3, 2))\n    flow1 = flows.PlanarFlow(dim=2, z0=initial)\n    flow2 = flows.PlanarFlow(dim=2, z0=flow1)\n    assert len(flow2.params) == 3\n    assert len(flow2.all_params) == 6\n    np.testing.assert_allclose(flow1.logdet.eval() + flow2.logdet.eval(), flow2.sum_logdets.eval())\n\n\n@pytest.mark.parametrize(\n    'formula,length,order',\n    [\n        ('planar', 1, [flows.PlanarFlow]),\n        ('planar*2', 2, [flows.PlanarFlow] * 2),\n        ('planar-planar', 2, [flows.PlanarFlow] * 
2),\n ('planar-planar*2', 3, [flows.PlanarFlow] * 3),\n ('hh-planar*2', 3, [flows.HouseholderFlow]+[flows.PlanarFlow] * 2)\n ]\n)\ndef test_flow_formula(formula, length, order):\n spec = flows.Formula(formula)\n flows_list = spec.flows\n assert len(flows_list) == length\n if order is not None:\n assert flows_list == order\n spec(dim=2, jitter=1)(tt.ones((3, 2))).eval() # should work\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.array", "numpy.zeros_like", "numpy.random.rand", "numpy.random.binomial", "numpy.ones_like", "numpy.random.normal", "numpy.asarray", "numpy.log", "numpy.ones", "numpy.random.randn", "numpy.exp", "numpy.mean", "numpy.std", "numpy.prod", "numpy.sqrt", "numpy.linspace" ] ]
justinfmccarty/CityEnergyAnalyst_bigmacc
[ "a7f2d6085e83730bdc4bcb2321e1613070372027" ]
[ "cea/interfaces/dashboard/api/databases.py" ]
[ "\n\n\nimport os\nfrom collections import OrderedDict\n\nfrom flask_restplus import Namespace, Resource, abort\nimport pandas as pd\n\nimport cea.schemas\nfrom cea.databases import get_regions, get_database_tree, databases_folder_path\nfrom cea.utilities.schedule_reader import schedule_to_dataframe\n\napi = Namespace(\"Databases\", description=\"Database data for technologies in CEA\")\n\nDATABASES_SCHEMA_KEYS = {\n    \"CONSTRUCTION_STANDARD\": [\"get_database_construction_standards\"],\n    \"USE_TYPES\": [\"get_database_standard_schedules_use\", \"get_database_use_types_properties\"],\n    \"SUPPLY\": [\"get_database_supply_assemblies\"],\n    \"HVAC\": [\"get_database_air_conditioning_systems\"],\n    \"ENVELOPE\": [\"get_database_envelope_systems\"],\n    \"CONVERSION\": [\"get_database_conversion_systems\"],\n    \"DISTRIBUTION\": [\"get_database_distribution_systems\"],\n    \"FEEDSTOCKS\": [\"get_database_feedstocks\"]\n}\n\n\n# FIXME: Using OrderedDict here due to Python2 unordered dict insertion, change when using Python3\ndef database_to_dict(db_path):\n    out = OrderedDict()\n    xls = pd.ExcelFile(db_path)\n    for sheet in xls.sheet_names:\n        df = xls.parse(sheet, keep_default_na=False)\n        out[sheet] = df.to_dict(orient='records', into=OrderedDict)\n    return out\n\n\ndef schedule_to_dict(schedule_path):\n    out = OrderedDict()\n    schedule_df = schedule_to_dataframe(schedule_path)\n    for df_name, df in schedule_df.items():\n        out[df_name] = df.to_dict(orient='records', into=OrderedDict)\n    return out\n\n\ndef read_all_databases(database_path):\n    out = OrderedDict()\n    db_info = get_database_tree(database_path)\n    for category in db_info['categories'].keys():\n        out[category] = OrderedDict()\n        for db_name in db_info['categories'][category]['databases']:\n            db_files = db_info['databases'][db_name]['files']\n            if db_name == 'USE_TYPES':\n                out[category][db_name] = OrderedDict()\n                out[category][db_name]['SCHEDULES'] = OrderedDict()\n                for db_file in db_files:\n                    # Use type property file\n                    if db_file['name'] == 'USE_TYPE_PROPERTIES':\n                        out[category][db_name]['USE_TYPE_PROPERTIES'] = database_to_dict(db_file['path'])\n                    # Schedule files\n                    elif db_file['extension'] == '.csv':\n                        out[category][db_name]['SCHEDULES'][db_file['name']] = schedule_to_dict(db_file['path'])\n            else:\n                for db_file in db_files:\n                    out[category][db_name] = database_to_dict(db_file['path'])\n    return out\n\n\n@api.route(\"/region\")\nclass DatabaseRegions(Resource):\n    def get(self):\n        return {'regions': get_regions()}\n\n\n@api.route(\"/region/<string:region>\")\nclass DatabaseRegion(Resource):\n    def get(self, region):\n        regions = get_regions()\n        if region not in regions:\n            abort(400, \"Could not find '{}' region. Try instead {}\".format(region, \", \".join(regions)))\n        return {\"categories\": get_database_tree(os.path.join(databases_folder_path, region))['categories']}\n\n\n@api.route(\"/region/<string:region>/databases\")\nclass DatabaseData(Resource):\n    def get(self, region):\n        regions = get_regions()\n        if region not in regions:\n            abort(400, \"Could not find '{}' region. 
Try instead {}\".format(region, \", \".join(regions)))\n        try:\n            return read_all_databases(os.path.join(databases_folder_path, region))\n        except IOError as e:\n            print(e)\n            abort(500, e.message)\n\n\ndef convert_path_to_name(schema_dict):\n    import cea.inputlocator\n    locator = cea.inputlocator.InputLocator('')\n    for sheet_name, sheet_info in schema_dict.items():\n        for variable_name, schema in sheet_info['columns'].items():\n            if 'choice' in schema and 'lookup' in schema['choice']:\n                database_path = locator.__getattribute__(schema['choice']['lookup']['path'])()\n                schema['choice']['lookup']['database_category'] = os.path.basename(os.path.dirname(database_path))\n                schema['choice']['lookup']['database_name'] = os.path.basename(os.path.splitext(database_path)[0])\n    return schema_dict\n\n\n@api.route(\"/schema\")\nclass DatabaseSchema(Resource):\n    def get(self):\n        import cea.scripts\n        schemas = cea.schemas.schemas(plugins=[])\n        out = {}\n        for db_name, db_schema_keys in DATABASES_SCHEMA_KEYS.items():\n            out[db_name] = {}\n            for db_schema_key in db_schema_keys:\n                try:\n                    out[db_name].update(convert_path_to_name(schemas[db_schema_key]['schema']))\n                except KeyError as ex:\n                    raise KeyError(\"Could not convert_path_to_name for {db_name}/{db_schema_key}. {ex.message}\".format(\n                        **locals()))\n        return out\n" ]
[ [ "pandas.ExcelFile" ] ]
Rowing0914/TF2_RL
[ "c1b7f9b376cbecf01deb17f76f8e761035ed336a" ]
[ "tf_rl/examples/NerveNet/scripts/util_test/graph_operator_test.py" ]
[ "from graph_util.mujoco_parser import parse_mujoco_graph\nfrom graph_util.graph_operator import GraphOperator\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nnode_info = parse_mujoco_graph(task_name=\"WalkersHopperone-v1\")\ngraph_operator = GraphOperator(input_dict=node_info[\"input_dict\"],\n output_list=node_info[\"output_list\"],\n obs_shape=11)\n\ngraph_operator.get_all_attributes()\ng = graph_operator._create_graph()\nnx_G = g.to_networkx()\nnx.draw(nx_G)\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show" ] ]
JiaLei123/ML_camp
[ "87ba197737160958c9e05b08b277772e80b564e4" ]
[ "MXnet/rnn/rnn_gluon_w2v.py" ]
[ "import math\nimport os\nimport time\nimport numpy as np\nimport mxnet as mx\nfrom mxnet import gluon, autograd\nfrom mxnet.gluon import nn, rnn\nimport zipfile\nfrom MXnet import utils\nfrom gensim.models import word2vec\n\nwith zipfile.ZipFile('../data/ptb.zip', 'r') as zin:\n zin.extractall('../data/')\n\n\nclass Dictionary(object):\n def __init__(self):\n self.word_to_idx = {}\n self.idx_to_word = []\n\n def add_word(self, word):\n if word not in self.word_to_idx:\n self.idx_to_word.append(word)\n self.word_to_idx[word] = len(self.idx_to_word) - 1 # 就是返回word在idx_to_word中的index值\n return self.word_to_idx[word]\n\n def __len__(self):\n return len(self.idx_to_word)\n\n\nclass Corpus(object):\n def __init__(self, path):\n self.dictionary = Dictionary()\n self.train, _train = self.tokenize(path + 'train.txt')\n self.valid, _val = self.tokenize(path + 'valid.txt')\n self.test, _test = self.tokenize(path + 'test.txt')\n all_sentences = list()\n all_sentences.extend(_train)\n all_sentences.extend(_val)\n all_sentences.extend(_test)\n self.w2v = word2vec.Word2Vec(all_sentences)\n\n def tokenize(self, path):\n assert os.path.exists(path)\n with open(path, 'r') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n with open(path, 'r') as f:\n indices = np.zeros((tokens,), dtype=\"int32\")\n idx = 0\n all_sentences = list()\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n indices[idx] = self.dictionary.word_to_idx[word]\n idx += 1\n all_sentences.append(words)\n return mx.nd.array(indices, dtype='int32'), all_sentences\n\n\n\nclass RNNModel(gluon.Block):\n def __init__(self, mode, vocab_size, embed_dim, hidden_dim, num_layers, w2v_vec, drop_out=0.5, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n with self.name_scope():\n self.drop = nn.Dropout(drop_out)\n # self.encoder = nn.Embedding(grad_red='null')\n # self.encoder.weight.set_data(w2v_vec)\n\n if mode == \"rnn_relu\":\n self.rnn = rnn.RNN(hidden_dim, num_layers, activation='relu', dropout=drop_out, input_size=embed_dim)\n elif mode == \"rnn_tanh\":\n self.rnn = rnn.RNN(hidden_dim, num_layers, activation='tanh', dropout=drop_out, input_size=embed_dim)\n elif mode == \"lstm\":\n self.rnn = rnn.LSTM(hidden_dim, num_layers, dropout=drop_out, input_size=embed_dim)\n elif mode == \"gru\":\n self.rnn = rnn.GRU(hidden_dim, num_layers, dropout=drop_out, input_size=embed_dim)\n else:\n raise ValueError(\"Invalid Mode\")\n\n self.decoder = nn.Dense(vocab_size, in_units=hidden_dim)\n self.hidden_dim = hidden_dim\n self.w2v_vec = w2v_vec\n\n def get_vec(self,inputs):\n step, batch = inputs.shape\n input_node = inputs.reshape((-1,))\n input_vec = []\n for idx, item in enumerate(input_node):\n try:\n word = corpus.dictionary.idx_to_word[item.asscalar()]\n input_vec.append(self.w2v_vec.wv[word])\n except:\n input_vec.append(np.random.uniform(-0.25, 0.25, self.w2v_vec.vector_size))\n return mx.nd.array(input_vec).reshape((step, batch, -1))\n\n def forward(self, inputs, state):\n input_node = self.get_vec(inputs)\n emb = self.drop(input_node)\n try:\n output, state = self.rnn(emb, state)\n except:\n pass\n output = self.drop(output)\n decoded = self.decoder(output.reshape((-1, self.hidden_dim)))\n return decoded, state\n\n def begin_state(self, *args, **kwargs):\n return self.rnn.begin_state(*args, **kwargs)\n\n\nmodel_name = 'rnn_relu'\nembed_dim = 100\nhidden_dim = 100\nnum_layers = 2\nlr = 1\nclipping_norm = 0.2\nepochs = 
10\nbatch_size = 32\nnum_steps = 5\ndropout_rate = 0.2\neval_period = 500\n\ncontext = utils.try_gpu()\n\n\ndef batchify(data, batch_size):\n num_batches = data.shape[0] // batch_size\n data = data[: num_batches * batch_size]\n data = data.reshape((batch_size, num_batches)).T\n return data\n\n\ndata = '../data/ptb/ptb.'\ncorpus = Corpus(data)\nvocab_size = len(corpus.dictionary)\nprint(vocab_size)\n\ntrain_data = batchify(corpus.train, batch_size).as_in_context(context)\nval_data = batchify(corpus.valid, batch_size).as_in_context(context)\ntest_data = batchify(corpus.test, batch_size).as_in_context(context)\n\nmodel = RNNModel(model_name, vocab_size, embed_dim, hidden_dim, num_layers, corpus.w2v, dropout_rate)\nmodel.collect_params().initialize(mx.init.Xavier(), ctx=context)\n\ntrainer = gluon.Trainer(model.collect_params(), 'sgd', {'learning_rate': lr, 'momentum': 0, 'wd': 0})\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\n\n\ndef get_batch(source, i):\n seq_len = min(num_steps, source.shape[0] - 1 - i)\n data = source[i: i + seq_len]\n target = source[i + 1: i + 1 + seq_len]\n return data, target.reshape((-1,))\n\n\ndef detach(state):\n if isinstance(state, (tuple, list)):\n state = [i.detach() for i in state]\n else:\n state = state.detach()\n return state\n\n\ndef model_eval(data_source):\n total_L = 0.0\n ntotal = 0\n hidden = model.begin_state(func=mx.nd.zeros, batch_size=batch_size, ctx=context)\n for i in range(0, data_source.shape[0] - 1, num_steps):\n data, target = get_batch(data_source, i)\n output, hidden = model(data, hidden)\n L = loss(output, target)\n total_L += mx.nd.sum(L).asscalar()\n ntotal += L.size\n return total_L / ntotal\n\n\ndef train():\n for epoch in range(epochs):\n total_L = 0\n start_time = time.time()\n hidden = model.begin_state(func=mx.nd.zeros, batch_size=batch_size, ctx=context)\n\n for ibatch, i in enumerate(range(0, train_data.shape[0] - 1, num_steps)):\n data, target = get_batch(train_data, i)\n hidden = detach(hidden)\n with autograd.record():\n output, hidden = model(data, hidden)\n L = loss(output, target)\n L.backward()\n grads = [i.grad(context) for i in model.collect_params().values()]\n gluon.utils.clip_global_norm(grads, clipping_norm * num_steps * batch_size)\n trainer.step(batch_size)\n total_L += mx.nd.sum(L).asscalar()\n\n if ibatch % eval_period == 0 and ibatch > 0:\n cur_L = total_L / num_steps / batch_size / eval_period\n print('[Epoch %d Batch %d] loss %.2f, perplexity %.2f' % (\n epoch + 1, ibatch, cur_L, math.exp(cur_L)))\n total_L = 0.0\n\n val_L = model_eval(val_data)\n print('[Epoch %d] time cost %.2fs, validation loss %.2f, validation '\n 'perplexity %.2f' % (epoch + 1, time.time() - start_time, val_L,\n math.exp(val_L)))\n\n\n# model_eval(val_data)\ntrain()\ntest_L = model_eval(test_data)\nprint('Test loss %.2f, test perplexity %.2f' % (test_L, math.exp(test_L)))\n" ]
[ [ "numpy.random.uniform", "numpy.zeros" ] ]
brightcoder01/models
[ "bf95351ea51b5dfb192e42a02117a5fade498af3" ]
[ "tests/test_rnnts.py" ]
[ "import sqlflow_models\nfrom tests.base import BaseTestCases\n\nimport tensorflow as tf\nimport numpy as np\nnp.random.seed(22)\nimport unittest\n\n\nclass TestRNNBasedTimeSeriesModel(BaseTestCases.BaseTest):\n def setUp(self):\n # We use sin data plus perturbation to simulate time series data\n time_series_data = np.sin(np.arange(56)) + np.random.normal(0, 0.01, 56)\n x = np.array(time_series_data).reshape(8, 7)\n y = np.array(np.arange(8).reshape(8, 1))\n self.features = {\"col1\": x}\n self.label = y\n self.n_in = 7\n self.n_out = 1\n # time_window=n_in, num_features=n_out\n feature_columns = [tf.feature_column.numeric_column(key, shape=(self.n_in, self.n_out)) for key in self.features]\n self.model = sqlflow_models.RNNBasedTimeSeriesModel(\n feature_columns=feature_columns, \n stack_units=[50, 50], \n n_in=self.n_in,\n n_out=self.n_out,\n model_type='rnn')\n self.model_class = sqlflow_models.RNNBasedTimeSeriesModel\n\nclass TestLSTMBasedTimeSeriesModel(BaseTestCases.BaseTest):\n def setUp(self):\n # We use sin data plus perturbation to simulate time series data\n time_series_data = np.sin(np.arange(56)) + np.random.normal(0, 0.01, 56)\n x = np.array(time_series_data).reshape(8, 7)\n y = np.array(np.arange(8).reshape(8, 1))\n self.features = {\"col1\": x}\n self.label = y\n self.n_in = 7\n self.n_out = 1\n # time_window=n_in, num_features=n_out\n feature_columns = [tf.feature_column.numeric_column(key, shape=(self.n_in, self.n_out)) for key in self.features]\n self.model = sqlflow_models.RNNBasedTimeSeriesModel(\n feature_columns=feature_columns, \n stack_units=[50, 50], \n n_in=self.n_in,\n n_out=self.n_out,\n model_type='lstm')\n self.model_class = sqlflow_models.RNNBasedTimeSeriesModel\n\nclass TestGRUBasedTimeSeriesModel(BaseTestCases.BaseTest):\n def setUp(self):\n # We use sin data plus perturbation to simulate time series data\n time_series_data = np.sin(np.arange(56)) + np.random.normal(0, 0.01, 56)\n x = np.array(time_series_data).reshape(8, 7)\n y = np.array(np.arange(8).reshape(8, 1))\n self.features = {\"col1\": x}\n self.label = y\n self.n_in = 7\n self.n_out = 1\n # time_window=n_in, num_features=n_out\n feature_columns = [tf.feature_column.numeric_column(key, shape=(self.n_in, self.n_out)) for key in self.features]\n self.model = sqlflow_models.RNNBasedTimeSeriesModel(\n feature_columns=feature_columns, \n stack_units=[50, 50], \n n_in=self.n_in,\n n_out=self.n_out,\n model_type='gru')\n self.model_class = sqlflow_models.RNNBasedTimeSeriesModel\n\n\nif __name__ == '__main__':\n unittest.main()\n\n" ]
[ [ "numpy.random.normal", "numpy.array", "numpy.random.seed", "tensorflow.feature_column.numeric_column", "numpy.arange" ] ]
vishalbelsare/topopy
[ "73ccc9510bd34be2ead875bc3bc1081ccad26b1f" ]
[ "topopy/TopologicalObject.py" ]
[ "import sys\nimport time\nimport warnings\n\nimport numpy as np\nimport sklearn.preprocessing\n\nimport nglpy as ngl\n# import nglpy_cuda as ngl\n\n\nclass TopologicalObject(object):\n \"\"\" A base class for housing common interactions between Morse and\n Morse-Smale complexes, and Contour and Merge Trees\n\n Parameters\n ----------\n graph : nglpy.Graph\n A graph object used for determining neighborhoods in gradient estimation\n gradient : str\n An optional string specifying the type of gradient estimator to use.\n Currently the only available option is 'steepest'.\n normalization : str\n An optional string specifying whether the inputs/output should be\n scaled before computing. Currently, two modes are supported 'zscore'\n and 'feature'. 'zscore' will ensure the data has a mean of zero and a\n standard deviation of 1 by subtracting the mean and dividing by the\n variance. 'feature' scales the data into the unit hypercube.\n aggregator : str\n An optional string that specifies what type of aggregation to do when\n duplicates are found in the domain space. Default value is None meaning\n the code will error if duplicates are identified.\n debug : bool\n An optional boolean flag for whether debugging output should be enabled.\n short_circuit : bool\n An optional boolean flag for whether the contour tree should be short\n circuited. Enabling this will speed up the processing by bypassing the\n fully augmented search and only focusing on partially augmented split\n and join trees\n\n \"\"\"\n\n precision = 16\n\n @staticmethod\n def aggregate_duplicates(X, Y, aggregator=\"mean\", precision=precision):\n \"\"\" A function that will attempt to collapse duplicates in domain\n space, X, by aggregating values over the range space, Y.\n\n Parameters\n ----------\n X : np.ndarray\n An m-by-n array of values specifying m n-dimensional samples\n Y : np.array\n A m vector of values specifying the output responses corresponding\n to the m samples specified by X\n aggregator : str\n An optional string or callable object that specifies what type of\n aggregation to do when duplicates are found in the domain space.\n Default value is mean meaning the code will calculate the mean range\n value over each of the unique, duplicated samples.\n precision : int\n An optional positive integer specifying how many digits numbers\n should be rounded to in order to determine if they are unique or\n not.\n\n Returns\n -------\n tuple(np.ndarray, np.array)\n A tuple where the first value is an m'-by-n array specifying the\n unique domain samples and the second value is an m' vector\n specifying the associated range values. m' <= m.\n\n \"\"\"\n if callable(aggregator):\n pass\n elif \"min\" in aggregator.lower():\n aggregator = np.min\n elif \"max\" in aggregator.lower():\n aggregator = np.max\n elif \"median\" in aggregator.lower():\n aggregator = np.median\n elif aggregator.lower() in [\"average\", \"mean\"]:\n aggregator = np.mean\n elif \"first\" in aggregator.lower():\n\n def aggregator(x):\n return x[0]\n\n elif \"last\" in aggregator.lower():\n\n def aggregator(x):\n return x[-1]\n\n else:\n warnings.warn(\n 'Aggregator \"{}\" not understood. 
Skipping sample '\n \"aggregation.\".format(aggregator)\n )\n return X, Y\n\n is_y_multivariate = Y.ndim > 1\n\n X_rounded = X.round(decimals=precision)\n unique_xs = np.unique(X_rounded, axis=0)\n\n old_size = len(X_rounded)\n new_size = len(unique_xs)\n if old_size == new_size:\n return X, Y\n\n if not is_y_multivariate:\n Y = np.atleast_2d(Y).T\n\n reduced_y = np.empty((new_size, Y.shape[1]))\n\n warnings.warn(\n \"Domain space duplicates caused a data reduction. \"\n + \"Original size: {} vs. New size: {}\".format(old_size, new_size)\n )\n for col in range(Y.shape[1]):\n for i, distinct_row in enumerate(unique_xs):\n filtered_rows = np.all(X_rounded == distinct_row, axis=1)\n reduced_y[i, col] = aggregator(Y[filtered_rows, col])\n\n if not is_y_multivariate:\n reduced_y = reduced_y.flatten()\n\n return unique_xs, reduced_y\n\n def __init__(\n self,\n graph=None,\n gradient=\"steepest\",\n normalization=None,\n aggregator=None,\n debug=False,\n ):\n super(TopologicalObject, self).__init__()\n self.reset()\n\n if graph is None:\n graph = ngl.EmptyRegionGraph()\n self.graph = graph\n self.gradient = gradient\n self.normalization = normalization\n self.debug = debug\n self.aggregator = aggregator\n\n def reset(self):\n \"\"\" Empties all internal storage containers\n\n\n Returns\n -------\n None\n\n \"\"\"\n self.X = []\n self.Y = []\n self.w = []\n\n self.Xnorm = []\n\n def __set_data(self, X, Y, w=None):\n \"\"\" Internally assigns the input data and normalizes it\n according to the user's specifications\n @ In, X, an m-by-n array of values specifying m\n n-dimensional samples\n @ In, Y, a m vector of values specifying the output\n responses corresponding to the m samples specified by X\n @ In, w, an optional m vector of values specifying the\n weights associated to each of the m samples used. Default of\n None means all points will be equally weighted\n \"\"\"\n self.X = X\n self.Y = Y\n self.check_duplicates()\n\n if w is not None:\n self.w = np.array(w)\n else:\n self.w = np.ones(len(Y)) * 1.0 / float(len(Y))\n\n if self.normalization == \"feature\":\n # This doesn't work with one-dimensional arrays on older\n # versions of sklearn\n min_max_scaler = sklearn.preprocessing.MinMaxScaler()\n self.Xnorm = min_max_scaler.fit_transform(np.atleast_2d(self.X))\n elif self.normalization == \"zscore\":\n self.Xnorm = sklearn.preprocessing.scale(\n self.X, axis=0, with_mean=True, with_std=True, copy=True\n )\n else:\n self.Xnorm = np.array(self.X)\n\n def build(self, X, Y, w=None):\n \"\"\" Assigns data to this object and builds the requested topological\n structure\n\n Uses an internal graph given in the constructor to build a topological\n object on the passed in data. Weights are currently ignored.\n\n Parameters\n ----------\n X : np.ndarray\n An m-by-n array of values specifying m n-dimensional samples\n Y : np.array\n An m vector of values specifying the output responses corresponding\n to the m samples specified by X\n w : np.array\n An optional m vector of values specifying the weights associated to\n each of the m samples used. 
Default of None means all points will be\n equally weighted\n\n Returns\n -------\n None\n\n \"\"\"\n self.reset()\n\n if X is None or Y is None:\n return\n\n self.__set_data(X, Y, w)\n\n if self.debug:\n sys.stdout.write(\"Graph Preparation: \")\n start = time.perf_counter()\n\n self.graph.build(self.Xnorm)\n\n if self.debug:\n end = time.perf_counter()\n sys.stdout.write(\"%f s\\n\" % (end - start))\n\n def load_data_and_build(self, filename, delimiter=\",\"):\n \"\"\" Convenience function for directly working with a data file.\n\n This opens a file and reads the data into an array, sets the data as an\n nparray and list of dimnames\n\n Parameters\n ----------\n filename : str\n string representing the data file\n\n Returns\n -------\n None\n\n \"\"\"\n data = np.genfromtxt(\n filename, dtype=float, delimiter=delimiter, names=True\n )\n data = data.view(np.float64).reshape(data.shape + (-1,))\n\n X = data[:, 0:-1]\n Y = data[:, -1]\n\n self.build(X=X, Y=Y)\n\n def get_normed_x(self, rows=None, cols=None):\n \"\"\" Returns the normalized input data requested by the user.\n\n\n Parameters\n ----------\n rows : list of int\n A list of non-negative integers specifying the row indices to return\n cols : list of int\n A list of non-negative integers specifying the column indices to\n return\n\n Returns\n -------\n np.ndarray\n A matrix of floating point values specifying the normalized data\n values used in internal computations filtered by the three input\n parameters.\n\n \"\"\"\n if rows is None:\n rows = list(range(0, self.get_sample_size()))\n if cols is None:\n cols = list(range(0, self.get_dimensionality()))\n\n if not hasattr(rows, \"__iter__\"):\n rows = [rows]\n rows = sorted(list(set(rows)))\n\n retValue = self.Xnorm[rows, :]\n return retValue[:, cols]\n\n def get_x(self, rows=None, cols=None):\n \"\"\" Returns the input data requested by the user\n\n\n Parameters\n ----------\n rows : list of int\n A list of non-negative integers specifying the row indices to return\n cols : list of int\n A list of non-negative integers specifying the column indices to\n return\n\n Returns\n -------\n np.ndarray\n A matrix of floating point values specifying the input data values\n filtered by the two input parameters.\n\n \"\"\"\n if rows is None:\n rows = list(range(0, self.get_sample_size()))\n if cols is None:\n cols = list(range(0, self.get_dimensionality()))\n\n if not hasattr(rows, \"__iter__\"):\n rows = [rows]\n rows = sorted(list(set(rows)))\n\n retValue = self.X[rows, :]\n if len(rows) == 0:\n return []\n return retValue[:, cols]\n\n def get_y(self, indices=None):\n \"\"\" Returns the output data requested by the user\n\n Parameters\n ----------\n indices : list of int\n A list of non-negative integers specifying the row indices to return\n\n Returns\n -------\n np.array\n An array of floating point values specifying the output data values\n filtered by the indices input parameter.\n\n \"\"\"\n if indices is None:\n indices = list(range(0, self.get_sample_size()))\n else:\n if not hasattr(indices, \"__iter__\"):\n indices = [indices]\n indices = sorted(list(set(indices)))\n\n if len(indices) == 0:\n return []\n return self.Y[indices]\n\n def get_weights(self, indices=None):\n \"\"\" Returns the weights requested by the user\n\n Parameters\n ----------\n indices : list of int\n A list of non-negative integers specifying the row indices to return\n\n Returns\n -------\n np.array\n An array of floating point values specifying the weights associated\n to the input data rows filtered by the 
indices input parameter.\n\n \"\"\"\n if indices is None:\n indices = list(range(0, self.get_sample_size()))\n else:\n indices = sorted(list(set(indices)))\n\n if len(indices) == 0:\n return []\n return self.w[indices]\n\n def get_sample_size(self):\n \"\"\" Returns the number of samples in the input data\n\n\n Returns\n -------\n int\n Integer specifying the number of samples.\n\n \"\"\"\n return len(self.Y)\n\n def get_dimensionality(self):\n \"\"\" Returns the dimensionality of the input space of the input data\n\n\n Returns\n -------\n int\n Integer specifying the dimensionality of the input samples.\n\n \"\"\"\n return self.X.shape[1]\n\n def get_neighbors(self, idx):\n \"\"\" Returns a list of neighbors for the specified index\n\n\n Parameters\n ----------\n idx : int\n An integer specifying the query point\n\n Returns\n -------\n list of int\n Integer list of neighbors indices\n\n \"\"\"\n return self.graph.neighbors(int(idx))\n\n def check_duplicates(self):\n \"\"\" Function to test whether duplicates exist in the input or output\n space.\n\n First, if an aggregator function has been specified, the domain space\n duplicates will be consolidated using the function to generate a new\n range value for that shared point. Otherwise, it will raise a\n ValueError. The function will raise a warning if duplicates exist in the\n output space\n\n\n Returns\n -------\n None\n\n \"\"\"\n\n if self.aggregator is not None:\n X, Y = TopologicalObject.aggregate_duplicates(\n self.X, self.Y, self.aggregator\n )\n self.X = X\n self.Y = Y\n\n temp_x = self.X.round(decimals=TopologicalObject.precision)\n unique_xs = len(np.unique(temp_x, axis=0))\n\n # unique_ys = len(np.unique(self.Y, axis=0))\n # if len(self.Y) != unique_ys:\n # warnings.warn('Range space has duplicates. Simulation of '\n # 'simplicity may help, but artificial noise may '\n # 'occur in flat regions of the domain. Sample size:'\n # '{} vs. Unique Records: {}'.format(len(self.Y),\n # unique_ys))\n\n if len(self.X) != unique_xs:\n raise ValueError(\n \"Domain space has duplicates. Try using an \"\n \"aggregator function to consolidate duplicates \"\n \"into a single sample with one range value. \"\n \"e.g., \" + self.__class__.__name__ + \"(aggregator='max'). \"\n \"\\n\\tNumber of \"\n \"Records: {}\\n\\tNumber of Unique Records: {}\\n\".format(\n len(self.X), unique_xs\n )\n )\n" ]
[ [ "numpy.array", "numpy.empty", "numpy.genfromtxt", "numpy.all", "numpy.unique", "numpy.atleast_2d" ] ]
JWDebelius/monte_carlo_power
[ "36c625bb01cf1019a05669a0b4938774c66fea1f" ]
[ "machivellian/tests/test_power.py" ]
[ "# ----------------------------------------------------------------------------\n# Copyright (c) 2013--, scikit-bio development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# ----------------------------------------------------------------------------\n\nfrom unittest import TestCase, main\n\nimport numpy as np\nimport numpy.testing as npt\nfrom scipy.stats import kruskal\n\nfrom machivellian.power import (subsample_power,\n confidence_bound,\n _calculate_power,\n _compare_distributions,\n _check_subsample_power_inputs,\n )\n\n\nclass PowerAnalysisTest(TestCase):\n\n def setUp(self):\n\n def f(x):\n \"\"\"returns the p value of a kruskal wallis test\"\"\"\n return kruskal(*x)[1]\n\n self.f = f\n self.num_p = 1\n\n # Sets the random seed\n np.random.seed(5)\n # Sets up the distributions of data for use\n self.s1 = np.arange(0, 10, 1)\n # Sets up two distributions which will never be equal by a rank-sum\n # test.\n self.samps = [np.ones((10))/10., np.ones((10))]\n self.pop = [np.arange(0, 10, 0.1), np.arange(0, 20, 0.2)]\n self.counts = np.array([5, 15, 25, 35, 45])\n # Sets up a vector of alpha values\n self.alpha = np.power(10, np.array([-1, -1.301, -2, -3])).round(3)\n\n def test_subsample_power_defaults(self):\n test_p = subsample_power(self.f, self.pop, self.counts,\n num_iter=10, num_runs=5)\n self.assertEqual(test_p.shape, (5, 5))\n\n def test_subsample_power_matches(self):\n test_p = subsample_power(self.f,\n samples=self.pop,\n counts=self.counts,\n num_iter=10,\n num_runs=5,\n draw_mode=\"matched\")\n self.assertEqual(test_p.shape, (5, 5))\n\n def test_subsample_power_multi_p(self):\n test_p = subsample_power(lambda x: np.array([0.5, 0.5]),\n samples=self.pop,\n counts=self.counts,\n num_iter=10,\n num_runs=5)\n self.assertEqual(test_p.shape, (5, 5, 2))\n\n def test_subsample_power_kwargs(self):\n def test(x, b=True):\n if b:\n return self.f(x)\n else:\n return np.array([self.f(x)] * 2)\n\n test_p_bt = subsample_power(test,\n samples=self.pop,\n counts=self.counts,\n num_iter=10,\n num_runs=5,\n test_kwargs={'b': True})\n test_p_bf = subsample_power(test,\n samples=self.pop,\n counts=self.counts,\n num_iter=10,\n num_runs=5,\n test_kwargs={'b': False})\n self.assertEqual(test_p_bt.shape, (5, 5))\n self.assertEqual(test_p_bf.shape, (5, 5, 2))\n\n def test_confidence_bound_default(self):\n # Sets the know confidence bound\n known = 2.2830070\n test = confidence_bound(self.s1)\n npt.assert_almost_equal(test, known, 3)\n\n def test_confidence_bound_df(self):\n known = 2.15109\n test = confidence_bound(self.s1, df=15)\n npt.assert_almost_equal(known, test, 3)\n\n def test_confidence_bound_alpha(self):\n known = 3.2797886\n test = confidence_bound(self.s1, alpha=0.01)\n npt.assert_almost_equal(known, test, 3)\n\n def test_confidence_bound_nan(self):\n # Sets the value to test\n samples = np.array([[4, 3.2, 3.05],\n [2, 2.8, 2.95],\n [5, 2.9, 3.07],\n [1, 3.1, 2.93],\n [3, np.nan, 3.00]])\n # Sets the know value\n known = np.array([2.2284, 0.2573, 0.08573])\n # Tests the function\n test = confidence_bound(samples, axis=0)\n npt.assert_almost_equal(known, test, 3)\n\n def test_confidence_bound_axis_none(self):\n # Sets the value to test\n samples = np.array([[4, 3.2, 3.05],\n [2, 2.8, 2.95],\n [5, 2.9, 3.07],\n [1, 3.1, 2.93],\n [3, np.nan, 3.00]])\n # Sest the known value\n known = 0.52852\n # Tests the output\n test = confidence_bound(samples, axis=None)\n 
npt.assert_almost_equal(known, test, 3)\n\n def test_calculate_power_numeric(self):\n # Sets up the values to test\n crit = 0.025\n # Sets the known value\n known = 0.5\n # Calculates the test value\n test = _calculate_power(p_values=self.alpha,\n alpha=crit,\n numeric=True)\n # Checks the test value\n npt.assert_almost_equal(known, test)\n\n def test_calculate_power_reject(self):\n crit = 0.025\n reject = self.alpha < crit\n known = 0.5\n test = _calculate_power(p_values=reject, alpha=crit, numeric=False)\n npt.assert_almost_equal(known, test)\n\n def test_calculate_power_n(self):\n crit = 0.025\n known = np.array([0.5, 0.5])\n alpha = np.vstack((self.alpha, self.alpha))\n test = _calculate_power(alpha, crit)\n npt.assert_almost_equal(known, test)\n\n def test_compare_distributions_all_mode(self):\n known = np.ones((100))*0.0026998\n test = _compare_distributions(self.f, self.samps, 1, num_iter=100)\n npt.assert_allclose(known, test, 5)\n\n def test_compare_distributions_matched_mode(self):\n # Sets the known value\n known_mean = 0.162195\n known_std = 0.121887\n known_shape = (100,)\n # Tests the sample value\n test = _compare_distributions(self.f, self.pop, self.num_p,\n mode='matched', num_iter=100,\n bootstrap=False)\n npt.assert_allclose(known_mean, test.mean(), rtol=0.1, atol=0.02)\n npt.assert_allclose(known_std, test.std(), rtol=0.1, atol=0.02)\n self.assertEqual(known_shape, test.shape)\n\n def test_compare_distributions_multiple_returns(self):\n known = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])\n\n def f(x):\n return np.array([1, 2, 3])\n\n test = _compare_distributions(f, self.pop, 3, mode='matched',\n num_iter=3, bootstrap=False)\n npt.assert_array_equal(known, test)\n\n def test_compare_distributions_bootstrap_more(self):\n known = np.array([-76.10736642, -82.08492357, -74.45798197,\n -72.0498448, -82.54530595])\n test = _compare_distributions(self.f, self.pop, self.num_p,\n counts=1000,\n num_iter=5)\n npt.assert_almost_equal(known, np.log10(test), 5)\n\n def test_check_subsample_power_inputs_draw_mode_error(self):\n with self.assertRaises(ValueError):\n _check_subsample_power_inputs(test=self.f,\n samples=[np.ones((2)), np.ones((5))],\n counts=self.counts,\n draw_mode=\"Alice Price Healy\")\n\n def test_check_subsample_power_inputs_matched_mode(self):\n with self.assertRaises(ValueError):\n _check_subsample_power_inputs(test=self.f,\n samples=[np.ones((2)), np.ones((5))],\n counts=self.counts,\n draw_mode=\"matched\")\n\n def test_check_subsample_power_inputs_low_counts(self):\n with self.assertRaises(ValueError):\n _check_subsample_power_inputs(test=self.f,\n samples=self.samps,\n counts=np.arange(-5, 0)\n )\n\n def test_check_subsample_power_inputs_bootstrap_counts(self):\n with self.assertRaises(ValueError):\n _check_subsample_power_inputs(test=self.f,\n samples=[np.ones((3)), np.ones((5))],\n counts=self.counts,\n bootstrap=False)\n\n def test_check_subsample_power_inputs_ratio(self):\n with self.assertRaises(ValueError):\n _check_subsample_power_inputs(test=self.f,\n samples=self.samps,\n counts=self.counts,\n ratio=np.array([1, 2, 3]))\n\n def test_check_subsample_power_inputs_test(self):\n # Defines a test function\n def test(x):\n return 'Hello World!'\n with self.assertRaises(TypeError):\n _check_subsample_power_inputs(test=test,\n samples=self.samps,\n counts=self.counts)\n\n def test_check_subsample_power_inputs_bootstrap_error(self):\n with self.assertRaises(ValueError):\n _check_subsample_power_inputs(test=self.f,\n samples=self.samps,\n counts=np.arange(10, 
1000, 10),\n bootstrap=False)\n\n def test_check_sample_power_inputs(self):\n # Defines the know returns\n known_num_p = 1\n known_ratio = np.ones((2))\n # Runs the code for the returns\n test_ratio, test_num_p = \\\n _check_subsample_power_inputs(test=self.f,\n samples=self.samps,\n counts=self.counts,\n )\n # Checks the returns are sane\n self.assertEqual(known_num_p, test_num_p)\n npt.assert_array_equal(known_ratio, test_ratio)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.array", "numpy.random.seed", "numpy.testing.assert_almost_equal", "numpy.testing.assert_array_equal", "numpy.ones", "scipy.stats.kruskal", "numpy.arange", "numpy.log10", "numpy.vstack" ] ]
nocmok/ridesharing-dispatcher
[ "cb982028250b3131479797826ada57062d10c5b6" ]
[ "playground/benchmarks/metrics_per_n_vehicles.py" ]
[ "import numpy as np\nimport pandas\nfrom pandas import read_csv\nimport matplotlib.pyplot as plt\nimport sys\n\nscript_dir=sys.path[0]\nds_name=sys.argv[1]\ncsv_path=script_dir + \"/\" + ds_name + \"/\" + ds_name + \"_metrics_per_n_vehicles.csv\"\npng_plot_path=script_dir + \"/\" + ds_name + \"/\" + ds_name + \"_metrics_per_n_vehicles.png\"\ndataset = read_csv(csv_path)\n\n# 1. service rate\n# 2. distance savings relative to personal transport\n# 3. distance savings relative to taxi\n# 4. avg processing time\n\nfig, axs = plt.subplots(4)\nfig.set_size_inches(8,8)\naxs[0].set_title(\"service rate (%)\")\naxs[1].set_title(\"distance savings (%) relative to personal transport\")\naxs[2].set_title(\"distance savings (%) relative to taxi\")\naxs[3].set_title(\"request processing time average (ms)\")\n\nfor algo in np.unique(dataset[\"algo\"].values):\n\tsamples = dataset[dataset[\"algo\"] == algo][\"n_vehicles\"].values\n\n\tservice_rate = dataset[dataset[\"algo\"] == algo][\"service_rate\"].values * 100\n\tline, = axs[0].plot(samples, service_rate)\n\n\tdistance_savings = dataset[dataset[\"algo\"] == algo][\"distance_savings\"].values * 100\n\tline, = axs[1].plot(samples, distance_savings)\n\n\tdistance_savings_taxi = (1 - dataset[dataset[\"algo\"] == algo][\"total_travelled_distance\"].values / dataset[dataset[\"algo\"] == \"TaxiSolver\"][\"total_travelled_distance\"].values) * 100\n\tline, = axs[2].plot(samples, distance_savings_taxi)\n\n\tprocessing_time_avg = dataset[dataset[\"algo\"] == algo][\"processing_time_avg\"].values\n\tline, = axs[3].plot(samples, processing_time_avg)\n\tline.set_label(algo)\n\nplt.subplots_adjust(hspace=0.4, top=1, right=0.8)\nhandles, labels = axs[3].get_legend_handles_labels()\nfig.legend(handles, labels, loc='upper right')\nplt.savefig(png_plot_path, dpi=300, bbox_inches=\"tight\", pad_inches=0.2)" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplots", "pandas.read_csv", "matplotlib.pyplot.subplots_adjust", "numpy.unique" ] ]
bhaney/endjinn
[ "14b2da858892d685903a11c223b963339e4a8aa3" ]
[ "endjinn/state_block/graph.py" ]
[ "import numpy as np\nimport networkx as nx\n\n\nclass Graph(object):\n \"\"\"\n Class for representing state graph with valid transitions.\n \"\"\"\n def __init__(self, node_labels=None, edgelist=None, stateful=False, weighted=False):\n \"\"\"\n\n :param node_labels: List of strings.\n :param edgelist: List of tuples/lists of integers, in (source, destination) format.\n :param stateful: Bool. False is graph-structured data, True is state graph.\n :param weighted: Bool. Denotes whether edges have weights or not.\n\n Example:\n g = Graph(node_labels=['sitting', 'sleeping', 'running'], edgelist=[(0, 1), (1, 0), (1, 2)], stateful=True)\n \"\"\"\n self.stateful = stateful\n self.weighted = weighted\n self.graph = None\n self.current_state = None\n self.n_nodes = None\n\n if edgelist and node_labels:\n self.from_edgelist(edgelist, node_labels)\n\n def from_edgelist(self, edgelist, node_labels=None):\n \"\"\"\n\n :param edgelist: (source, dest) numbers.\n :param node_labels: List of strings. Labels for each node.\n :return:\n \"\"\"\n if node_labels:\n if np.max(edgelist) != len(node_labels) - 1:\n raise Exception(\"Node labels must be of same size as number of nodes.\")\n\n self.graph = nx.DiGraph()\n\n for i in range(len(node_labels)):\n self.graph.add_node(i, attr_dict={\"label\": node_labels[i]})\n\n self.n_nodes = len(node_labels)\n\n for thing in edgelist:\n self.graph.add_edge(thing[0], thing[1])\n\n def is_valid_edge(self, source, dest):\n return self.graph.has_edge(source, dest)\n\n def set_initial_state(self, state):\n self.current_state = state\n\n def transition(self, dest):\n \"\"\"\n :param dest: String. Node label of state to which to transition.\n :return:\n \"\"\"\n if not self.stateful:\n raise Exception(\"stateful flag is false on this class instance. Set stateful=True during instantiaion to \"\n \"enable state transitions.\")\n else:\n if not self.current_state:\n raise Exception(\"Call set_initial_state before attempting transition to another state.\")\n\n if self.is_valid_edge(self.current_state, dest):\n self.current_state = dest\n\n def get_current_state_as_one_hot(self):\n vec = np.zeros(self.n_nodes)\n vec[self.current_state] = 1\n\n return vec\n\n\n" ]
[ [ "numpy.max", "numpy.zeros" ] ]
shuyangli94/PyTorch_Speaker_Verification
[ "22c4457398d1eae4488f15fbbb524c38afb1f028", "22c4457398d1eae4488f15fbbb524c38afb1f028" ]
[ "utils.py", "train_speech_embedder.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 20 16:56:19 2018\n\n@author: harry\n\"\"\"\nimport librosa\nimport numpy as np\nimport torch\nimport torch.autograd as grad\nimport torch.nn.functional as F\n\nfrom hparam import hparam as hp\n\ndef get_device(use_cuda=True):\n cuda_available = torch.cuda.is_available()\n use_cuda = use_cuda and cuda_available\n\n # Prompt user to use CUDA if available\n if cuda_available and not use_cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\n # Set device\n device = torch.device('cuda:0' if use_cuda else 'cpu')\n\n print('Device: {}'.format(device))\n if use_cuda:\n print('Using CUDA {}'.format(torch.cuda.current_device()))\n return use_cuda, device\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\ndef get_centroids(embeddings):\n centroids = []\n for speaker in embeddings:\n centroid = 0\n for utterance in speaker:\n centroid = centroid + utterance\n centroid = centroid/len(speaker)\n centroids.append(centroid)\n centroids = torch.stack(centroids)\n return centroids\n\ndef get_centroid(embeddings, speaker_num, utterance_num):\n centroid = 0\n for utterance_id, utterance in enumerate(embeddings[speaker_num]):\n if utterance_id == utterance_num:\n continue\n centroid = centroid + utterance\n centroid = centroid/(len(embeddings[speaker_num])-1)\n return centroid\n\ndef get_cossim(embeddings, centroids):\n # Calculates cosine similarity matrix. Requires (N, M, feature) input\n cossim = torch.zeros(embeddings.size(0),embeddings.size(1),centroids.size(0))\n for speaker_num, speaker in enumerate(embeddings):\n for utterance_num, utterance in enumerate(speaker):\n for centroid_num, centroid in enumerate(centroids):\n if speaker_num == centroid_num:\n centroid = get_centroid(embeddings, speaker_num, utterance_num)\n output = F.cosine_similarity(utterance,centroid,dim=0)+1e-6\n cossim[speaker_num][utterance_num][centroid_num] = output\n return cossim\n\ndef calc_loss(sim_matrix):\n # Calculates loss from (N, M, K) similarity matrix\n per_embedding_loss = torch.zeros(sim_matrix.size(0), sim_matrix.size(1))\n for j in range(len(sim_matrix)):\n for i in range(sim_matrix.size(1)):\n per_embedding_loss[j][i] = -(sim_matrix[j][i][j] - ((torch.exp(sim_matrix[j][i]).sum()+1e-6).log_()))\n loss = per_embedding_loss.sum() \n return loss, per_embedding_loss\n\ndef normalize_0_1(values, max_value, min_value):\n normalized = np.clip((values - min_value) / (max_value - min_value), 0, 1)\n return normalized\n\ndef mfccs_and_spec(wav_file, wav_process = False, calc_mfccs=False, calc_mag_db=False): \n sound_file, _ = librosa.core.load(wav_file, sr=hp.data.sr)\n window_length = int(hp.data.window*hp.data.sr)\n hop_length = int(hp.data.hop*hp.data.sr)\n duration = hp.data.tisv_frame * hp.data.hop + hp.data.window\n \n # Cut silence and fix length\n if wav_process == True:\n sound_file, index = librosa.effects.trim(sound_file, frame_length=window_length, hop_length=hop_length)\n length = int(hp.data.sr * duration)\n sound_file = librosa.util.fix_length(sound_file, length)\n \n spec = librosa.stft(sound_file, n_fft=hp.data.nfft, hop_length=hop_length, win_length=window_length)\n mag_spec = np.abs(spec)\n \n mel_basis = librosa.filters.mel(hp.data.sr, hp.data.nfft, n_mels=hp.data.nmels)\n mel_spec = np.dot(mel_basis, mag_spec)\n \n mag_db = librosa.amplitude_to_db(mag_spec)\n #db mel spectrogram\n mel_db = librosa.amplitude_to_db(mel_spec).T\n \n mfccs 
= None\n if calc_mfccs:\n mfccs = np.dot(librosa.filters.dct(40, mel_db.shape[0]), mel_db).T\n \n return mfccs, mel_db, mag_db\n\nif __name__ == \"__main__\":\n w = grad.Variable(torch.tensor(1.0))\n b = grad.Variable(torch.tensor(0.0))\n embeddings = torch.tensor([[0,1,0],[0,0,1], [0,1,0], [0,1,0], [1,0,0], [1,0,0]]).to(torch.float).reshape(3,2,3)\n centroids = get_centroids(embeddings)\n cossim = get_cossim(embeddings, centroids)\n sim_matrix = w*cossim + b\n loss, per_embedding_loss = calc_loss(sim_matrix)\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 5 21:49:16 2018\n\n@author: harry\n\"\"\"\n\nimport os\nimport random\nimport time\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom hparam import hparam as hp\nfrom data_load import SpeakerDatasetTIMIT, SpeakerDatasetTIMITPreprocessed\nfrom speech_embedder_net import SpeechEmbedder, GE2ELoss, get_centroids, get_cossim\n\ndef train(model_path):\n device = torch.device(hp.device)\n \n if hp.data.data_preprocessed:\n train_dataset = SpeakerDatasetTIMITPreprocessed(is_test=False)\n else:\n train_dataset = SpeakerDatasetTIMIT(is_test=False)\n train_loader = DataLoader(train_dataset, batch_size=hp.train.N, shuffle=True, num_workers=hp.train.num_workers, drop_last=True) \n \n embedder_net = SpeechEmbedder().to(device)\n if hp.train.restore:\n embedder_net.load_state_dict(torch.load(model_path))\n ge2e_loss = GE2ELoss(device)\n #Both net and loss have trainable parameters\n optimizer = torch.optim.SGD([\n {'params': embedder_net.parameters()},\n {'params': ge2e_loss.parameters()}\n ], lr=hp.train.lr)\n \n os.makedirs(hp.train.checkpoint_dir, exist_ok=True)\n \n embedder_net.train()\n iteration = 0\n for e in range(hp.train.epochs):\n total_loss = 0\n for batch_id, mel_db_batch in enumerate(train_loader): \n mel_db_batch = mel_db_batch.to(device)\n \n mel_db_batch = torch.reshape(mel_db_batch, (hp.train.N*hp.train.M, mel_db_batch.size(2), mel_db_batch.size(3)))\n perm = random.sample(range(0, hp.train.N*hp.train.M), hp.train.N*hp.train.M)\n unperm = list(perm)\n for i,j in enumerate(perm):\n unperm[j] = i\n mel_db_batch = mel_db_batch[perm]\n #gradient accumulates\n optimizer.zero_grad()\n \n embeddings = embedder_net(mel_db_batch)\n embeddings = embeddings[unperm]\n embeddings = torch.reshape(embeddings, (hp.train.N, hp.train.M, embeddings.size(1)))\n \n #get loss, call backward, step optimizer\n loss = ge2e_loss(embeddings) #wants (Speaker, Utterances, embedding)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(embedder_net.parameters(), 3.0)\n torch.nn.utils.clip_grad_norm_(ge2e_loss.parameters(), 1.0)\n optimizer.step()\n \n total_loss = total_loss + loss\n iteration += 1\n if (batch_id + 1) % hp.train.log_interval == 0:\n mesg = \"{0}\\tEpoch:{1}[{2}/{3}],Iteration:{4}\\tLoss:{5:.4f}\\tTLoss:{6:.4f}\\t\\n\".format(time.ctime(), e+1,\n batch_id+1, len(train_dataset)//hp.train.N, iteration,loss, total_loss / (batch_id + 1))\n print(mesg)\n if hp.train.log_file is not None:\n with open(hp.train.log_file,'a') as f:\n f.write(mesg)\n \n if hp.train.checkpoint_dir is not None and (e + 1) % hp.train.checkpoint_interval == 0:\n embedder_net.eval().cpu()\n ckpt_model_filename = \"ckpt_epoch_\" + str(e+1) + \"_batch_id_\" + str(batch_id+1) + \".pth\"\n ckpt_model_path = os.path.join(hp.train.checkpoint_dir, ckpt_model_filename)\n torch.save(embedder_net.state_dict(), ckpt_model_path)\n embedder_net.to(device).train()\n\n #save model\n embedder_net.eval().cpu()\n save_model_filename = \"final_epoch_\" + 
str(e + 1) + \"_batch_id_\" + str(batch_id + 1) + \".model\"\n save_model_path = os.path.join(hp.train.checkpoint_dir, save_model_filename)\n torch.save(embedder_net.state_dict(), save_model_path)\n \n print(\"\\nDone, trained model saved at\", save_model_path)\n\ndef test(model_path):\n \n if hp.data.data_preprocessed:\n test_dataset = SpeakerDatasetTIMITPreprocessed(is_test=True)\n else:\n test_dataset = SpeakerDatasetTIMIT(is_test=True)\n test_loader = DataLoader(test_dataset, batch_size=hp.test.N, shuffle=True, num_workers=hp.test.num_workers, drop_last=True)\n \n embedder_net = SpeechEmbedder()\n embedder_net.load_state_dict(torch.load(model_path))\n embedder_net.eval()\n \n avg_EER = 0\n for e in range(hp.test.epochs):\n batch_avg_EER = 0\n for batch_id, mel_db_batch in enumerate(test_loader):\n assert hp.test.M % 2 == 0\n enrollment_batch, verification_batch = torch.split(mel_db_batch, int(mel_db_batch.size(1)/2), dim=1)\n \n enrollment_batch = torch.reshape(enrollment_batch, (hp.test.N*hp.test.M//2, enrollment_batch.size(2), enrollment_batch.size(3)))\n verification_batch = torch.reshape(verification_batch, (hp.test.N*hp.test.M//2, verification_batch.size(2), verification_batch.size(3)))\n \n perm = random.sample(range(0,verification_batch.size(0)), verification_batch.size(0))\n unperm = list(perm)\n for i,j in enumerate(perm):\n unperm[j] = i\n \n verification_batch = verification_batch[perm]\n enrollment_embeddings = embedder_net(enrollment_batch)\n verification_embeddings = embedder_net(verification_batch)\n verification_embeddings = verification_embeddings[unperm]\n \n enrollment_embeddings = torch.reshape(enrollment_embeddings, (hp.test.N, hp.test.M//2, enrollment_embeddings.size(1)))\n verification_embeddings = torch.reshape(verification_embeddings, (hp.test.N, hp.test.M//2, verification_embeddings.size(1)))\n \n enrollment_centroids = get_centroids(enrollment_embeddings)\n \n sim_matrix = get_cossim(verification_embeddings, enrollment_centroids)\n \n # calculating EER\n diff = 1; EER=0; EER_thresh = 0; EER_FAR=0; EER_FRR=0\n \n for thres in [0.01*i+0.5 for i in range(50)]:\n sim_matrix_thresh = sim_matrix>thres\n \n FAR = (sum([sim_matrix_thresh[i].float().sum()-sim_matrix_thresh[i,:,i].float().sum() for i in range(int(hp.test.N))])\n /(hp.test.N-1.0)/(float(hp.test.M/2))/hp.test.N)\n \n FRR = (sum([hp.test.M/2-sim_matrix_thresh[i,:,i].float().sum() for i in range(int(hp.test.N))])\n /(float(hp.test.M/2))/hp.test.N)\n \n # Save threshold when FAR = FRR (=EER)\n if diff> abs(FAR-FRR):\n diff = abs(FAR-FRR)\n EER = (FAR+FRR)/2\n EER_thresh = thres\n EER_FAR = FAR\n EER_FRR = FRR\n batch_avg_EER += EER\n print(\"\\nEER : %0.2f (thres:%0.2f, FAR:%0.2f, FRR:%0.2f)\"%(EER,EER_thresh,EER_FAR,EER_FRR))\n avg_EER += batch_avg_EER/(batch_id+1)\n avg_EER = avg_EER / hp.test.epochs\n print(\"\\n EER across {0} epochs: {1:.4f}\".format(hp.test.epochs, avg_EER))\n \nif __name__==\"__main__\":\n import glob\n import os\n import argparse\n\n # Set up parser\n parser = argparse.ArgumentParser(\n description='Train speech embeddings.')\n parser.add_argument(\"--test\", '-T', action='store_true', default=False,\n help=\"Test model\")\n args = parser.parse_args()\n\n # Parse arguments\n is_test = args.test\n\n # Load latest model\n model_dir = hp.model.model_path\n latest_model_path = max(glob.glob(os.path.join(model_dir, '*')), key=os.path.getctime)\n\n if is_test:\n print('============ TESTING ==============')\n test(latest_model_path)\n else:\n print('============ TRAINING ==============')\n 
train(latest_model_path)\n\n" ]
[ [ "torch.device", "numpy.dot", "torch.stack", "torch.cuda.current_device", "torch.cuda.is_available", "torch.tensor", "numpy.abs", "numpy.clip", "torch.nn.functional.cosine_similarity", "torch.exp" ], [ "torch.device", "torch.utils.data.DataLoader", "torch.load" ] ]
xuanyuanXIV/carveme
[ "889f4d06d000f90711d92a9fada6bd413d353263" ]
[ "carveme/reconstruction/eggnog.py" ]
[ "import pandas as pd\n\n\ndef split_and_expand(df, col, sep):\n split_col = df[col].str.split(sep).apply(pd.Series, 1).stack()\n split_col.index = split_col.index.droplevel(-1)\n split_col.name = col\n df = df.drop(col, axis=1).join(split_col)\n df.reset_index(drop=True, inplace=True)\n return df\n\n\ndef load_eggnog_data(filename, drop_unannotated=True, drop_unused_cols=True):\n \"\"\" Load and parse an eggnog results for new eggnog-mapper version.\n\n Args:\n filename (str): input file\n drop_unannotated (bool): remove entries without BiGG annotation (default: True)\n drop_unused_cols (bool): remove columns not used for model carving (default: True)\n\n Returns:\n pandas.DataFrame: eggnog data\n\n \"\"\"\n columns = ['query_gene', 'seed_eggNOG_ortholog', 'evalue', 'score',\n 'predicted_gene_name', 'GO_terms', 'KEGG_pathways', 'Annotation_tax_scope', 'OGs',\n 'bestOG_evalue_score', 'COG_cat', 'eggNOG_annotation', 'BiGG_gene']\n\n data = pd.read_csv(filename, comment='#', sep='\\t', names=columns)\n\n if drop_unannotated:\n data.dropna(subset=['BiGG_gene'], inplace=True)\n\n if drop_unused_cols:\n data = data[['query_gene', 'BiGG_gene', 'score']]\n\n data = split_and_expand(data, 'BiGG_gene', ',')\n\n return data\n\n\n\n" ]
[ [ "pandas.read_csv" ] ]
Froskekongen/oslodatascience-rl
[ "f265d50ded86075787d2696719d543615c941e29" ]
[ "common.py" ]
[ "import logging\nimport pandas as pd\nimport time\n\nclass LogProgress(object):\n '''\n Class for logging progress of RL agents\n logfile: file name of log file.\n console: output to console.\n level: logging level.\n name: name of logger. (not really relevant as of now...)\n '''\n def __init__(self, logfile, console=False, level=logging.INFO, name=None):\n self.logfile = logfile\n self.level = level\n if name is None: name = __name__\n self.logger = logging.getLogger(name)\n self.logger.setLevel(level)\n \n formatter = logging.Formatter('%(asctime)s;%(message)s')\n self.fh = logging.FileHandler(logfile)\n self.fh.setFormatter(formatter)\n self.logger.addHandler(self.fh)\n\n if console:\n self.ch = logging.StreamHandler()\n self.ch.setFormatter(formatter)\n self.logger.addHandler(self.ch)\n\n def info(self, msg):\n '''\n Use logger info method to write to logfile (and console).\n msg: string message\n '''\n self.logger.info(msg)\n # if msg.__class__ is str:\n # self.logger.info(msg)\n # elif hasattr(msg, '__iter__'):\n # self.logger.info(';'.join(msg))\n # else:\n # raise ValueError('msg should be string or iterable')\n\nclass LogPong(LogProgress):\n '''\n Class for logging progress in pong game.\n logfile: file name of log file.\n console: output to console.\n **kwargs: arguments passed to LogProgress.\n '''\n def __init__(self, logfile, console=False, **kwargs):\n super().__init__(logfile, console, **kwargs)\n\n def log(self, episode, rewardSum):\n '''\n Function for writing to log file (and console).\n episole: episode number.\n rewardSum: sum of rewards in episode.\n '''\n msg = '%d;%f' % (episode, rewardSum)\n self.info(msg)\n\ndef readLogPong(filename, **kwargs):\n '''\n Get pong log file (LogPong) as a dataframe.\n filename: file name of log file.\n **kwargs: arguments passed to pd.read_csv.\n '''\n df = pd.read_csv(filename, sep=';', names=('time', 'episode', 'rewardSum'), **kwargs)\n df.time = pd.to_datetime(df.time)\n return df\n\n\nclass StreamLog(object):\n '''A way to read a file as it is written.\n fileName: file name\n start: 0 for beginnning of file, 2 for end of file.\n sleepSec: number of seconds to sleep when we reach end of file.\n '''\n def __init__(self, fileName, start=0, sleepSec=2):\n self.fileName = fileName\n self.handle = open(self.fileName, 'r')\n self.start = start\n self.sleepSec=sleepSec\n\n def close(self):\n '''Close file handle.'''\n self.handle.close()\n\n def streamContinuously(self):\n '''Stream continuously. \n Run a for loop over <object>.streamContinuously()'''\n while True:\n line = self.handle.readline()\n if not line:\n time.sleep(self.sleepSec)\n continue\n yield line\n\n def streamAvailable(self):\n '''Read available file, and break. \n Continues where the previous read ended.\n Run a for loop over <object>.streamAvailable()'''\n line = 'something'\n while line:\n line = self.handle.readline()\n yield line\n\n @staticmethod\n def removeNewLineCharacter(line):\n return line[:-1]\n\n \ndef main():\n pass\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.to_datetime", "pandas.read_csv" ] ]
nft-appraiser/nft-appraiser-api
[ "6d6495049851afd3d9bfc6969d0e1c9bc430dc81" ]
[ "code/taskA/views_utils/utils.py" ]
[ "import os\nimport gc\nimport time\nimport imghdr\nfrom io import BytesIO\nfrom typing import List, Optional\n\nimport requests\nimport numpy as np\nimport pandas as pd\nfrom tqdm.notebook import tqdm # if you don't use IPython Kernel like jupyter, you should change \"tqdm.notebook\" to \"tqdm\"\nfrom cairosvg import svg2png\nfrom PIL import Image\nimport cv2\nimport tensorflow as tf\nimport tensorflow.keras.layers as layers\nimport tensorflow.keras.models as models\nimport tensorflow.keras.losses as losses\nimport tensorflow.keras.optimizers as optim\nimport tensorflow.keras.activations as activations\nfrom tensorflow.keras.utils import Sequence\nimport tensorflow.keras.callbacks as callbacks\nfrom tensorflow.keras.wrappers.scikit_learn import KerasRegressor\nimport cloudpickle\n\ndef is_image(url) -> bool:\n \"\"\"\n Determine if it is an image of png or jpeg.\n Parameters\n ----------\n url : str\n Target url.\n Returns\n -------\n True or False: Return True if this url content is an image of png or jpeg else returns False.\n \"\"\"\n img = requests.get(url).content\n img_type = imghdr.what(None, h=img)\n\n if img_type in ['png', 'jpeg']:\n return True\n else:\n return False\n\n\ndef is_svg(url) -> bool:\n \"\"\"\n Determine if it is an image of svg.\n Parameters\n ----------\n url : str\n Target url.\n Returns\n -------\n True or False: Return True if this url content is an image of svg else returns False.\n \"\"\"\n if url.endswith(\".svg\"):\n return True\n else:\n return False\n\n\ndef save_png(url, file_name) -> None:\n \"\"\"\n Save an image of png or jpeg as a png file. \n Parameters\n ----------\n url : str\n Target url.\n file_name : str\n The file path of a saved png file.\n Returns\n -------\n None\n \"\"\"\n img = requests.get(url).content\n img = Image.open(BytesIO(img)).convert(\"RGBA\")\n img = cv2.cvtColor(np.array(img), cv2.COLOR_RGBA2BGRA)\n cv2.imwrite(file_name, img, [int(cv2.IMWRITE_PNG_COMPRESSION), 3])\n\n\ndef save_svg(url, file_name) -> None:\n \"\"\"\n Save an image of svg as an svg file. The content that is svg data of animation can't save. 
\n Parameters\n ----------\n url : str\n Target url.\n file_name : str\n The file path of a saved png file.\n Returns\n -------\n None\n \"\"\"\n img = requests.get(url).content\n img = svg2png(bytestring=img)\n img = Image.open(BytesIO(img)).convert(\"RGBA\")\n img = cv2.cvtColor(np.array(img), cv2.COLOR_RGBA2BGRA)\n cv2.imwrite(file_name, img)\n\ndef load_model(file_name: str):\n \"\"\"\n Load the model file of pickle.\n Parameters\n ----------\n file_name : str\n The absolute path of the model file.\n Returns\n -------\n model : tf.keras.models.Model\n Trained model object.\n \"\"\"\n with open(file_name, mode='rb') as f:\n model = cloudpickle.load(f)\n\n return model\n\ndef get_img(asset_contract_address: str, token_id: str):\n \"\"\"\n Get the asset image data.\n\n Parameters\n ----------\n asset_contract_address : str\n The string of asset contract address.\n token_id : str\n The string of token id.\n\n Returns\n -------\n img : np.ndarray \n Get image from opensea API data.\n \"\"\"\n\n if type(token_id) != str:\n token_id = str(token_id)\n url = f\"https://api.opensea.io/api/v1/asset/{asset_contract_address}/{token_id}/\"\n headers = {\"Accept\": \"application/json\"}\n\n response = requests.request(\"GET\", url, headers=headers)\n\n data = response.json()\n asset_df = pd.json_normalize(data)\n\n img_url = asset_df['image_url'].values[0]\n img = requests.get(img_url).content\n if is_svg(img_url):\n img = svg2png(bytestring=img)\n img = Image.open(BytesIO(img)).convert(\"RGBA\")\n img = cv2.cvtColor(np.array(img), cv2.COLOR_RGBA2BGR)\n\n return img\n" ]
[ [ "numpy.array", "pandas.json_normalize" ] ]
lreis2415/PyGeoC
[ "ad2b3166a58ed5d0a8005e33873bd861305e514f" ]
[ "pygeoc/raster.py" ]
[ "\n# -*- coding: utf-8 -*-\n\"\"\"Raster Utility Class.\n用于创建栅格数据对象并进行简单操作,如另存为ASCII格式栅格、栅格重分类等。\n\n author: Liangjun Zhu\n\n changlog:\n\n - 12-04-12 jz - origin version.\n - 16-07-01 lj - reorganized for pygeoc.\n - 17-06-25 lj - check by pylint and reformat by Google style.\n - 17-07-20 lj - add GDALDataType dict, and WhiteBox GAT D8 code.\n - 17-11-21 yw - add raster_binarization, raster_erosion, raster_dilation, openning, closing.\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nfrom future.utils import iteritems\nfrom builtins import range\n\nimport os\nimport subprocess\nfrom io import open\n\nimport numpy\nfrom osgeo.gdal import GDT_CInt16, GDT_CInt32, GDT_CFloat32, GDT_CFloat64\nfrom osgeo.gdal import GDT_UInt32, GDT_Int32, GDT_Float32, GDT_Float64\nfrom osgeo.gdal import GDT_Unknown, GDT_Byte, GDT_UInt16, GDT_Int16\nfrom osgeo.gdal import GetDriverByName as gdal_GetDriverByName\nfrom osgeo.gdal import Open as gdal_Open\nfrom osgeo.ogr import Open as ogr_Open\nfrom osgeo.osr import SpatialReference as osr_SpatialReference\n\nfrom pygeoc.utils import MathClass, UtilClass, FileClass, DEFAULT_NODATA, DELTA\nfrom pygeoc.utils import is_string\n\nGDALDataType = {0: GDT_Unknown, # Unknown or unspecified type\n 1: GDT_Byte, # Eight bit unsigned integer\n 2: GDT_UInt16, # Sixteen bit unsigned integer\n 3: GDT_Int16, # Sixteen bit signed integer\n 4: GDT_UInt32, # Thirty two bit unsigned integer\n 5: GDT_Int32, # Thirty two bit signed integer\n 6: GDT_Float32, # Thirty two bit floating point\n 7: GDT_Float64, # Sixty four bit floating point\n 8: GDT_CInt16, # Complex Int16\n 9: GDT_CInt32, # Complex Int32\n 10: GDT_CFloat32, # Complex Float32\n 11: GDT_CFloat64 # Complex Float64\n }\n\"\"\"dict: GDAL DataType\n\n +--------------+----------------+---------------------------------+\n | Type | GDAL Datatype | Description |\n +==============+================+=================================+\n | 0 | GDT_Unknown | Unknown or unspecified type |\n +--------------+----------------+---------------------------------+\n | 1 | GDT_Byte | Eight bit unsigned integer |\n +--------------+----------------+---------------------------------+\n | 2 | GDT_UInt16 | Sixteen bit unsigned integer |\n +--------------+----------------+---------------------------------+\n | 3 | GDT_Int16 | Sixteen bit signed integer |\n +--------------+----------------+---------------------------------+\n | 4 | GDT_UInt32 | Thirty two bit unsigned integer |\n +--------------+----------------+---------------------------------+\n | 5 | GDT_Int32 | Thirty two bit signed integer |\n +--------------+----------------+---------------------------------+\n | 6 | GDT_Float32 | Thirty two bit floating point |\n +--------------+----------------+---------------------------------+\n | 7 | GDT_Float64 | Sixty four bit floating point |\n +--------------+----------------+---------------------------------+\n | 8 | GDT_CInt16 | Complex Int16 |\n +--------------+----------------+---------------------------------+\n | 9 | GDT_CInt32 | Complex Int32 |\n +--------------+----------------+---------------------------------+\n | 10 | GDT_CFloat32 | Complex Float32 |\n +--------------+----------------+---------------------------------+\n | 11 | GDT_CFloat64 | Complex Float64 |\n +--------------+----------------+---------------------------------+\n \n\"\"\"\n\n\nclass Raster(object):\n \"\"\"Basic Raster Class.\n\n Args:\n n_rows: row count.\n n_cols: col count.\n data: 2D array data.\n nodata_value: NODATA value, None as default.\n 
geotransform: geographic transformation, None as default.\n srs: coordinate system, None as default.\n datatype(:obj:`pygeoc.raster.GDALDataType`): Raster datatype.\n\n Attributes:\n nRows (int): Row number.\n nCols (int): Column number.\n data (:obj:`numpy.array`): 2D array raster data.\n noDataValue (float): NoData value.\n geotrans (list): geographic transformation list.\n srs (:obj:`osgeo.osr.SpatialReference`): Spatial reference.\n dataType (:obj:`pygeoc.raster.GDALDataType`): Raster datatype.\n dx (float): cell size.\n xMin (float): left X coordinate.\n xMax (float): right X coordinate.\n yMin (float): lower Y coordinate.\n yMax (float): upper Y coordinate.\n validZone (:obj:`numpy.array`): 2D boolean array that NoDataValue is False.\n validValues (:obj:`numpy.array`): 2D raster array with None in NoDataValue.\n\n Examples:\n The common usage is read raster data from a raster file (e.g., geotiff) and get the\n Raster instance.\n\n >>> from pygeoc.raster import RasterUtilClass\n >>> rst_file = r'tests/data/Jamaica_dem.tif'\n >>> rst_obj = RasterUtilClass.read_raster(rst_file)\n >>> print(rst_obj)\n <pygeoc.raster.Raster object at 0x...>\n\n See Also:\n :func:`pygeoc.raster.RasterUtilClass.read_raster`\n \"\"\"\n\n def __init__(self, n_rows, n_cols, data, nodata_value=None, geotransform=None,\n srs=None, datatype=GDT_Float32):\n \"\"\"Constructor.\"\"\"\n self.nRows = n_rows\n self.nCols = n_cols\n self.data = numpy.copy(data)\n self.noDataValue = nodata_value\n self.geotrans = geotransform\n self.srs = srs\n self.dataType = datatype\n\n self.dx = geotransform[1]\n self.xMin = geotransform[0]\n self.xMax = geotransform[0] + n_cols * geotransform[1]\n self.yMax = geotransform[3]\n self.yMin = geotransform[3] + n_rows * geotransform[5]\n self.validZone = self.data != self.noDataValue\n self.validValues = numpy.where(self.validZone, self.data, numpy.nan)\n\n def get_type(self):\n \"\"\"get datatype as GDALDataType.\n\n Returns:\n dataType\n\n See Also:\n :obj:`pygeoc.raster.GDALDataType`\n \"\"\"\n assert self.dataType in GDALDataType\n return GDALDataType.get(self.dataType)\n\n def get_average(self):\n \"\"\"Get average exclude NODATA.\"\"\"\n return numpy.nanmean(self.validValues)\n\n def get_max(self):\n \"\"\"Get maximum exclude NODATA.\"\"\"\n return numpy.nanmax(self.validValues)\n\n def get_min(self):\n \"\"\"Get minimum exclude NODATA.\"\"\"\n return numpy.nanmin(self.validValues)\n\n def get_std(self):\n \"\"\"Get Standard Deviation exclude NODATA.\"\"\"\n return numpy.nanstd(self.validValues)\n\n def get_sum(self):\n \"\"\"Get sum exclude NODATA.\"\"\"\n return numpy.nansum(self.validValues)\n\n def get_value_by_row_col(self, row, col):\n \"\"\"Get raster value by (row, col).\n\n Args:\n row: row number.\n col: col number.\n\n Returns:\n raster value, None if the input are invalid.\n \"\"\"\n if row < 0 or row >= self.nRows or col < 0 or col >= self.nCols:\n raise ValueError(\"The row or col must be >=0 and less than \"\n \"nRows (%d) or nCols (%d)!\" % (self.nRows, self.nCols))\n else:\n value = self.data[int(round(row))][int(round(col))]\n if value == self.noDataValue:\n return None\n else:\n return value\n\n def get_value_by_xy(self, x, y):\n \"\"\"Get raster value by xy coordinates.\n\n Args:\n x: X Coordinate.\n y: Y Coordinate.\n\n Returns:\n raster value, None if the input are invalid.\n \"\"\"\n if x < self.xMin or x > self.xMax or y < self.yMin or y > self.yMax:\n return None\n # raise ValueError(\"The x or y value must be within the Min and Max!\")\n else:\n row = 
self.nRows - int(numpy.ceil((y - self.yMin) / self.dx))\n col = int(numpy.floor((x - self.xMin) / self.dx))\n value = self.data[row][col]\n if value == self.noDataValue:\n return None\n else:\n return value\n\n def get_central_coors(self, row, col):\n \"\"\"Get the coordinates of central grid.\n\n Args:\n row: row number, range from 0 to (nRows - 1).\n col: col number, range from 0 to (nCols - 1).\n\n Returns:\n XY coordinates. If the row or col are invalid, raise ValueError.\n \"\"\"\n if row < 0 or row >= self.nRows or col < 0 or col >= self.nCols:\n raise ValueError(\"The row (%d) or col (%d) must be >=0 and less than \"\n \"nRows (%d) or nCols (%d)!\" % (row, col, self.nRows, self.nCols))\n else:\n tmpx = self.xMin + (col + 0.5) * self.dx\n tmpy = self.yMax - (row + 0.5) * self.dx\n return tmpx, tmpy\n\n\nclass RasterUtilClass(object):\n \"\"\"Utility function to handle raster data.\n\n See Also:\n :class:`pygeoc.raster.raster.Raster`.\n \"\"\"\n\n def __init__(self):\n \"\"\"Empty.\"\"\"\n pass\n\n @staticmethod\n def read_raster(raster_file):\n \"\"\"Read raster by GDAL.\n\n Args:\n raster_file: raster file path.\n\n Returns:\n Raster object.\n \"\"\"\n ds = gdal_Open(raster_file)\n band = ds.GetRasterBand(1)\n data = band.ReadAsArray()\n xsize = band.XSize\n ysize = band.YSize\n\n nodata_value = band.GetNoDataValue()\n geotrans = ds.GetGeoTransform()\n dttype = band.DataType\n\n srs = osr_SpatialReference()\n srs.ImportFromWkt(ds.GetProjection())\n # print(srs.ExportToProj4())\n if nodata_value is None:\n nodata_value = DEFAULT_NODATA\n band = None\n ds = None\n return Raster(ysize, xsize, data, nodata_value, geotrans, srs, dttype)\n\n @staticmethod\n def get_mask_from_raster(rasterfile, outmaskfile, keep_nodata=False):\n \"\"\"Generate mask data from a given raster data.\n\n Args:\n rasterfile: raster file path.\n outmaskfile: output mask file path.\n\n Returns:\n Raster object of mask data.\n \"\"\"\n raster_r = RasterUtilClass.read_raster(rasterfile)\n xsize = raster_r.nCols\n ysize = raster_r.nRows\n nodata_value = raster_r.noDataValue\n srs = raster_r.srs\n x_min = raster_r.xMin\n y_max = raster_r.yMax\n dx = raster_r.dx\n data = raster_r.data\n\n if not keep_nodata:\n i_min = ysize - 1\n i_max = 0\n j_min = xsize - 1\n j_max = 0\n for i in range(ysize):\n for j in range(xsize):\n if abs(data[i][j] - nodata_value) > DELTA:\n i_min = min(i, i_min)\n i_max = max(i, i_max)\n j_min = min(j, j_min)\n j_max = max(j, j_max)\n\n # print(i_min, i_max, j_min, j_max)\n y_size_mask = i_max - i_min + 1\n x_size_mask = j_max - j_min + 1\n x_min_mask = x_min + j_min * dx\n y_max_mask = y_max - i_min * dx\n else:\n y_size_mask = ysize\n x_size_mask = xsize\n x_min_mask = x_min\n y_max_mask = y_max\n i_min = 0\n j_min = 0\n print('%dx%d -> %dx%d' % (xsize, ysize, x_size_mask, y_size_mask))\n\n mask = numpy.zeros((y_size_mask, x_size_mask))\n\n for i in range(y_size_mask):\n for j in range(x_size_mask):\n if abs(data[i + i_min][j + j_min] - nodata_value) > DELTA:\n mask[i][j] = 1\n else:\n mask[i][j] = DEFAULT_NODATA\n\n mask_geotrans = [x_min_mask, dx, 0, y_max_mask, 0, -dx]\n RasterUtilClass.write_gtiff_file(outmaskfile, y_size_mask, x_size_mask, mask,\n mask_geotrans, srs, DEFAULT_NODATA, GDT_Int32)\n return Raster(y_size_mask, x_size_mask, mask, DEFAULT_NODATA, mask_geotrans, srs)\n\n @staticmethod\n def raster_reclassify(srcfile, v_dict, dstfile, gdaltype=GDT_Float32):\n \"\"\"Reclassify raster by given classifier dict.\n\n Args:\n srcfile: source raster file.\n v_dict: classifier dict.\n 
dstfile: destination file path.\n gdaltype (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default.\n \"\"\"\n src_r = RasterUtilClass.read_raster(srcfile)\n src_data = src_r.data\n dst_data = numpy.copy(src_data)\n if gdaltype == GDT_Float32 and src_r.dataType != GDT_Float32:\n gdaltype = src_r.dataType\n no_data = src_r.noDataValue\n new_no_data = DEFAULT_NODATA\n if gdaltype in [GDT_Unknown, GDT_Byte, GDT_UInt16, GDT_UInt32]:\n new_no_data = 0\n if not MathClass.floatequal(new_no_data, src_r.noDataValue):\n if src_r.noDataValue not in v_dict:\n v_dict[src_r.noDataValue] = new_no_data\n no_data = new_no_data\n\n for (k, v) in iteritems(v_dict):\n dst_data[src_data == k] = v\n RasterUtilClass.write_gtiff_file(dstfile, src_r.nRows, src_r.nCols, dst_data,\n src_r.geotrans, src_r.srs, no_data, gdaltype)\n\n @staticmethod\n def write_gtiff_file(f_name, n_rows, n_cols, data, geotransform, srs, nodata_value,\n gdal_type=GDT_Float32):\n \"\"\"Output Raster to GeoTiff format file.\n\n Args:\n f_name: output gtiff file name.\n n_rows: Row count.\n n_cols: Col count.\n data: 2D array data.\n geotransform: geographic transformation.\n srs: coordinate system.\n nodata_value: nodata value.\n gdal_type (:obj:`pygeoc.raster.GDALDataType`): output raster data type,\n GDT_Float32 as default.\n \"\"\"\n UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(f_name)))\n driver = gdal_GetDriverByName(str('GTiff'))\n try:\n ds = driver.Create(f_name, n_cols, n_rows, 1, gdal_type)\n except Exception:\n print('Cannot create output file %s' % f_name)\n return\n ds.SetGeoTransform(geotransform)\n try:\n ds.SetProjection(srs.ExportToWkt())\n except AttributeError or Exception:\n ds.SetProjection(srs)\n ds.GetRasterBand(1).SetNoDataValue(nodata_value)\n # if data contains numpy.nan, then replaced by nodata_value\n if isinstance(data, numpy.ndarray) and data.dtype in [numpy.dtype('int'),\n numpy.dtype('float')]:\n data = numpy.where(numpy.isnan(data), nodata_value, data)\n ds.GetRasterBand(1).WriteArray(data)\n ds = None\n\n @staticmethod\n def write_asc_file(filename, data, xsize, ysize, geotransform, nodata_value):\n \"\"\"Output Raster to ASCII file.\n\n Args:\n filename: output ASCII filename.\n data: 2D array data.\n xsize: Col count.\n ysize: Row count.\n geotransform: geographic transformation.\n nodata_value: nodata_flow value.\n \"\"\"\n UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(filename)))\n header = 'NCOLS %d\\n' \\\n 'NROWS %d\\n' \\\n 'XLLCENTER %f\\n' \\\n 'YLLCENTER %f\\n' \\\n 'CELLSIZE %f\\n' \\\n 'NODATA_VALUE %f' % (xsize, ysize, geotransform[0] + 0.5 * geotransform[1],\n geotransform[3] - (ysize - 0.5) * geotransform[1],\n geotransform[1], nodata_value)\n\n with open(filename, 'w', encoding='utf-8') as f:\n f.write(header)\n for i in range(0, ysize):\n for j in range(0, xsize):\n f.write('%s\\t' % repr(data[i][j]))\n f.write('\\n')\n f.close()\n\n @staticmethod\n def raster_to_gtiff(tif, geotif, change_nodata=False, change_gdal_type=False):\n \"\"\"Converting Raster format to GeoTIFF.\n\n Args:\n tif: source raster file path.\n geotif: output raster file path.\n change_nodata: change NoDataValue to -9999 or not.\n gdal_type (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default.\n change_gdal_type: If True, output the Float32 data type.\n \"\"\"\n rst_file = RasterUtilClass.read_raster(tif)\n nodata = rst_file.noDataValue\n if change_nodata:\n if not MathClass.floatequal(rst_file.noDataValue, DEFAULT_NODATA):\n nodata = DEFAULT_NODATA\n nodata_array = 
numpy.ones((rst_file.nRows, rst_file.nCols)) * rst_file.noDataValue\n nodata_check = numpy.isclose(rst_file.data, nodata_array)\n rst_file.data[nodata_check] = DEFAULT_NODATA\n # rst_file.data[rst_file.data == rst_file.noDataValue] = DEFAULT_NODATA\n gdal_type = rst_file.dataType\n if change_gdal_type:\n gdal_type = GDT_Float32\n RasterUtilClass.write_gtiff_file(geotif, rst_file.nRows, rst_file.nCols, rst_file.data,\n rst_file.geotrans, rst_file.srs, nodata,\n gdal_type)\n\n @staticmethod\n def raster_to_asc(raster_f, asc_f):\n \"\"\"Converting Raster format to ASCII raster.\n\n Args:\n raster_f: raster file.\n asc_f: output ASCII file.\n \"\"\"\n raster_r = RasterUtilClass.read_raster(raster_f)\n RasterUtilClass.write_asc_file(asc_f, raster_r.data, raster_r.nCols, raster_r.nRows,\n raster_r.geotrans, raster_r.noDataValue)\n\n @staticmethod\n def raster_statistics(raster_file):\n \"\"\"Get basic statistics of raster data.\n\n Args:\n raster_file: raster file path.\n\n Returns:\n min, max, mean, std.\n \"\"\"\n ds = gdal_Open(raster_file)\n band = ds.GetRasterBand(1)\n minv, maxv, meanv, std = band.ComputeStatistics(False)\n return minv, maxv, meanv, std\n\n @staticmethod\n def split_raster(rs, split_shp, field_name, temp_dir):\n \"\"\"Split raster by given shapefile and field name.\n\n Args:\n rs: origin raster file.\n split_shp: boundary (ESRI Shapefile) used to spilt raster.\n field_name: field name identify the spilt value.\n temp_dir: directory to store the spilt rasters.\n \"\"\"\n UtilClass.rmmkdir(temp_dir)\n ds = ogr_Open(split_shp)\n lyr = ds.GetLayer(0)\n lyr.ResetReading()\n ft = lyr.GetNextFeature()\n while ft:\n cur_field_name = ft.GetFieldAsString(field_name)\n for r in rs:\n cur_file_name = r.split(os.sep)[-1]\n outraster = temp_dir + os.sep + \\\n cur_file_name.replace('.tif', '_%s.tif' %\n cur_field_name.replace(' ', '_'))\n subprocess.call(['gdalwarp', r, outraster, '-cutline', split_shp,\n '-crop_to_cutline', '-cwhere',\n \"'%s'='%s'\" % (field_name, cur_field_name), '-dstnodata',\n '-9999'])\n ft = lyr.GetNextFeature()\n ds = None\n\n @staticmethod\n def get_negative_dem(raw_dem, neg_dem):\n \"\"\"Get negative DEM data.\"\"\"\n origin = RasterUtilClass.read_raster(raw_dem)\n max_v = numpy.max(origin.data)\n temp = origin.data < 0\n neg = numpy.where(temp, origin.noDataValue, max_v - origin.data)\n RasterUtilClass.write_gtiff_file(neg_dem, origin.nRows, origin.nCols, neg, origin.geotrans,\n origin.srs, origin.noDataValue, origin.dataType)\n\n @staticmethod\n def mask_raster(in_raster, mask, out_raster):\n \"\"\"\n Mask raster data.\n Args:\n in_raster: list or one raster\n mask: Mask raster data\n out_raster: list or one raster\n\n \"\"\"\n if is_string(in_raster) and is_string(out_raster):\n in_raster = [str(in_raster)]\n out_raster = [str(out_raster)]\n if len(in_raster) != len(out_raster):\n raise RuntimeError('input raster and output raster must have the same size.')\n\n maskr = RasterUtilClass.read_raster(mask)\n rows = maskr.nRows\n cols = maskr.nCols\n maskdata = maskr.data\n temp = maskdata == maskr.noDataValue\n for inr, outr in zip(in_raster, out_raster):\n origin = RasterUtilClass.read_raster(inr)\n if origin.nRows == rows and origin.nCols == cols:\n masked = numpy.where(temp, origin.noDataValue, origin.data)\n else:\n masked = numpy.ones((rows, cols)) * origin.noDataValue\n # TODO, the following loop should be optimized by numpy or numba\n for i in range(rows):\n for j in range(cols):\n if maskdata[i][j] == maskr.noDataValue:\n continue\n # get the center 
point coordinate of current cell\n tempx, tempy = maskr.get_central_coors(i, j)\n tempv = origin.get_value_by_xy(tempx, tempy)\n if tempv is None:\n continue\n masked[i][j] = tempv\n RasterUtilClass.write_gtiff_file(outr, maskr.nRows, maskr.nCols, masked,\n maskr.geotrans, maskr.srs,\n origin.noDataValue, origin.dataType)\n\n @staticmethod\n def raster_binarization(given_value, rasterfilename):\n \"\"\"Make the raster into binarization.\n\n The opening and closing are based on binary image. Therefore we need to\n make the raster into binarization.\n\n Args:\n given_value: The given value's pixels will be value in 1,\n other pixels will be value in 0.\n rasterfilename: The initial rasterfilena,e.\n\n Returns:\n binary_raster: Raster after binarization.\n \"\"\"\n origin_raster = RasterUtilClass.read_raster(rasterfilename)\n binary_raster = numpy.where(origin_raster.data == given_value, 1, 0)\n return binary_raster\n\n @staticmethod\n def raster_erosion(rasterfile):\n \"\"\"Erode the raster image.\n\n Find the min pixel's value in 8-neighborhood. Then change the compute\n pixel's value into the min pixel's value.\n\n Args:\n rasterfile: input original raster image, type can be filename(string,\n like \"test1.tif\"), rasterfile(class Raster) or numpy.ndarray.\n\n Returns:\n erosion_raster: raster image after erosion, type is numpy.ndarray.\n \"\"\"\n if is_string(rasterfile):\n origin_raster = RasterUtilClass.read_raster(str(rasterfile))\n elif isinstance(rasterfile, Raster):\n origin_raster = rasterfile.data\n elif isinstance(rasterfile, numpy.ndarray):\n origin_raster = rasterfile\n else:\n return \"Your rasterfile has a wrong type. Type must be string or \" \\\n \"numpy.array or class Raster in pygeoc.\"\n max_value_raster = origin_raster.max()\n erosion_raster = numpy.zeros((origin_raster.shape[0], origin_raster.shape[1]))\n # In order to compute the raster edges, we need to expand the original\n # raster's rows and cols. We need to add the edges whose pixels' value is\n # the max pixel's value in raster.\n add_row = numpy.full((1, origin_raster.shape[1]), max_value_raster)\n temp_origin_raster = numpy.vstack((numpy.vstack((add_row, origin_raster)), add_row))\n add_col = numpy.full((origin_raster.shape[0] + 2, 1), max_value_raster)\n expand_origin_raster = numpy.hstack((numpy.hstack((add_col, temp_origin_raster)), add_col))\n # Erode the raster.\n for i in range(origin_raster.shape[0]):\n for j in range(origin_raster.shape[1]):\n min_pixel_value = max_value_raster\n # Find the min pixel value in the 8-neighborhood.\n for k in range(3):\n for l in range(3):\n if expand_origin_raster[i + k, j + l] <= min_pixel_value:\n min_pixel_value = expand_origin_raster[i + k, j + l]\n # After this loop, we get the min pixel's value of the\n # 8-neighborhood. Then we change the compute pixel's value into\n # the min pixel's value.\n erosion_raster[i, j] = min_pixel_value\n # Return the result.\n return erosion_raster\n\n @staticmethod\n def raster_dilation(rasterfile):\n \"\"\"Dilate the raster image.\n\n Find the max pixel's value in 8-neighborhood. 
Then change the compute\n pixel's value into the max pixel's value.\n\n Args:\n rasterfile: input original raster image, type can be filename(string,\n like \"test1.tif\"), rasterfile(class Raster) or numpy.ndarray.\n\n Returns:\n dilation_raster: raster image after dilation, type is numpy.ndarray.\n \"\"\"\n if is_string(rasterfile):\n origin_raster = RasterUtilClass.read_raster(str(rasterfile))\n elif isinstance(rasterfile, Raster):\n origin_raster = rasterfile.data\n elif isinstance(rasterfile, numpy.ndarray):\n origin_raster = rasterfile\n else:\n return 'Your rasterfile has a wrong type. Type must be string or ' \\\n 'numpy.array or class Raster in pygeoc.'\n min_value_raster = origin_raster.min()\n dilation_raster = numpy.zeros((origin_raster.shape[0], origin_raster.shape[1]))\n # In order to compute the raster edges, we need to expand the original\n # raster's rows and cols. We need to add the edges whose pixels' value is\n # the max pixel's value in raster.\n add_row = numpy.full((1, origin_raster.shape[1]), min_value_raster)\n temp_origin_raster = numpy.vstack((numpy.vstack((add_row, origin_raster)), add_row))\n add_col = numpy.full((origin_raster.shape[0] + 2, 1), min_value_raster)\n expand_origin_raster = numpy.hstack((numpy.hstack((add_col, temp_origin_raster)), add_col))\n # Dilate the raster.\n for i in range(origin_raster.shape[0]):\n for j in range(origin_raster.shape[1]):\n max_pixel_value = min_value_raster\n # Find the max pixel value in the 8-neighborhood.\n for k in range(3):\n for l in range(3):\n if expand_origin_raster[i + k, j + l] >= max_pixel_value:\n max_pixel_value = expand_origin_raster[i + k, j + l]\n # After this loop, we get the max pixel's value of the\n # 8-neighborhood. Then we change the compute pixel's value into\n # the max pixel's value.\n dilation_raster[i, j] = max_pixel_value\n # Return the result.\n return dilation_raster\n\n @staticmethod\n def openning(input_rasterfilename, times):\n \"\"\"Do openning.\n\n Openning: Erode firstly, then Dilate.\n\n Args:\n input_rasterfilename: input original raster image filename.\n times: Erode and Dilate times.\n\n Returns:\n openning_raster: raster image after open.\n \"\"\"\n input_raster = RasterUtilClass.read_raster(input_rasterfilename)\n openning_raster = input_raster\n for i in range(times):\n openning_raster = RasterUtilClass.raster_erosion(openning_raster)\n for i in range(times):\n openning_raster = RasterUtilClass.raster_dilation(openning_raster)\n return openning_raster\n\n @staticmethod\n def closing(input_rasterfilename, times):\n \"\"\"Do closing.\n\n Closing: Dilate firstly, then Erode.\n\n Args:\n input_rasterfilename: input original raster image filename.\n times: Erode and Dilate times.\n\n Returns:\n closing_raster: raster image after close.\n \"\"\"\n input_raster = RasterUtilClass.read_raster(input_rasterfilename)\n closing_raster = input_raster\n for i in range(times):\n closing_raster = RasterUtilClass.raster_dilation(closing_raster)\n for i in range(times):\n closing_raster = RasterUtilClass.raster_erosion(closing_raster)\n return closing_raster\n\n\nif __name__ == '__main__':\n # Run doctest in docstrings of Google code style\n # python -m doctest raster.py (only when doctest.ELLIPSIS is not specified)\n # or python raster.py -v\n # or py.test --doctest-modules raster.py\n import doctest\n\n doctest.testmod(optionflags=doctest.ELLIPSIS)\n" ]
[ [ "numpy.max", "numpy.full", "numpy.ceil", "numpy.isnan", "numpy.isclose", "numpy.zeros", "numpy.copy", "numpy.nansum", "numpy.ones", "numpy.nanmin", "numpy.where", "numpy.nanmean", "numpy.floor", "numpy.hstack", "numpy.nanmax", "numpy.dtype", "numpy.vstack", "numpy.nanstd" ] ]
3tew/rox-auto-fishing
[ "7887d563a088533e8326f8cac6572da6e473526c" ]
[ "repositories/render_repo.py" ]
[ "import psutil\nimport ctypes\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\nimport config\n\n\ndef tracking_info(curTime):\n # Initialize FPS variables\n sec = curTime - config.PREV_TIME\n config.PREV_TIME = curTime\n fps = 1 / sec\n count_str = \"%d time\" % config.COUNT\n fps_str = \"FPS: %0.1f\" % fps\n # Right side\n cv2.rectangle(config.FRAME, (config.BOUNDING_BOX['width'] - (\n 65 if not config.HOLD else 40), config.BOUNDING_BOX['width'] - 15), (config.BOUNDING_BOX['width'] - (\n 30 if not config.HOLD else 40) + 70, config.BOUNDING_BOX['height']), (0, 255, 0) if not config.HOLD else (0, 0, 255), -1)\n cv2.putText(config.FRAME, str(\"Fishing...\" if not config.HOLD else \"Stop\"), (config.BOUNDING_BOX['width'] - (\n 60 if not config.HOLD else 35), config.FRAME.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0) if not config.HOLD else (255, 255, 255), 1)\n # Left side\n cv2.putText(config.FRAME, str(\"Limit: \" + str(\"No\" if config.LIMIT == -1 else config.LIMIT)), (0,\n config.FRAME.shape[0] - 50), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)\n cv2.putText(config.FRAME, str(\"Count: \" + count_str), (0,\n config.FRAME.shape[0] - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)\n cv2.putText(config.FRAME, str(\n fps_str), (0, config.FRAME.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)\n\n\ndef center_point():\n cv2.circle(config.FRAME, (0 + int(config.BOUNDING_BOX['width'] / 2), 0 + int(\n config.BOUNDING_BOX['height'] / 2)), config.RADIUS, (255, 255, 255), thickness=1, lineType=8, shift=0)\n cv2.rectangle(config.FRAME, (0 + int(config.BOUNDING_BOX['width'] / 2), 0 + int(config.BOUNDING_BOX['height'] / 2)),\n (0 + int(config.BOUNDING_BOX['width'] / 2),\n 0 + int(config.BOUNDING_BOX['height'] / 2)),\n (255, 255, 255), 5)\n\n\ndef crop_screenshot(sct):\n config.BOUNDING_BOX = {\n 'left': int((75.52 * int(config.SCREEN_WIDTH)) / 100),\n 'top': int((60.2 * int(config.SCREEN_HEIGHT)) / 100),\n 'width': 240,\n 'height': 250\n }\n config.CENTER_X = int((75.52 * int(config.SCREEN_WIDTH)) /\n 100) + (config.BOUNDING_BOX[\"width\"] / 2)\n config.CENTER_Y = int((60.2 * int(config.SCREEN_HEIGHT)) /\n 100) + (config.BOUNDING_BOX[\"height\"] / 2)\n sct.get_pixels(config.BOUNDING_BOX)\n config.FRAME = Image.frombytes(\n 'RGB', (sct.width, sct.height), sct.image)\n\n\ndef show(detector):\n python_process = psutil.Process(config.PID)\n memoryUse = str(\"{:.2f}\".format(\n python_process.memory_info().rss / 1024 ** 2))\n # Set title\n title = config.TITLE + \" \" + str(config.VERSION) + \" - Status: \" + str(\"Fishing\" if not config.HOLD else \"Stop\") + \", Limit: \" + str(\n config.LIMIT) + \", Count: \" + str(config.COUNT) + \" | MemoryUse: \" + memoryUse + \" MB\"\n ctypes.windll.kernel32.SetConsoleTitleW(title)\n # แสดงรายละเอียดบนจอ\n tracking_info(config.CURRENT_TIME)\n # Center point\n center_point()\n # Rendering\n cv2.imshow('RO:X - Auto Fishing v%s' % config.VERSION,\n np.hstack([config.FRAME]))\n" ]
[ [ "numpy.hstack" ] ]
RadZaeem/ml-testbed
[ "a5ea7716c5df9e3d8b53422c4fe62336c18a54d9" ]
[ "bit-rnn/train.py" ]
[ "import time\nimport functools\nimport importlib\n\nimport numpy as np\nimport tensorflow as tf\n\nimport reader\n\nimport bit_utils\nfrom bit_rnn_cell import BitGRUCell\nfrom model import PTBModel\n\nflags = tf.flags\nlogging = tf.logging\n\nflags.DEFINE_string('data_path', None, 'data_path')\nflags.DEFINE_string('config', None, 'config')\n\nFLAGS = flags.FLAGS\n\n\ndef run_epoch(session, m, data, eval_op, verbose=False):\n \"\"\"Runs the model on the given data.\"\"\"\n epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps\n start_time = time.time()\n costs = 0.0\n iters = 0\n state = m.initial_state.eval() #assert t.eval() == sess.run(t)\n for step, (x, y) in enumerate(\n reader.ptb_iterator(data, m.batch_size, m.num_steps)):\n cost, state, _ = session.run([m.cost, m.final_state, eval_op],\n {m.input_data: x,\n m.targets: y,\n m.initial_state: state})\n costs += cost\n iters += m.num_steps\n\n if verbose and step % (epoch_size // 10) == 10:\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / epoch_size, np.exp(costs / iters),\n iters * m.batch_size / (time.time() - start_time)))\n\n return np.exp(costs / iters)\n\n\ndef get_config():\n return importlib.import_module(FLAGS.config).Config()\n\ntf.app.flags.DEFINE_string('gpu', True,\n \"\"\"use gpu.\"\"\")\ndef main(_):\n if not FLAGS.data_path:\n raise ValueError(\"Must set --data_path to PTB data directory\")\n\n raw_data = reader.ptb_raw_data(FLAGS.data_path)\n train_data, valid_data, test_data, _ = raw_data\n\n config = get_config()\n eval_config = get_config()\n eval_config.batch_size = 1\n eval_config.num_steps = 1\n\n with tf.Graph().as_default(), tf.Session(config=tf.ConfigProto(log_device_placement=True)) as session:\n\n tf.set_random_seed(1)\n\n initializer = tf.uniform_unit_scaling_initializer()\n with tf.variable_scope(\"model\", reuse=None, initializer=initializer):\n m = PTBModel(is_training=True, config=config)\n with tf.variable_scope(\"model\", reuse=True, initializer=initializer):\n mvalid = PTBModel(is_training=False, config=config)\n mtest = PTBModel(is_training=False, config=eval_config)\n\n tf.global_variables_initializer().run()\n\n def get_learning_rate(epoch, config):\n base_lr = config.learning_rate\n if epoch <= config.nr_epoch_first_stage:\n return base_lr\n elif epoch <= config.nr_epoch_second_stage:\n return base_lr * 0.1\n else:\n return base_lr * 0.01\n\n for i in range(config.max_epoch):\n m.assign_lr(session, get_learning_rate(i, config))\n\n print(\"Epoch: %d Learning rate: %f\"\n % (i + 1, session.run(m.lr)))\n train_perplexity = run_epoch(\n session, m, train_data, m.train_op, verbose=True)\n print(\"Epoch: %d Train Perplexity: %.3f\"\n % (i + 1, train_perplexity))\n valid_perplexity = run_epoch(\n session, mvalid, valid_data, tf.no_op())\n print(\"Epoch: %d Valid Perplexity: %.3f\"\n % (i + 1, valid_perplexity))\n\n test_perplexity = run_epoch(\n session, mtest, test_data, tf.no_op())\n print(\"Test Perplexity: %.3f\" % test_perplexity)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "tensorflow.set_random_seed", "tensorflow.uniform_unit_scaling_initializer", "tensorflow.app.flags.DEFINE_string", "tensorflow.Graph", "numpy.exp", "tensorflow.ConfigProto", "tensorflow.variable_scope", "tensorflow.app.run", "tensorflow.global_variables_initializer", "tensorflow.no_op" ] ]
lijiangfang/pytorch
[ "8824f49e686fbfcf895e3e9414c843cfbc4f31a5" ]
[ "torch/testing/_internal/common_methods_invocations.py" ]
[ "from functools import reduce, wraps, partial\nfrom itertools import product\nfrom operator import mul\nimport collections\nimport operator\nimport random\n\nimport torch\nimport numpy as np\nfrom torch._six import inf\nfrom torch.autograd import Variable\nimport collections.abc\n\nfrom typing import List, Sequence, Tuple, Dict, Any, Union\n\nfrom torch.testing import \\\n (make_non_contiguous, floating_types, floating_types_and, complex_types,\n floating_and_complex_types, floating_and_complex_types_and,\n all_types_and_complex_and, all_types_and, all_types_and_complex,\n integral_types_and, all_types)\nfrom .._core import _dispatch_dtypes\nfrom torch.testing._internal.common_device_type import \\\n (skipIf, skipCUDAIfNoMagma, skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfNoCusolver,\n skipCPUIfNoLapack, skipCPUIfNoMkl, skipCUDAIfRocm, precisionOverride,)\nfrom torch.testing._internal.common_cuda import CUDA11OrLater, SM53OrLater\nfrom torch.testing._internal.common_utils import \\\n (is_iterable_of_tensors,\n random_symmetric_matrix, random_symmetric_psd_matrix,\n make_fullrank_matrices_with_distinct_singular_values,\n random_symmetric_pd_matrix, make_symmetric_matrices,\n make_symmetric_pd_matrices,\n random_fullrank_matrix_distinct_singular_value, set_rng_seed, SEED,\n TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, make_tensor, TEST_SCIPY,\n torch_to_numpy_dtype_dict, slowTest, TEST_WITH_ASAN, _wrap_warn_once,\n GRADCHECK_NONDET_TOL,)\n\nfrom setuptools import distutils\n\nif TEST_SCIPY:\n import scipy.special\n\n\nclass DecorateInfo(object):\n \"\"\"Describes which test, or type of tests, should be wrapped in the given\n decorators when testing an operator. Any test that matches all provided\n arguments will be decorated. The decorators will only be applied if the\n active_if argument is True.\"\"\"\n\n __slots__ = ['decorators', 'cls_name', 'test_name', 'device_type', 'dtypes', 'active_if']\n\n def __init__(self, decorators, cls_name=None, test_name=None, *,\n device_type=None, dtypes=None, active_if=True):\n self.decorators = list(decorators) if isinstance(decorators, collections.abc.Sequence) else [decorators]\n self.cls_name = cls_name\n self.test_name = test_name\n self.device_type = device_type\n self.dtypes = dtypes\n self.active_if = active_if\n\n def is_active(self, cls_name, test_name, device_type, dtype):\n return (\n self.active_if and\n (self.cls_name is None or self.cls_name == cls_name) and\n (self.test_name is None or self.test_name == test_name) and\n (self.device_type is None or self.device_type == device_type) and\n (self.dtypes is None or dtype in self.dtypes)\n )\n\n\nclass SkipInfo(DecorateInfo):\n \"\"\"Describes which test, or type of tests, should be skipped when testing\n an operator. 
Any test that matches all provided arguments will be skipped.\n The skip will only be checked if the active_if argument is True.\"\"\"\n\n def __init__(self, cls_name=None, test_name=None, *,\n device_type=None, dtypes=None, active_if=True):\n super().__init__(decorators=skipIf(True, \"Skipped!\"), cls_name=cls_name,\n test_name=test_name, device_type=device_type, dtypes=dtypes,\n active_if=active_if)\n\nclass SampleInput(object):\n \"\"\"Represents sample inputs to a function.\"\"\"\n\n __slots__ = ['input', 'args', 'kwargs', 'output_process_fn_grad', 'broadcasts_input']\n\n def __init__(self, input, *, args=tuple(), kwargs=None, output_process_fn_grad=None, broadcasts_input=False):\n # input is the first input to the op and must be either a Tensor or TensorList (Sequence[Tensor]).\n # This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...).\n # op with TensorList inputs do not support method or inplace variants.\n assert isinstance(input, torch.Tensor) or is_iterable_of_tensors(input)\n self.input: Union[torch.Tensor, Sequence[torch.Tensor]] = input\n self.args = args\n self.kwargs = kwargs if kwargs is not None else {}\n self.output_process_fn_grad = output_process_fn_grad\n\n # Specifies if `self.input` is broadcasted or not,\n # given that the operator supports broadcasting.\n # This field is used to verify the behavior for inplace variant.\n #\n # If a SampleInput is marked with `broadcasts_input=True`,\n # it is verified that we get a `RuntimerError` with this sample,\n # and inplace variant. Also inplace grad{grad} tests are skipped,\n # for such inputs (as they will error out otherwise).\n self.broadcasts_input = broadcasts_input\n\n def __repr__(self):\n arguments = [\n 'input=Tensor' if isinstance(self.input, torch.Tensor) else f'input=TensorList[{len(self.input)}]',\n f'args={self.args}' if len(self.args) > 0 else None,\n f'kwargs={self.kwargs}' if len(self.kwargs) > 0 else None,\n (f'output_process_fn_grad={self.output_process_fn_grad}'\n if self.output_process_fn_grad is not None else None),\n f'broadcasts_input={self.broadcasts_input}']\n\n return f'SampleInput({\", \".join(a for a in arguments if a is not None)})'\n\nclass AliasInfo(object):\n \"\"\"Class holds alias information. For example, torch.abs ->\n torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_\n \"\"\"\n\n def __init__(self, alias_name):\n self.name = alias_name\n self.op = _getattr_qual(torch, alias_name)\n self.method_variant = getattr(torch.Tensor, alias_name, None)\n self.inplace_variant = getattr(torch.Tensor, alias_name + \"_\", None)\n\n def __call__(self, *args, **kwargs):\n return self.op(*args, **kwargs)\n\n\n_NOTHING = object() # Unique value to distinguish default from anything else\n\n\n# Extension of getattr to support qualified names\n# e.g. 
_getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm\ndef _getattr_qual(obj, name, default=_NOTHING):\n try:\n for path in name.split('.'):\n obj = getattr(obj, path)\n return obj\n except AttributeError:\n if default is not _NOTHING:\n return default\n else:\n raise\n\n\n# Classes and methods for the operator database\nclass OpInfo(object):\n \"\"\"Operator information and helper functions for acquiring it.\"\"\"\n\n def __init__(self,\n name, # the string name of the function\n *,\n op=None, # the function variant of the operation, populated as torch.<name> if None\n dtypes=floating_types(), # dtypes this function is expected to work with\n dtypesIfCPU=None, # dtypes this function is expected to work with on CPU\n dtypesIfCUDA=None, # dtypes this function is expected to work with on CUDA\n dtypesIfROCM=None, # dtypes this function is expected to work with on ROCM\n backward_dtypes=None, # backward dtypes this function is expected to work with\n backward_dtypesIfCPU=None, # backward dtypes this function is expected to work with on CPU\n backward_dtypesIfCUDA=None, # backward dtypes this function is expected to work with on CUDA\n backward_dtypesIfROCM=None, # backward dtypes this function is expected to work with on ROCM\n default_test_dtypes=None, # dtypes to test with by default. Gets intersected\n # with the dtypes support on the tested device\n assert_autodiffed=False, # if a op's aten::node is expected to be symbolically autodiffed\n autodiff_nonfusible_nodes=None, # a list of strings with node names that are expected to be in a\n # DifferentiableGraph when autodiffed. Ex: ['aten::add', 'aten::mm'],\n # default is populated to be ['aten::(name of Python operator)']\n autodiff_fusible_nodes=None, # a list of strings with node names that are expected to be in FusionGroups\n # inside of DifferentiableGraphs when this operation is autodiffed.\n # Ex: ['aten::add', 'aten::mm'], defaults to an empty list\n # Note: currently no ops use fusible nodes\n supports_out=True, # whether the op supports the out kwarg\n skips=tuple(), # information about which tests to skip\n decorators=None, # decorators to apply to generated tests\n safe_casts_outputs=False, # whether op allows safe casting when writing to out arguments\n sample_inputs_func=None, # function to generate sample inputs\n aten_name=None, # name of the corresponding aten:: operator\n aliases=None, # iterable of aliases, e.g. 
(\"absolute\",) for torch.abs\n variant_test_name='', # additional string to include in the test name\n supports_autograd=True, # support for autograd\n supports_gradgrad=True, # support second order gradients (this value is ignored if supports_autograd=False)\n supports_inplace_autograd=None, # whether the operation supports inplace autograd\n # defaults to supports_autograd's value\n supports_sparse=False, # whether the op supports sparse inputs\n gradcheck_wrapper=lambda op, *args, **kwargs: op(*args, **kwargs), # wrapper function for gradcheck\n check_batched_grad=True, # check batched grad when doing gradcheck\n check_batched_gradgrad=True, # check batched grad grad when doing gradgradcheck\n gradcheck_nondet_tol=0.0, # tolerance for nondeterminism while performing gradcheck\n gradcheck_fast_mode=None, # Whether to use the fast implmentation for gradcheck/gradgradcheck.\n # When set to None, defers to the default value provided by the wrapper\n # function around gradcheck (testing._internal.common_utils.gradcheck)\n ):\n\n # Validates the dtypes are generated from the dispatch-related functions\n for dtype_list in (dtypes, dtypesIfCPU, dtypesIfCUDA, dtypesIfROCM):\n assert isinstance(dtype_list, (_dispatch_dtypes, type(None)))\n\n self.name = name\n self.aten_name = aten_name if aten_name is not None else name\n self.variant_test_name = variant_test_name\n\n self.dtypes = set(dtypes)\n self.dtypesIfCPU = set(dtypesIfCPU) if dtypesIfCPU is not None else self.dtypes\n self.dtypesIfCUDA = set(dtypesIfCUDA) if dtypesIfCUDA is not None else self.dtypes\n self.dtypesIfROCM = set(dtypesIfROCM) if dtypesIfROCM is not None else self.dtypesIfCUDA\n\n self.backward_dtypes = set(backward_dtypes) if backward_dtypes is not None else self.dtypes\n self.backward_dtypesIfCPU = set(backward_dtypesIfCPU) if backward_dtypesIfCPU is not None else (\n self.dtypesIfCPU if dtypesIfCPU is not None else self.backward_dtypes)\n self.backward_dtypesIfCUDA = set(backward_dtypesIfCUDA) if backward_dtypesIfCUDA is not None else (\n self.dtypesIfCUDA if dtypesIfCUDA is not None else self.backward_dtypes)\n self.backward_dtypesIfROCM = set(backward_dtypesIfROCM) if backward_dtypesIfROCM is not None else (\n self.dtypesIfROCM if dtypesIfROCM is not None else self.backward_dtypesIfCUDA)\n\n self._default_test_dtypes = set(default_test_dtypes) if default_test_dtypes is not None else None\n\n # NOTE: if the op is unspecified it is assumed to be under the torch namespace\n self.op = op if op else _getattr_qual(torch, self.name)\n method_variant = getattr(torch.Tensor, name, None)\n # attributes like real, imag are not callable\n self.method_variant = method_variant if callable(method_variant) else None\n inplace_name = name + \"_\"\n self.inplace_variant = getattr(torch.Tensor, inplace_name, None)\n self.operator_variant = getattr(operator, name, None)\n\n self.supports_out = supports_out\n self.safe_casts_outputs = safe_casts_outputs\n\n self.skips = skips\n self.decorators = decorators\n self.sample_inputs_func = sample_inputs_func\n\n self.assert_autodiffed = assert_autodiffed\n self.autodiff_fusible_nodes = autodiff_fusible_nodes if autodiff_fusible_nodes else []\n if autodiff_nonfusible_nodes is None:\n self.autodiff_nonfusible_nodes = ['aten::' + self.name]\n else:\n self.autodiff_nonfusible_nodes = autodiff_nonfusible_nodes\n\n # autograd support\n self.supports_autograd = supports_autograd\n self.supports_inplace_autograd = supports_inplace_autograd\n if self.supports_inplace_autograd is None:\n 
self.supports_inplace_autograd = supports_autograd\n\n self.gradcheck_wrapper = gradcheck_wrapper\n self.supports_gradgrad = supports_gradgrad\n self.check_batched_grad = check_batched_grad\n self.check_batched_gradgrad = check_batched_gradgrad\n self.gradcheck_nondet_tol = gradcheck_nondet_tol\n self.gradcheck_fast_mode = gradcheck_fast_mode\n\n self.supports_sparse = supports_sparse\n\n self.aliases = ()\n if aliases is not None:\n self.aliases = tuple(AliasInfo(a) for a in aliases) # type: ignore[assignment]\n\n def __call__(self, *args, **kwargs):\n \"\"\"Calls the function variant of the operator.\"\"\"\n return self.op(*args, **kwargs)\n\n def get_op(self):\n \"\"\"Returns the function variant of the operator, torch.<op_name>.\"\"\"\n return self.op\n\n def get_method(self):\n \"\"\"Returns the method variant of the operator, torch.Tensor.<op_name>.\n Returns None if the operator has no method variant.\n \"\"\"\n return self.method_variant\n\n def get_inplace(self):\n \"\"\"Returns the inplace variant of the operator, torch.Tensor.<op_name>_.\n Returns None if the operator has no inplace variant.\n \"\"\"\n return self.inplace_variant\n\n def get_operator_variant(self):\n \"\"\"Returns operator variant of the operator, e.g. operator.neg\n Returns None if the operator has no operator variant.\n \"\"\"\n return self.operator_variant\n\n def sample_inputs(self, device, dtype, requires_grad=False, **kwargs):\n \"\"\"Returns an iterable of SampleInputs.\n\n These samples should be sufficient to test the function works correctly\n with autograd, TorchScript, etc.\n \"\"\"\n\n # TODO: Remove the try/except once all operators have sample_inputs_func with\n # **kwargs in their signature.\n try:\n samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)\n except TypeError:\n samples = self.sample_inputs_func(self, device, dtype, requires_grad)\n return samples\n\n # Returns True if the test should be skipped and False otherwise\n def should_skip(self, cls_name, test_name, device_type, dtype):\n return any(si.is_active(cls_name, test_name, device_type, dtype)\n for si in self.skips)\n\n def supported_dtypes(self, device_type):\n if device_type == 'cpu':\n return self.dtypesIfCPU\n if device_type == 'cuda':\n return self.dtypesIfROCM if TEST_WITH_ROCM else self.dtypesIfCUDA\n else:\n return self.dtypes\n\n def supported_backward_dtypes(self, device_type):\n if device_type == 'cpu':\n return self.backward_dtypesIfCPU\n if device_type == 'cuda':\n return self.backward_dtypesIfROCM if TEST_WITH_ROCM else self.backward_dtypesIfCUDA\n else:\n return self.backward_dtypes\n\n def supports_complex_autograd(self, device_type):\n if device_type == 'cpu':\n return any(dtype.is_complex for dtype in self.backward_dtypesIfCPU)\n if device_type == 'cuda':\n if TEST_WITH_ROCM:\n return any(dtype.is_complex for dtype in self.backward_dtypesIfROCM)\n else:\n return any(dtype.is_complex for dtype in self.backward_dtypesIfCUDA)\n else:\n return any(dtype.is_complex for dtype in self.backward_dtypes)\n\n def supports_dtype(self, dtype, device_type):\n return dtype in self.supported_dtypes(device_type)\n\n def default_test_dtypes(self, device_type):\n \"\"\"Returns the default dtypes used to test this operator on the device.\n\n Equal to the operator's default_test_dtypes filtered to remove dtypes\n not supported by the device.\n \"\"\"\n supported = self.supported_dtypes(device_type)\n return (supported if self._default_test_dtypes is None\n else 
supported.intersection(self._default_test_dtypes))\n\n\nL = 20\nM = 10\nS = 5\n\n\ndef sample_inputs_unary(op_info, device, dtype, requires_grad, **kwargs):\n low, high = op_info.domain\n low = low if low is None else low + op_info._domain_eps\n high = high if high is None else high - op_info._domain_eps\n\n return (SampleInput(make_tensor((L,), device=device, dtype=dtype,\n low=low, high=high,\n requires_grad=requires_grad)),\n SampleInput(make_tensor((), device=device, dtype=dtype,\n low=low, high=high,\n requires_grad=requires_grad)))\n\n# Metadata class for unary \"universal functions (ufuncs)\" that accept a single\n# tensor and have common properties like:\nclass UnaryUfuncInfo(OpInfo):\n \"\"\"Operator information for 'universal unary functions (unary ufuncs).'\n These are functions of a single tensor with common properties like:\n - they are elementwise functions\n - the input shape is the output shape\n - they typically have method and inplace variants\n - they typically support the out kwarg\n - they typically have NumPy or SciPy references\n See NumPy's universal function documentation\n (https://numpy.org/doc/1.18/reference/ufuncs.html) for more details\n about the concept of ufuncs.\n \"\"\"\n\n def __init__(self,\n name, # the string name of the function\n *,\n ref, # a reference function\n dtypes=floating_types(),\n dtypesIfCPU=None,\n dtypesIfCUDA=None,\n dtypesIfROCM=None,\n default_test_dtypes=(\n torch.uint8, torch.long, torch.half, torch.bfloat16,\n torch.float32, torch.cfloat), # dtypes which tests check by default\n domain=(None, None), # the [low, high) domain of the function\n handles_large_floats=True, # whether the op correctly handles large float values (like 1e20)\n handles_extremals=True, # whether the op correctly handles extremal values (like inf)\n handles_complex_extremals=True, # whether the op correct handles complex extremals (like inf -infj)\n supports_complex_to_float=False, # op supports casting from complex input to real output safely eg. angle\n sample_inputs_func=sample_inputs_unary,\n sample_kwargs=lambda device, dtype, input: ({}, {}),\n supports_sparse=False,\n **kwargs):\n super(UnaryUfuncInfo, self).__init__(name,\n dtypes=dtypes,\n dtypesIfCPU=dtypesIfCPU,\n dtypesIfCUDA=dtypesIfCUDA,\n dtypesIfROCM=dtypesIfROCM,\n default_test_dtypes=default_test_dtypes,\n sample_inputs_func=sample_inputs_func,\n supports_sparse=supports_sparse,\n **kwargs)\n self.ref = ref\n self.domain = domain\n self.handles_large_floats = handles_large_floats\n self.handles_extremals = handles_extremals\n self.handles_complex_extremals = handles_complex_extremals\n self.supports_complex_to_float = supports_complex_to_float\n\n # test_unary_ufuncs.py generates its own inputs to test the consistency\n # of the operator on sliced tensors, non-contig tensors, etc.\n # `sample_kwargs` is a utility function to provide kwargs\n # along with those inputs if required (eg. 
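# A minimal sketch of the dtype fallback chain implemented in OpInfo.__init__ above:
# CUDA dtypes default to `dtypes`, ROCm dtypes default to the CUDA ones, and backward
# dtypes default to their forward counterparts. Assumes torch and floating_types()
# from this module; 'sin' is only a convenient existing op, and the helper name is
# illustrative.
def _example_opinfo_dtype_fallbacks():
    info = OpInfo('sin', dtypes=floating_types())
    assert info.dtypesIfCPU == info.dtypes
    assert info.dtypesIfCUDA == info.dtypes
    assert info.dtypesIfROCM == info.dtypesIfCUDA
    assert info.backward_dtypesIfCUDA == info.dtypesIfCUDA
    # The function variant is populated from the torch namespace by default.
    assert info.get_op() is torch.sin
    return info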
clamp).\n # It should return two dictionaries, first holding kwarg for\n # torch operator and second one for reference NumPy operator.\n self.sample_kwargs = sample_kwargs\n\n # Epsilon to ensure grad and gradgrad checks don't test values\n # outside a function's domain.\n self._domain_eps = 1e-5\n\ndef sample_inputs_tensor_split(op_info, device, dtype, requires_grad, **kwargs):\n return (SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(torch.tensor([1, 2, 3]),),),\n SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(torch.tensor(1),),),\n SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(torch.tensor([1, 2, 3]),),\n kwargs=dict(dim=1)),)\n\ndef sample_inputs_linalg_det(op_info, device, dtype, requires_grad):\n kw = dict(device=device, dtype=dtype)\n inputs = [\n make_tensor((S, S), **kw),\n make_tensor((1, 1), **kw), # 1x1\n random_symmetric_matrix(S, **kw), # symmetric\n random_symmetric_psd_matrix(S, **kw), # symmetric_psd\n random_symmetric_pd_matrix(S, **kw), # symmetric_pd\n\n # dim2_null, rank1 and rank2 are disabled because of\n # https://github.com/pytorch/pytorch/issues/53364\n # we should re-enable them once the issue is solved\n # random_square_matrix_of_rank(S, S - 2, **kw), # dim2_null\n # random_square_matrix_of_rank(S, 1, **kw), # rank1\n # random_square_matrix_of_rank(S, 2, **kw), # rank2\n\n random_fullrank_matrix_distinct_singular_value(S, **kw), # distinct_singular_value\n make_tensor((3, 3, S, S), **kw), # batched\n make_tensor((3, 3, 1, 1), **kw), # batched_1x1\n random_symmetric_matrix(S, 3, **kw), # batched_symmetric\n random_symmetric_psd_matrix(S, 3, **kw), # batched_symmetric_psd\n random_symmetric_pd_matrix(S, 3, **kw), # batched_symmetric_pd\n random_fullrank_matrix_distinct_singular_value(S, 3, 3, **kw), # batched_distinct_singular_values\n make_tensor((0, 0), **kw),\n make_tensor((0, S, S), **kw),\n ]\n for t in inputs:\n t.requires_grad = requires_grad\n return [SampleInput(t) for t in inputs]\n\ndef sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad):\n # (<matrix_size>, (<batch_sizes, ...>))\n test_sizes = [\n (1, ()),\n (2, (0,)),\n (2, (2,)),\n ]\n\n inputs = []\n for matrix_size, batch_sizes in test_sizes:\n size = batch_sizes + (matrix_size, matrix_size)\n for n in (0, 3, 5):\n t = make_tensor(size, device, dtype, requires_grad=requires_grad)\n inputs.append(SampleInput(t, args=(n,)))\n for n in [-4, -2, -1]:\n t = random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_sizes, device=device, dtype=dtype)\n t.requires_grad = requires_grad\n inputs.append(SampleInput(t, args=(n,)))\n\n return inputs\n\ndef sample_inputs_hsplit(op_info, device, dtype, requires_grad):\n return (SampleInput(make_tensor((6,), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(2,),),\n SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=([1, 2, 3],),),)\n\ndef sample_inputs_vsplit(op_info, device, dtype, requires_grad):\n return (SampleInput(make_tensor((6, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(2,),),\n SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=([1, 2, 3],),),)\n\ndef sample_inputs_dsplit(op_info, device, dtype, requires_grad):\n return 
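# A minimal sketch of how a test typically consumes one of the sample_inputs_*
# functions in this file. Assumes torch is imported at module level; the device and
# dtype are arbitrary, and sample_inputs_tensor_split ignores its op_info argument,
# so None is passed for it here. The helper name is illustrative only.
def _example_consume_tensor_split_samples():
    outputs = []
    for sample in sample_inputs_tensor_split(None, 'cpu', torch.float32, requires_grad=False):
        # Each SampleInput is unpacked as op(sample.input, *sample.args, **sample.kwargs).
        outputs.append(torch.tensor_split(sample.input, *sample.args, **sample.kwargs))
    return outputs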
(SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=([1, 2, 3],),),\n SampleInput(make_tensor((S, S, 6), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(2,),),)\n\ndef sample_inputs_linalg_multi_dot(op_info, device, dtype, requires_grad):\n # Each test case consists of the sizes in the chain of multiplications\n # e.g. [2, 3, 4, 5] generates matrices (2, 3) @ (3, 4) @ (4, 5)\n test_cases = [\n [1, 2, 1],\n [2, 0, 2],\n [0, 2, 2],\n [2, 2, 2, 2],\n [2, 3, 4, 5],\n [5, 4, 0, 2],\n [2, 4, 3, 5, 3, 2]\n ]\n\n result = []\n for sizes in test_cases:\n tensors = []\n for size in zip(sizes[:-1], sizes[1:]):\n t = make_tensor(size, device, dtype, requires_grad=requires_grad)\n tensors.append(t)\n result.append(SampleInput(tensors))\n\n return result\n\ndef sample_inputs_linalg_matrix_norm(op_info, device, dtype, requires_grad, **kwargs):\n sizes = ((2, 2), (2, 3, 2))\n ords = ('fro', 'nuc', inf, -inf, 1, -1, 2, -2)\n dims = ((-2, -1), (-1, 0))\n\n inputs: List[SampleInput] = []\n for size, ord, dim, keepdim in product(sizes, ords, dims, [True, False]):\n t = make_tensor(size, device, dtype, requires_grad=requires_grad)\n inputs.append(SampleInput(t, args=(ord, dim, keepdim)))\n\n return inputs\n\ndef sample_inputs_linalg_norm(op_info, device, dtype, requires_grad):\n test_sizes = [\n (S,),\n (0,),\n (S, S),\n (0, 0),\n (S, 0),\n (0, S),\n (S, S, S),\n (0, S, S),\n (S, 0, S),\n (0, 0, 0),\n ]\n\n vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf)\n matrix_ords = (None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf)\n\n inputs = []\n\n for test_size in test_sizes:\n is_vector_norm = len(test_size) == 1\n is_matrix_norm = len(test_size) == 2\n\n for keepdim in [False, True]:\n inputs.append(SampleInput(\n make_tensor(\n test_size, device, dtype, low=None, high=None,\n requires_grad=requires_grad),\n kwargs=dict(\n keepdim=keepdim)))\n\n if not (is_vector_norm or is_matrix_norm):\n continue\n\n ords = vector_ords if is_vector_norm else matrix_ords\n\n for ord in ords:\n\n inputs.append(SampleInput(\n make_tensor(\n test_size, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(ord,),\n kwargs=dict(\n keepdim=keepdim)))\n\n if ord in ['nuc', 'fro']:\n inputs.append(SampleInput(\n make_tensor(\n test_size, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n kwargs=dict(\n ord=ord,\n keepdim=keepdim,\n dim=(0, 1))))\n return inputs\n\ndef sample_inputs_linalg_vector_norm(op_info, device, dtype, requires_grad, **kwargs):\n size_1D = (S,)\n size_2D = (2, 2)\n\n test_cases = [\n # input size, ord, dim args\n (size_1D, 2, None),\n (size_1D, 2, (0,)),\n (size_1D, 0, None),\n (size_1D, 0, (0,)),\n (size_1D, 0.9, None),\n (size_1D, 0.9, (0,)),\n (size_1D, 1, None),\n (size_1D, 1, (0,)),\n (size_1D, -2.1, None),\n (size_1D, -2.1, (0,)),\n (size_1D, inf, None),\n (size_1D, inf, (0,)),\n (size_1D, -inf, None),\n (size_1D, -inf, (0,)),\n\n (size_2D, 2, None),\n (size_2D, 2, (0,)),\n (size_2D, 2, (-1, 0)),\n (size_2D, 0, None),\n (size_2D, 0, (0,)),\n (size_2D, 0, (-1, 0)),\n (size_2D, 0.9, None),\n (size_2D, 0.9, (0,)),\n (size_2D, 0.9, (-1, 0)),\n (size_2D, 1, None),\n (size_2D, 1, (0,)),\n (size_2D, 1, (-1, 0)),\n (size_2D, -2.1, None),\n (size_2D, -2.1, (0,)),\n (size_2D, -2.1, (-1, 0)),\n (size_2D, inf, None),\n (size_2D, inf, (0,)),\n (size_2D, inf, (-1, 0)),\n (size_2D, -inf, None),\n (size_2D, -inf, (0,)),\n (size_2D, -inf, (-1, 0)),\n ]\n inputs = []\n\n for 
test_size, ord, dim in test_cases:\n for keepdim in [False, True]:\n inputs.append(SampleInput(\n make_tensor(\n test_size, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(ord,),\n kwargs=dict(\n keepdim=keepdim,\n dim=dim)))\n\n return inputs\n\n# In order to use the kwarg alpha, partials should be used in an OpInfo's sample_inputs_func\n# eg. sample_inputs_func=partial(sample_inputs_binary_pwise, alpha=2)\n# Then one sample input would also be generated corresponding to the value of alpha provided.\n# In the future, kwargs 'alpha_floating', 'alpha_integral' & 'alpha_complex' can be used to\n# specify scalars of floating, integral & complex types as values for \"alpha\".\ndef sample_inputs_binary_pwise(op_info, device, dtype, requires_grad, **kwargs):\n scalar = 3.14 + 3.14j if dtype.is_complex else (3.14 if dtype.is_floating_point else 3)\n scalar = 1 if dtype is torch.bool else scalar\n tests_list = [\n ((S, S, S), (S, S, S), False),\n ((S, S, S), (S, S), False),\n ((), (), False),\n ((S, S, S), (), False),\n ((S, S, S), scalar, False),\n ((), scalar, False)\n ]\n tests_with_lhs_broadcasting = [\n ((S, S), (S, S, S), True),\n ((), (S, S, S), True),\n ((S, 1, S), (M, S), True),\n ]\n test_cases = tests_list + tests_with_lhs_broadcasting # type: ignore[operator]\n samples = []\n for first_shape, shape_or_scalar, broadcasts_input in test_cases:\n arg = shape_or_scalar\n if isinstance(shape_or_scalar, tuple):\n arg = make_tensor(shape_or_scalar, device=device, dtype=dtype,\n requires_grad=requires_grad)\n samples.append(SampleInput(make_tensor(first_shape, device=device, dtype=dtype,\n requires_grad=requires_grad),\n args=(arg,),\n broadcasts_input=broadcasts_input))\n # Adds an extra sample using \"alpha\" if it's passed in kwargs\n if 'alpha' in kwargs:\n a = make_tensor((S, S, S), device=device, dtype=dtype, requires_grad=requires_grad)\n b = make_tensor((S, S, S), device=device, dtype=dtype, requires_grad=requires_grad)\n sample = SampleInput(a, args=(b,), kwargs={'alpha': kwargs['alpha']})\n samples.append(sample)\n return tuple(samples)\n\ndef sample_inputs_mm(op_info, device, dtype, requires_grad, **kwargs):\n args_list = (\n ((S, M), (M, S)),\n )\n inputs = tuple(SampleInput(make_tensor(first_shape, device, dtype,\n requires_grad=requires_grad),\n args=(make_tensor(second_shape, device, dtype,\n requires_grad=requires_grad),))\n for first_shape, second_shape in args_list)\n return inputs\n\ndef sample_inputs_addmm(op_info, device, dtype, requires_grad, **kwargs):\n alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6)\n beta_val = kwargs.get('beta', 1 + 2j if dtype.is_complex else 0.2)\n tests_list = [\n ((2, 3), (2, 2), (2, 3), False)\n ]\n tests_with_lhs_broadcasting = [\n ((1,), (2, 2), (2, 3), True),\n ((), (2, 2), (2, 3), True)\n ]\n test_cases = tests_list + tests_with_lhs_broadcasting # type: ignore[operator]\n inputs = tuple(SampleInput(make_tensor(shape_a, device, dtype, requires_grad=requires_grad),\n args=(make_tensor(shape_b, device, dtype,\n requires_grad=requires_grad),\n make_tensor(shape_c, device, dtype,\n requires_grad=requires_grad)),\n kwargs={'alpha': alpha_val, 'beta': beta_val},\n broadcasts_input=broadcasts_input)\n for shape_a, shape_b, shape_c, broadcasts_input in test_cases)\n return inputs\n\ndef sample_inputs_mv(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((S, M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((M, ), device, 
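# A minimal sketch of the partial(...) pattern described in the comment above
# sample_inputs_binary_pwise: binding alpha ahead of time yields one extra sample
# carrying the alpha kwarg. Assumes partial (functools) and torch are imported at
# module level; torch.add is used only because it accepts alpha, and the helper name
# is illustrative.
def _example_binary_pwise_with_alpha():
    sample_inputs_func = partial(sample_inputs_binary_pwise, alpha=2)
    samples = sample_inputs_func(None, 'cpu', torch.float32, requires_grad=False)
    extra = samples[-1]  # the sample appended because 'alpha' was passed in kwargs
    assert extra.kwargs == {'alpha': 2}
    return torch.add(extra.input, *extra.args, **extra.kwargs)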
dtype, low=None, high=None, requires_grad=requires_grad),\n )\n ),\n )\n\ndef sample_inputs_bmm(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((M, S, M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((M, M, S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n ),\n )\n\ndef sample_inputs_dot_vdot(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n ),\n )\n\ndef sample_inputs_addmv(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (((S,), (S, M), (M,), 1, 1, False),\n ((S,), (S, M), (M,), 0.2, 0.6, False),\n )\n\n test_cases_with_broadcast = (((1,), (S, M), (M,), 1, 1, True),\n ((1,), (S, M), (M,), 0.2, 0.6, True),\n ((), (S, M), (M,), 1, 1, True),\n ((), (S, M), (M,), 0.2, 0.6, True),\n )\n\n cases = test_cases + test_cases_with_broadcast\n sample_inputs = []\n for input_args in cases:\n args = (make_tensor(input_args[0], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(input_args[1], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(input_args[2], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))\n alpha, beta = input_args[3], input_args[4]\n broadcasts_input = input_args[5]\n sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]), kwargs=dict(beta=beta, alpha=alpha),\n broadcasts_input=broadcasts_input))\n return tuple(sample_inputs)\n\ndef sample_inputs_addbmm(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = [((S, M), (S, S, S), (S, S, M), 1, 1),\n ((1,), (S, S, S), (S, S, M), 1, 1),\n ((S, M), (S, S, S), (S, S, M), 0.6, 0.2),\n ((1,), (S, S, S), (S, S, M), 0.6, 0.2),\n ((), (S, S, S), (S, S, M), 1, 1),\n ((), (S, S, S), (S, S, M), 0.6, 0.2),\n ]\n sample_inputs = []\n for input_args in test_cases:\n args = (make_tensor(input_args[0], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(input_args[1], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(input_args[2], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))\n alpha, beta = input_args[3], input_args[4]\n sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]), kwargs=dict(beta=beta, alpha=alpha)))\n if dtype.is_complex:\n sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]),\n kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j))))\n\n return tuple(sample_inputs)\n\ndef sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = [((S, S), (S, S), (S, S)),\n ((S, S), (S, 1), (1, S)),\n ((1,), (S, S, 1), (1, S)),\n ((), (), ()),\n ((S, S), (), ()),\n ((), (S, S, 1), (1, S)),\n ]\n\n sample_inputs = []\n for input_args in test_cases:\n args = tuple(make_tensor(arg, device, dtype, requires_grad=requires_grad) if isinstance(arg, tuple) else arg\n for arg in input_args)\n sample_inputs.append(SampleInput(args[0], args=args[1:]))\n\n sample_inputs.append(SampleInput(args[0], args=args[1:], kwargs=dict(value=3.14)))\n\n return tuple(sample_inputs)\n\ndef sample_inputs_baddbmm(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = [((S, S, M), (S, S, S), (S, S, M), 1, 1, False),\n ((1,), (S, S, S), (S, S, M), 1, 1, True),\n ((S, S, M), (S, S, 
S), (S, S, M), 0.6, 0.2, False),\n ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),\n ((), (S, S, S), (S, S, M), 1, 1, True),\n ((), (S, S, S), (S, S, M), 0.6, 0.2, True),\n ]\n sample_inputs = []\n for (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input) in test_cases:\n args = (make_tensor(input_shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(batch1_shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(batch2_shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))\n sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]),\n kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input))\n if dtype.is_complex:\n sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]),\n kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),\n broadcasts_input=broadcasts_input))\n return tuple(sample_inputs)\n\ndef sample_inputs_addr(op_info, device, dtype, requires_grad, **kwargs):\n input1 = SampleInput(\n make_tensor((S, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)))\n\n input2 = SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)))\n\n if dtype.is_complex:\n alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j\n elif dtype.is_floating_point:\n alpha, beta = 0.2, 0.6\n else:\n alpha, beta = 2, 3\n\n input3 = SampleInput(\n make_tensor((S, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),\n kwargs=dict(beta=beta, alpha=alpha))\n\n input4 = SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),\n kwargs=dict(beta=beta, alpha=alpha))\n\n return (input1, input2, input3, input4)\n\ndef sample_inputs_xlogy(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, S), device, dtype, low=0, high=None, requires_grad=requires_grad),\n )\n ),\n )\n\n\ndef sample_inputs_xlog1py(self, device, dtype, requires_grad):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def generator():\n # same shape\n yield SampleInput(make_arg((S, S)), args=(make_arg((S, S), low=-1),))\n # rhs broadcast\n yield SampleInput(make_arg((S, S)), args=(make_arg((S,), low=-1),))\n # all zero `x`\n with torch.no_grad():\n x = make_arg((S, S))\n x.fill_(0)\n yield SampleInput(x, args=(make_arg((S, S), low=-1),))\n\n # randomly zero-masked `x`\n x = make_arg((S, S))\n y = make_arg((S, S), low=-1)\n with torch.no_grad():\n x[torch.rand(x.shape) > 0.5] = 0\n yield SampleInput(x, args=(y,))\n\n # Scalar x\n # `input` has to be a tensor\n # yield SampleInput(0, args=(make_arg((S, S), low=-1),))\n # 
yield SampleInput(2.1, args=(make_arg((S, S), low=-1),))\n\n # Scalar y\n yield SampleInput(make_arg((S, S)), args=(-0.5,))\n yield SampleInput(make_arg((S, S)), args=(1.2,))\n\n return list(generator())\n\n\ndef sample_inputs_logsumexp(self, device, dtype, requires_grad):\n inputs = (\n ((), (0,), True),\n ((S, S), (1,), True),\n ((S, S), (1,), False)\n )\n samples = []\n\n for shape, dim, keepdim in inputs:\n t = make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(t, args=(dim, keepdim)))\n\n return tuple(samples)\n\ndef sample_inputs_logcumsumexp(self, device, dtype, requires_grad):\n inputs = (\n ((S, S, S), 0),\n ((S, S, S), 1),\n ((), 0),\n )\n samples = []\n\n for shape, dim in inputs:\n t = make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(t, args=(dim,)))\n\n return tuple(samples)\n\ndef sample_inputs_trace(self, device, dtype, requires_grad, **kwargs):\n return (SampleInput((make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))),)\n\n\ndef sample_inputs_transpose_swapdims(self, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((1, 2, 3), (-1, -2)),\n ((1, 2, 3), (-1, 2)),\n ((1, 2, 3), (1, -2)),\n ((1, 2, 3), (1, 2)),\n ((), (0, 0)),\n ((1, ), (0, 0)),\n ((M, M), (0, 1)),\n ((S, S, S), (2, 0)), )\n\n def generator():\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n return list(generator())\n\n\ndef sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates always invertible input for linear algebra ops using\n random_fullrank_matrix_distinct_singular_value.\n The input is generated as the itertools.product of 'batches' and 'ns'.\n In total this function generates 8 SampleInputs\n 'batches' cases include:\n () - single input,\n (0,) - zero batched dimension,\n (2,) - batch of two matrices,\n (1, 1) - 1x1 batch of matrices\n 'ns' gives 0x0 and 5x5 matrices.\n Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.\n \"\"\"\n from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n batches = [(), (0, ), (2, ), (1, 1)]\n ns = [5, 0]\n out = []\n for batch, n in product(batches, ns):\n a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)\n a.requires_grad = requires_grad\n out.append(SampleInput(a))\n return out\n\ndef np_sinc_with_fp16_as_fp32(x):\n # Wraps numpy's sinc function so that fp16 values are promoted to fp32\n # before sinc is invoked. 
Context: numpy's sinc returns NaN when evaluated\n # at 0 for fp16.\n if x.dtype == np.float16:\n return np.sinc(x.astype(np.float32))\n else:\n return np.sinc(x)\n\ndef sample_inputs_broadcast_to(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((S, 1, 1), (S, S, S)),\n ((S, 1, S), (S, S, S)),\n ((S, 1), (S, S, S)),\n ((1,), (S, S, S)),\n ((1, S), (1, 1, S)),\n ((), ()),\n ((), (1, 3, 2)),\n )\n\n return tuple(\n SampleInput(\n make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(shape,)) for size, shape in test_cases)\n\ndef sample_inputs_cdist(op_info, device, dtype, requires_grad, **kwargs):\n small_S = 2\n test_cases = (\n ((S, S, 2), (S, S + 1, 2)),\n ((S, S), (S, S)),\n ((S, S, S), (S, S, S)),\n ((3, 5), (3, 5)),\n ((2, 3, 5), (2, 3, 5)),\n ((1, 2, 3), (1, 2, 3)),\n ((1, 1), (S, 1)),\n ((0, 5), (4, 5)),\n ((4, 5), (0, 5)),\n ((0, 4, 5), (3, 5)),\n ((4, 5), (0, 3, 5)),\n ((0, 4, 5), (1, 3, 5)),\n ((1, 4, 5), (0, 3, 5)),\n # Using S here would make this one test take 9s\n ((small_S, small_S, small_S + 1, 2), (small_S, small_S, small_S + 2, 2)),\n ((small_S, 1, 1, small_S), (1, small_S, small_S)),\n ((1, 1, small_S), (small_S, 1, small_S, small_S)),\n )\n\n samples = []\n for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:\n for p in [0, 1, 2, 3, 0.5, 1.5, 2.5, float(\"inf\")]:\n for t1_size, t2_size in test_cases:\n # The args should never be non-contiguous as this is not supported in the backward\n samples.append(SampleInput(\n make_tensor(t1_size, device, dtype, requires_grad=requires_grad, noncontiguous=False),\n args=(make_tensor(t2_size, device, dtype, requires_grad=requires_grad, noncontiguous=False), p, cm)))\n\n return samples\n\ndef sample_inputs_comparison_ops(self, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((S, S, S), (S, S, S), False),\n ((S, S, S), (), False),\n ((S, S, S), (1,), False),\n ((S,), (1,), False),\n ((), (), False),\n )\n test_cases_lhs_broadcasting = (\n ((S, 1, S), (S, S, S), True),\n ((1,), (S, S, S), True),\n ((1, S), (1, 1, S), True),\n ((), (0,), True),\n ((), (S, S, S), True),\n )\n cases = test_cases + test_cases_lhs_broadcasting\n sample_inputs = list(SampleInput(make_tensor(first_shape, device, dtype,\n requires_grad=requires_grad),\n args=(make_tensor(second_shape, device, dtype,\n requires_grad=requires_grad),),\n broadcasts_input=broadcasts_input)\n for first_shape, second_shape, broadcasts_input in cases)\n equal_tensors_non_bool = (\n ([[[-8, 6], [9, 0]], [[0, 5], [5, 7]]]),\n ([[[6, 5]], [[1, -5]]]),\n ([[2], [-1]]),\n ([0, -6]),\n ([3],),\n )\n equal_tensors_bool = (\n ([[[1, 0], [0, 0]], [[0, 1], [1, 0]]]),\n ([[[1, 1]], [[1, 0]]]),\n ([[1], [0]]),\n ([0, 1]),\n ([1],),\n )\n more_cases = equal_tensors_bool if dtype is torch.bool else equal_tensors_non_bool\n more_inputs = list(SampleInput(torch.tensor(elements, device=device, dtype=dtype,\n requires_grad=requires_grad),\n args=(torch.tensor(elements, device=device, dtype=dtype,\n requires_grad=requires_grad),))\n for elements in more_cases)\n sample_inputs = [*sample_inputs, *more_inputs]\n return tuple(sample_inputs)\n\ndef sample_inputs_div(self, device, dtype, requires_grad, rounding_mode=None, **kwargs):\n a = make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n is_integral = not dtype.is_floating_point and not dtype.is_complex\n b = make_tensor((S, S, S), device, dtype, low=1 if is_integral else 0.1, high=None,\n requires_grad=requires_grad)\n\n kwargs = 
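# A minimal sketch exercising np_sinc_with_fp16_as_fp32 defined above: float16 input
# is promoted to float32 before np.sinc, so the value at 0 is the expected 1.0 rather
# than the NaN produced by numpy's float16 path. Assumes numpy is imported as np at
# module level; the helper name is illustrative.
def _example_np_sinc_fp16_promotion():
    x = np.zeros((3,), dtype=np.float16)
    y = np_sinc_with_fp16_as_fp32(x)
    assert not np.isnan(y).any()
    return y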
None # type: ignore[assignment]\n if rounding_mode is not None:\n kwargs = dict(rounding_mode=rounding_mode)\n\n return (\n SampleInput(a, args=(b,), kwargs=kwargs),\n SampleInput(a, args=(2,)),\n )\n\ndef sample_inputs_stack(op_info, device, dtype, requires_grad, **kwargs):\n tensors = [\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n ]\n\n return (SampleInput(tensors, args=(0,)),)\n\ndef sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad, **kwargs):\n tensors = [\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n ]\n\n return (SampleInput(tensors),)\n\ndef sample_inputs_hypot(op_info, device, dtype, requires_grad):\n input = make_tensor((S, S), device, dtype, requires_grad=requires_grad)\n args = make_tensor((S, S), device, dtype, requires_grad=requires_grad)\n\n return (\n SampleInput(input, args=(args,)),\n )\n\ndef sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, gather_variable((S, S), 1, M, True, device=device))),\n SampleInput(\n make_tensor((M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(1, gather_variable((M, S // 2), 0, S, True, device=device))),\n SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, torch.tensor([0], dtype=torch.int64, device=device))),\n SampleInput(\n make_tensor((S,), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, torch.tensor(0, dtype=torch.int64, device=device))),\n SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, torch.tensor(0, dtype=torch.int64, device=device))),\n )\n\n\ndef sample_inputs_take_along_dim(op_info, device, dtype, requires_grad, **kwargs):\n return (SampleInput(make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((S, S), 1, S, True, device=device), 0)),\n\n # `indices` broadcast\n SampleInput(make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((1, S // 2), 0, S, True, device=device), 1)),\n\n # `self` broadcast\n SampleInput(make_tensor((1, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((S, S // 2), 0, S, True, device=device), 1)),\n\n # without `dim` arg\n SampleInput(make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((S, S // 2), 0, S, True, device=device), )),\n SampleInput(make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((S, S // 2), 0, S, True, device=device),)),\n )\n\ndef sample_inputs_amax_amin(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((S, S, S), ()),\n ((S, S, S), (1,)),\n ((S, S, S), ((1, 2,),)),\n ((S, S, S), (1, True,)),\n ((), (0,)),\n ((), ()),\n ((), (0, True,)),\n )\n return tuple(SampleInput((make_tensor(size, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)),\n args=args)\n for size, args in test_cases)\n\ndef 
sample_inputs_argmax_argmin(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((2, 2, 2), ()),\n ((2, 2, 2), (0,)),\n ((2, 2, 2), (1,)),\n ((2, 2, 2), (2,)),\n ((2, 2, 2), (2, True,)),\n ((2, 2, 2), (None,)),\n ((), (0,)),\n ((), ()),\n ((), (None, True,)),\n ((1,), ()),\n ((1,), (0,)),\n ((1,), (0, True)),\n ((2,), ()),\n ((2,), (0,)),\n ((2,), (0, True)),\n ((2, 2, 3), ()),\n ((2, 2, 3), (0,)),\n ((2, 2, 3), (1,)),\n ((2, 2, 3), (None, True)),\n )\n return tuple(SampleInput((make_tensor(size, device, dtype,\n requires_grad=requires_grad)),\n args=args)\n for size, args in test_cases)\n\ndef sample_inputs_diff(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((1,), 0, None, None),\n ((S,), 0, None, None),\n ((S, 1), 0, None, None),\n ((S, 1), 1, None, None),\n ((S, S), 0, None, None),\n ((S, S), 1, None, None),\n ((S, S), 0, (1, S), (2, S)),\n ((S, S), 0, None, (2, S)),\n ((S, S, S), 1, None, None),\n ((S, S, S), 1, (S, 1, S), (S, 1, S)),)\n\n sample_inputs = []\n for size, dim, size_prepend, size_append in test_cases:\n args = (make_tensor(size, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad), 1, dim,\n make_tensor(size_prepend, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad) if size_prepend else None,\n make_tensor(size_append, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad) if size_append else None)\n sample_inputs.append(SampleInput(args[0], args=(args[1], args[2])))\n\n return tuple(sample_inputs)\n\ndef sample_inputs_gradient(op_info, device, dtype, requires_grad):\n sample_inputs = []\n test_cases_float = (\n ((S,), None, None),\n ((S,), 2., None),\n ((S, S), None, None),\n ((S, S), [2.0, 2.1], None),\n ((S, S), [2.0, 2.1], (0, 1)),\n ((4, 4, 4), [2., 1.], (0, 1)),\n )\n for size, spacing, dim in test_cases_float:\n t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)\n sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=spacing)))\n\n test_cases_tensor = (\n ((3, 3, 3), ((1.1, 2.0, 3.5), (4.0, 2, 6.0)), (0, -1)),\n ((3, 3, 3), ((1.0, 3.0, 2.0), (8.0, 6.0, 1.0)), (0, 1)),\n )\n for size, coordinates, dim in test_cases_tensor:\n t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)\n coordinates_tensor_list = []\n for coords in coordinates:\n a = torch.tensor(coords, dtype=dtype, device=device)\n coordinates_tensor_list.append(a)\n sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=coordinates_tensor_list)))\n\n return tuple(sample_inputs)\n\ndef sample_inputs_index_select(op_info, device, dtype, requires_grad):\n return (\n SampleInput(\n make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, index_variable(2, S, device=device))),\n SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, torch.tensor([0], dtype=torch.int64, device=device))),\n SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, torch.tensor(0, dtype=torch.int64, device=device))),\n )\n\ndef sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs):\n test_args = [\n (dont_convert([1, 2]),),\n (slice(0, 3),),\n (dont_convert([slice(0, 3), 1]),),\n (dont_convert([[0, 2, 3], [1, 3, 3], [0, 0, 2]]),),\n (dont_convert([[0, 0, 3], [1, 1, 3], [0, 0, 2]]),),\n (dont_convert([slice(None), slice(None), [0, 3]]),),\n (dont_convert([slice(None), [0, 3], slice(None)]),),\n 
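# A minimal sketch showing that the kwargs produced by sample_inputs_gradient above
# are meant to map directly onto torch.gradient's keyword arguments (dim plus either
# a scalar/list spacing or per-dimension coordinate tensors). Assumes torch is
# imported at module level; op_info is unused by the sample function, so None is
# passed, and the helper name is illustrative.
def _example_consume_gradient_samples():
    results = []
    for sample in sample_inputs_gradient(None, 'cpu', torch.float32, requires_grad=False):
        results.append(torch.gradient(sample.input, **sample.kwargs))
    return results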
(dont_convert([[0, 3], slice(None), slice(None)]),),\n (dont_convert([[0, 3], [1, 2], slice(None)]),),\n (dont_convert([[0, 3], ]),),\n (dont_convert([[0, 3], slice(None)]),),\n (dont_convert([[0, 3], Ellipsis]),),\n (dont_convert([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])]),),\n (index_variable(2, S, device=device),),\n (mask_not_all_zeros((S,)),),\n ]\n\n return tuple(SampleInput(\n make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=args)\n for args in test_args)\n\ndef sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs):\n inputs = []\n for accumulate in [False, True]:\n # Test with indices arg\n inputs.append(SampleInput(\n make_tensor((S, S,), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n (index_variable(2, S, device=device), ),\n make_tensor((2, S), device, dtype, low=None, high=None)),\n kwargs=dict(accumulate=accumulate)))\n\n # Test with mask arg\n mask = torch.zeros(S, dtype=torch.bool) if accumulate else mask_not_all_zeros((S,))\n inputs.append(SampleInput(\n make_tensor((S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n (mask, ),\n make_tensor((S,), device, dtype, low=None, high=None),),\n kwargs=dict(accumulate=accumulate)))\n\n return inputs\n\n# Missing to test the nondeterminism of the operation\n# https://github.com/pytorch/pytorch/issues/53352\ndef sample_inputs_index_add(op_info, device, dtype, requires_grad, **kwargs):\n # These testa are pretty much the same as those from index_copy.\n # Perhaps merge?\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n t = make_arg((S, S))\n s = make_arg((S, S))\n # non-contiguous target\n t_nonctg = t.transpose(0, 1)\n # non-contiguous source\n s_nonctg = s.transpose(0, 1)\n\n idx = make_arg((S,), dtype=torch.int64, low=0, high=S)\n idx_nonctg = make_arg((S,), dtype=torch.int64, low=0, high=S, noncontiguous=True)\n samples = [SampleInput(tensor, args=(1, idx, source))\n for tensor, idx, source in product([t, t_nonctg], [idx, idx_nonctg], [s, s_nonctg])]\n samples.extend(SampleInput(tensor, args=(1, idx, source), kwargs=dict(alpha=a))\n for tensor, idx, source, a in product([t, t_nonctg], [idx, idx_nonctg], [s, s_nonctg], [-1, 0, 2]))\n\n # Add scalar cases\n scalar_sizes = [(), (1,)]\n ts = (make_arg(size) for size in scalar_sizes)\n idxs = (make_arg(size, dtype=torch.int64, low=0, high=1) for size in scalar_sizes)\n ss = (make_arg(size) for size in scalar_sizes)\n\n samples.extend(SampleInput(t, args=(0, idx, s)) for t, idx, s in product(ts, idxs, ss))\n samples.extend(SampleInput(t, args=(0, idx, s), kwargs=dict(alpha=a)) for t, idx, s, a in product(ts, idxs, ss, [-1, 0, 2]))\n return samples\n\ndef sample_inputs_sort(op_info, device, dtype, requires_grad, **kwargs):\n def apply_grad(t):\n if dtype in floating_types_and(torch.float16, torch.bfloat16):\n t.requires_grad_(requires_grad)\n\n def small_3d_unique(dtype, device):\n res = torch.randperm(S * S * S, dtype=torch.int64, device=device).view(S, S, S)\n res = res.to(dtype)\n apply_grad(res)\n return res\n\n def large_1d_unique(dtype, device):\n res = torch.randperm(L * L * L, dtype=torch.int64, device=device)\n res = res.to(dtype)\n apply_grad(res)\n return res\n\n samples = []\n # Test case for large tensor.\n largesample = SampleInput(large_1d_unique(dtype, device))\n samples.append(largesample)\n\n # Test cases for small 3d tensors.\n # Imitates legacy tests from test/test_torch.py\n t = 
small_3d_unique(dtype, device)\n dims = range(-3, 3)\n flag = [True, False]\n for dim, descending, stable in product(dims, flag, flag):\n # default schema without stable sort\n samples.append(SampleInput(t, args=(dim, descending)))\n # schema with stable sort, no CUDA support yet\n if torch.device(device).type == 'cpu':\n samples.append(\n SampleInput(t, kwargs=dict(dim=dim, descending=descending, stable=stable))\n )\n\n # Test cases for scalar tensor\n scalar = torch.tensor(1, dtype=dtype, device=device)\n apply_grad(scalar)\n samples.append(SampleInput(scalar))\n samples.append(SampleInput(scalar, args=(0,)))\n samples.append(SampleInput(scalar, args=(0, True)))\n # no CUDA support for stable sort yet\n if not device.startswith('cuda'):\n samples.append(SampleInput(scalar, kwargs=dict(stable=True)))\n samples.append(SampleInput(scalar, kwargs=dict(dim=0, stable=True)))\n samples.append(SampleInput(scalar, kwargs=dict(dim=0, descending=True, stable=True)))\n return samples\n\ndef sample_inputs_index_fill(op_info, device, dtype, requires_grad, **kwargs):\n samples = []\n t = make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)\n fill_val = torch.tensor(-1 + 1j if t.is_complex() else -1)\n # non-contiguous input\n t01 = t.transpose(0, 1)\n t02 = t.transpose(0, 2)\n t12 = t.transpose(1, 2)\n idx = index_variable(1, S, device=device)\n # non-contiguous index\n idx_nonctg = torch.empty_strided((S,), (2,), device=device, dtype=torch.int64)\n idx_nonctg.copy_(idx)\n for d in range(t.dim()):\n for tensor in [t, t01, t02, t12]:\n samples.append(SampleInput(tensor, args=(d, idx, fill_val)))\n samples.append(SampleInput(tensor, args=(d, -idx - 1, fill_val)))\n samples.append(SampleInput(tensor, args=(d, idx_nonctg, fill_val)))\n\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n index_tensor = partial(torch.tensor, device=device, dtype=torch.long)\n\n def unique_idx(numel, max_idx):\n # Generate unique random indices vector of `numel`\n # elements in range [0, max_idx).\n indices = random.sample(range(max_idx), numel)\n return index_tensor(indices)\n\n samples.append(SampleInput(make_arg((S, S)), args=(0, unique_idx(2, S), 2)))\n samples.append(SampleInput(make_arg((S, S)), args=(0, unique_idx(2, S), make_arg(()))))\n samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor(0), 2)))\n samples.append(SampleInput(make_arg(()), args=(0, index_tensor([0]), 2)))\n samples.append(SampleInput(make_arg(()), args=(0, index_tensor(0), 2)))\n\n # Duplicate indices\n samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor([0, 0]), 2)))\n samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor([0, 0, 2]), make_arg(()))))\n\n return samples\n\ndef sample_inputs_max_min_binary(op_info, device, dtype, requires_grad, **kwargs):\n inputs = []\n args_for_binary_op = (\n ((S, S, S), (S, S, S),),\n ((S, S, S), (S,),),\n ((S,), (S, S, S),),\n ((S, 1, S), (S, S),),\n ((S, S), (S, S),),\n ((), (),),\n ((S, S, S), (),),\n ((), (S, S, S),),\n )\n inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(make_tensor(other_tensor, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),),))\n for input_tensor, other_tensor in args_for_binary_op)\n return inputs\n\ndef sample_inputs_hardswish(self, device, dtype, requires_grad):\n N = 5\n # make sure we are testing -3 -> 3 range. 
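# A minimal sketch of the stable-sort kwarg that the sort samples above only generate
# for CPU devices: stable=True preserves the relative order of equal elements.
# Assumes torch is imported at module level; the helper name is illustrative.
def _example_stable_sort():
    t = torch.tensor([3, 1, 2, 1])
    values, indices = torch.sort(t, stable=True)
    # The two equal elements (value 1, original positions 1 and 3) keep their order.
    assert indices[:2].tolist() == [1, 3]
    return values, indices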
default is -10 -> 10 so maybe unnecessary ?\n tensors = [SampleInput(make_tensor((N * 2, N * 2), device=device, dtype=dtype,\n requires_grad=requires_grad, low=-5, high=5)) for _ in range(1, N)]\n return tensors\n\ndef sample_inputs_max_min_reduction_with_dim(op_info, device, dtype, requires_grad, **kwargs):\n inputs = []\n args_for_reduction_with_dim = (\n ((S, S, S), (1,),),\n ((S, S, S), (1, True, ),),\n ((), (0,),),\n ((), (0, True,),),\n )\n inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=args,))\n for input_tensor, args in args_for_reduction_with_dim)\n return inputs\n\ndef sample_inputs_max_min_reduction_no_dim(op_info, device, dtype, requires_grad, **kwargs):\n inputs = []\n inputs.append(SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),))\n inputs.append(SampleInput(make_tensor((), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),))\n return inputs\n\n# Generates input tensors for testing reduction ops\ndef _generate_reduction_inputs(device, dtype, requires_grad):\n yield make_tensor((), device, dtype, requires_grad=requires_grad)\n yield make_tensor((2,), device, dtype, requires_grad=requires_grad)\n yield make_tensor((2, 3), device, dtype, requires_grad=requires_grad, noncontiguous=True)\n yield make_tensor((3, 2, 1, 2, 2), device, dtype, requires_grad=requires_grad)\n\n# Generates a subset of possible dim and keepdim kwargs for a tensor\n# with ndim dims appropriate for testing. If supports_multiple_dims\n# is True (default) then dim kwarg can be a list of dims.\ndef _generate_reduction_kwargs(ndim, supports_multiple_dims=True):\n for keepdim in [True, False]:\n # Always test reducing inner and outer most dimensions\n yield {'dim': 0, 'keepdim': keepdim}\n yield {'dim': -1, 'keepdim': keepdim}\n\n # Also reduce middle dimension\n if ndim > 2:\n yield {'dim': ndim // 2, 'keepdim': keepdim}\n\n if supports_multiple_dims:\n # Always test reducing all dims\n yield {'dim': tuple(range(ndim)), 'keepdim': keepdim}\n\n # Test reducing both first and last dimensions\n if ndim > 1:\n yield {'dim': (0, ndim - 1), 'keepdim': keepdim}\n\n # Test reducing every other dimension starting with the second\n if ndim > 3:\n yield {'dim': tuple(range(1, ndim, 2)), 'keepdim': keepdim}\n\n# Wraps sample_inputs_reduction function to provide the additional supports_multiple_dims args\ndef sample_inputs_reduction_wrapper(supports_multiple_dims):\n # Generates sample inputs for reduction ops that contain the input tensor\n # and dim and keepdim kwargs. 
If a reduction op needs to test additional\n # args/kwargs then create a separate sample_inputs function\n def fn(op_info, device, dtype, requires_grad):\n inputs = []\n\n for t in _generate_reduction_inputs(device, dtype, requires_grad):\n # Add case without dim and keepdim kwargs\n inputs.append(SampleInput(t))\n for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):\n inputs.append(SampleInput(t, kwargs=kwargs))\n\n return inputs\n\n return fn\n\ndef sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad):\n test_quantiles = (0.5, make_tensor((2,), device, dtype, low=0, high=1))\n test_interpolations = ['linear', 'midpoint']\n\n inputs = []\n for quantiles in test_quantiles:\n for t in _generate_reduction_inputs(device, dtype, requires_grad):\n # Add case without dim and keepdim kwargs\n inputs.append(SampleInput(t, args=(quantiles,)))\n for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims=False):\n # Interpolation kwarg for now is only supported when providing both dim and keepdim\n for interpolation in test_interpolations:\n kwargs['interpolation'] = interpolation\n inputs.append(SampleInput(t, args=(quantiles,), kwargs=kwargs))\n\n return inputs\n\ndef sample_inputs_topk(op_info, device, dtype, requires_grad, **kwargs):\n def get_tensor_input(size):\n return make_tensor(size, device, dtype, requires_grad=requires_grad)\n\n inputs = []\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3,)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True, True)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True, True)))\n\n inputs.append(SampleInput(get_tensor_input(()), args=(1,)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, 0)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, -1)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True, True)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True, True)))\n\n return inputs\n\ndef sample_inputs_outer(op_info, device, dtype, requires_grad, **kwargs):\n inputs = []\n arg_a = make_tensor((S,), device, dtype, requires_grad=requires_grad)\n arg_b = make_tensor((M,), device, dtype, requires_grad=requires_grad)\n inputs.append(SampleInput(arg_a, args=(arg_b,)))\n return inputs\n\ndef sample_inputs_dist(op_info, device, dtype, requires_grad):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n sizes = ((S, S, S), (S,), (S, 1, S), (), (S, S))\n ps = (2, 4)\n\n def generate_samples():\n for size_x, size_y, p in product(sizes, sizes, ps):\n yield SampleInput(make_arg(size_x), args=(make_arg(size_y), p))\n\n return list(generate_samples())\n\n# Missing to test the nondeterminism of the operation\n# https://github.com/pytorch/pytorch/issues/53352\ndef sample_inputs_index_copy(op_info, device, dtype, requires_grad, **kwargs):\n def make_arg(shape, low=None, high=None, dtype=dtype):\n return make_tensor(shape, device=device, dtype=dtype,\n low=low, high=high,\n requires_grad=requires_grad)\n\n t = make_arg((S, S))\n s = 
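# A minimal sketch of how sample_inputs_reduction_wrapper above is meant to be used:
# it returns a closure with the usual sample_inputs signature, so an OpInfo entry can
# register it directly as its sample_inputs_func. Assumes torch is imported at module
# level; the op_info argument is unused by the closure, and the helper name is
# illustrative.
def _example_reduction_wrapper_usage():
    sample_inputs_func = sample_inputs_reduction_wrapper(supports_multiple_dims=False)
    samples = sample_inputs_func(None, 'cpu', torch.float32, requires_grad=False)
    # With supports_multiple_dims=False no generated kwargs ever uses a tuple dim.
    assert all(not isinstance(s.kwargs.get('dim'), tuple) for s in samples)
    return samples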
make_arg((S, S))\n    # non-contiguous input\n    t01 = t.transpose(0, 1)\n    # non-contiguous source\n    s01 = s.transpose(0, 1)\n\n    # idx is a permutation of 0...S-1 for this function to be deterministic\n    idx = torch.randperm(S, device=device, dtype=torch.int64)\n    # non-contiguous index\n    idx_nonctg = torch.repeat_interleave(idx, 2, dim=-1)[::2]\n    # index_copy_ does not support negative indices\n    # idx_neg = -idx - 1\n    samples = [SampleInput(tensor, args=(1, idx, source))\n               for tensor, idx, source in product([t, t01], [idx, idx_nonctg], [s, s01])]\n\n    # Add scalar cases\n    scalar_sizes = [(), (1,)]\n    ts = (make_arg(size) for size in scalar_sizes)\n    idxs = (make_arg(size, dtype=torch.int64, low=0, high=1) for size in scalar_sizes)\n    ss = (make_arg(size) for size in scalar_sizes)\n\n    samples.extend(SampleInput(t, args=(0, idx, s)) for t, idx, s in product(ts, idxs, ss))\n    return samples\n\ndef sample_inputs_mode(op_info, device, dtype, requires_grad):\n    inputs = []\n    args = (\n        ((S, S, S), (),),\n        ((S, S, S), (1, ),),\n        ((S, S, S), (1, True, ),),\n        ((), (),),\n        ((), (0,),),\n        ((), (0, True,),),\n    )\n    inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,\n                                           low=None, high=None,\n                                           requires_grad=requires_grad),\n                               args=args,))\n                  for input_tensor, args in args)\n    return inputs\n\n# Missing to test the nondeterminism of the operation\n# https://github.com/pytorch/pytorch/issues/53352\ndef sample_inputs_put(op_info, device, dtype, requires_grad):\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n    make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)\n\n    S = 3\n\n    def gen_inputs():\n        # Generic inputs\n        tgt_gen = (make_arg((S, S), noncontiguous=not ctg) for ctg in (True, False))\n        src_gen = (make_arg((S,), noncontiguous=not ctg) for ctg in (True, False))\n        idx = torch.randperm(S * S, device=device, dtype=torch.int64)[:S]\n        idx_nonctg = torch.repeat_interleave(idx, 2, dim=-1)[::2]\n        idx_neg = -idx - 1\n        idx_list = [idx, idx_nonctg, idx_neg]\n        for tgt, idx, src, acc in product(tgt_gen, idx_list, src_gen, (True, False)):\n            yield SampleInput(input=tgt, args=(idx, src, acc))\n\n        # Scalar cases\n        scalar_sizes = [(), (1,)]\n        tgt_gen = (make_arg(size) for size in scalar_sizes)\n        idx_gen = (make_idx(size, high=1) for size in scalar_sizes)\n        src_gen = (make_arg(size) for size in scalar_sizes)\n        for tgt, idx, src, acc in product(tgt_gen, idx_gen, src_gen, (True, False)):\n            yield SampleInput(input=tgt, args=(idx, src, acc))\n\n        # Empty cases\n        tgt_sizes = [(0,), (), (1,), (3, 2)]\n        tgt_gen = (make_arg(size) for size in tgt_sizes)\n        idx = make_idx((0,), high=1)\n        src = make_arg((0,))\n        for tgt, acc in product(tgt_gen, (True, False)):\n            yield SampleInput(input=tgt, args=(idx, src, acc))\n\n    return list(gen_inputs())\n\ndef sample_inputs_take(op_info, device, dtype, requires_grad):\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n    make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)\n\n    S = 3\n\n    def gen_inputs():\n        # Generic inputs: take S elements out of S * S\n        src_gen = (make_arg((S, S), noncontiguous=not ctg) for ctg in (True, False))\n        idx = make_idx((S,), high=S * S)\n        idx_nonctg = make_idx((S,), high=S * S, noncontiguous=True)\n        idx_neg = -idx - 1\n        idx_list = [idx, idx_nonctg, idx_neg]\n        for src, idx in product(src_gen, idx_list):\n            yield SampleInput(input=src, args=(idx,))\n\n        # Scalar cases\n        scalar_sizes = [(), (1,)]\n        src_gen = 
(make_arg(size) for size in scalar_sizes)\n idx_gen = (make_idx(size, high=1) for size in scalar_sizes)\n for src, idx in product(src_gen, idx_gen):\n yield SampleInput(input=src, args=(idx,))\n\n # Empty cases\n src_sizes = [(0,), (), (1,), (3, 2)]\n src_gen = (make_arg(size) for size in src_sizes)\n idx = make_idx((0,), high=1)\n for src in src_gen:\n yield SampleInput(input=src, args=(idx,))\n\n return list(gen_inputs())\n\ndef sample_movedim_moveaxis(op_info, device, dtype, requires_grad):\n return (\n SampleInput(\n make_tensor((4, 3, 2, 1), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=((0, 1, 2, 3), (3, 2, 1, 0))),\n SampleInput(\n make_tensor((4, 3, 2, 1), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=((0, -1, -2, -3), (-3, -2, -1, -0)))\n )\n\n\ndef sample_repeat_tile(op_info, device, dtype, requires_grad, **kwargs):\n rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),)\n shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1))\n\n if requires_grad:\n # Tests for variant_consistency_jit, grad, gradgrad\n # are slower. Use smaller bags of `rep_dims` and `shapes`\n # in this case.\n rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1)) # type: ignore[assignment]\n shapes = ((), (0,), (2,), (3, 2)) # type: ignore[assignment]\n\n tensors = [make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad) for shape in shapes]\n\n samples = []\n for rep_dim, tensor in product(rep_dims, tensors):\n for t in (tensor, tensor.T):\n if op_info.name == 'repeat' and len(rep_dim) >= t.dim():\n # `torch.repeat` errors for `len(rep_dims) < t.dim()`,\n # so we filter such combinations.\n samples.append(SampleInput(t, args=(rep_dim,),))\n elif op_info.name == 'tile':\n samples.append(SampleInput(t, args=(rep_dim,),))\n\n return samples\n\ndef sample_unsqueeze(op_info, device, dtype, requires_grad, **kwargs):\n shapes_and_axes = [\n ((3, 4, 5), 0),\n ((3, 4, 5), 1),\n ((3, 4, 5), 3),\n ((3, 4, 5), -1),\n ((3, 4, 5), -3),\n ((), 0)\n ]\n\n samples = []\n for shape, axis in shapes_and_axes:\n tensor = make_tensor(shape, device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(tensor, args=(axis,),))\n\n return samples\n\n# TODO: reconcile with torch.linalg.det and torch.linalg.slogdet\n# Creates matrices with a positive nonzero determinant\ndef sample_inputs_logdet(op_info, device, dtype, requires_grad, **kwargs):\n def make_nonzero_det(A, *, sign=1, min_singular_value=0.1, **kwargs):\n u, s, vh = torch.linalg.svd(A, full_matrices=False)\n s.clamp_(min=min_singular_value)\n A = (u * s.unsqueeze(-2)) @ vh\n det = A.det()\n if sign is not None:\n if A.dim() == 2:\n if (det < 0) ^ (sign < 0):\n A[0, :].neg_()\n else:\n cond = ((det < 0) ^ (sign < 0)).nonzero()\n if cond.size(0) > 0:\n for i in range(cond.size(0)):\n A[list(cond[i])][0, :].neg_()\n return A\n\n samples = []\n\n # cases constructed using make_tensor()\n tensor_shapes = (\n (S, S),\n (1, 1),\n (3, 3, S, S),\n (3, 3, 1, 1)\n )\n\n for shape in tensor_shapes:\n t = make_tensor(shape, device=device, dtype=dtype)\n d = make_nonzero_det(t).requires_grad_(requires_grad)\n samples.append(SampleInput(d))\n\n # cases constructed using:\n # 1) make_symmetric_matrices\n # 2) make_symmetric_pd_matrices\n # 3) make_fullrank_matrices_with_distinct_singular_values\n symmetric_shapes = (\n (S, S),\n (3, S, S),\n )\n\n\n def _helper(constructor, *shape, **kwargs):\n t = constructor(*shape, 
device=device, dtype=dtype)\n d = make_nonzero_det(t, **kwargs).requires_grad_(requires_grad)\n samples.append(SampleInput(d))\n\n for shape in symmetric_shapes:\n _helper(make_symmetric_matrices, *shape)\n _helper(make_symmetric_pd_matrices, *shape)\n _helper(make_fullrank_matrices_with_distinct_singular_values, *shape, min_singular_value=0)\n\n return tuple(samples)\n\ndef np_unary_ufunc_integer_promotion_wrapper(fn):\n # Wrapper that passes PyTorch's default scalar\n # type as an argument to the wrapped NumPy\n # unary ufunc when given an integer input.\n # This mimicks PyTorch's integer->floating point\n # type promotion.\n #\n # This is necessary when NumPy promotes\n # integer types to double, since PyTorch promotes\n # integer types to the default scalar type.\n\n # Helper to determine if promotion is needed\n def is_integral(dtype):\n return dtype in [np.bool_, bool, np.uint8, np.int8, np.int16, np.int32, np.int64]\n\n # NOTE: Promotion in PyTorch is from integer types to the default dtype\n np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]\n\n @wraps(fn)\n def wrapped_fn(x):\n if is_integral(x.dtype):\n return fn(x, dtype=np_dtype)\n return fn(x)\n\n return wrapped_fn\n\n\n# Metadata class for Fast Fourier Transforms in torch.fft.\nclass SpectralFuncInfo(OpInfo):\n \"\"\"Operator information for torch.fft transforms. \"\"\"\n\n def __init__(self,\n name, # the string name of the function\n *,\n ref=None, # Reference implementation (probably in np.fft namespace)\n dtypes=floating_and_complex_types(),\n ndimensional: bool, # Whether dim argument can be a tuple\n decorators=None,\n **kwargs):\n decorators = list(decorators) if decorators is not None else []\n decorators += [\n skipCPUIfNoMkl,\n skipCUDAIfRocm,\n # gradgrad is quite slow\n DecorateInfo(slowTest, 'TestGradients', 'test_fn_gradgrad'),\n ]\n\n super().__init__(name=name,\n dtypes=dtypes,\n decorators=decorators,\n **kwargs)\n self.ref = ref if ref is not None else _getattr_qual(np, name)\n self.ndimensional = ndimensional\n\n\n def sample_inputs(self, device, dtype, requires_grad=False, **kwargs):\n nd_tensor = make_tensor((S, S + 1, S + 2), device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n tensor = make_tensor((31,), device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n\n if self.ndimensional:\n return [\n SampleInput(nd_tensor, kwargs=dict(s=(3, 10), dim=(1, 2), norm='ortho')),\n SampleInput(nd_tensor, kwargs=dict(norm='ortho')),\n SampleInput(nd_tensor, kwargs=dict(s=(8,))),\n SampleInput(tensor),\n\n *(SampleInput(nd_tensor, kwargs=dict(dim=dim))\n for dim in [-1, -2, -3, (0, -1)]),\n ]\n else:\n return [\n SampleInput(nd_tensor, kwargs=dict(n=10, dim=1, norm='ortho')),\n SampleInput(nd_tensor, kwargs=dict(norm='ortho')),\n SampleInput(nd_tensor, kwargs=dict(n=7)),\n SampleInput(tensor),\n\n *(SampleInput(nd_tensor, kwargs=dict(dim=dim))\n for dim in [-1, -2, -3]),\n ]\n\n\nclass ShapeFuncInfo(OpInfo):\n \"\"\"Early version of a specialized OpInfo for Shape manipulating operations like tile and roll\"\"\"\n def __init__(self,\n name, # the string name of the function\n *,\n ref, # a reference function\n dtypes=floating_types(),\n dtypesIfCPU=None,\n dtypesIfCUDA=None,\n dtypesIfROCM=None,\n sample_inputs_func=None,\n **kwargs):\n super(ShapeFuncInfo, self).__init__(name,\n dtypes=dtypes,\n dtypesIfCPU=dtypesIfCPU,\n dtypesIfCUDA=dtypesIfCUDA,\n dtypesIfROCM=dtypesIfROCM,\n sample_inputs_func=sample_inputs_func,\n **kwargs)\n self.ref = ref\n\ndef 
sample_inputs_foreach(self, device, dtype, N):\n tensors = [make_tensor((N, N), device, dtype) for _ in range(N)]\n return tensors\n\n\ndef get_foreach_method_names(name):\n # get torch inplace reference function\n method_name = \"_foreach_\" + name\n method_name_inplace = \"_foreach_\" + name + \"_\"\n\n method = getattr(torch, method_name, None)\n method_inplace = getattr(torch, method_name_inplace, None)\n\n ref = getattr(torch.Tensor, name, None)\n\n return method, method_inplace, ref\n\nclass ForeachUnaryFuncInfo(OpInfo):\n \"\"\"Early version of a specialized OpInfo for foreach unary functions\"\"\"\n def __init__(self,\n name,\n dtypes=floating_and_complex_types(),\n dtypesIfCPU=all_types_and_complex(),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half),\n dtypesIfROCM=None,\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_foreach,\n **kwargs):\n super(ForeachUnaryFuncInfo, self).__init__(\"_foreach_\" + name,\n dtypes=dtypes,\n dtypesIfCPU=dtypesIfCPU,\n dtypesIfCUDA=dtypesIfCUDA,\n dtypesIfROCM=dtypesIfROCM,\n safe_casts_outputs=safe_casts_outputs,\n sample_inputs_func=sample_inputs_func,\n **kwargs)\n\n foreach_method, foreach_method_inplace, torch_ref_method = get_foreach_method_names(name)\n self.method_variant = foreach_method\n self.inplace_variant = foreach_method_inplace\n self.ref = torch_ref_method\n\n\ndef sample_inputs_linalg_cholesky_inverse(op_info, device, dtype, requires_grad=False):\n # Generate Cholesky factors of positive-definite (non-singular) Hermitian (symmetric) matrices\n from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n inputs = (\n torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix\n torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices\n random_hermitian_pd_matrix(S, dtype=dtype, device=device), # single matrix\n random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device), # batch of matrices\n )\n test_cases = (torch.linalg.cholesky(a) for a in inputs)\n out = []\n for a in test_cases:\n a.requires_grad = requires_grad\n out.append(SampleInput(a))\n out.append(SampleInput(a, kwargs=dict(upper=True)))\n return out\n\ndef sample_inputs_linalg_lstsq(op_info, device, dtype, requires_grad=False, **kwargs):\n from torch.testing._internal.common_utils import random_well_conditioned_matrix\n out = []\n for batch in ((), (3,), (3, 3)):\n shape = batch + (3, 3)\n # NOTE: inputs are not marked with `requires_grad` since\n # linalg_lstsq is not differentiable\n a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)\n b = make_tensor(shape, device, dtype, low=None, high=None)\n out.append(SampleInput(a, args=(b,)))\n return out\n\ndef sample_inputs_householder_product(op_info, device, dtype, requires_grad, **kwargs):\n \"\"\"\n This function generates input for torch.linalg.householder_product (torch.orgqr).\n The first argument should be a square matrix or batch of square matrices, the second argument is a vector or batch of vectors.\n Empty, square, rectangular, batched square and batched rectangular input is generated.\n \"\"\"\n # Each column of the matrix is getting multiplied many times leading to very large values for\n # the Jacobian matrix entries and making the finite-difference result of grad check less accurate.\n # That's why gradcheck with the default range [-9, 9] fails and [-2, 2] is used here.\n samples = (\n SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((S,), device, dtype, low=-2, high=2, 
requires_grad=requires_grad),)),\n\n SampleInput(make_tensor((S + 1, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),\n\n SampleInput(make_tensor((2, 1, S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((2, 1, S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),\n\n SampleInput(make_tensor((2, 1, S + 1, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((2, 1, S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),\n\n SampleInput(make_tensor((0, 0), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(make_tensor((0,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),\n\n SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((0,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),\n )\n\n return samples\n\ndef sample_inputs_ormqr(op_info, device, dtype, requires_grad):\n # create a helper function wrapping `make_tensor`\n make_input = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n def gen_inputs():\n batches = [(), (0, ), (2, ), (2, 1)]\n ns = [5, 2, 0]\n tf = [True, False]\n for batch, (m, n), left, transpose in product(batches, product(ns, ns), tf, tf):\n reflectors = make_input((*batch, m, n))\n tau = make_input((*batch, min(m, n)))\n other_matrix_shape = (m, n) if left else (n, m)\n other = make_input((*batch, *other_matrix_shape))\n kwargs = {\"left\": left, \"transpose\": transpose}\n yield SampleInput(reflectors, args=(tau, other,), kwargs=kwargs)\n\n return tuple(gen_inputs())\n\ndef sample_inputs_linalg_cholesky(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates always positive-definite input for torch.linalg.cholesky using\n random_hermitian_pd_matrix.\n The input is generated as the itertools.product of 'batches' and 'ns'.\n In total this function generates 8 SampleInputs\n 'batches' cases include:\n () - single input,\n (0,) - zero batched dimension,\n (2,) - batch of two matrices,\n (1, 1) - 1x1 batch of matrices\n 'ns' gives 0x0 and 5x5 matrices.\n Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.\n \"\"\"\n from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n\n batches = [(), (0, ), (2, ), (1, 1)]\n ns = [5, 0]\n out = []\n for batch, n in product(batches, ns):\n a = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)\n a.requires_grad = requires_grad\n out.append(SampleInput(a))\n return out\n\ndef sample_inputs_symeig(op_info, device, dtype, requires_grad=False):\n out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)\n\n for o in out:\n o.kwargs = {\"upper\": bool(np.random.choice([True, False])),\n \"eigenvectors\": True}\n # A gauge-invariant function\n o.output_process_fn_grad = lambda output: (output[0], abs(output[1]))\n return out\n\n\ndef sample_inputs_linalg_eigh(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates input for torch.linalg.eigh/eigvalsh with UPLO=\"U\" or \"L\" keyword argument.\n \"\"\"\n def out_fn(output):\n if isinstance(output, tuple):\n # eigh function\n return output[0], abs(output[1])\n else:\n # eigvalsh function\n return output\n\n samples = 
sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)\n for sample in samples:\n sample.kwargs = {\"UPLO\": np.random.choice([\"L\", \"U\"])}\n sample.output_process_fn_grad = out_fn\n\n return samples\n\n\ndef sample_inputs_linalg_slogdet(op_info, device, dtype, requires_grad=False):\n def out_fn(output):\n return output[1]\n\n samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)\n for sample in samples:\n sample.output_process_fn_grad = out_fn\n\n return samples\n\n\ndef sample_inputs_linalg_pinv_hermitian(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates input for torch.linalg.pinv with hermitian=True keyword argument.\n \"\"\"\n out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs)\n for o in out:\n o.kwargs = {\"hermitian\": True}\n return out\n\ndef sample_inputs_linalg_solve(op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True, **kwargs):\n \"\"\"\n This function generates always solvable input for torch.linalg.solve\n Using random_fullrank_matrix_distinct_singular_value gives a non-singular (=invertible, =solvable) matrices 'a'.\n The first input to torch.linalg.solve is generated as the itertools.product of 'batches' and 'ns'.\n The second input is generated as the product of 'batches', 'ns' and 'nrhs'.\n In total this function generates 18 SampleInputs\n 'batches' cases include:\n () - single input,\n (0,) - zero batched dimension,\n (2,) - batch of two matrices.\n 'ns' gives 0x0 and 5x5 matrices.\n and 'nrhs' controls the number of vectors to solve for:\n () - using 1 as the number of vectors implicitly\n (1,) - same as () but explicit\n (3,) - solve for 3 vectors.\n Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.\n 'vector_rhs_allowed' controls whether to include nrhs = () to the list of SampleInputs.\n torch.solve / triangular_solve / cholesky_solve (opposed to torch.linalg.solve) do not allow\n 1D tensors (vectors) as the right-hand-side.\n Once torch.solve / triangular_solve / cholesky_solve and its testing are removed,\n 'vector_rhs_allowed' may be removed here as well.\n \"\"\"\n from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n batches = [(), (0, ), (2, )]\n ns = [5, 0]\n if vector_rhs_allowed:\n nrhs = [(), (1,), (3,)]\n else:\n nrhs = [(1,), (3,)]\n out = []\n for n, batch, rhs in product(ns, batches, nrhs):\n a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)\n a.requires_grad = requires_grad\n b = torch.randn(*batch, n, *rhs, dtype=dtype, device=device)\n b.requires_grad = requires_grad\n out.append(SampleInput(a, args=(b,)))\n return out\n\n\ndef sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates always solvable input for legacy solve functions\n (the ones that are not in torch.linalg module).\n The difference from sample_inputs_linalg_solve is that here the right-hand-side of A x = b equation\n should have b.ndim >= 2, vectors are not allowed.\n Also the arguments order is swapped.\n \"\"\"\n out = sample_inputs_linalg_solve(\n op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False\n )\n\n # Reverses tensor order\n for sample in out:\n sample.input, sample.args = sample.args[0], (sample.input,)\n\n return out\n\n\ndef sample_inputs_lu(op_info, device, dtype, requires_grad=False, 
**kwargs):\n # not needed once OpInfo tests support Iterables\n def generate_samples():\n batch_shapes = ((), (3,), (3, 3))\n for batch_shape, get_infos in product(batch_shapes, (True, False)):\n shape = batch_shape + (S, S)\n input = make_tensor(shape, device, dtype, requires_grad=requires_grad, low=None, high=None)\n yield SampleInput(input, args=(True, get_infos))\n\n return list(generate_samples())\n\n\ndef sample_inputs_roll(op_info, device, dtype, requires_grad=False, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n args = ((0, 0), (1, 2), (0, 2), (2, 0), (-1, 0), (10000, 1), (2,), ((1, 2, -1), (0, 1, 2)))\n\n def generator():\n for arg in args:\n yield SampleInput(make_arg((S, S, S)), args=arg)\n\n return list(generator())\n\n\ndef sample_inputs_rot90(op_info, device, dtype, requires_grad=False, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n args = ((1, (0, 1),),\n (1, (1, 2),),\n (1, (1, -1),),\n ())\n\n def generator():\n for arg in args:\n yield SampleInput(make_arg((S, S, S)), args=arg)\n\n return list(generator())\n\n\ndef sample_inputs_std_var(op_info, device, dtype, requires_grad, **kwargs):\n tensor_nd = make_tensor((S, S, S), device=device, dtype=dtype,\n low=None, high=None, requires_grad=requires_grad)\n tensor_1d = make_tensor((S,), device=device, dtype=dtype,\n low=None, high=None, requires_grad=requires_grad)\n\n return [\n SampleInput(tensor_nd),\n SampleInput(tensor_nd, kwargs=dict(dim=1)),\n SampleInput(tensor_nd, kwargs=dict(dim=1, unbiased=True, keepdim=True)),\n SampleInput(tensor_1d, kwargs=dict(dim=0, unbiased=True, keepdim=True)),\n SampleInput(tensor_1d, kwargs=dict(dim=0, unbiased=False, keepdim=False)),\n\n SampleInput(tensor_nd, kwargs=dict(dim=(1,), correction=S // 2)),\n SampleInput(tensor_nd, kwargs=dict(dim=None, correction=0, keepdim=True)),\n ]\n\n\ndef _sample_inputs_svd(op_info, device, dtype, requires_grad=False, is_linalg_svd=False):\n \"\"\"\n This function generates input for torch.svd with distinct singular values so that autograd is always stable.\n Matrices of different size:\n square matrix - S x S size\n tall marix - S x (S-2)\n wide matrix - (S-2) x S\n and batched variants of above are generated.\n Each SampleInput has a function 'output_process_fn_grad' attached to it that is applied on the output of torch.svd\n It is needed for autograd checks, because backward of svd doesn't work for an arbitrary loss function.\n \"\"\"\n from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n # svd and linalg.svd returns V and V.conj().T, respectively. So we need to slice\n # along different dimensions when needed (this is used by\n # test_cases2:wide_all and wide_all_batched below)\n if is_linalg_svd:\n def slice_V(v):\n return v[..., :(S - 2), :]\n\n def uv_loss(usv):\n u00 = usv[0][0, 0]\n v00_conj = usv[2][0, 0]\n return u00 * v00_conj\n else:\n def slice_V(v):\n return v[..., :, :(S - 2)]\n\n def uv_loss(usv):\n u00 = usv[0][0, 0]\n v00_conj = usv[2][0, 0].conj()\n return u00 * v00_conj\n\n test_cases1 = ( # some=True (default)\n # loss functions for complex-valued svd have to be \"gauge invariant\",\n # i.e. 
loss functions shouldn't change when sigh of the singular vectors change.\n # the simplest choice to satisfy this requirement is to apply 'abs'.\n (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),\n lambda usv: usv[1]), # 'check_grad_s'\n (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),\n lambda usv: abs(usv[0])), # 'check_grad_u'\n (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),\n lambda usv: abs(usv[2])), # 'check_grad_v'\n # this test is important as it checks the additional term that is non-zero only for complex-valued inputs\n # and when the loss function depends both on 'u' and 'v'\n (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),\n uv_loss), # 'check_grad_uv'\n (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:(S - 2)],\n lambda usv: (abs(usv[0]), usv[1], abs(usv[2][..., :, :(S - 2)]))), # 'wide'\n (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:, :(S - 2)],\n lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))), # 'tall'\n (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device),\n lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))), # 'batched'\n (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :(S - 2), :],\n lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))), # 'wide_batched'\n (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :, :(S - 2)],\n lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))), # 'tall_batched'\n )\n test_cases2 = ( # some=False\n (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:(S - 2)],\n lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))), # 'wide_all'\n (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:, :(S - 2)],\n lambda usv: (abs(usv[0][:, :(S - 2)]), usv[1], abs(usv[2]))), # 'tall_all'\n (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :(S - 2), :],\n lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))), # 'wide_all_batched'\n (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :, :(S - 2)],\n lambda usv: (abs(usv[0][..., :, :(S - 2)]), usv[1], abs(usv[2]))), # 'tall_all_batched'\n )\n\n out = []\n for a, out_fn in test_cases1:\n a.requires_grad = requires_grad\n if is_linalg_svd:\n kwargs = {'full_matrices': False}\n else:\n kwargs = {'some': True}\n out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))\n\n for a, out_fn in test_cases2:\n a.requires_grad = requires_grad\n if is_linalg_svd:\n kwargs = {'full_matrices': True}\n else:\n kwargs = {'some': False}\n out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))\n\n return out\n\n\ndef sample_inputs_permute(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = [((1, 2, 3, 4), (0, 2, 3, 1)),\n ((1, 2, 3, 4), (0, -2, -1, 1)),\n ((), ()),\n ((1, 2, 3, 4), (2, 1, 3, 0))]\n\n def generator():\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=(args,))\n\n return list(generator())\n\n\n# Based on erstwhile method_tests tests & some tensor_op_tests for pow\ndef sample_inputs_pow(op_info, device, dtype, requires_grad, **kwargs):\n samples = []\n\n if dtype in [torch.float16, torch.bfloat16, torch.float32, torch.float64]:\n test_cases = (\n ((2, 2), 0, 5, 1e-3, requires_grad, (2, 2), 0, 1, 0.1, 
requires_grad, False),\n ((2, 2), 0, 5, 1e-3, requires_grad, (1,), 0, 1, 0.1, requires_grad, False),\n ((), 1e-3, 1e-3 + 1, 0, True, (), 0.1, 1.1, 0, False, False),\n ((2, 2), 0, 5, 1e-3, requires_grad, (), 0.1, 1.1, 1, False, False),\n )\n tests_require_resizing = (\n ((1,), 0, 5, 1e-3, requires_grad, (2, 2), 0, 1, 0.1, requires_grad, True),\n ((2, 1, 2), 0, 5, 1e-3, requires_grad, (1, 2, 1), 0, 1, 0.1, requires_grad, True),\n ((), 1e-3, 1e-3 + 1, 0, True, (1, S, 1), 0, 1, 0.1, requires_grad, True),\n )\n cases = test_cases + tests_require_resizing\n samples = list(SampleInput(make_tensor(shape_b, low=low_b, high=high_b,\n requires_grad=b_grad, device=device,\n dtype=dtype) + additive_b,\n args=(make_tensor(shape_e, low=low_e, high=high_e,\n requires_grad=e_grad, device=device,\n dtype=dtype) + additive_e,),\n broadcasts_input=broadcasts_input)\n for shape_b, low_b, high_b, additive_b, b_grad, shape_e, low_e,\n high_e, additive_e, e_grad, broadcasts_input in cases)\n tensor_scalar_inputs = (\n ((2, 2), 0, 5, 1e-3, requires_grad, (3.14,)),\n ((), 1e-3, 1e-3 + 1, 0, True, (3.14,))\n )\n more_samples = list(SampleInput(make_tensor(shape, dtype=dtype, device=device,\n high=high, low=low,\n requires_grad=b_grad) + additive,\n args=exp)\n for shape, low, high, additive, b_grad, exp in tensor_scalar_inputs)\n samples = [*samples, *more_samples]\n elif dtype in [torch.complex64, torch.complex128]:\n args_tuple = (\n ((2, 2), 0, 5, requires_grad, (3.14,)),\n ((), 0, 1, True, (3.14,)),\n ((), 0, 1, True, (3.14j,))\n )\n samples = list(SampleInput(make_tensor(shape, dtype=dtype, device=device,\n high=high, low=low,\n requires_grad=b_grad) + 1e-3 * (1 + 1j),\n args=arg)\n for shape, low, high, b_grad, arg in args_tuple)\n elif dtype == torch.bool:\n arg_tuple = (0, 1, 1., 2.3)\n samples = list(SampleInput(make_tensor((2, 2), device=device, dtype=dtype,\n requires_grad=requires_grad),\n args=(arg,))\n for arg in arg_tuple)\n dtypes_list = [torch.float64, torch.float32, torch.int64, torch.int32]\n more_samples = list(SampleInput(make_tensor((2, 2), device, dtype=torch.bool,\n requires_grad=requires_grad),\n args=(make_tensor((2, 2), device, dtype=dtype,\n requires_grad=requires_grad),))\n for dtype in dtypes_list)\n samples = [*samples, *more_samples]\n samples.append(SampleInput(make_tensor((2, 2, 2), device, dtype=torch.bool,\n requires_grad=requires_grad),\n args=(make_tensor((2, 1), device, dtype=torch.float64,\n requires_grad=requires_grad),)))\n else:\n exp_tuple = (1, 2, 3)\n samples = list(SampleInput(make_tensor((2, 2), device, dtype,\n requires_grad=requires_grad),\n args=(arg,))\n for arg in exp_tuple)\n samples.append(SampleInput(make_tensor((2, 2), device, dtype,\n requires_grad=requires_grad),\n args=(make_tensor((2, 2), device, dtype,\n requires_grad=requires_grad),)))\n return tuple(samples)\n\ndef sample_inputs_svd(op_info, device, dtype, requires_grad=False, **kwargs):\n return _sample_inputs_svd(op_info, device, dtype, requires_grad, is_linalg_svd=False)\n\ndef sample_inputs_linalg_svd(op_info, device, dtype, requires_grad=False, **kwargs):\n return _sample_inputs_svd(op_info, device, dtype, requires_grad, is_linalg_svd=True)\n\ndef sample_inputs_linalg_svdvals(op_info, device, dtype, requires_grad=False, **kwargs):\n batches = [(), (0, ), (2, ), (1, 1)]\n ns = [5, 2, 0]\n samples = []\n for batch, (m, n) in product(batches, product(ns, ns)):\n a = make_tensor((*batch, m, n), device, dtype, low=None, high=None, requires_grad=requires_grad)\n samples.append(SampleInput(a))\n return 
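samples\n\n# NOTE: sample_inputs_eig below builds its input as V @ diag(w) @ V^-1 (V = eigvecs, w = eigvals); such a matrix is diagonalizable with eigenvalues exactly w, so real dtypes keep a real spectrum.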
\ndef sample_inputs_eig(op_info, device, dtype, requires_grad=False, **kwargs):\n    eigvecs = make_tensor((S, S), device=device, dtype=dtype,\n                          low=None, high=None)\n    eigvals = make_tensor((S,), device=device, dtype=dtype,\n                          low=None, high=None)\n    # we produce only diagonalizable inputs that do not have complex\n    # eigenvalues for real dtypes, as there is no backward implementation\n    # for real inputs with complex eigenvalues yet.\n    input = (eigvecs * eigvals.unsqueeze(-2)) @ eigvecs.inverse()\n    input.requires_grad_(requires_grad)\n\n    def process_output(eigpair):\n        eigvals, eigvecs = eigpair\n        if dtype.is_complex:\n            # eig produces eigenvectors which are normalized to unit norm.\n            # Note that if v is an eigenvector, so is v * e^{i \\phi},\n            # and |v| = |v * e^{i \\phi}| = 1.\n            # This, however, makes the eigenvector backward computation process\n            # rather unstable unless the objective function is gauge-invariant,\n            # that is if f(z) == f(|z|), for example.\n            # Hence for complex inputs we ignore the phases and return only\n            # the absolute values.\n            return eigvals, eigvecs.abs()\n        else:\n            return eigvals, eigvecs\n\n    return [\n        SampleInput(\n            input,\n            kwargs=dict(eigenvectors=True),\n            output_process_fn_grad=process_output\n        ),\n    ]\n\n\ndef sample_inputs_einsum(op_info, device, dtype, requires_grad=False, **kwargs):\n    x = make_tensor((3,), device, dtype, requires_grad=requires_grad)\n    y = make_tensor((4,), device, dtype, requires_grad=requires_grad)\n    A = make_tensor((2, 3,), device, dtype, requires_grad=requires_grad, noncontiguous=True)\n    B = make_tensor((1, 3,), device, dtype, requires_grad=requires_grad)\n    C = make_tensor((1, 2, 3,), device, dtype, requires_grad=requires_grad)\n    D = make_tensor((1, 3, 4,), device, dtype, requires_grad=requires_grad, noncontiguous=True)\n    E = make_tensor((4, 4,), device, dtype, requires_grad=requires_grad)\n    H = make_tensor((3, 3,), device, dtype, requires_grad=requires_grad, noncontiguous=True)\n    I = make_tensor((1, 3, 1,), device, dtype, requires_grad=requires_grad)\n\n    inputs = []\n\n    # Vector operations\n    inputs.append(SampleInput([x], args=('i->',)))  # sum\n    inputs.append(SampleInput([x, y], args=('i,j->ij',)))  # outer\n\n    # Matrix operations\n    inputs.append(SampleInput([A], args=(\"ij->i\",)))  # col sum\n    inputs.append(SampleInput([A, B], args=(\"ij,kj->ik\",)))  # matmul\n    inputs.append(SampleInput([A, E], args=(\"ij,Ab->ijAb\",)))  # matrix outer product\n\n    # Tensor operations\n    inputs.append(SampleInput([C, D], args=(\"aij,ajk->aik\",)))  # batch matmul\n    inputs.append(SampleInput([D, E], args=(\"aij,jk->aik\",)))  # tensor matrix contraction\n    inputs.append(SampleInput([C, B], args=(\"ijk,ik->j\",)))  # non-contiguous\n\n    # Test diagonals\n    inputs.append(SampleInput([I], args=('iji->j',)))  # non-contiguous trace\n\n    # Test ellipsis\n    inputs.append(SampleInput([H], args=(\"i...->...\",)))\n    inputs.append(SampleInput([C, x], args=('...ik, ...j -> ij',)))\n\n    return inputs\n\n\ndef sample_inputs_linalg_qr(op_info, device, dtype, requires_grad=False, **kwargs):\n    \"\"\"\n    This function generates input for torch.linalg.qr.\n    The input is generated as the itertools.product of 'batches' and 'ns'.\n    \"\"\"\n    batches = [(), (0,), (2, ), (1, 1)]\n    ns = [5, 2, 0]\n    out = []\n    for batch, (m, n) in product(batches, product(ns, ns)):\n        a = torch.randn(*batch, m, n, dtype=dtype, device=device, requires_grad=requires_grad)\n        out.append(SampleInput(a))\n    return out\n\ndef sample_inputs_geqrf(op_info, device, dtype, requires_grad=False):\n    batches = [(), (0, ), (2, 
), (1, 1)]\n ns = [5, 2, 0]\n samples = []\n for batch, (m, n) in product(batches, product(ns, ns)):\n # TODO: CUDA path doesn't work with batched or empty inputs\n if torch.device(device).type == 'cuda' and (batch != () or m == 0 or n == 0):\n continue\n a = make_tensor((*batch, m, n), device, dtype, low=None, high=None, requires_grad=requires_grad)\n samples.append(SampleInput(a))\n return samples\n\ndef sample_inputs_flip(op_info, device, dtype, requires_grad, **kwargs):\n tensors = (\n make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((S, 0, M), device, dtype, low=None, high=None, requires_grad=requires_grad)\n )\n\n dims = ((0, 1, 2), (0,), (0, 2), (-1,), ())\n\n samples = [SampleInput(tensor, kwargs={'dims': dim}) for tensor, dim in product(tensors, dims)]\n\n return samples\n\ndef sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad, **kwargs):\n tensors = (\n make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((S, 0, M), device, dtype, low=None, high=None, requires_grad=requires_grad)\n )\n return [SampleInput(tensor) for tensor in tensors]\n\n# TODO: clamp shares tensors among its sample inputs --- we should prohibit this!\ndef sample_inputs_clamp(op_info, device, dtype, requires_grad, **kwargs):\n x = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n lb = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n ub = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n\n def detach(tensor):\n return tensor.clone().detach_().requires_grad_(requires_grad)\n\n return [\n SampleInput(detach(x), args=(lb, ub)),\n SampleInput(detach(x), args=(detach(lb[0]), detach(ub[0]))),\n SampleInput(detach(x), args=(detach(lb[:, :1]),)),\n ]\n\ndef sample_inputs_clamp_scalar(op_info, device, dtype, requires_grad):\n tensors = (\n make_tensor((2, 3, 2), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((2, 0, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n if dtype is torch.uint8:\n min_max_vals = ((2, 5), (3, 7))\n else:\n min_max_vals = ((0, 1), (-1, 1))\n output = [SampleInput(tensor, args=vals) for tensor, vals in product(tensors, min_max_vals)]\n output += [SampleInput(tensors[0], args=(0.5, None)), SampleInput(tensors[0], args=(None, 0.5))]\n empty_tensor = make_tensor((), device=device, dtype=dtype, low=None, high=None, requires_grad=requires_grad)\n output += [SampleInput(empty_tensor, args=(0.0, 1.0)), ]\n return output\n\ndef sample_kwargs_clamp_scalar(device, dtype, input):\n if dtype is torch.uint8:\n min_val, max_val = (random.randint(1, 3), random.randint(4, 8))\n elif dtype.is_floating_point:\n min_val, max_val = (random.uniform(-8, 0), random.uniform(1, 8)) # type: ignore[assignment]\n else:\n min_val, max_val = (random.randint(-8, 0), random.randint(1, 8))\n return {'min': min_val, 'max': max_val}, {'a_min': min_val, 'a_max': max_val}\n\ndef sample_inputs_cumprod(op_info, device, dtype, requires_grad, **kwargs):\n def make_arg(shape):\n # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck\n return make_tensor(shape, device, dtype, low=-1, high=+1, requires_grad=requires_grad)\n\n def prod_zeros(dim_select):\n assert len(dim_select) == 2\n result = make_arg(3 * (S,))\n with torch.no_grad():\n result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_()\n 
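# zero out two more locations along the selected dims so that prod/cumprod reductions hit slices containing zeros (the slice at index 3 of dim_select[1] ends up with two)\n            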
result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_()\n result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_()\n return result\n\n # will not be needed once OpInfo tests suport Iterables\n def sample_generator():\n for dim in range(3):\n yield SampleInput(make_arg((S, S, S)), args=(dim,))\n # Scalar tensors and empty tensor\n for size in [(), (1,), (0,)]:\n yield SampleInput(make_arg(size), args=(0,))\n\n yield SampleInput(prod_zeros([0, 1]), args=(1,))\n yield SampleInput(prod_zeros([0, 2]), args=(1,))\n yield SampleInput(prod_zeros([1, 2]), args=(1,))\n\n # test dtype kwarg\n yield SampleInput(prod_zeros([1, 2]), args=(1,), kwargs={'dtype': dtype})\n\n return list(sample_generator())\n\ndef sample_inputs_view_as_complex(op_info, device, dtype, requires_grad, **kwargs):\n return [SampleInput(make_tensor((S, 2), device, dtype, requires_grad=requires_grad),)]\n\ndef sample_inputs_view_as_real(op_info, device, dtype, requires_grad, **kwargs):\n tensors = (\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((), device, dtype, requires_grad=requires_grad)\n )\n return [SampleInput(tensor) for tensor in tensors]\n\ndef sample_inputs_copysign(op_info, device, dtype, requires_grad, **kwargs):\n def _make_tensor(*shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n cases = [\n # no broadcast\n ((S, S, S), (S, S, S), False),\n # broadcast rhs\n ((S, S, S), (S, S), False),\n\n # scalar\n ((S, S), 3.14, False),\n # scalar positive zero\n ((S, S), 0.0, False),\n # scalar negative zero\n ((S, S), -0.0, False),\n ]\n\n # broadcast lhs\n cases.append(((S, S), (S, S, S), True))\n # broadcast all\n cases.append(((S, 1, S), (M, S), True))\n\n def generator():\n for input_shape, arg_val, broadcasts_input in cases:\n if isinstance(arg_val, tuple):\n arg = _make_tensor(*arg_val)\n else:\n # arg_val is scalar\n arg = arg_val\n\n yield SampleInput(_make_tensor(*input_shape), args=(arg, ), broadcasts_input=broadcasts_input)\n\n return list(generator())\n\ndef sample_inputs_prod(op_info, device, dtype, requires_grad):\n def make_arg(shape):\n # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck\n return make_tensor(shape, device, dtype, low=-1, high=+1, requires_grad=requires_grad)\n\n def prod_single_zero():\n result = make_arg(2 * (S,))\n with torch.no_grad():\n result[0, 1] = 0\n return result\n\n # will not be needed once OpInfo tests support Iterables\n def sample_generator():\n for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad):\n yield SampleInput(sample.input) # only Tensor, ignore other inputs\n yield sample\n sample.kwargs['keepdim'] = True\n yield sample\n yield SampleInput(prod_single_zero())\n yield SampleInput(make_arg((3, 3, 3)), args=(1,))\n yield SampleInput(make_arg((3, 3, 3)), args=(1,), kwargs={'keepdim': True})\n\n # test zero scalar tensor\n zero = make_arg(())\n with torch.no_grad():\n zero.zero_()\n yield SampleInput(zero)\n yield SampleInput(zero, args=(0,))\n yield SampleInput(zero, args=(0,), kwargs={'keepdim': True})\n\n return list(sample_generator())\n\ndef sample_inputs_diag(op_info, device, dtype, requires_grad, **kwargs):\n vec_sample = SampleInput(make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad))\n\n tensors = (\n make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((3, 5), device, dtype, low=None, high=None, 
requires_grad=requires_grad),\n make_tensor((5, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n\n args = ((), (2,), (-2,), (1,), (2,))\n\n samples = []\n for tensor, arg in product(tensors, args):\n samples.append(SampleInput(tensor, args=arg))\n\n return samples + [vec_sample]\n\ndef sample_inputs_logit(op_info, device, dtype, requires_grad, **kwargs):\n low, high = op_info.domain\n\n # Note: Operator is very sensitive at points near the\n # start and end of domain and leads to NaN for float16\n # if domain_eps is 1e-5.\n domain_eps = op_info._domain_eps if dtype != torch.float16 else 3e-2\n\n low = low + domain_eps\n high = high - domain_eps\n\n samples = (\n SampleInput(make_tensor((S, S, S), device, dtype, low=low, high=high, requires_grad=requires_grad)),\n SampleInput(make_tensor((S, S, S), device, dtype, low=low,\n high=high, requires_grad=requires_grad), args=(0.2,)),\n SampleInput(make_tensor((), device, dtype, low=low, high=high, requires_grad=requires_grad)),\n SampleInput(make_tensor((), device, dtype, low=low,\n high=high, requires_grad=requires_grad), args=(0.2,)),\n )\n\n return samples\n\ndef sample_inputs_floor_divide(op_info, device, dtype, requires_grad, **kwargs):\n lhs = make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n rhs = make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n # Avoid integer divide by 0\n if not (dtype.is_floating_point or dtype.is_complex):\n rhs[rhs == 0] = 1\n\n return [\n SampleInput(lhs, args=(rhs,)),\n SampleInput(lhs, args=(rhs[0],)),\n SampleInput(lhs, args=(3.14,)),\n ]\n\n\ndef sample_inputs_masked_scatter(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def samples_generator():\n yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))))\n yield SampleInput(make_arg((S, S)), args=(torch.randn((S,), device=device) > 0, make_arg((S, S))))\n yield SampleInput(make_arg((S, S)), args=(bernoulli_scalar().to(device), make_arg((S, S))))\n yield SampleInput(make_arg((S,)),\n args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))),\n broadcasts_input=True)\n\n samples = tuple(samples_generator())\n return samples\n\n\ndef sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def sample_generator():\n yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10))\n yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg(())))\n yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10))\n yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10))\n yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, make_arg(())))\n yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10))\n\n yield SampleInput(make_arg((S,)),\n args=(torch.randn(S, S, device=device) > 0, make_arg(())),\n broadcasts_input=True)\n yield SampleInput(make_arg((S,)),\n args=(torch.randn(S, S, device=device) > 0, 10),\n broadcasts_input=True)\n\n samples = tuple(sample_generator())\n return samples\n\ndef sample_inputs_masked_select(op_info, device, dtype, requires_grad, **kwargs):\n samples = (\n SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, 
requires_grad=requires_grad),\n args=(torch.randn(M, M, device=device) > 0,)),\n\n SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn((M,), device=device) > 0,)),\n\n SampleInput(make_tensor((M,), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn((M, M), device=device) > 0,)),\n\n SampleInput(make_tensor((M, 1, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn((M, M), device=device) > 0,)),\n\n SampleInput(make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.tensor(1, device=device, dtype=torch.bool),)),\n\n SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.tensor(1, device=device, dtype=torch.bool),)),\n\n SampleInput(make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn((M, M), device=device) > 0,)),\n )\n\n return samples\n\ndef sample_inputs_matrix_exp(op_info, device, dtype, requires_grad, **kwargs):\n samples = (\n SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad)),\n SampleInput(make_tensor((S, S, S), device, dtype, requires_grad=requires_grad)),\n )\n\n return samples\n\ndef sample_inputs_matmul(op_info, device, dtype, requires_grad):\n test_cases = (((L,), (L,)),\n ((S, M), (M,)),\n ((M,), (M, S)),\n ((S, M), (M, S)),\n ((S, S, M), (M,)),\n ((S, S, M), (M, S)),\n ((M,), (S, M, S)),\n ((S, M), (S, M, S)),\n ((S, S, M, M), (S, S, M, S)),\n ((S, S, M, M), (M,)),\n ((M,), (S, S, M, S)))\n sample_inputs = []\n for lhs_shape, rhs_shape in test_cases:\n lhs = make_tensor(lhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)\n rhs = make_tensor(rhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)\n sample_inputs.append(SampleInput(lhs, args=(rhs,)))\n return tuple(sample_inputs)\n\n\ndef sample_inputs_polar(op_info, device, dtype, requires_grad, **kwargs):\n def _make_tensor_helper(shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n samples = (\n SampleInput(_make_tensor_helper((S, S), low=0), args=(_make_tensor_helper((S, S)),)),\n SampleInput(_make_tensor_helper((), low=0), args=(_make_tensor_helper(()),)),\n )\n\n return samples\n\ndef sample_inputs_complex(op_info, device, dtype, requires_grad, **kwargs):\n def _make_tensor_helper(shape):\n return make_tensor(shape, device, dtype, requires_grad=requires_grad)\n\n samples = (\n SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S, S)),)),\n SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper(()),)),\n )\n\n return samples\n\n\ndef sample_inputs_polygamma(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n tensor_shapes = ((S, S), ())\n ns = (1, 2, 3, 4, 5)\n\n def generator():\n for shape, n in product(tensor_shapes, ns):\n yield SampleInput(make_arg(shape), args=(n,))\n\n return list(generator())\n\n\ndef sample_inputs_mvlgamma(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n tensor_shapes = ((S, S), ())\n ns = (1, 2, 3, 4, 5)\n\n # Since the accepted lower bound for input\n # to mvlgamma depends on `p` argument,\n # the following function computes the lower bound\n # which we pass to 
`make_tensor`.\n def compute_min_val(p):\n return (p - 1.) / 2\n\n def generator():\n for shape, n in product(tensor_shapes, ns):\n min_val = compute_min_val(n)\n yield SampleInput(make_arg(shape, low=min_val), args=(n,))\n\n return list(generator())\n\n\n# Since `mvlgamma` has multiple entries,\n# there are multiple common skips for the additional\n# entries. Following function is a helper to that end.\ndef skips_mvlgamma(skip_redundant=False):\n skips = (\n # outside domain values are hard error for mvlgamma op.\n SkipInfo('TestUnaryUfuncs', 'test_float_domains'),\n )\n if not skip_redundant:\n # Redundant tests\n skips = skips + ( # type: ignore[assignment]\n SkipInfo('TestGradients'),\n SkipInfo('TestOpInfo'),\n SkipInfo('TestCommon'),\n )\n return skips\n\n\n# To test reference numerics against multiple values of argument `p`,\n# we make multiple OpInfo entries with each entry corresponding to different value of p.\n# We run the op tests from test_ops.py only for `p=1` to avoid redundancy in testing.\n# Class `MvlGammaInfo` already contains the basic information related to the operator,\n# it only takes arguments like `domain`, `skips` and `sample_kwargs`, which\n# differ between the entries.\nclass MvlGammaInfo(UnaryUfuncInfo):\n def __init__(self, variant_test_name, domain, skips, sample_kwargs):\n super(MvlGammaInfo, self).__init__(\n 'mvlgamma',\n ref=reference_mvlgamma if TEST_SCIPY else _NOTHING,\n variant_test_name=variant_test_name,\n domain=domain,\n decorators=(precisionOverride({torch.float16: 5e-2}),),\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half),\n sample_inputs_func=sample_inputs_mvlgamma,\n supports_out=False,\n skips=skips,\n sample_kwargs=sample_kwargs)\n\n\ndef sample_inputs_entr(op_info, device, dtype, requires_grad, **kwargs):\n low, _ = op_info.domain\n\n if requires_grad:\n low = 0 + op_info._domain_eps\n\n return (SampleInput(make_tensor((L,), device, dtype,\n low=low,\n requires_grad=requires_grad)),\n SampleInput(make_tensor((), device, dtype,\n low=low,\n requires_grad=requires_grad)))\n\ndef sample_inputs_rsub(op_info, device, dtype, requires_grad, variant='tensor', **kwargs):\n def _make_tensor_helper(shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n def _samples_with_alpha_helper(args, alphas, filter_fn=lambda arg_alpha: True):\n filtered_product = filter(filter_fn, product(args, alphas)) # type: ignore[var-annotated]\n return (SampleInput(input, args=(arg,), kwargs=dict(alpha=alpha))\n for (input, arg), alpha in filtered_product)\n\n int_alpha, float_alpha, complex_alpha = 2, 0.1, 1 + 0.6j\n\n if variant == 'tensor':\n samples = (\n SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S, S)),)),\n SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S,)),)),\n SampleInput(_make_tensor_helper((S,)), args=(_make_tensor_helper((S, S)),)),\n SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper(()),)),\n SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper((S,)),)),\n SampleInput(_make_tensor_helper((S,)), args=(_make_tensor_helper(()),)),\n )\n\n if dtype.is_complex:\n alphas = [int_alpha, float_alpha, complex_alpha]\n elif dtype.is_floating_point:\n alphas = [int_alpha, float_alpha]\n else:\n alphas = [int_alpha]\n\n args = ((_make_tensor_helper((S, S)), _make_tensor_helper((S, S))),\n (_make_tensor_helper((S, S)), _make_tensor_helper((S,))),\n (_make_tensor_helper(()), _make_tensor_helper(())))\n samples += 
tuple(_samples_with_alpha_helper(args, alphas)) # type: ignore[assignment]\n elif variant == 'scalar':\n # Scalar Other\n samples = (SampleInput(_make_tensor_helper((S, S)), args=(0.5,)),\n SampleInput(_make_tensor_helper(()), args=(0.5,)),\n SampleInput(_make_tensor_helper((S, S)), args=(1.5j,)),\n SampleInput(_make_tensor_helper(()), args=(1.5j,)),\n SampleInput(_make_tensor_helper((S, S)), args=(0.4 + 1.2j,)),\n SampleInput(_make_tensor_helper(()), args=(1.2 + 1.76j,)))\n\n scalar_args = [(_make_tensor_helper((S, S)), 0.5), (_make_tensor_helper(()), 0.5),\n (_make_tensor_helper((S, S)), 2.7j), (_make_tensor_helper(()), 2.7j),\n (_make_tensor_helper((S, S)), 1 - 2.7j), (_make_tensor_helper(()), 1 + 2.7j)]\n\n alphas = [int_alpha, float_alpha, complex_alpha]\n\n def filter_fn(arg_alpha):\n arg, alpha = arg_alpha\n if isinstance(alpha, complex):\n if dtype.is_complex or isinstance(arg[1], complex):\n return True\n else:\n # complex alpha is valid only if either `self` or `other` is complex\n return False\n\n # Non-Complex Alpha\n return True\n\n # Samples with alpha (scalar version) covers the following cases\n # self | other | alpha\n # -----------------------------------------\n # real | real | real (int and float)\n # real | complex | real and complex\n # complex | real | real and complex\n # complex | complex | real and complex\n #\n # It does not cover\n # real | real | complex\n # x = torch.randn(2, requires_grad=True, dtype=torch.float64)\n # torch.rsub(x, 1, alpha=1. + 1.6j)\n # RuntimeError: value cannot be converted to type double without overflow: (-1,-1.6)\n\n samples += tuple(_samples_with_alpha_helper(scalar_args, alphas, filter_fn=filter_fn)) # type: ignore[assignment]\n else:\n raise Exception(\"Invalid variant!\")\n\n return samples\n\ndef sample_inputs_cumulative_ops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):\n def _make_tensor_helper(shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n samples = [\n SampleInput(_make_tensor_helper((S, S, S)), args=(0,)),\n SampleInput(_make_tensor_helper((S, S, S)), args=(1,)),\n SampleInput(_make_tensor_helper(()), args=(0,)),\n ]\n\n if supports_dtype_kwargs:\n # NOTE: if `dtype` is not same as input, then inplace variants fail with\n # `provided dtype must match the dtype of self tensor in cumsum`\n samples.append(SampleInput(_make_tensor_helper((S, S, S)), args=(1,), kwargs={'dtype': dtype}))\n\n return samples\n\n\ndef sample_inputs_unfold(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((), (0, 1, 1)),\n ((S, S, S, S), (0, 3, 1)),\n ((S, S, S, S), (1, 3, 1)),\n ((S, S, S, S), (2, 3, 1)),\n ((S, S, S, S), (3, 3, 1)),\n ((S, S, S, S), (0, 3, 2)),\n ((S, S, S, S), (1, 3, 2)),\n ((S, S, S, S), (2, 3, 2)),\n ((S, S, S, S), (3, 3, 2)),\n ((S, S, S, S), (0, 4, 1)),\n ((S, S, S, S), (1, 4, 1)),\n ((S, S, S, S), (2, 4, 1)),\n ((S, S, S, S), (3, 4, 1)),\n ((M,), (0, 3, 1)),\n ((M,), (0, 3, 2)),\n ((M,), (0, 3, 3)),\n ((1000,), (0, 3, 11)),\n ((1000,), (0, 2, 27)),\n ((10, 10), (0, 1, 2)),\n ((10, 10), (1, 2, 3)),\n ((10, 10), (1, 2, 2)),\n ((S, S, S), (2, 3, 2)),\n )\n\n sample_inputs = []\n for shape, arguments in test_cases:\n sample_inputs += [SampleInput(make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=arguments)]\n return sample_inputs\n\n\ndef sample_inputs_atan2(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, 
dtype=dtype, requires_grad=requires_grad)\n cases = (\n ((S, S, S), (S, S, S), False),\n ((), (), False),\n ((S, S, S), (S,), False),\n ((S,), (S, S, S), True),\n ((S, 1, S), (S, S), True),\n )\n\n def generator():\n for x_shape, y_shape, broadcasts_input in cases:\n yield SampleInput(make_arg(x_shape), args=(make_arg(y_shape),),\n broadcasts_input=broadcasts_input)\n\n return list(generator())\n\ndef sample_inputs_msort(op_info, device, dtype, requires_grad):\n def apply_grad(t):\n if dtype in floating_types_and(torch.float16, torch.bfloat16):\n t.requires_grad_(requires_grad)\n\n def large_1d_unique(dtype, device):\n res = torch.randperm(L * L * L, dtype=torch.int64, device=device)\n res = res.to(dtype)\n apply_grad(res)\n return res\n\n samples = []\n # Test case for large tensor.\n largesample = SampleInput(large_1d_unique(dtype, device))\n\n sample = SampleInput(make_tensor((S, M, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))\n\n return [largesample, sample]\n\ndef sample_inputs_lerp(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n samples = (\n # no broadcast\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4)),\n # broadcast rhs\n SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4)),\n # scalar tensor\n SampleInput(make_arg(()), args=(make_arg(()), 0.4)),\n # broadcast rhs scalar-tensor\n SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4)),\n # broadcast rhs with weight tensor\n SampleInput(make_arg((S, S)), args=(make_arg((S,)), make_arg((S, S)))),\n # broadcast rhs and weight tensor\n SampleInput(make_arg((S, S)), args=(make_arg((S, 1)), make_arg((S,)))),\n # broadcast_lhs\n SampleInput(make_arg((S,)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),\n # scalar broadcast_lhs\n SampleInput(make_arg(()), args=(make_arg((S, S)), 0.4), broadcasts_input=True),\n # broadcast all\n SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),\n # tensor broadcast all\n SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), make_arg((S, 1))),\n broadcasts_input=True),\n )\n\n if dtype.is_complex:\n samples = samples + ( # type: ignore[assignment]\n # no broadcast\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4j)),\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 1.2 + 0.1j)),\n # broadcast rhs\n SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4j)),\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 5.4 + 9j)),\n # scalar tensor\n SampleInput(make_arg(()), args=(make_arg(()), 0.4j)),\n SampleInput(make_arg(()), args=(make_arg(()), 6.1 + 0.004j)),\n # broadcast rhs scalar-tensor\n SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4j)),\n SampleInput(make_arg((S, S)), args=(make_arg(()), 1 + 2j)),\n )\n\n return samples\n\ndef sample_inputs_tensordot(self, device, dtype, requires_grad, **kwargs):\n cases = (\n ((2, 2, 2), (2, 2, 2), (2)),\n ((2, 2, 1), (2, 1, 2), ([0, 1], [2, 0])),\n )\n samples = []\n for first_shape, second_shape, dims in cases:\n samples.append(SampleInput(make_tensor(first_shape, device, dtype,\n requires_grad=requires_grad),\n args=(make_tensor(second_shape, device, dtype,\n requires_grad=requires_grad),),\n kwargs=dict(dims=dims,)))\n return tuple(samples)\n\ndef sample_inputs_kron(op_info, device, dtype, requires_grad):\n test_cases = (\n ((S, S), (M, L)),\n )\n\n sample_inputs = []\n for input_shape, other_shape in test_cases:\n input = make_tensor(input_shape, device, 
dtype, low=None, high=None, requires_grad=requires_grad)\n other = make_tensor(other_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)\n sample = SampleInput(input, args=(other,))\n sample_inputs.append(sample)\n return tuple(sample_inputs)\n\ndef sample_inputs_inner(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((S, ), device, dtype, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, requires_grad=requires_grad),\n )\n ),\n SampleInput(\n make_tensor((), device, dtype, requires_grad=requires_grad),\n args=(\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n )\n ),\n )\n\n# Tests for scatter when passing the reduce argument are missing\n# Reference: https://github.com/pytorch/pytorch/issues/56464\ndef sample_inputs_scatter(op_info, device, dtype, requires_grad):\n def _tensor(shape, dtype=dtype, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n def _gather(shape, index_dim, max_indices):\n return gather_variable(shape, index_dim, max_indices, device=device)\n\n zero = torch.tensor(0, dtype=torch.long, device=device)\n test_cases = (\n (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),\n (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),\n (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),\n (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),\n (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),\n (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),\n (_tensor(()), (0, zero.clone().detach(), _tensor(()))),\n (_tensor(()), (0, zero.clone().detach(), 2.5)),\n )\n\n return [SampleInput(tensor, args=args) for tensor, args in test_cases]\n\ndef sample_inputs_scatter_add(op_info, device, dtype, requires_grad):\n def _tensor(shape, dtype=dtype, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n def _gather(shape, index_dim, max_indices):\n return gather_variable(shape, index_dim, max_indices, device=device)\n\n zero = torch.tensor(0, dtype=torch.long, device=device)\n test_cases = (\n (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),\n (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),\n (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),\n (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),\n (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),\n (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),\n (_tensor(()), (0, zero.clone().detach(), _tensor(()))),\n )\n\n return [SampleInput(tensor, args=args) for tensor, args in test_cases]\n\n\ndef sample_inputs_ravel(op_info, device, dtype, requires_grad, **kwargs):\n samples = (SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)),\n SampleInput(make_tensor((), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)),)\n\n return samples\n\n\ndef sample_inputs_view(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((S, S, S), (S * S, S)),\n ((S * S, S), (S, S, S)),\n ((S,), (S,)),\n ((), ()),\n ((), (1,)))\n\n def generator():\n for case in cases:\n shape, args = case\n yield(SampleInput(make_arg(shape), args=(args, )))\n\n return 
list(generator())\n\n\ndef sample_inputs_view_as(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device)\n\n cases = (((S, S, S), (S * S, S)),\n ((), ()),\n ((), (1, 1)),\n )\n\n def generator():\n for case in cases:\n shape, shape_other = case\n yield(SampleInput(make_arg(shape, requires_grad=requires_grad),\n args=(make_arg(shape_other, requires_grad=False), )))\n\n return list(generator())\n\n\ndef sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((S, S, S), (1, 2)),\n ((S, S, S), (-1, 2)),\n ((S, S, S), (-1, -1)),\n ((S, S, S), (1, -1)),\n ((S,), (0, 2))\n )\n\n def generator():\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n return list(generator())\n\n\ndef sample_inputs_rbinops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):\n def _make_tensor_helper(shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n scalar: Union[int, float, complex] = 3\n\n if dtype.is_floating_point:\n scalar = 3.14\n elif dtype.is_complex:\n scalar = 3.14j\n\n samples = [\n SampleInput(_make_tensor_helper((S, S, S)), args=(scalar,)),\n SampleInput(_make_tensor_helper(()), args=(scalar,)),\n ]\n\n return samples\n\n\ndef sample_inputs_expand(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((S, 1, 1), (S, S, S)),\n ((S, 1, S), (S, S, S)),\n ((S, 1), (S, S, S)),\n ((1,), (S, S, S)),\n ((1, S), (1, 1, S)),\n ((), ()),\n ((), (1, 3, 2)),\n )\n\n def generator():\n for case in cases:\n shape, args = case\n yield(SampleInput(make_arg(shape), args=(args, )))\n\n return list(generator())\n\n\ndef sample_inputs_expand_as(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device)\n\n cases = (((S, 1, 1), (S, S, S)),\n ((), ()),\n ((), (1, 1)),\n )\n\n def generator():\n for case in cases:\n shape, shape_other = case\n yield(SampleInput(make_arg(shape, requires_grad=requires_grad),\n args=(make_arg(shape_other, requires_grad=False), )))\n\n return list(generator())\n\n\ndef sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device)\n\n cases = (((S, S, S), (2,)),\n ((S, S, S), (S, 1)),\n ((S, S, S), (S, -1)))\n\n def generator():\n for case in cases:\n shape, args = case\n yield(SampleInput(make_arg(shape, requires_grad=requires_grad), args=args))\n\n return list(generator())\n\n\nforeach_unary_op_db: List[OpInfo] = [\n ForeachUnaryFuncInfo('exp'),\n ForeachUnaryFuncInfo('acos'),\n ForeachUnaryFuncInfo('asin'),\n ForeachUnaryFuncInfo('atan'),\n ForeachUnaryFuncInfo('cos'),\n ForeachUnaryFuncInfo('cosh'),\n ForeachUnaryFuncInfo('log'),\n ForeachUnaryFuncInfo('log10'),\n ForeachUnaryFuncInfo('log2'),\n ForeachUnaryFuncInfo('tan'),\n ForeachUnaryFuncInfo('tanh'),\n ForeachUnaryFuncInfo('sin'),\n ForeachUnaryFuncInfo('sinh'),\n\n ForeachUnaryFuncInfo('neg',\n dtypes=all_types_and_complex(),\n dtypesIfCPU=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex(),\n sample_inputs_func=sample_inputs_foreach,\n safe_casts_outputs=False),\n\n ForeachUnaryFuncInfo('sqrt',\n dtypes=floating_types(),\n dtypesIfCPU=floating_and_complex_types_and(torch.bfloat16),\n 
dtypesIfCUDA=floating_and_complex_types_and(torch.half)),\n\n ForeachUnaryFuncInfo('ceil',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half)),\n\n ForeachUnaryFuncInfo('erf',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half)),\n\n ForeachUnaryFuncInfo('erfc',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16)),\n\n ForeachUnaryFuncInfo('expm1',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16)),\n\n ForeachUnaryFuncInfo('floor',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half)),\n\n ForeachUnaryFuncInfo('log1p',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half)),\n\n ForeachUnaryFuncInfo('round',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half)),\n\n ForeachUnaryFuncInfo('frac',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half)),\n\n ForeachUnaryFuncInfo('reciprocal',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half)),\n\n ForeachUnaryFuncInfo('sigmoid',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half)),\n\n ForeachUnaryFuncInfo('trunc',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half)),\n\n ForeachUnaryFuncInfo('abs',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n dtypesIfCPU=all_types_and_complex_and(torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n safe_casts_outputs=False)\n]\n\ndef reference_sign(x):\n if x.dtype == np.bool_:\n # `np.sign` doesn't support `bool`.\n # >>> np.sign(True)\n # ufunc 'sign' did not contain a loop\n # with signature matching types dtype('bool') -> dtype('bool')\n return np.sign(x, dtype=np.uint8).astype(np.bool_)\n return np.sign(x)\n\n\ndef reference_sgn(x):\n # NumPy doesn't have an equivalent to `torch.sgn` when the dtype is complex.\n # For complex inputs, `np.sign` returns sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j.\n # while `torch.sgn` returns, 0 if abs(input) == 0 else input/abs(input)\n if x.dtype not in [np.complex64, np.complex128]:\n return reference_sign(x)\n\n out = (x / np.abs(x))\n if out.ndim == 0:\n # Handle x == 0 case\n if (x == 0):\n # Can't assign to np.complex object\n # So make a new one.\n return np.array(complex(0, 0), dtype=x.dtype)\n return out\n\n # Handle x == 0 case\n mask = (x == 0)\n out[mask] = complex(0, 0)\n return out\n\n\ndef reference_sigmoid(x):\n # 'scipy.special.expit' not supported for the input types\n if x.dtype in [np.complex64, np.complex128]:\n return (1 / (1 + np.exp(-x)))\n return scipy.special.expit(x)\n\n\ndef reference_lgamma(x):\n # scipy.special.gammaln returns `-inf` when input is `-inf`.\n # While Pytorch, C and C++, all return `inf` when input is `-inf`.\n # Reference:\n # https://en.cppreference.com/w/cpp/numeric/math/lgamma\n # 
https://en.cppreference.com/w/c/numeric/math/lgamma\n\n    # To handle the above discrepancy,\n    # we replace -inf with inf so values\n    # that were originally -inf map to inf as expected\n    if x.dtype.kind == 'f':\n        x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x)\n\n    out = scipy.special.gammaln(x)\n\n    if x.dtype == np.float16:\n        # `scipy.special.gammaln` returns output of float32 when input is float16,\n        # while `torch.lgamma` preserves `float16`. But due to smaller range of float16,\n        # Pytorch version outputs `inf` while SciPy returns finite values.\n        out = out.astype(np.float16)\n\n    return out\n\ndef reference_polygamma(x, n):\n    # WEIRD `scipy.special.polygamma` behavior\n    # >>> scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype\n    # dtype('float64')\n    # >>> scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype\n    # dtype('float32')\n    #\n    # Thus we cast output to the default torch dtype.\n    np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]\n    return scipy.special.polygamma(n, x).astype(np_dtype)\n\n\ndef reference_mvlgamma(x, d):\n    if x.dtype == np.float16:\n        return scipy.special.multigammaln(x, d).astype(np.float16)\n\n    return scipy.special.multigammaln(x, d)\n\n\ndef gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs):\n    \"\"\"Gradcheck wrapper for functions that take Hermitian matrices as input.\n\n    They require a modified function because the finite-difference algorithm\n    for calculating derivatives does not preserve the Hermitian property of the input.\n    \"\"\"\n    return op(input + input.conj().transpose(-2, -1), *args, **kwargs)\n\n\ndef gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):\n    \"\"\"Gradcheck wrapper for functions that take lower or upper triangular matrices as input.\n\n    They require a modified function because the finite-difference algorithm\n    for calculating derivatives does not preserve the triangular property of the input.\n    \"\"\"\n    return op(input.triu() if upper else input.tril(), upper)\n\n\n# Operator database (sorted alphabetically)\nop_db: List[OpInfo] = [\n    UnaryUfuncInfo('abs',\n                   aliases=('absolute', ),\n                   ref=np.abs,\n                   dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n                   skips=(\n                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n                                device_type='cpu', dtypes=[torch.cfloat]),\n                       # Reference: https://github.com/pytorch/pytorch/issues/49224\n                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n                                dtypes=[torch.int8], active_if=TEST_WITH_ASAN),\n                       # TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input)\n                       # We can break the logic of the loop over all possible types but it is OK.\n                       # https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449\n                       SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes',\n                                dtypes=[torch.cfloat, torch.cdouble]),\n                   ),\n                   supports_inplace_autograd=False,\n                   assert_autodiffed=True),\n    # NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952)\n    UnaryUfuncInfo('acos',\n                   aliases=('arccos', ),\n                   ref=np.arccos,\n                   domain=(-1, 1),\n                   handles_complex_extremals=False,\n                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, 
torch.bfloat16),\n # \"rsqrt_cpu\" not implemented for 'BFloat16'\n backward_dtypesIfCPU=all_types_and_complex_and(torch.bool),\n assert_autodiffed=True,\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-1,\n torch.complex64: 1e-2}),),\n safe_casts_outputs=True,\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestGradients', 'test_fn_grad',\n dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n SkipInfo('TestGradients', 'test_method_grad',\n dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n SkipInfo('TestGradients', 'test_inplace_grad',\n dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n )),\n # NOTE: the derivative for inplace acosh is not implemented\n UnaryUfuncInfo('acosh',\n aliases=('arccosh', ),\n ref=np.arccosh,\n domain=(1, float('inf')),\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n # \"rsqrt_cuda\" not implemented for 'BFloat16'\n backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 5e-2}),),\n supports_inplace_autograd=False,\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n # Reference: https://github.com/pytorch/pytorch/issues/50692\n SkipInfo('TestGradients', 'test_fn_grad',\n device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n SkipInfo('TestGradients', 'test_method_grad',\n device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n )),\n OpInfo('add',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n assert_autodiffed=True,\n sample_inputs_func=partial(sample_inputs_binary_pwise, alpha=2),\n supports_inplace_autograd=False),\n OpInfo('mul',\n aliases=('multiply',),\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_binary_pwise),\n OpInfo('sub',\n aliases=('subtract',),\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),\n assert_autodiffed=True,\n sample_inputs_func=partial(sample_inputs_binary_pwise, alpha=2),\n supports_inplace_autograd=False),\n OpInfo('addmm',\n # This addmm OpInfo is for when alpha and beta are not both equal to 1.\n # alpha=beta=1 is tested in the following opinfo, because that special case will\n # trigger addmm being decomposed by a jit pass.\n dtypes=floating_and_complex_types_and(torch.float16),\n dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n supports_inplace_autograd=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n 
sample_inputs_func=sample_inputs_addmm),\n OpInfo('addmm',\n # When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add.\n variant_test_name='decomposed',\n dtypes=floating_and_complex_types_and(torch.float16),\n dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n supports_inplace_autograd=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n autodiff_nonfusible_nodes=['aten::add', 'aten::mm'],\n sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1)),\n OpInfo('addmv',\n dtypes=floating_types(),\n dtypesIfCPU=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,\n *[torch.bfloat16] if CUDA11OrLater else []),\n dtypesIfROCM=floating_types_and(torch.half),\n supports_inplace_autograd=False,\n skips=(\n # issue may fix: https://github.com/pytorch/pytorch/issues/55589\n # AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it.\n SkipInfo('TestCommon', 'test_out', dtypes=(torch.float32,)),\n # Reference: https://github.com/pytorch/pytorch/issues/55589\n SkipInfo('TestCommon', 'test_variant_consistency_eager'),\n ),\n sample_inputs_func=sample_inputs_addmv),\n OpInfo('addbmm',\n dtypes=floating_types(),\n dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n dtypesIfROCM=floating_types_and(torch.half),\n skips=(\n # addbmm does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),\n # https://github.com/pytorch/pytorch/issues/55907\n SkipInfo('TestCommon', 'test_variant_consistency_eager'),\n SkipInfo('TestOpInfo', 'test_supported_backward', dtypes=(torch.bfloat16, ),\n device_type='cuda', active_if=not SM53OrLater)),\n sample_inputs_func=sample_inputs_addbmm),\n OpInfo('baddbmm',\n dtypes=floating_types_and(torch.half),\n dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,\n *[torch.bfloat16] if CUDA11OrLater else []),\n skips=(\n # baddbmm does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),\n SkipInfo('TestOpInfo', 'test_supported_backward', dtypes=(torch.bfloat16, ),\n device_type='cuda', active_if=not SM53OrLater)),\n sample_inputs_func=sample_inputs_baddbmm),\n OpInfo('dot',\n dtypes=all_types_and_complex_and(torch.float16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_dot_vdot),\n OpInfo('vdot',\n dtypes=all_types_and_complex_and(torch.float16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=sample_inputs_dot_vdot),\n OpInfo('bmm',\n 
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n skips=(\n # bmm does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),\n SkipInfo('TestOpInfo', 'test_supported_backward', dtypes=(torch.bfloat16, ),\n device_type='cuda', active_if=not SM53OrLater)),\n sample_inputs_func=sample_inputs_bmm),\n OpInfo('mv',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n skips=(\n # bmm does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n SkipInfo('TestOpInfo', 'test_supported_backward', dtypes=(torch.float16,)),\n # mv calls into addmv which doesn't fully support float16\n # RuntimeError: \"addmv_impl_cpu\" not implemented for 'Half'\n SkipInfo('TestOpInfo', 'test_supported_dtypes', dtypes=(torch.float16,)),),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_mv),\n OpInfo('addr',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n backward_dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool),\n # Reference: https://github.com/pytorch/pytorch/issues/50747\n supports_inplace_autograd=False,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/50747\n SkipInfo('TestCommon', 'test_variant_consistency_eager',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)),),\n sample_inputs_func=sample_inputs_addr,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('addcmul',\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),\n assert_autodiffed=True,\n supports_inplace_autograd=False,\n skips=(\n # TODO: update sample inputs with for_inplace_variant kwarg to support this test\n SkipInfo('TestCommon', 'test_variant_consistency_eager'),),\n sample_inputs_func=sample_inputs_addcmul_addcdiv),\n OpInfo('addcdiv',\n dtypes=floating_and_complex_types(),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n supports_inplace_autograd=False,\n skips=(\n # TODO: update sample inputs with for_inplace_variant kwarg to support this test\n SkipInfo('TestCommon', 'test_variant_consistency_eager'),),\n sample_inputs_func=sample_inputs_addcmul_addcdiv),\n OpInfo('amax',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_amax_amin,),\n OpInfo('amin',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_amax_amin),\n OpInfo('argmax',\n dtypes=all_types_and(torch.float16, torch.bfloat16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_argmax_argmin,),\n OpInfo('argmin',\n dtypes=all_types_and(torch.float16, torch.bfloat16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_argmax_argmin,),\n UnaryUfuncInfo('asin',\n aliases=('arcsin', ),\n ref=np.arcsin,\n domain=(-1, 1),\n supports_sparse=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),),\n safe_casts_outputs=True,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n 
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n # \"rsqrt_cpu\" not implemented for 'BFloat16'\n backward_dtypesIfCPU=all_types_and_complex_and(torch.bool),\n assert_autodiffed=True,\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS)\n )),\n # NOTE: derivative for inplace asinh is not implemented\n UnaryUfuncInfo('asinh',\n aliases=('arcsinh', ),\n ref=np.arcsinh,\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n # \"rsqrt_cuda\" not implemented for 'BFloat16'\n backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 5e-2}),),\n supports_inplace_autograd=False,\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n )),\n UnaryUfuncInfo('atan',\n aliases=('arctan', ),\n ref=np.arctan,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),),\n safe_casts_outputs=True,\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n )),\n OpInfo('atan2',\n dtypes=all_types_and(torch.bool),\n dtypesIfCPU=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n sample_inputs_func=sample_inputs_atan2,\n ),\n UnaryUfuncInfo('atanh',\n aliases=('arctanh', ),\n ref=np.arctanh,\n domain=(-1, 1),\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n 
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),\n                   supports_inplace_autograd=False,\n                   skips=(\n                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],\n                                active_if=IS_WINDOWS),\n                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n                                device_type='cuda', dtypes=[torch.cfloat],\n                                active_if=IS_WINDOWS),\n                   )),\n    OpInfo('broadcast_to',\n           dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n           supports_out=False,\n           sample_inputs_func=sample_inputs_broadcast_to),\n    UnaryUfuncInfo('bitwise_not',\n                   ref=np.bitwise_not,\n                   dtypes=integral_types_and(torch.bool),\n                   supports_autograd=False),\n    OpInfo('cdist',\n           dtypes=floating_types(),\n           supports_out=False,\n           supports_gradgrad=False,\n           sample_inputs_func=sample_inputs_cdist),\n    UnaryUfuncInfo('ceil',\n                   ref=np.ceil,\n                   dtypes=floating_types_and(torch.bfloat16),\n                   dtypesIfCUDA=floating_types_and(torch.half),\n                   assert_autodiffed=True),\n    OpInfo('cholesky',\n           dtypes=floating_and_complex_types(),\n           check_batched_gradgrad=False,\n           sample_inputs_func=sample_inputs_linalg_cholesky,\n           gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n           decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n           skips=(\n               # cuda gradchecks are slow\n               # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n               SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n    OpInfo('cholesky_inverse',\n           dtypes=floating_and_complex_types(),\n           backward_dtypes=floating_types(),\n           # TODO: RuntimeError: cholesky_inverse does not support automatic differentiation for outputs\n           # with complex dtype.\n           check_batched_gradgrad=False,\n           sample_inputs_func=sample_inputs_linalg_cholesky_inverse,\n           gradcheck_wrapper=gradcheck_wrapper_triangular_input,\n           decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n           skips=(\n               # cholesky_inverse does not correctly warn when resizing out= inputs\n               SkipInfo('TestCommon', 'test_out'),)),\n    OpInfo('chunk',\n           dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n           sample_inputs_func=sample_inputs_chunk,\n           supports_out=False),\n    OpInfo('symeig',\n           dtypes=floating_and_complex_types(),\n           check_batched_gradgrad=False,\n           sample_inputs_func=sample_inputs_symeig,\n           gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n           decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n           skips=(\n               # cuda gradchecks are slow\n               # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n               SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)\n           ),\n    # NOTE: clamp has separate opinfos for scalar min/max (unary op) vs. 
tensors\n OpInfo('clamp',\n aliases=('clip',),\n dtypes=all_types_and(torch.half, torch.bfloat16),\n dtypesIfCPU=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_clamp),\n UnaryUfuncInfo('clamp',\n variant_test_name='scalar',\n aliases=('clip', ),\n decorators=(precisionOverride({torch.bfloat16: 7e-2, torch.float16: 1e-2}),),\n ref=np.clip,\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),\n assert_autodiffed=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/54841\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n ),\n sample_kwargs=sample_kwargs_clamp_scalar,\n sample_inputs_func=sample_inputs_clamp_scalar),\n UnaryUfuncInfo('positive',\n ref=np.positive,\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n supports_out=False,\n ),\n UnaryUfuncInfo('conj',\n ref=np.conj,\n dtypes=all_types_and_complex_and(torch.bool,\n torch.bfloat16, torch.half),\n skips=(\n # File \"test_unary_ufuncs.py\", line 289, in test_reference_numerics\n # if not torch.can_cast(numpy_to_torch_dtype_dict[expected.dtype.type], dtype):\n # KeyError: <class 'numpy.intc'>\n # Following error in Windows CI\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.int],\n active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.int],\n active_if=IS_WINDOWS),\n )),\n OpInfo('view_as_real',\n dtypes=complex_types(),\n sample_inputs_func=sample_inputs_view_as_real,\n ),\n OpInfo('view_as_complex',\n dtypes=floating_types_and(torch.half),\n supports_out=False,\n skips=(\n # \"sum_cpu/sum_cuda\" not implemented for 'ComplexHalf'\n SkipInfo('TestOpInfo', 'test_supported_backward', dtypes=(torch.half,)),\n ),\n sample_inputs_func=sample_inputs_view_as_complex),\n OpInfo('complex',\n dtypes=floating_types(),\n sample_inputs_func=sample_inputs_complex,\n ),\n OpInfo('copysign',\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_copysign,\n supports_inplace_autograd=False,\n ),\n UnaryUfuncInfo('cos',\n ref=np.cos,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),\n assert_autodiffed=True,\n handles_large_floats=False,\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),),\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),\n )),\n UnaryUfuncInfo('cosh',\n ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh),\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n assert_autodiffed=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/48641\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.int8]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 
'test_reference_numerics_hard',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', device_type='cpu',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),\n )),\n OpInfo('cumsum',\n dtypesIfCPU=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),\n skips=(\n # \"cumsum_out_{cpu,cuda}\" not implemented for 'Bool'\n SkipInfo('TestOpInfo', 'test_supported_dtypes',\n dtypes=(torch.bool,)),\n # cumsum does not handle correctly out= dtypes\n SkipInfo('TestCommon', 'test_out'),\n ),\n sample_inputs_func=sample_inputs_cumulative_ops),\n OpInfo('cumprod',\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16),\n skips=(\n # \"cumprod_out_{cpu, cuda}\" not implemented for 'Bool'\n SkipInfo('TestOpInfo', 'test_supported_dtypes',\n dtypes=(torch.bool,)),\n # cumprod does not handle correctly out= dtypes\n SkipInfo('TestCommon', 'test_out',\n dtypes=[torch.float32]),\n ),\n # gradgradcheck fails in fast_mode=True: #56275\n sample_inputs_func=sample_inputs_cumprod,\n gradcheck_fast_mode=False),\n OpInfo('cummax',\n dtypesIfCPU=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('cummin',\n dtypesIfCPU=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n UnaryUfuncInfo('deg2rad',\n ref=np.radians,\n decorators=(precisionOverride({torch.bfloat16: 7e-1,\n torch.float16: 7e-1}),),\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n ),\n safe_casts_outputs=True),\n OpInfo('diff',\n op=torch.diff,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_diff),\n OpInfo('div',\n variant_test_name='no_rounding_mode',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_div,\n skips=(SkipInfo('TestOpInfo', 'test_duplicate_method_tests'),),\n assert_autodiffed=True),\n OpInfo('div',\n variant_test_name='true_rounding',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_div, rounding_mode=None),\n skips=(SkipInfo('TestOpInfo', 'test_duplicate_method_tests'),),\n assert_autodiffed=True),\n OpInfo('div',\n variant_test_name='trunc_rounding',\n dtypes=all_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_div, rounding_mode='trunc'),\n skips=(SkipInfo('TestOpInfo', 'test_duplicate_method_tests'),),\n assert_autodiffed=True),\n OpInfo('div',\n variant_test_name='floor_rounding',\n dtypes=all_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_div, rounding_mode='floor'),\n skips=(SkipInfo('TestOpInfo', 'test_duplicate_method_tests'),),\n assert_autodiffed=True),\n UnaryUfuncInfo('exp',\n 
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/50093#pullrequestreview-561791547\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal', dtypes=[torch.bfloat16]),\n # Reference: https://github.com/pytorch/pytorch/issues/48010\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n ),\n assert_autodiffed=True,\n safe_casts_outputs=True),\n OpInfo('expand',\n op=lambda self, shape: self.expand(shape),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_expand,\n skips=(\n # Because expand does not have a function variant.\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),),\n supports_out=False),\n OpInfo('expand_as',\n op=lambda self, other: self.expand_as(other),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_expand_as,\n skips=(\n # Because expand_as does not have a function variant.\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),),\n supports_out=False),\n OpInfo('diag',\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCPU=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),\n sample_inputs_func=sample_inputs_diag),\n OpInfo('eq',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops),\n OpInfo('fmax',\n op=torch.fmax,\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_binary,),\n OpInfo('fmin',\n op=torch.fmin,\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_binary,),\n UnaryUfuncInfo('frac',\n ref=lambda x: np.modf(x)[0],\n dtypes=floating_types_and(torch.bfloat16, torch.float16),\n dtypesIfCUDA=floating_types_and(torch.float16),\n assert_autodiffed=True,\n # Reference for disabling extremals\n # https://github.com/pytorch/pytorch/issues/51948\n handles_extremals=False),\n SpectralFuncInfo('fft.fft',\n aten_name='fft_fft',\n ref=np.fft.fft,\n ndimensional=False,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types()),\n SpectralFuncInfo('fft.fftn',\n aten_name='fft_fftn',\n ref=np.fft.fftn,\n ndimensional=True,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n decorators=[precisionOverride(\n {torch.float: 1e-4, torch.cfloat: 1e-4})],),\n SpectralFuncInfo('fft.hfft',\n aten_name='fft_hfft',\n ref=np.fft.hfft,\n ndimensional=False,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False),\n SpectralFuncInfo('fft.rfft',\n aten_name='fft_rfft',\n ref=np.fft.rfft,\n ndimensional=False,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n 
check_batched_gradgrad=False),\n SpectralFuncInfo('fft.rfftn',\n aten_name='fft_rfftn',\n ref=np.fft.rfftn,\n ndimensional=True,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n decorators=[precisionOverride({torch.float: 1e-4})],),\n SpectralFuncInfo('fft.ifft',\n aten_name='fft_ifft',\n ref=np.fft.ifft,\n ndimensional=False,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types()),\n SpectralFuncInfo('fft.ifftn',\n aten_name='fft_ifftn',\n ref=np.fft.ifftn,\n ndimensional=True,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n SpectralFuncInfo('fft.ihfft',\n aten_name='fft_ihfft',\n ref=np.fft.ihfft,\n ndimensional=False,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_types(),\n check_batched_grad=False),\n SpectralFuncInfo('fft.irfft',\n aten_name='fft_irfft',\n ref=np.fft.irfft,\n ndimensional=False,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False),\n SpectralFuncInfo('fft.irfftn',\n aten_name='fft_irfftn',\n ref=np.fft.irfftn,\n ndimensional=True,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n UnaryUfuncInfo('floor',\n ref=np.floor,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half),\n assert_autodiffed=True),\n OpInfo('flip',\n op=torch.flip,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_flip,\n supports_out=False),\n OpInfo('fliplr',\n op=torch.fliplr,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_fliplr_flipud,\n supports_out=False),\n OpInfo('flipud',\n op=torch.flipud,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_fliplr_flipud,\n supports_out=False),\n UnaryUfuncInfo('i0',\n ref=np.i0,\n decorators=(precisionOverride({torch.bfloat16: 3e-1,\n torch.float16: 5e-1}),),\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_autograd=False),\n UnaryUfuncInfo('special.i0e',\n aten_name='special_i0e',\n ref=scipy.special.i0e if TEST_SCIPY else _NOTHING,\n decorators=(precisionOverride({torch.bfloat16: 3e-1,\n torch.float16: 3e-1}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n supports_autograd=False,\n safe_casts_outputs=True),\n OpInfo('floor_divide',\n dtypes=all_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_floor_divide,\n decorators=[_wrap_warn_once(\"floor_divide is deprecated, and will be removed\")],\n skips=(\n # `test_duplicate_method_tests` doesn't raise any warning, as it doesn't actually\n # call the operator.\n SkipInfo('TestOpInfo', 'test_duplicate_method_tests'),),\n supports_autograd=False,\n ),\n UnaryUfuncInfo('frexp',\n op=torch.frexp,\n ref=np.frexp,\n dtypes=floating_types_and(torch.half),\n # skip testing torch.frexp as it 
is not supported by ROCm platform yet\n                   decorators=[skipCUDAIfRocm],\n                   supports_out=False,\n                   skips=(\n                       # skips below tests as torch.frexp returns tuple-like (mantissa, exponent) as outputs,\n                       # while these tests currently require the output to be a single tensor.\n                       SkipInfo('TestUnaryUfuncs', 'test_batch_vs_slicing'),\n                       SkipInfo('TestUnaryUfuncs', 'test_contig_vs_every_other'),\n                       SkipInfo('TestUnaryUfuncs', 'test_contig_vs_transposed'),\n                       SkipInfo('TestUnaryUfuncs', 'test_non_contig_expand'),\n                       SkipInfo('TestUnaryUfuncs', 'test_variant_consistency'),\n\n                       # skips test_reference_numerics due to error in Windows CI.\n                       # The np.frexp returns exponent as np.intc dtype on Windows platform,\n                       # and np.intc does not have a corresponding torch dtype\n                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n                                active_if=IS_WINDOWS),\n                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n                                active_if=IS_WINDOWS),\n                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n                                active_if=IS_WINDOWS),\n                   )),\n    OpInfo('ge',\n           aliases=('greater_equal',),\n           dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n           supports_autograd=False,\n           sample_inputs_func=sample_inputs_comparison_ops),\n    OpInfo('geqrf',\n           dtypes=floating_and_complex_types(),\n           dtypesIfCPU=floating_and_complex_types(),\n           supports_autograd=False,\n           sample_inputs_func=sample_inputs_geqrf,\n           decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),\n    OpInfo('gt',\n           aliases=('greater',),\n           dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n           supports_autograd=False,\n           sample_inputs_func=sample_inputs_comparison_ops),\n    UnaryUfuncInfo('imag',\n                   ref=np.imag,\n                   dtypes=complex_types(),\n                   supports_out=False,\n                   supports_autograd=False,\n                   skips=(\n                       # Skip since real and imag don't have out variants.\n                       SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes'),\n                   )),\n    OpInfo('gradient',\n           dtypes=floating_and_complex_types_and(torch.int8, torch.int16,\n                                                 torch.int32, torch.int64,\n                                                 torch.bfloat16, torch.half),\n           supports_out=False,\n           skips=(\n               # following tests give a runtime error with undefined value tensor\n               # see discussion : https://github.com/pytorch/pytorch/issues/56660\n               SkipInfo('TestCommon', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)),\n           ),\n           supports_inplace_autograd=False,\n           sample_inputs_func=sample_inputs_gradient),\n    OpInfo('inverse',\n           op=torch.inverse,\n           dtypes=floating_and_complex_types(),\n           check_batched_gradgrad=False,\n           gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n           sample_inputs_func=sample_inputs_linalg_invertible,\n           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n           skips=(\n               # cuda gradchecks are slow\n               # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n               SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n    OpInfo('le',\n           aliases=('less_equal',),\n           dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n           supports_autograd=False,\n           sample_inputs_func=sample_inputs_comparison_ops),\n    OpInfo('linalg.det',\n           op=torch.linalg.det,\n           aliases=('det', ),\n           dtypes=floating_and_complex_types(),\n           # det doesn't support complex autograd, https://github.com/pytorch/pytorch/issues/57358\n           backward_dtypes=floating_types(),\n           aten_name='linalg_det',\n           sample_inputs_func=sample_inputs_linalg_det,\n           decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n           supports_inplace_autograd=False,\n           skips=(\n               # The following tests fail only on ROCm. 
This is probably\n # related to the fact that the current linalg.det backward is\n # unstable if the matrix has repeated singular values, see\n # https://github.com/pytorch/pytorch/issues/53364\n SkipInfo('TestGradients', 'test_fn_grad', device_type='cuda',\n dtypes=(torch.float64,), active_if=TEST_WITH_ROCM),\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda',\n dtypes=(torch.float64,), active_if=TEST_WITH_ROCM),\n SkipInfo('TestCommon', 'test_variant_consistency_jit', device_type='cuda',\n dtypes=(torch.float64, torch.float32), active_if=TEST_WITH_ROCM),\n )),\n OpInfo('linalg.cholesky',\n aten_name='linalg_cholesky',\n dtypes=floating_and_complex_types(),\n # TODO: RuntimeError: While computing batched gradients,\n # got: vmap: Calling Tensor.as_strided is not supported\n # unless the batch dims being vmapped over are at the front of the tensor (in memory layout).\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_cholesky,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)\n ),\n OpInfo('linalg.cholesky_ex',\n aten_name='linalg_cholesky_ex',\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_cholesky,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)\n ),\n OpInfo('linalg.eig',\n aten_name='linalg_eig',\n op=torch.linalg.eig,\n dtypes=floating_and_complex_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_linalg_invertible,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n OpInfo('linalg.eigvals',\n aten_name='linalg_eigvals',\n op=torch.linalg.eigvals,\n dtypes=floating_and_complex_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_linalg_invertible,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n OpInfo('linalg.eigh',\n aten_name='linalg_eigh',\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_eigh,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)\n ),\n OpInfo('linalg.eigvalsh',\n aten_name='linalg_eigvalsh',\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_eigh,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),\n OpInfo('linalg.householder_product',\n aten_name='linalg_householder_product',\n op=torch.linalg.householder_product,\n aliases=('orgqr', ),\n dtypes=floating_and_complex_types(),\n # TODO: backward uses in-place operations that vmap doesn't like\n check_batched_grad=False,\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_householder_product,\n 
decorators=[skipCUDAIfNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack,\n # gradgrad checks are slow\n DecorateInfo(slowTest, 'TestGradients', 'test_fn_gradgrad'), ]),\n OpInfo('linalg.lstsq',\n aten_name='linalg_lstsq',\n op=torch.linalg.lstsq,\n dtypes=floating_and_complex_types(),\n supports_out=True,\n sample_inputs_func=sample_inputs_linalg_lstsq,\n check_batched_grad=False,\n check_batched_gradgrad=False,\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n skips=(\n # skip because `linalg_lstsq` is not differentiable\n SkipInfo('TestGradients', 'test_fn_grad'),\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),\n )),\n OpInfo('linalg.matrix_power',\n aliases=('matrix_power',),\n aten_name='linalg_matrix_power',\n dtypes=floating_and_complex_types(),\n supports_inplace_autograd=False,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, skipCUDAIfRocm],\n sample_inputs_func=sample_inputs_linalg_matrix_power,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('linalg.multi_dot',\n # Need this lambda because gradcheck does not work with TensorList inputs\n aten_name='linalg_multi_dot',\n dtypes=floating_and_complex_types_and(torch.half),\n dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),\n supports_inplace_autograd=False,\n # Batched grad checks fail for empty input tensors (see https://github.com/pytorch/pytorch/issues/53407)\n check_batched_grad=False,\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_multi_dot,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('linalg.norm',\n op=torch.linalg.norm,\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n sample_inputs_func=sample_inputs_linalg_norm,\n aten_name='linalg_norm',\n skips=(\n # linalg.norm does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n )),\n OpInfo('linalg.matrix_norm',\n aten_name='linalg_matrix_norm',\n dtypes=floating_and_complex_types(),\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n sample_inputs_func=sample_inputs_linalg_matrix_norm,\n skips=(\n # linalg.matrix_norm does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n )),\n OpInfo('linalg.qr',\n aten_name='linalg_qr',\n op=torch.linalg.qr,\n dtypes=floating_and_complex_types(),\n # batched gradients do not work for empty inputs\n # https://github.com/pytorch/pytorch/issues/50743#issuecomment-767376085\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_qr,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n OpInfo('linalg.slogdet',\n aten_name='linalg_slogdet',\n op=torch.linalg.slogdet,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_slogdet,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n OpInfo('linalg.vector_norm',\n op=torch.linalg.vector_norm,\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n sample_inputs_func=sample_inputs_linalg_vector_norm,\n aten_name='linalg_vector_norm',\n skips=(\n # linalg.vector_norm does not correctly warn when resizing out= inputs\n 
SkipInfo('TestCommon', 'test_out'),\n )),\n UnaryUfuncInfo('log',\n ref=np.log,\n domain=(0, float('inf')),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 5e-2}),),\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n )),\n UnaryUfuncInfo('log10',\n ref=np.log10,\n domain=(0, float('inf')),\n decorators=(precisionOverride({torch.bfloat16: 5e-2}),),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n assert_autodiffed=True,\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n )),\n UnaryUfuncInfo('log1p',\n ref=np.log1p,\n domain=(-1, float('inf')),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n decorators=(precisionOverride({torch.bfloat16: 1e-1}),),\n safe_casts_outputs=True,\n assert_autodiffed=True),\n UnaryUfuncInfo('log2',\n ref=np.log2,\n domain=(0, float('inf')),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-1}),),\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.cfloat, torch.cdouble]),\n )),\n OpInfo('logaddexp',\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.bfloat16),\n dtypesIfROCM=floating_types_and(torch.bfloat16),\n sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:\n (SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n args=(make_tensor((S, S), device, dtype, requires_grad=requires_grad),)),)),\n OpInfo('logaddexp2',\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.bfloat16),\n dtypesIfROCM=floating_types_and(torch.bfloat16),\n sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:\n (SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n args=(make_tensor((S, S), device, dtype, requires_grad=requires_grad),)),)),\n UnaryUfuncInfo('logical_not',\n ref=np.logical_not,\n decorators=(precisionOverride({torch.bfloat16: 7e-1,\n torch.float16: 5e-1}),),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n supports_autograd=False,\n skips=(\n # The function variant always returns BoolTensor\n # while the inplace variant preserves the input dtype.\n # >>> t = torch.randn(3)\n # >>> torch.logical_not(t)\n # tensor([False, False, False])\n # >>> torch.logical_not(t).dtype\n # torch.bool\n # >>> t.logical_not_().dtype\n # torch.float32\n SkipInfo('TestUnaryUfuncs', 'test_variant_consistency',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),\n SkipInfo('TestCommon', 'test_variant_consistency_eager',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),\n )),\n OpInfo('lt',\n aliases=('less',),\n 
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n           supports_autograd=False,\n           sample_inputs_func=sample_inputs_comparison_ops),\n    OpInfo('lu',\n           op=torch.lu,\n           dtypes=floating_and_complex_types(),\n           supports_inplace_autograd=False,\n           check_batched_gradgrad=False,\n           supports_out=False,\n           sample_inputs_func=sample_inputs_lu,\n           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n           skips=(\n               # cuda gradchecks are slow\n               # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n               SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),\n               # we skip jit tests because lu_backward is implemented as autograd.Function,\n               # which does not support autograd with scripting\n               SkipInfo('TestCommon', 'test_variant_consistency_jit'),\n               # Skip operator schema test because this is a functional and not an operator\n               SkipInfo('TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n           )),\n    OpInfo('masked_fill',\n           dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n           sample_inputs_func=sample_inputs_masked_fill,\n           supports_out=False),\n    OpInfo('masked_scatter',\n           dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n           sample_inputs_func=sample_inputs_masked_scatter,\n           supports_out=False),\n    OpInfo('masked_select',\n           dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n           sample_inputs_func=sample_inputs_masked_select),\n    OpInfo('matrix_exp',\n           dtypesIfCPU=floating_and_complex_types_and(torch.bfloat16),\n           dtypesIfCUDA=floating_and_complex_types_and(torch.float16),\n           sample_inputs_func=sample_inputs_matrix_exp,\n           supports_out=False),\n    OpInfo('matmul',\n           dtypes=floating_types(),\n           dtypesIfCPU=all_types_and_complex(),\n           dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n           dtypesIfROCM=floating_types_and(torch.half, torch.bfloat16),\n           assert_autodiffed=True,\n           sample_inputs_func=sample_inputs_matmul,\n           skips=(\n               # matmul does not correctly warn when resizing out= inputs\n               SkipInfo('TestCommon', 'test_out'),\n               # https://github.com/pytorch/pytorch/issues/55754\n               SkipInfo('TestGradients', 'test_fn_grad',\n                        device_type='cpu', dtypes=(torch.complex128,)),\n               # https://github.com/pytorch/pytorch/issues/55755\n               SkipInfo('TestOpInfo', 'test_unsupported_dtypes',\n                        device_type='cpu', dtypes=(torch.float16,)),\n               # Backward for BFloat16 isn't supported because of the error\n               # \"RuntimeError: CUDA error: CUBLAS_STATUS_NOT_SUPPORTED when\n               # calling cublasGemmStridedBatchedExFix.\"\n               SkipInfo('TestOpInfo', 'test_supported_backward',\n                        device_type='cuda', dtypes=(torch.bfloat16,)),)),\n    OpInfo('max',\n           op=torch.max,\n           variant_test_name='binary',\n           dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n           sample_inputs_func=sample_inputs_max_min_binary,\n           assert_autodiffed=True,),\n    OpInfo('max',\n           op=torch.max,\n           variant_test_name='reduction_with_dim',\n           dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n           sample_inputs_func=sample_inputs_max_min_reduction_with_dim,\n           skips=(\n               # max does not correctly warn when resizing out= inputs\n               SkipInfo('TestCommon', 'test_out'),)),\n    OpInfo('max',\n           op=torch.max,\n           variant_test_name='reduction_no_dim',\n           dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n           supports_out=False,\n           sample_inputs_func=sample_inputs_max_min_reduction_no_dim,),\n    OpInfo('min',\n           op=torch.min,\n           variant_test_name='binary',\n           dtypes=all_types_and(torch.float16, 
torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_binary,\n assert_autodiffed=True,),\n OpInfo('min',\n op=torch.min,\n variant_test_name='reduction_with_dim',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_reduction_with_dim,\n skips=(\n # min does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n )),\n OpInfo('min',\n op=torch.min,\n variant_test_name='reduction_no_dim',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n supports_out=False,\n sample_inputs_func=sample_inputs_max_min_reduction_no_dim,),\n OpInfo('sum',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),\n supports_out=False,\n sample_inputs_func=sample_inputs_reduction_wrapper(supports_multiple_dims=True)),\n OpInfo('nansum',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n dtypesIfCPU=all_types_and(torch.float16, torch.bool),\n supports_out=False,\n sample_inputs_func=sample_inputs_reduction_wrapper(supports_multiple_dims=True)),\n # TODO(@heitorschueroff) Add test for dtype kwarg\n OpInfo('mean',\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_reduction_wrapper(supports_multiple_dims=True),\n # Need to skip out test because one of the overload for mean does not support it\n # TODO(@heitorschueroff) fix this when implementing ReductionInfo\n skips=(SkipInfo('TestCommon', 'test_out'),)),\n OpInfo('quantile',\n dtypes=floating_types(),\n sample_inputs_func=sample_inputs_reduction_quantile),\n OpInfo('nanquantile',\n dtypes=floating_types(),\n sample_inputs_func=sample_inputs_reduction_quantile),\n OpInfo('maximum',\n op=torch.maximum,\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_binary,),\n OpInfo('minimum',\n op=torch.minimum,\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_binary,),\n OpInfo('nn.functional.hardswish',\n supports_autograd=True,\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_hardswish,\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_gradgrad=False,\n supports_out=False,\n autodiff_fusible_nodes=[\"aten::hardswish\"]),\n OpInfo('topk',\n dtypes=all_types(),\n dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),\n sample_inputs_func=sample_inputs_topk,\n skips=(\n # Topk is not raising a warning when the out is resized\n SkipInfo('TestCommon', 'test_out'),\n )),\n OpInfo('mm',\n dtypes=floating_and_complex_types_and(torch.half),\n dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_mm,\n skips=(\n # mm does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n )),\n OpInfo('mode',\n op=torch.mode,\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_mode,),\n MvlGammaInfo(variant_test_name='mvlgamma_p_1',\n domain=(1e-4, float('inf')),\n skips=skips_mvlgamma(),\n sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})),\n MvlGammaInfo(variant_test_name='mvlgamma_p_3',\n domain=(1.1, float('inf')),\n skips=skips_mvlgamma(skip_redundant=True) + (\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', 
dtypes=(torch.float16,)),\n ),\n sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})),\n MvlGammaInfo(variant_test_name='mvlgamma_p_5',\n domain=(2.1, float('inf')),\n skips=skips_mvlgamma(skip_redundant=True) + (\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', dtypes=(torch.float16,)),\n ),\n sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})),\n OpInfo('ne',\n aliases=('not_equal',),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops),\n UnaryUfuncInfo('neg',\n aliases=('negative', ),\n ref=np.negative,\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n assert_autodiffed=True,),\n OpInfo('dist',\n op=torch.dist,\n dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),\n # \"pow\" not implemented for 'BFloat16' or 'half'\n backward_dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_dist,\n skips=(\n # dist does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n )),\n OpInfo('outer',\n op=torch.outer,\n aliases=('ger', ),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_outer,),\n OpInfo('ormqr',\n op=torch.ormqr,\n dtypes=floating_and_complex_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_ormqr,\n decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack]),\n OpInfo('permute',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_permute),\n OpInfo('pow',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),\n # Due to AVX2 currently not being fully supported for Float16, log_vml_cpu can't be enabled\n # for Float16, causing this test to fail. 
pow's autograd for Float16 is thus currently\n # unsupported on CPU.\n backward_dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_pow,\n supports_inplace_autograd=False,\n assert_autodiffed=True),\n OpInfo('float_power',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_pow),\n OpInfo('prod',\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n # \"cumprod_cuda\" not implemented for 'BFloat16'\n backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16),\n skips=(\n # prod does not support the (Tensor, *, out) overload\n SkipInfo('TestCommon', 'test_out',\n dtypes=[torch.float32]),\n ),\n sample_inputs_func=sample_inputs_prod,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('qr',\n op=torch.qr,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_qr,\n # batched gradients do not work for empty inputs\n # https://github.com/pytorch/pytorch/issues/50743#issuecomment-767376085\n check_batched_gradgrad=False,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n UnaryUfuncInfo('rad2deg',\n ref=np.degrees,\n decorators=(precisionOverride({torch.bfloat16: 7e-1,\n torch.float16: 7e-1}),),\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16]),\n ),\n safe_casts_outputs=True),\n UnaryUfuncInfo('real',\n ref=np.real,\n dtypes=complex_types(),\n supports_out=False,\n supports_autograd=False,\n skips=(\n # Skip since real and imag don't have out variants.\n SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes'),\n )),\n OpInfo('roll',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),\n dtypesIfROCM=all_types_and_complex_and(torch.bool, torch.half),\n supports_out=False,\n sample_inputs_func=sample_inputs_roll),\n OpInfo('rot90',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n sample_inputs_func=sample_inputs_rot90),\n UnaryUfuncInfo('round',\n ref=np.round,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half),\n assert_autodiffed=True,),\n UnaryUfuncInfo('sin',\n ref=np.sin,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n handles_large_floats=False,\n handles_complex_extremals=False,\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),)),\n UnaryUfuncInfo('sinc',\n ref=np_sinc_with_fp16_as_fp32,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n handles_large_floats=False,\n handles_complex_extremals=False,\n safe_casts_outputs=True,\n 
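# torch.sinc computes the normalized sinc: sin(pi*x) / (pi*x), with sinc(0) = 1\n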
decorators=(precisionOverride({torch.bfloat16: 1e-2,\n torch.float16: 1e-2}),),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/49133\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.cfloat]),\n )),\n UnaryUfuncInfo('sinh',\n ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh),\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n assert_autodiffed=True,\n decorators=(precisionOverride({torch.float16: 1e-2}),),\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n # Reference: https://github.com/pytorch/pytorch/issues/48641\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.int8]),\n )),\n UnaryUfuncInfo('sign',\n ref=reference_sign,\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/41245\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),\n )),\n UnaryUfuncInfo('sgn',\n ref=reference_sgn,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/41245\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),\n # Reference: https://github.com/pytorch/pytorch/issues/53958\n # Test fails in comparison on Nan as the `equal_nan` is True for\n # comparing the CPU tensors.\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.complex64, torch.complex128]),\n # Reference: https://github.com/pytorch/pytorch/issues/48486\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.complex64])\n )),\n OpInfo('__radd__',\n op=torch.Tensor.__radd__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::add'],),\n OpInfo('__rdiv__',\n op=torch.Tensor.__rdiv__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],),\n OpInfo('__rmul__',\n op=torch.Tensor.__rmul__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::mul'],),\n OpInfo('__rpow__',\n op=torch.Tensor.__rpow__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(\n # Reference: 
https://github.com/pytorch/pytorch/issues/54774\n # \"log2\" \"_vml_cpu\" not implemented for Half\n SkipInfo('TestOpInfo', 'test_supported_backward', device_type='cpu',\n dtypes=(torch.float16,)),\n\n SkipInfo('TestCommon', 'test_variant_consistency_jit',),),\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::pow'],),\n OpInfo('__rsub__',\n op=torch.Tensor.__rsub__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::rsub'],),\n OpInfo('rsub',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),\n variant_test_name='rsub_tensor',\n supports_out=False,\n supports_inplace_autograd=False,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/53797\n # JIT doesn't understand complex literals\n SkipInfo('TestCommon', 'test_variant_consistency_jit',\n dtypes=[torch.cfloat, torch.cdouble]),\n ),\n sample_inputs_func=partial(sample_inputs_rsub, variant='tensor'),),\n OpInfo('rsub',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),\n variant_test_name='rsub_scalar',\n supports_out=False,\n supports_inplace_autograd=False,\n sample_inputs_func=partial(sample_inputs_rsub, variant='scalar'),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/53797\n # JIT doesn't understand complex literals\n SkipInfo('TestCommon', 'test_variant_consistency_jit',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half)),),\n assert_autodiffed=True,),\n OpInfo('select',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_select,\n supports_out=False),\n UnaryUfuncInfo('signbit',\n ref=np.signbit,\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),\n supports_autograd=False,),\n OpInfo('solve',\n op=torch.solve,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_legacy_solve,\n check_batched_gradgrad=False,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n skips=(SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n OpInfo('std',\n dtypes=floating_and_complex_types_and(torch.half),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n # std doesn't support complex autograd, https://github.com/pytorch/pytorch/issues/57358\n backward_dtypesIfCPU=floating_types_and(torch.half),\n backward_dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_std_var,\n # TODO: std does support out in some signatures\n supports_out=False,\n assert_autodiffed=True,\n ),\n UnaryUfuncInfo('tan',\n ref=np.tan,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, 
torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.float64],\n active_if=TEST_WITH_ROCM),\n )),\n UnaryUfuncInfo('tanh',\n ref=np.tanh,\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n # \"tanh_backward_cpu\" not implemented for 'BFloat16'\n backward_dtypesIfCPU=all_types_and_complex_and(torch.bool),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n )),\n OpInfo('tensor_split',\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n skips=(SkipInfo('TestOpInfo', 'test_duplicate_method_tests'),),\n sample_inputs_func=sample_inputs_tensor_split,),\n OpInfo('hsplit',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n sample_inputs_func=sample_inputs_hsplit,),\n OpInfo('vsplit',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n sample_inputs_func=sample_inputs_vsplit,),\n OpInfo('dsplit',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n sample_inputs_func=sample_inputs_dsplit,),\n OpInfo('triangular_solve',\n op=torch.triangular_solve,\n dtypes=floating_and_complex_types(),\n supports_out=False,\n sample_inputs_func=sample_inputs_legacy_solve,\n check_batched_gradgrad=False,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n # CUDA gradchecks are slow and triangular solve backward is a composite operation\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n skips=(SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n UnaryUfuncInfo('trunc',\n aliases=('fix', ),\n ref=np.trunc,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16),\n assert_autodiffed=True),\n UnaryUfuncInfo('exp2',\n aliases=('special.exp2', ),\n ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2),\n dtypes=all_types_and(torch.bool, torch.half),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True),\n UnaryUfuncInfo('expm1',\n aliases=('special.expm1', ),\n ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n 
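# expm1(x) = exp(x) - 1, computed so it remains accurate for small |x|\n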
assert_autodiffed=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/48926#issuecomment-739734774\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n )),\n UnaryUfuncInfo('nan_to_num',\n ref=np.nan_to_num,\n dtypes=all_types_and(torch.half, torch.bool)),\n UnaryUfuncInfo('reciprocal',\n ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/45690\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.cfloat, torch.cdouble]),\n # Reference: https://github.com/pytorch/pytorch/pull/49102#issuecomment-744604601\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.bfloat16]),\n )),\n UnaryUfuncInfo('rsqrt',\n ref=lambda x: np.reciprocal(np.sqrt(x)),\n domain=(0, float('inf')),\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n decorators=(precisionOverride({torch.half: 5e-2}),),\n safe_casts_outputs=True,\n assert_autodiffed=True,\n handles_complex_extremals=False),\n UnaryUfuncInfo('sqrt',\n ref=np.sqrt,\n supports_sparse=True,\n domain=(0, float('inf')),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n decorators=(precisionOverride({torch.bfloat16: 7e-2}),),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/47358\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_MACOS),\n # Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16])),\n safe_casts_outputs=True,\n handles_complex_extremals=False),\n UnaryUfuncInfo('square',\n ref=np.square,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/52549\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.cfloat, torch.cdouble]),\n # >>> t = torch.tensor(complex(-0.01, float(\"inf\")))\n # >>> np.square(t.numpy())\n # (-inf-infj)\n # >>> t.square()\n # tensor(-inf-infj)\n # >>> t.cuda().square()\n # tensor(inf+nanj, device='cuda:0')\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),\n # Reference: https://github.com/pytorch/pytorch/pull/52551#issuecomment-782596181\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n ),),\n OpInfo('lerp',\n dtypes=floating_and_complex_types(),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half),\n dtypesIfROCM=floating_and_complex_types_and(torch.half),\n 
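# lerp(input, end, weight) = input + weight * (end - input)\n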
sample_inputs_func=sample_inputs_lerp,\n assert_autodiffed=True),\n OpInfo('linalg.inv',\n aten_name='linalg_inv',\n op=torch.linalg.inv,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_invertible,\n check_batched_gradgrad=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # linalg_inv does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n )),\n UnaryUfuncInfo('angle',\n ref=np.angle,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool),\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-2}),),\n safe_casts_outputs=True,\n supports_complex_to_float=True),\n OpInfo('linalg.solve',\n aten_name='linalg_solve',\n op=torch.linalg.solve,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_solve,\n check_batched_gradgrad=False,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n OpInfo('linalg.matrix_rank',\n aten_name='linalg_matrix_rank',\n dtypes=floating_and_complex_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_linalg_invertible,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n OpInfo('linalg.matrix_rank',\n aten_name='linalg_matrix_rank',\n variant_test_name='hermitian',\n dtypes=floating_and_complex_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_linalg_pinv_hermitian,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n OpInfo('linalg.pinv',\n aten_name='linalg_pinv',\n op=torch.linalg.pinv,\n dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_invertible,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n OpInfo('linalg.pinv',\n aten_name='linalg_pinv',\n variant_test_name='hermitian',\n dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_pinv_hermitian,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n OpInfo('eig',\n op=torch.eig,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_eig,\n decorators=[\n skipCUDAIfNoMagma,\n skipCPUIfNoLapack,\n skipCUDAIfRocm\n ],),\n OpInfo('einsum',\n # we need this lambda because SampleInput expects tensor input as the first argument\n # TODO(@heitorschueroff) update SampleInput to handle such cases\n op=lambda tensors, equation: torch.einsum(equation, tensors),\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half),\n supports_out=False,\n sample_inputs_func=sample_inputs_einsum,\n skips=(\n # test does not work with passing lambda for op\n # there's a test `test_einsum` in `test_jit.py` to handle this case\n 
SkipInfo('TestCommon', 'test_variant_consistency_jit'),\n # The following dtypes are only supported for some inputs, ideally we should have\n # checked this in the einsum code but to keep BC we'll just skip the tests for now.\n SkipInfo('TestOpInfo', 'test_unsupported_dtypes',\n dtypes=[torch.bool]),\n SkipInfo('TestOpInfo', 'test_unsupported_dtypes',\n device_type='cuda', dtypes=integral_types_and(torch.bfloat16)))),\n OpInfo('svd',\n op=torch.svd,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_svd,\n decorators=[\n skipCUDAIfNoMagmaAndNoCusolver,\n skipCUDAIfRocm,\n skipCPUIfNoLapack,\n # gradgrad checks are slow\n DecorateInfo(slowTest, 'TestGradients', 'test_fn_gradgrad'),\n ],\n skips=(\n # cuda gradchecks are very slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n OpInfo('linalg.svd',\n op=torch.linalg.svd,\n aten_name='linalg_svd',\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_svd,\n decorators=[\n skipCUDAIfNoMagmaAndNoCusolver,\n skipCUDAIfRocm,\n skipCPUIfNoLapack,\n # gradgrad checks are slow\n DecorateInfo(slowTest, 'TestGradients', 'test_fn_gradgrad'),\n ],\n skips=(\n # cuda gradchecks are very slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n OpInfo('linalg.svdvals',\n op=torch.linalg.svdvals,\n aten_name='linalg_svdvals',\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_svdvals,\n check_batched_gradgrad=False,\n decorators=[\n skipCUDAIfNoMagmaAndNoCusolver,\n skipCPUIfNoLapack]),\n OpInfo('polar',\n dtypes=floating_types(),\n sample_inputs_func=sample_inputs_polar),\n # TODO(@kshitij12345): Refactor similar to `mvlgamma` entries.\n # To test reference numerics against multiple values of argument `n`,\n # we make multiple OpInfo entries with each entry corresponding to different value of n (currently 0 to 4).\n # We run the op tests from test_ops.py only for `n=0` to avoid redundancy in testing.\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_0',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Probably related to the way the function is\n # scripted for JIT tests (or maybe not).\n # RuntimeError:\n # Arguments for call are not valid.\n # The following variants are available:\n # aten::polygamma(int n, Tensor self) -> (Tensor):\n # Expected a value of type 'Tensor' for argument 'self' but instead found type 'int'.\n # aten::polygamma.out(int n, Tensor self, *, Tensor(a!) 
out) -> (Tensor(a!)):\n # Expected a value of type 'Tensor' for argument 'self' but instead found type 'int'.\n # The original call is:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.polygamma(i0, 1)\n # ~~~~~~~~~~~~~~~ <--- HERE\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),),\n sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0})),\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_1',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Redundant tests\n SkipInfo('TestGradients'),\n SkipInfo('TestOpInfo'),\n SkipInfo('TestCommon'),\n # Mismatch: https://github.com/pytorch/pytorch/issues/55357\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard'),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal'),\n ),\n sample_kwargs=lambda device, dtype, input: ({'n': 1}, {'n': 1})),\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_2',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Redundant tests\n SkipInfo('TestGradients'),\n SkipInfo('TestOpInfo'),\n SkipInfo('TestCommon'),\n # Mismatch: https://github.com/pytorch/pytorch/issues/55357\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n active_if=TEST_WITH_ROCM),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=TEST_WITH_ROCM),),\n sample_kwargs=lambda device, dtype, input: ({'n': 2}, {'n': 2})),\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_3',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Redundant tests\n SkipInfo('TestGradients'),\n SkipInfo('TestOpInfo'),\n SkipInfo('TestCommon'),\n # Mismatch: https://github.com/pytorch/pytorch/issues/55357\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n active_if=TEST_WITH_ROCM),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=TEST_WITH_ROCM),),\n sample_kwargs=lambda device, dtype, input: ({'n': 3}, {'n': 3})),\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_4',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n decorators=(precisionOverride({torch.float16: 5e-4, torch.float32: 5e-4}),),\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Redundant tests\n SkipInfo('TestGradients'),\n SkipInfo('TestOpInfo'),\n SkipInfo('TestCommon'),\n # Mismatch: https://github.com/pytorch/pytorch/issues/55357\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),\n SkipInfo('TestUnaryUfuncs', 
'test_reference_numerics_hard',\n active_if=TEST_WITH_ROCM),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=TEST_WITH_ROCM),),\n sample_kwargs=lambda device, dtype, input: ({'n': 4}, {'n': 4})),\n OpInfo('ravel',\n dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfROCM=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_ravel,\n ),\n OpInfo('view',\n op=lambda x, shape: x.view(shape),\n dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfROCM=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n skips=(\n # Because view does not have a function variant.\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),),\n sample_inputs_func=sample_inputs_view,\n ),\n OpInfo('view_as',\n op=lambda x, other: x.view_as(other),\n dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfROCM=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n skips=(\n # Because view_as does not have a function variant.\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),),\n sample_inputs_func=sample_inputs_view_as,\n ),\n OpInfo('pinverse',\n op=torch.pinverse,\n dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n supports_out=False,\n sample_inputs_func=sample_inputs_linalg_invertible,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n OpInfo('gather',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_gather,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n ),\n OpInfo('index_fill',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_inplace_autograd=False,\n skips=(SkipInfo('TestOpInfo', 'test_duplicate_method_tests'),),\n supports_out=False,\n sample_inputs_func=sample_inputs_index_fill),\n OpInfo('index_copy',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_inplace_autograd=False,\n supports_out=False,\n sample_inputs_func=sample_inputs_index_copy,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('index_select',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_index_select,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('index_add',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_index_add,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('__getitem__',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_inplace_autograd=False,\n op=torch.Tensor.__getitem__,\n sample_inputs_func=sample_inputs_getitem,\n 
skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit'),)),\n OpInfo('index_put',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_inplace_autograd=True,\n sample_inputs_func=sample_inputs_index_put,\n skips=(\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),\n )),\n OpInfo('sort',\n dtypes=all_types_and(torch.bool, torch.float16),\n dtypesIfCUDA=all_types_and(torch.float16),\n dtypesIfROCM=all_types_and(torch.float16),\n sample_inputs_func=sample_inputs_sort,\n skips=(\n # sort does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n )),\n OpInfo('put',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n check_batched_gradgrad=False, # vmap complains of the sizes\n sample_inputs_func=sample_inputs_put),\n OpInfo('take',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n check_batched_grad=False, # vmap complains of the sizes\n sample_inputs_func=sample_inputs_take),\n OpInfo('scatter',\n dtypes=all_types_and_complex_and(torch.bool, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_scatter,\n supports_out=False),\n OpInfo('scatter_add',\n dtypes=all_types_and_complex_and(torch.bool, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_scatter_add,\n supports_out=False),\n OpInfo('stack',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_stack,\n assert_autodiffed=True,\n skips=(\n # stack does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),),),\n OpInfo('hstack',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_hstack_dstack_vstack,\n skips=(\n # hstack does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),),),\n OpInfo('hypot',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half),\n sample_inputs_func=sample_inputs_hypot,\n ),\n OpInfo('vstack',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_hstack_dstack_vstack,\n skips=(\n # vstack does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),),),\n OpInfo('dstack',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_hstack_dstack_vstack,\n skips=(\n # dstack does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),),),\n OpInfo('unfold',\n op=lambda x, *args: x.unfold(*args),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n check_batched_gradgrad=False,\n skips=(\n # torch.unfold does not exist so we get a RuntimeError.\n SkipInfo('TestCommon', 'test_variant_consistency_jit',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),\n # Skip operator schema test because this is a functional and not an operator\n SkipInfo('TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n ),\n sample_inputs_func=sample_inputs_unfold),\n OpInfo('msort',\n dtypes=all_types_and(torch.float16),\n check_batched_gradgrad=False,\n skips=(\n # msort does not correctly warn when 
resizing out= inputs.\n SkipInfo('TestCommon', 'test_out',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),\n # msort does not raise expected Runtime Error.\n SkipInfo('TestOpInfo', 'test_unsupported_dtypes', dtypes=[torch.bool]),\n ),\n sample_inputs_func=sample_inputs_msort),\n OpInfo('movedim',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_movedim_moveaxis),\n OpInfo('moveaxis',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_movedim_moveaxis),\n ShapeFuncInfo('repeat',\n op=lambda x, dims: x.repeat(dims),\n ref=np.tile,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n skips=(\n # torch.repeat does not exist so we get a RuntimeError.\n SkipInfo('TestCommon', 'test_variant_consistency_jit',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),\n ),\n sample_inputs_func=sample_repeat_tile),\n OpInfo('take_along_dim',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_inplace_autograd=False,\n sample_inputs_func=sample_inputs_take_along_dim,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n ShapeFuncInfo('tile',\n ref=np.tile,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_repeat_tile),\n OpInfo('unsqueeze',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n assert_autodiffed=True,\n sample_inputs_func=sample_unsqueeze),\n OpInfo('var',\n dtypes=floating_and_complex_types_and(torch.half),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n # var doesn't support complex autograd, https://github.com/pytorch/pytorch/issues/57358\n backward_dtypesIfCPU=floating_types_and(torch.half),\n backward_dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_std_var,\n # TODO: revisit, some var signatures do support out (see std, too)\n supports_out=False,\n assert_autodiffed=True,\n ),\n OpInfo('xlogy',\n dtypes=all_types_and(torch.bool),\n dtypesIfCPU=all_types_and(torch.bool, torch.half, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n supports_inplace_autograd=True,\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_xlogy),\n OpInfo('special.xlog1py',\n aten_name='special_xlog1py',\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n skips=(\n SkipInfo('TestOpInfo', 'test_supported_backward',\n device_type='cpu', dtypes=[torch.float16]),\n ),\n sample_inputs_func=sample_inputs_xlog1py),\n OpInfo('logsumexp',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_logsumexp),\n OpInfo('trace',\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),\n supports_inplace_autograd=False,\n supports_out=False,\n sample_inputs_func=sample_inputs_trace),\n OpInfo('transpose',\n aliases=('swapdims', 'swapaxes'),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n 
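# transpose(input, dim0, dim1) swaps the two given dimensions; swapdims/swapaxes are aliases\n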
sample_inputs_func=sample_inputs_transpose_swapdims),\n OpInfo('kron',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_inplace_autograd=False,\n sample_inputs_func=sample_inputs_kron),\n OpInfo('inner',\n dtypes=floating_and_complex_types_and(torch.half),\n dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_inner),\n OpInfo('tensordot',\n dtypes=floating_and_complex_types_and(torch.half),\n dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_tensordot,\n skips=(\n # Currently failing due to an INTERNAL_ASSERT_FAILED error.\n # Reference: https://github.com/pytorch/pytorch/issues/56314\n SkipInfo(\"TestCommon\", \"test_variant_consistency_jit\", dtypes=[torch.float32]),\n # Skip operator schema test because this is a functional and not an operator.\n # Reference: https://github.com/pytorch/pytorch/issues/54574\n SkipInfo('TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )\n ),\n OpInfo('logcumsumexp',\n dtypes=floating_types_and(),\n dtypesIfCUDA=floating_types_and(torch.half),\n skips=(\n # AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it.\n SkipInfo('TestCommon', 'test_out', dtypes=(torch.float32,), device_type='cuda'),\n # logcumsumexp_backward not implemented for 'Half\n SkipInfo('TestOpInfo', 'test_supported_backward', dtypes=(torch.float16,), device_type='cuda'),\n ),\n sample_inputs_func=sample_inputs_logcumsumexp),\n UnaryUfuncInfo('sigmoid',\n aliases=('special.expit', ),\n ref=reference_sigmoid if TEST_SCIPY else _NOTHING,\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.complex64: 1e-1,\n torch.bfloat16: 1e-2}),),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/56012\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.complex64]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.complex64]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble])),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n # sigmoid doesn't support complex autograd, https://github.com/pytorch/pytorch/issues/48552\n backward_dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),\n backward_dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16),\n safe_casts_outputs=True,\n assert_autodiffed=True),\n UnaryUfuncInfo('digamma',\n ref=scipy.special.digamma if TEST_SCIPY else _NOTHING,\n decorators=(precisionOverride({torch.float16: 5e-1}),),\n dtypes=all_types_and(torch.bool),\n 
dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True),\n UnaryUfuncInfo('special.entr',\n ref=scipy.special.entr if TEST_SCIPY else _NOTHING,\n aten_name='special_entr',\n decorators=(precisionOverride({torch.float16: 1e-1,\n torch.bfloat16: 1e-1}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16, torch.float16]),\n ),\n supports_inplace_autograd=False,\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_entr),\n UnaryUfuncInfo('erf',\n ref=scipy.special.erf if TEST_SCIPY else _NOTHING,\n aliases=('special.erf', ),\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-2}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True),\n UnaryUfuncInfo('erfc',\n ref=scipy.special.erfc if TEST_SCIPY else _NOTHING,\n aliases=('special.erfc', ),\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-2}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True),\n UnaryUfuncInfo('erfinv',\n ref=scipy.special.erfinv if TEST_SCIPY else _NOTHING,\n aliases=('special.erfinv', ),\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-2,\n torch.float32: 1e-4}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n domain=(-1, 1),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < \"1.4.0\"),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < \"1.4.0\"),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < \"1.4.0\"),\n )),\n UnaryUfuncInfo('lgamma',\n ref=reference_lgamma if TEST_SCIPY else _NOTHING,\n aliases=('special.gammaln', ),\n decorators=(precisionOverride({torch.float16: 7e-1}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n # \"digamma\" not implemented for 'BFloat16'\n backward_dtypesIfCPU=all_types_and(torch.bool),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/50140#discussion_r552615345\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n # Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),\n ),\n 
safe_casts_outputs=True),\n OpInfo(\n 'logdet',\n supports_out=False,\n sample_inputs_func=sample_inputs_logdet,\n decorators=(skipCPUIfNoLapack, skipCUDAIfNoMagma, skipCUDAIfRocm)),\n UnaryUfuncInfo('logit',\n ref=scipy.special.logit if TEST_SCIPY else _NOTHING,\n domain=(0, 1),\n aliases=('special.logit', ),\n decorators=(precisionOverride({torch.bfloat16: 5e-1,\n torch.float16: 5e-1}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_logit,\n safe_casts_outputs=True),\n]\n\n# Common operator groupings\nunary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo)]\nspectral_funcs = [op for op in op_db if isinstance(op, SpectralFuncInfo)]\nsparse_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse is True]\nshape_funcs = [op for op in op_db if isinstance(op, ShapeFuncInfo)]\n\ndef index_variable(shape, max_indices, device=torch.device('cpu')):\n if not isinstance(shape, tuple):\n shape = (shape,)\n index = torch.rand(*shape, dtype=torch.double, device=device).mul_(max_indices).floor_().long()\n return index\n\n\ndef index_perm_variable(shape, max_indices):\n if not isinstance(shape, tuple):\n shape = (shape,)\n\n index = torch.randperm(max_indices).narrow(0, 0, reduce(mul, shape)).view(shape)\n return index\n\n\ndef gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')):\n assert len(shape) == 2\n assert index_dim < 2\n batch_dim = 1 - index_dim\n index = torch.zeros(*shape, dtype=torch.long, device=device)\n for i in range(shape[index_dim]):\n index.select(index_dim, i).copy_(\n torch.randperm(max_indices, device=device)[:shape[batch_dim]])\n if duplicate:\n index.select(batch_dim, 0).copy_(index.select(batch_dim, 1))\n return index\n\n\ndef bernoulli_scalar():\n return torch.tensor(0, dtype=torch.bool).bernoulli_()\n\n\ndef mask_not_all_zeros(shape):\n assert len(shape) > 0\n while True:\n result = torch.randn(shape).gt(0)\n if result.sum() > 0:\n return result\n\n\ndef uniform_scalar(offset=0, requires_grad=False):\n v = torch.rand(()) + offset\n v.requires_grad = requires_grad\n return v\n\n\ndef normal_scalar_clamp(amin, amax, requires_grad=False):\n v = torch.randn(()).clamp(amin, amax)\n v.requires_grad = requires_grad\n return v\n\n\ndef prod_zeros(dim_size, dim_select):\n assert len(dim_select) == 2\n result = torch.randn(dim_size, dim_size, dim_size)\n result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_()\n result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_()\n result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_()\n return result\n\n\nnon_differentiable = collections.namedtuple('non_differentiable', ['tensor'])\n\n\nclass dont_convert(tuple):\n pass\n\n\nclass NoArgsClass(object):\n def __iter__(self):\n return self\n\n def __next__(self):\n raise StopIteration()\n next = __next__ # Python 2 compatibility\n\n def __len__(self):\n return 0\n\nNO_ARGS = NoArgsClass()\n\ndef ident(x):\n return x\n\n# Do NOT add to this list. 
Method tests are being DEPRECATED and replaced by OpInfos.\n# See https://github.com/pytorch/pytorch/wiki/Writing-tests-in-PyTorch-1.8\n#\n# (\n# method name,\n# input size/constructing fn,\n# args (tuple represents shape of a tensor arg),\n# test variant name (will be used at test name suffix), // optional\n# (should_autodiff_node[bool], nonfusible_nodes, fusible_nodes) for autodiff, // optional\n# indices for possible dim arg, // optional\n# fn mapping output to part that should be gradcheck'ed, // optional\n# kwargs // optional\n# )\n# Note: some functions have separate schema for (Tensor other) and (Scalar other),\n# and it's possible that we only support AD for Scalar version but not Tensor\n# version, and vice versa.\n# When writing tests, only scalar(float/int) input triggers the Scalar schema.\n# uniform_scalar produces a scalar **Tensor** which won't match Scalar input.\ndef method_tests():\n set_rng_seed(SEED)\n return [\n ('div', (S, S, S), (torch.rand(S, S, S) + 0.1,), '', (True,)),\n ('div', (S, S, S), (torch.rand(S, S) + 0.1,), 'broadcast_rhs', (True,)),\n ('div', (S, S), (torch.rand(S, S, S) + 0.1,), 'broadcast_lhs', (True,)),\n ('div', (S, 1, S), (torch.rand(M, S) + 0.1,), 'broadcast_all', (True,)),\n ('div', (), (uniform_scalar(0.1),), 'scalar', (True,)),\n ('div', (S, S, S), (uniform_scalar(0.1),), 'scalar_broadcast_rhs', (True,)),\n ('div', (), (uniform_scalar(0.1),), 'scalar_broadcast_lhs', (True,)),\n ('div', torch.rand(S, S, S) + 1e-1, (3.14,), 'constant', (True,)),\n ('div', uniform_scalar(1e-1, requires_grad=True), (3.14,), 'scalar_constant', (True,)),\n ('true_divide', (S, S, S), (torch.rand(S, S, S) + 0.1,), '', (True,)),\n ('true_divide', (S, S, S), (torch.rand(S, S) + 0.1,), 'broadcast_rhs', (True,)),\n ('true_divide', (S, S), (torch.rand(S, S, S) + 0.1,), 'broadcast_lhs', (True,)),\n ('true_divide', (S, 1, S), (torch.rand(M, S) + 0.1,), 'broadcast_all', (True,)),\n ('true_divide', (), (uniform_scalar(0.1),), 'scalar', (True,)),\n ('true_divide', (S, S, S), (uniform_scalar(0.1),), 'scalar_broadcast_rhs', (True,)),\n ('true_divide', (), (uniform_scalar(0.1),), 'scalar_broadcast_lhs', (True,)),\n ('true_divide', torch.rand(S, S, S) + 1e-1, (3.14,), 'constant', (True,)),\n ('true_divide', uniform_scalar(1e-1, requires_grad=True), (3.14,), 'scalar_constant', (True,)),\n ('div', (S, S, S), (torch.rand(S, S, S, dtype=torch.cdouble) + 0.1,), 'complex', (True,)),\n ('div', (S, S, S), (torch.rand(S, S, dtype=torch.cdouble) + 0.1,), 'complex_broadcast_rhs', (True,)),\n ('div', (S, S), (torch.rand(S, S, S, dtype=torch.cdouble) + 0.1,), 'complex_broadcast_lhs', (True,)),\n ('div', (S, 1, S), (torch.rand(M, S, dtype=torch.cdouble) + 0.1,), 'complex_broadcast_all', (True,)),\n ('div', (), (uniform_scalar(0.1j),), 'complex_scalar', (True,)),\n ('div', (S, S, S), (uniform_scalar(0.1j),), 'complex_scalar_broadcast_rhs', (True,)),\n ('div', (), (uniform_scalar(0.1j),), 'complex_scalar_broadcast_lhs', (True,)),\n ('div', torch.rand(S, S, S, dtype=torch.cdouble) + 1e-1, (3.14j,), 'complex_constant', (True,)),\n ('div', uniform_scalar(1e-1j, requires_grad=True), (3.14j,), 'complex_scalar_constant', (True,)),\n ('t', (1, 2), NO_ARGS, '', (False,)),\n ('reshape', (S, S, S), (S * S, S), '', (False,)),\n ('reshape', (torch.Size([S * S, S]),), (S, S, S), 'size', (False,)),\n ('reshape', (S,), (S,), '1d', (False,)),\n ('reshape', (), (dont_convert(()),), 'scalar_to_scalar', (False,)),\n ('reshape', (), (1,), 'scalar_to_1d', (False,)),\n ('reshape_as', (S, S, S), 
(non_differentiable(torch.rand(S * S, S)),)),\n ('reshape_as', (), (non_differentiable(torch.tensor(42.)),), 'scalar'),\n ('reshape_as', (), (non_differentiable(torch.rand(1, 1)),), 'scalar_to_dims'),\n ('fmod', (S, S, S), (1.5,), '', (True,)),\n ('fmod', (), (1.5,), 'scalar', (True,)),\n ('fmod', (S, S, S), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor'),\n ('fmod', (S,), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor_broadcast_lhs'),\n ('fmod', (S, S, S), (non_differentiable(torch.rand(S) + 1.5),), 'tensor_broadcast_rhs'),\n ('fmod', (S, 1, S), (non_differentiable(torch.rand(S, S) + 1.5),), 'tensor_broadcast_all'),\n ('fmod', (), (non_differentiable(uniform_scalar(1.5)),), 'scalar_tensor'),\n ('fmod', (), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'scalar_tensor_broadcast_lhs'),\n ('fmod', (S, S, S), (non_differentiable(uniform_scalar(1.5)),), 'scalar_tensor_broadcast_rhs'),\n ('remainder', (S, S, S), (1.5,), '', (True,)),\n ('remainder', (), (1.5,), 'scalar', (True,)),\n ('remainder', (S, S, S), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor'),\n ('remainder', (S,), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor_broadcast_lhs'),\n ('remainder', (S, 1, S), (non_differentiable(torch.rand(S, S) + 1.5),), 'tensor_broadcast_all'),\n ('remainder', (), (non_differentiable(uniform_scalar(1.5)),), 'scalar_tensor'),\n ('remainder', (), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'scalar_tensor_broadcast_lhs'),\n ('kthvalue', (S, S, S), (2,)),\n ('kthvalue', (S, S, S), (2, 1,), 'dim', (), [1]),\n ('kthvalue', (S, S, S), (2, 1, True,), 'keepdim_dim', (), [1]),\n ('kthvalue', (S,), (2, 0,), 'dim_1d', (), [1]),\n ('kthvalue', (S,), (2, 0, True,), 'keepdim_dim_1d', (), [1]),\n ('kthvalue', (), (1,), 'scalar', (), ()),\n ('kthvalue', (), (1, 0,), 'scalar_dim', (), [1]),\n ('kthvalue', (), (1, 0, True), 'scalar_keepdim_dim', (), [1]),\n ('median', (S, S, S), NO_ARGS),\n ('median', (S, S, S), (1,), 'dim', (), [0]),\n ('median', (S, S, S), (1, True,), 'keepdim_dim', (), [0]),\n ('median', (), NO_ARGS, 'scalar'),\n ('median', (), (0,), 'scalar_dim', (), [0]),\n ('median', (), (0, True,), 'scalar_keepdim_dim', (), [0]),\n ('nanmedian', (S, S, S), NO_ARGS),\n ('nanmedian', (S, S, S), (1,), 'dim', (), [0]),\n ('nanmedian', (S, S, S), (1, True,), 'keepdim_dim', (), [0]),\n ('nanmedian', (), NO_ARGS, 'scalar'),\n ('nanmedian', (), (0,), 'scalar_dim', (), [0]),\n ('nanmedian', (), (0, True,), 'scalar_keepdim_dim', (), [0]),\n ('var_mean', (S, S, S), NO_ARGS, ''),\n ('var_mean', (S, S, S), (1,), 'dim', [0]),\n ('var_mean', (S, S, S), (1, True, True), 'keepdim_dim', [0]),\n ('var_mean', (S,), (0,), 'dim_1d', [0]),\n ('var_mean', (S,), (0, True, True), 'keepdim_dim_1d', [0]),\n ('std_mean', (S, S, S), NO_ARGS, ''),\n ('std_mean', (S, S, S), (1,), 'dim', [0]),\n ('std_mean', (S, S, S), (1, True, True), 'keepdim_dim', [0]),\n ('std_mean', (S,), (0,), 'dim_1d', [0]),\n ('std_mean', (S,), (0, True, True), 'keepdim_dim_1d', [0]),\n ('renorm', (S, S, S), (2, 1, 0.5), 'dim', (), [1]),\n ('renorm', (S, S, S), (1, 2, 3), 'norm_1'),\n ('renorm', (S, S, S), (inf, 2, 0.5), 'norm_inf'),\n ('log_softmax', (S, S, S), (1, torch.float64,), 'kwarg_dtype_would_break_jit_loader', (True,)),\n ('zero_', (S, S, S), NO_ARGS),\n ('zero_', (), NO_ARGS, 'scalar'),\n ('norm', (S, S), (), 'default'),\n ('norm', (S, S), (2,), '2'),\n ('norm', (S, S), (0,), '0'),\n ('norm', (S, S), (0.5,), '0_5'),\n ('norm', (S, S), (1,), '1'),\n ('norm', (S, S), (3,), '3'),\n ('norm', (S, S), (inf,), 
'inf'),\n ('norm', (S, S), (-inf,), '-inf'),\n ('norm', (S, S), ('fro',), 'fro_default'),\n ('norm', (S, S), ('fro', [0, 1],), 'fro'),\n ('norm', (S, S), ('nuc',), 'nuc', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),\n ('norm', (S, S, S), ('nuc', [1, 2]), 'nuc_batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),\n ('norm', (S, S), (-1,), 'neg_1'),\n ('norm', (S, S), (-2,), 'neg_2'),\n ('norm', (S, S), (-0.5,), 'neg_0_5'),\n ('norm', (S, S), (-1.5,), 'neg_1_5'),\n ('norm', (S, S), (-2, 1,), 'neg_2_2_dim', (), [1]),\n ('norm', (S, S), (-1, 1,), 'neg_1_2_dim', (), [1]),\n ('norm', (S, S), (0, 1,), '0_2_dim', (), [1]),\n ('norm', (S, S), (1, 1,), '1_2_dim', (), [1]),\n ('norm', (S, S), (2, 1,), '2_2_dim', (), [1]),\n ('norm', (S, S), (3, 1,), '3_2_dim', (), [1]),\n ('norm', (S, S), (inf, 1,), 'inf_2_dim'),\n ('norm', torch.rand(S, S, S) + 5e-2, (1.5,), '1_5_default'),\n ('norm', (S, S, S), (2, 1), '2_dim', (), [1]),\n ('norm', (S, S, S), (3, 1), '3_dim', (), [1]),\n ('norm', torch.rand(S, S, S) + 5e-2, (1.5, 1), '1_5_dim', (), [1]),\n ('norm', (S, S, S), (2, 1, True), 'keepdim_2_dim', (), [1]),\n ('norm', (S, S, S), (3, 1, True), 'keepdim_3_dim', (), [1]),\n ('norm', torch.rand(S, S, S) + 5e-2, (1.5, 1, True), 'keepdim_1_5_dim', (), [1]),\n ('norm', (), (2, 0), '2_dim_scalar', (), [1]),\n ('norm', (), (3, 0), '3_dim_scalar', (), [1]),\n ('norm', (), (2, 0, True), 'keepdim_2_dim_scalar', (), [1]),\n ('norm', (), (3, 0, True), 'keepdim_3_dim_scalar', (), [1]),\n ('clone', (S, M, S), NO_ARGS),\n ('clone', (), NO_ARGS, 'scalar'),\n ('contiguous', (S, S), NO_ARGS, '', (True,)),\n ('contiguous', torch.randn(S, S).transpose(0, 1), NO_ARGS, 'not_contiguous', (True,)),\n ('diag_embed', (S, S), NO_ARGS),\n ('diagonal', (M, M), NO_ARGS, '2d'),\n ('diagonal', (3, 5), NO_ARGS, '2d_wide'),\n ('diagonal', (3, 5), (2,), '2d_wide_pos'),\n ('diagonal', (3, 5), (-2,), '2d_wide_neg'),\n ('diagonal', (5, 3), NO_ARGS, '2d_tall'),\n ('diagonal', (5, 3), (2,), '2d_tall_pos'),\n ('diagonal', (5, 3), (-2,), '2d_tall_neg'),\n ('diagonal', (M, M), (1,), '2d_1'),\n ('diagonal', (M, M), (2,), '2d_2'),\n ('diagonal', (M, M, M), (1, 1, 2), '3d_1'),\n ('diagonal', (M, M, M), (2, 0, 1), '3d_2'),\n ('diagonal', (M, M, M), (-2, 0, 1), '3d_3'),\n ('tril', (M, M), NO_ARGS),\n ('tril', (M, M), (2,), 'idx'),\n ('tril', (S, M, M), NO_ARGS, 'batched'),\n ('tril', (S, M, M), (2,), 'batched_idx'),\n ('tril', (3, 3, S, S), NO_ARGS, 'more_batched'),\n ('triu', (M, M), NO_ARGS),\n ('triu', (M, M), (2,), 'idx'),\n ('triu', (S, M, M), NO_ARGS, 'batched'),\n ('triu', (S, M, M), (2,), 'batched_idx'),\n ('triu', (3, 3, S, S), NO_ARGS, 'more_batched'),\n ('cross', (S, 3), ((S, 3),)),\n ('cross', (S, 3, S), ((S, 3, S), 1), 'dim'),\n ('fill_', (S, S, S), (1,), 'number'),\n ('fill_', (), (1,), 'number_scalar'),\n ('fill_', (S, S, S), ((),), 'variable'),\n ('narrow', (S, S, S), (1, 2, 2), 'dim', (), [0]),\n ('narrow', (S, S, S), (1, 0, 0), 'empty_dim', (), [0]),\n ('squeeze', (S, 1, S, 1), NO_ARGS, '', (True,)),\n ('squeeze', (1, 1, 1, 1), NO_ARGS, 'input_sizes_are_ones', (True,)),\n ('squeeze', (S, 1, S, 1), (1,), '1_dim', (True,), [0]),\n ('squeeze', (S, 1, S, 1), (2,), 'not_1_dim', (True,), [0]),\n ('squeeze', (), (0,), 'scalar', (True,), [0]),\n ('split', (S, S, S), (2,), '', (True,)),\n ('split', (S, S, S), (S, 1), 'dim', (True,), [1]),\n ('split', (S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],), 'size_list',\n (True, 'aten::split_with_sizes')),\n ('split', (S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], 
2), 'size_list_dim',\n (True, 'aten::split_with_sizes'), [1]),\n ('split_with_sizes', (S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],), '', (True,)),\n ('split_with_sizes', (S, S, S), ([int(S / 3), S - int(S / 3), 0],), 'size_0', (True, )),\n ('split_with_sizes', (S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],), 'dim', (True, ), [1]),\n ('tensor_split', (S, S, S), (3,), 'sections', (False,)),\n ('tensor_split', (S, S, S), (3, 1), 'sections_dim', (False,), [1]),\n ('tensor_split', (S, S, S), ([2, 4],), 'indices', (False,)),\n ('tensor_split', (S, S, S), ([2, 4], 1), 'indices_dim', (False,), [1]),\n ('resize_', (S, S, S), (torch.Size([S * S, S])), 'fewer_dims'),\n ('resize_', (), (dont_convert(()),), 'scalar'),\n ('resize_', (), (torch.Size([1, 1, 1])), 'scalar_to_dims'),\n ('resize_as_', (), (non_differentiable(torch.tensor(5.)),), 'scalar'),\n ('resize_as_', (), (non_differentiable(torch.randn((1, 1, 1))),), 'scalar_to_dims'),\n ('resize_as_', (S, S, S), (non_differentiable(torch.randn(S * S, S)),)),\n ('where', (M, M), (mask_not_all_zeros((M, M)), (M, M)), '', (True,)),\n ('where', (M, 1, M), (mask_not_all_zeros((M, M)), (M, M, 1)), 'broadcast_all', (True,)),\n ('where', (), (bernoulli_scalar(), ()), 'scalar', (True,)),\n ('where', (M, 1, M), (bernoulli_scalar(), (M, M, 1)), 'scalar_broadcast_mask', (True,)),\n ('where', (), (mask_not_all_zeros((M, M)), ()), 'scalar_broadcast_non_mask', (True,)),\n ('to_sparse', (S, S), (), '', (), (), [], lambda x: x.to_dense())\n ]\n\ndef create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.double, device=None):\n if not isinstance(call_args, tuple):\n call_args = (call_args,)\n\n def map_arg(arg):\n def maybe_non_contig(tensor):\n return tensor if not non_contiguous else make_non_contiguous(tensor)\n\n if isinstance(arg, torch.Size) or isinstance(arg, dont_convert):\n return arg\n elif isinstance(arg, tuple) and len(arg) == 0:\n var = torch.randn((), dtype=dtype, device=device)\n var.requires_grad = requires_grad\n return var\n elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):\n return Variable(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device)), requires_grad=requires_grad)\n # double check casting\n elif isinstance(arg, non_differentiable):\n if isinstance(arg.tensor, torch.Tensor):\n if arg.tensor.dtype == torch.float:\n return maybe_non_contig(arg.tensor.to(dtype=torch.double, device=device))\n if arg.tensor.dtype == torch.cfloat:\n return maybe_non_contig(arg.tensor.to(dtype=torch.cdouble, device=device))\n return maybe_non_contig(arg.tensor.to(device=device))\n return maybe_non_contig(arg.tensor.to(device=device))\n elif isinstance(arg, torch.Tensor):\n if arg.dtype == torch.float:\n arg = arg.double()\n if arg.dtype == torch.cfloat:\n arg = arg.to(torch.cdouble)\n if arg.is_complex() != dtype.is_complex:\n raise RuntimeError(\"User provided tensor is real for a test that runs with complex dtype, \",\n \"which is not supported for now\")\n # NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards\n v = maybe_non_contig(arg).detach().to(device=device).clone()\n v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex())\n return v\n elif callable(arg):\n return map_arg(arg(dtype=dtype, device=device))\n else:\n return arg\n args_out = tuple(map_arg(arg) for arg in call_args)\n kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}\n return args_out, 
kwargs_out\n\n\ndef _compare_trilu_indices(\n self, row, col, offset=0, dtype=torch.long, device='cpu'):\n if row == 0 or col == 0:\n # have to handle this separately as tril and triu does not take\n # empty matrix as input\n self.assertEqual(\n torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),\n torch.tril_indices(row, col, offset, dtype=dtype, device=device))\n\n self.assertEqual(\n torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),\n torch.triu_indices(row, col, offset, dtype=dtype, device=device))\n\n else:\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(\n torch.ones(row, col, device='cpu')\n .tril(offset).nonzero().to(dtype).transpose(0, 1),\n torch.tril_indices(row, col, offset, dtype=dtype, device=device))\n\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(\n torch.ones(row, col, device='cpu')\n .tril(offset).nonzero().to(dtype).transpose(0, 1),\n torch.tril_indices(row, col, offset, dtype=dtype, device=device))\n\n\ndef _compare_large_trilu_indices(\n self, row, col, offset=0, dtype=torch.long, device='cpu'):\n l = torch.ones(row, col, dtype=dtype, device='cpu').tril(offset) \\\n .nonzero()[-100:-1, :].transpose(0, 1).to(device)\n torch.cuda.empty_cache()\n\n r = torch.tril_indices(\n row, col, offset, dtype=dtype, device=device)[:, -100:-1]\n self.assertEqual(l, r)\n torch.cuda.empty_cache()\n\n l = torch.ones(row, col, dtype=dtype, device='cpu').triu(offset) \\\n .nonzero()[-100:-1, :].transpose(0, 1).to(device)\n torch.cuda.empty_cache()\n\n r = torch.triu_indices(\n row, col, offset, dtype=dtype, device=device)[:, -100:-1]\n self.assertEqual(l, r)\n torch.cuda.empty_cache()\n\n# (\n# row\n# col\n# offset (optional)\n# dtype (optional)\n# )\ntri_tests_args = [\n (1, 1),\n (3, 3),\n (3, 3, 1),\n (3, 3, 2),\n (3, 3, 200),\n (3, 3, -1),\n (3, 3, -2),\n (3, 3, -200),\n (0, 3, 0),\n (0, 3, 1),\n (0, 3, -1),\n (3, 0, 0),\n (3, 0, 1),\n (3, 0, -1),\n (0, 0, 0),\n (0, 0, 1),\n (0, 0, -1),\n (3, 6, 0),\n (3, 6, 1),\n (3, 6, 3),\n (3, 6, 9),\n (3, 6, -1),\n (3, 6, -3),\n (3, 6, -9),\n (6, 3, 0),\n (6, 3, 1),\n (6, 3, 3),\n (6, 3, 9),\n (6, 3, -1),\n (6, 3, -3),\n (6, 3, -9),\n (258, 253, 1, torch.float32),\n (257, 258, 1, torch.float64),\n (258, 258, 1, torch.short),\n (3, 513, 1, torch.long),\n (513, 3, 1, torch.int),\n (513, 0, 1, torch.double),\n (1024, 1024),\n (1024, 1024, 500, torch.float32),\n (1024, 1024, 1023),\n (1024, 1024, -500),\n (1023, 1025),\n (1025, 1023, 1022),\n (1024, 1024, -500),\n (3, 2028),\n (3, 2028, 1),\n (3, 2028, -1),\n (2028, 3),\n (2028, 1),\n (2028, 1, -1)\n]\n\ntri_large_tests_args: List[Tuple[int, ...]] = [\n # Large test cases below are deliberately commented out to speed up CI\n # tests and to avoid OOM error. 
When modifying implementations of\n # tril_indices and triu_indices, please enable these tests and make sure\n # they pass.\n #\n # (1, 268435455),\n # (5000, 5000),\n # (10000, 10000),\n # (268435455, 1),\n # (134217727, 2, 1),\n # (2, 134217727, 1),\n # (536870901, 1),\n # (1, 536870901),\n # (268435455, 2, 1),\n # (2, 268435455, 1)\n]\n\n\ndef run_additional_tri_tests(self, device):\n x = torch.ones(\n 3, 3, dtype=torch.long, device=device, layout=torch.strided)\n l = x.tril(0).nonzero().transpose(0, 1)\n u = x.triu(0).nonzero().transpose(0, 1)\n self.assertEqual(l, torch.tril_indices(3, 3, device=device))\n self.assertEqual(\n l, torch.tril_indices(3, 3, device=device, layout=torch.strided))\n\n self.assertEqual(u, torch.triu_indices(3, 3, device=device))\n self.assertEqual(\n u, torch.triu_indices(3, 3, device=device, layout=torch.strided))\n\n self.assertRaises(\n RuntimeError,\n lambda: torch.triu_indices(\n 1, 1, device=device, layout=torch.sparse_coo))\n\n self.assertRaises(\n RuntimeError,\n lambda: torch.tril_indices(\n 1, 1, device=device, layout=torch.sparse_coo))\n\n\ndef unpack_variables(args):\n if isinstance(args, tuple):\n return tuple(unpack_variables(elem) for elem in args)\n else:\n return args\n\n\nEXCLUDE_FUNCTIONAL = {\n 'addmm',\n 'addmm_',\n 'reshape',\n 'where' # argument order\n}\nEXCLUDE_GRADCHECK: Dict[str, Any] = {\n}\nEXCLUDE_GRADGRADCHECK: Dict[str, Any] = {\n}\nEXCLUDE_GRADGRADCHECK_BY_TEST_NAME = {\n # `other` expand_as(self, other) is not used in autograd.\n 'test_expand_as',\n 'test_cdist',\n}\n\n\ndef exclude_tensor_method(name, test_name):\n # there are no tensor equivalents for these (inplace or out)\n exclude_all_tensor_method_by_test_name = {\n 'test_slice',\n 'test_where',\n 'test_where_broadcast_all',\n 'test_where_scalar',\n 'test_where_scalar_broadcast_mask',\n 'test_where_scalar_broadcast_non_mask',\n 'test_var_mean_keepdim_dim_1d',\n 'test_var_mean_keepdim_dim',\n 'test_var_mean_dim_1d',\n 'test_var_mean_dim',\n 'test_var_mean',\n 'test_std_mean_keepdim_dim_1d',\n 'test_std_mean_keepdim_dim',\n 'test_std_mean_dim_1d',\n 'test_std_mean_dim',\n 'test_std_mean',\n }\n # there are no out-of-place tensor equivalents for these\n exclude_outplace_tensor_method = {\n 'index_fill',\n 'scatter',\n 'scatter_add',\n }\n if test_name in exclude_all_tensor_method_by_test_name:\n return True\n is_magic_method = name[:2] == '__' and name[-2:] == '__'\n is_inplace = name[-1] == \"_\" and not is_magic_method\n if not is_inplace and name in exclude_outplace_tensor_method:\n return True\n return False\n" ]
[ [ "numpy.random.choice", "torch.polygamma", "torch.triu_indices", "torch.einsum", "torch.linalg.svd", "torch.testing.all_types_and", "torch.randperm", "numpy.exp", "torch.ones", "numpy.sinc", "numpy.sign", "torch.testing.floating_and_complex_types_and", "torch.testing.all_types_and_complex_and", "torch.testing.complex_types", "torch.LongTensor", "torch.Size", "torch.empty_strided", "torch.linalg.cholesky", "torch.testing._internal.common_utils.random_symmetric_psd_matrix", "torch.testing._internal.common_utils.is_iterable_of_tensors", "torch.testing._internal.common_utils.random_symmetric_pd_matrix", "torch.tril_indices", "torch.testing._internal.common_utils.make_tensor", "torch.tensor", "torch.testing._internal.common_utils.random_symmetric_matrix", "numpy.sqrt", "torch.testing.all_types_and_complex", "torch.testing.floating_types", "torch.testing._internal.common_utils.set_rng_seed", "torch.empty", "torch.zeros", "torch.device", "torch.testing.floating_types_and", "torch.testing._internal.common_device_type.skipIf", "torch.testing._internal.common_utils.random_fullrank_matrix_distinct_singular_value", "torch.repeat_interleave", "numpy.modf", "torch.testing._internal.common_utils.random_hermitian_pd_matrix", "torch.testing.floating_and_complex_types", "torch.cuda.empty_cache", "torch.testing._internal.common_utils._wrap_warn_once", "torch.testing._internal.common_device_type.precisionOverride", "torch.testing.make_non_contiguous", "torch.rand", "torch.get_default_dtype", "torch.testing.all_types", "torch.testing.integral_types_and", "torch.no_grad", "numpy.abs", "torch.randn", "torch.testing._internal.common_utils.random_well_conditioned_matrix" ] ]
markzhao98/optical-tweezers-archive
[ "1a7b813fd48c5e72a5872df6a6c7f046996e3f43" ]
[ "GUI_GM_1.0.py" ]
[ "# Run Micro-Manager ------------------------------------------------------------\r\n\r\n#import os\r\n#os.startfile(\"C:\\Program Files\\Micro-Manager-1.4\\ImageJ.exe\")\r\n\r\nimport Tkinter as tk\r\nimport PyDAQmx\r\nimport numpy as np\r\nimport time\r\nimport csv\r\n\r\n# The GUI ------------------------------------------------------------\r\n\r\nclass GUI:\r\n \r\n def __init__(self, root, title):\r\n\r\n self.task_X = PyDAQmx.Task() \r\n self.task_Y = PyDAQmx.Task()\r\n\r\n self.task_X.CreateAOVoltageChan(\"/Dev1/ao0\",\"\",\r\n -10.0,10.0,PyDAQmx.DAQmx_Val_Volts,None) \r\n self.task_Y.CreateAOVoltageChan(\"/Dev1/ao1\",\"\",\r\n -10.0,10.0,PyDAQmx.DAQmx_Val_Volts,None)\r\n\r\n self.task_X.StartTask() \r\n self.task_Y.StartTask()\r\n\r\n self.value_X = -0.06 # Equilibrium X voltage value\r\n self.value_Y = -0.63 # Equilibrium Y voltage value\r\n\r\n self.task_X.WriteAnalogScalarF64(0,10.0,self.value_X,None) \r\n self.task_Y.WriteAnalogScalarF64(1,10.0,self.value_Y,None)\r\n\r\n self.root = root\r\n self.root.title(title)\r\n \r\n # A ----------------------------------------\r\n \r\n self.varA = tk.BooleanVar()\r\n self.varA.set(True)\r\n \r\n self.label_A = tk.Label(self.root, text = '(??, ??)', font='12', \r\n height = 2, width = 16, \r\n relief = tk.FLAT)\r\n self.label_A.grid(row = 0, column=1)\r\n \r\n self.cbtn_A = tk.Checkbutton(self.root, relief = tk.GROOVE, \r\n height = 2, width = 8, \r\n text='Trap A', font='12', \r\n variable = self.varA, \r\n disabledforeground = 'black',\r\n state = tk.DISABLED)\r\n self.cbtn_A.grid(row = 0, column = 0, padx = 5, sticky = tk.W)\r\n \r\n # B ----------------------------------------\r\n \r\n self.varB = tk.BooleanVar()\r\n self.varB.set(True)\r\n \r\n self.X = 520\r\n self.Y = 520\r\n \r\n self.label_B = tk.Label(self.root, text = str((self.X, self.Y)), font='12', \r\n height = 2, width = 16, \r\n relief = tk.FLAT)\r\n self.label_B.grid(row = 1, column=1)\r\n \r\n self.btn_B = tk.Button(self.root, text = 'Set', font = '12', fg = 'orange', \r\n height = 1, width = 6, \r\n disabledforeground = 'slate grey',\r\n state = tk.NORMAL, command = self.setbutton)\r\n self.btn_B.grid(row = 1, column=2, padx = 5)\r\n \r\n self.cbtn_B = tk.Checkbutton(self.root, relief = tk.GROOVE,\r\n height = 2, width = 8,\r\n text='Trap B', font='12', \r\n variable = self.varB, \r\n disabledforeground = 'black',\r\n state = tk.DISABLED)\r\n self.cbtn_B.grid(row = 1, column = 0, padx = 5, sticky = tk.W)\r\n \r\n # Done --------------------------------------------------\r\n \r\n self.btn_unset = tk.Button(self.root, text='Done setting', \r\n font = '12', fg = 'salmon',\r\n disabledforeground = 'slate grey',\r\n height = 1, width = 12,\r\n command = self.done, state = tk.DISABLED)\r\n self.btn_unset.grid(row = 1, column = 3, padx = 20)\r\n \r\n # Oscillation config --------------------------------------------------\r\n \r\n self.label_osci = tk.Label(self.root, \r\n text = \"Oscillation Configuration - Trap B\",\r\n font = '12', height = 2, width = 38, \r\n relief = tk.RIDGE)\r\n self.label_osci.grid(row = 2, column = 0, columnspan = 3, \r\n padx = 5, pady = 10, sticky = tk.W)\r\n\r\n self.label_whichdir = tk.Label(self.root, text = \"Direction\", font = '12',\r\n height = 2, width = 14, relief = tk.FLAT)\r\n self.label_whichdir.grid(row = 3, column = 1)\r\n\r\n self.listbox_whichdir = tk.Listbox(self.root, selectmode=tk.BROWSE,\r\n height = 2, width = 14,\r\n exportselection=0)\r\n self.listbox_whichdir.insert(0, \"Horizontal\")\r\n self.listbox_whichdir.insert(1, 
\"Vertical\")\r\n self.listbox_whichdir.grid(row = 3, column = 2)\r\n \r\n self.label_freq = tk.Label(self.root, text = \"Frequency [Hz]\", font = '12',\r\n height = 2, width = 14, relief = tk.FLAT)\r\n self.label_freq.grid(row = 4, column = 1)\r\n \r\n self.label_freqrange = tk.Label(self.root, text = 'int (0,1000) >>',\r\n height = 2, width = 12, relief = tk.FLAT)\r\n self.label_freqrange.grid(row = 4, column = 0)\r\n \r\n self.entry_freq = tk.Entry(self.root, width = 14)\r\n self.entry_freq.grid(row = 4, column = 2)\r\n \r\n self.label_amp = tk.Label(self.root, text = \"Amplitude [Pixel]\", font = '12',\r\n height = 2, width = 14, relief = tk.FLAT)\r\n self.label_amp.grid(row = 5, column = 1)\r\n \r\n self.label_amprange = tk.Label(self.root, text = 'int (0,1000) >>', \r\n height = 2, width = 12, relief = tk.FLAT)\r\n self.label_amprange.grid(row = 5, column = 0)\r\n\r\n self.entry_amp = tk.Entry(self.root, width = 14)\r\n self.entry_amp.grid(row = 5, column = 2)\r\n \r\n # Begin oscillation --------------------------------------------------\r\n \r\n self.btn_osci_begin = tk.Button(self.root, text = 'Begin', font = '12',\r\n fg = 'forest green', height = 4 , width = 10,\r\n disabledforeground = 'slate grey', \r\n state = tk.NORMAL, command = self.oscibegin)\r\n self.btn_osci_begin.grid(row =2, rowspan = 2, column = 3, sticky = tk.S)\r\n \r\n # End oscillation --------------------------------------------------\r\n \r\n self.btn_osci_end = tk.Button(self.root, text = 'End', font = '12',\r\n fg = 'lime green', height = 2, width = 10,\r\n disabledforeground = 'slate grey', \r\n state = tk.DISABLED, command = self.osciend)\r\n self.btn_osci_end.grid(row = 4, rowspan = 2, column = 3)\r\n \r\n # update --------------------------------------------------\r\n \r\n self.update()\r\n \r\n # shutdown --------------------------------------------------\r\n \r\n self.root.wm_protocol(\"WM_DELETE_WINDOW\", self.out)\r\n \r\n # -------------------------------------------------------\r\n \r\n def update(self): \r\n with open(\"coords.csv\") as file:\r\n csv_reader = csv.reader(file, delimiter=',')\r\n for row in csv_reader:\r\n self.X_temp = row[1]\r\n self.Y_temp = row[2]\r\n \r\n if self.X != int(self.X_temp) or self.Y != int(self.Y_temp):\r\n self.ifclick = True\r\n self.X = int(self.X_temp)\r\n self.Y = int(self.Y_temp) \r\n else:\r\n self.ifclick = False\r\n\r\n self.root.after(5, self.update)\r\n \r\n # -------------------------------------------------------\r\n\r\n def setbutton(self):\r\n self.btn_B.config(relief = tk.SUNKEN, state = tk.DISABLED)\r\n self.btn_unset.config(state = tk.NORMAL)\r\n self.btn_osci_begin.config(state = tk.DISABLED)\r\n if self.ifclick == True:\r\n self.label_B.config(text = str((self.X, self.Y)))\r\n \r\n self.value_X = - 0.1/42 * (self.X - 520) - 0.06\r\n self.value_Y = -0.1/52 * (self.Y - 520) - 0.63\r\n \r\n self.task_X.WriteAnalogScalarF64(0,10.0,self.value_X,None) \r\n self.task_Y.WriteAnalogScalarF64(1,10.0,self.value_Y,None)\r\n \r\n self.solve = self.root.after(2, self.setbutton)\r\n \r\n # -------------------------------------------------------\r\n \r\n def done(self):\r\n self.btn_B.config(relief = tk.RAISED, state = tk.NORMAL)\r\n self.btn_unset.config(state = tk.DISABLED)\r\n self.btn_osci_begin.config(state = tk.NORMAL)\r\n self.root.after_cancel(self.solve)\r\n\r\n # -------------------------------------------------------\r\n \r\n def X_osci(self):\r\n \r\n self.amp = int(self.entry_amp.get()) * 0.1/42\r\n \r\n self.tt = time.clock() - self.st\r\n 
self.txout = self.amp*np.sin(self.omega*self.tt) + self.value_X\r\n self.task_X.WriteAnalogScalarF64(0,10.0,self.txout,None)\r\n \r\n self.solve = self.root.after(1, self.X_osci)\r\n \r\n # -------------------------------------------------------\r\n \r\n def Y_osci(self):\r\n \r\n self.amp = int(self.entry_amp.get()) * 0.1/52\r\n \r\n self.tt = time.clock() - self.st\r\n self.tyout = self.amp*np.sin(self.omega*self.tt) + self.value_Y\r\n self.task_Y.WriteAnalogScalarF64(0,10.0,self.tyout,None)\r\n \r\n self.solve = self.root.after(1, self.Y_osci)\r\n \r\n # -------------------------------------------------------\r\n\r\n\r\n def oscibegin(self):\r\n if (len(self.listbox_whichdir.curselection()) == 1 and \r\n self.isint(self.entry_freq.get()) == True and \r\n self.isint(self.entry_amp.get()) == True):\r\n if (int(self.entry_freq.get()) > 0 and \r\n int(self.entry_freq.get()) <= 999 and \r\n int(self.entry_amp.get()) > 0 and \r\n int(self.entry_amp.get()) <= 999):\r\n \r\n self.btn_B.config(state = tk.DISABLED)\r\n self.listbox_whichdir.config(state = tk.DISABLED)\r\n self.entry_freq.config(state = tk.DISABLED)\r\n self.entry_amp.config(state = tk.DISABLED)\r\n self.btn_osci_begin.config(relief = tk.SUNKEN, state = tk.DISABLED)\r\n self.btn_osci_end.config(state = tk.NORMAL)\r\n \r\n self.freq = int(self.entry_freq.get())\r\n self.omega = 2*np.pi*self.freq\r\n\r\n self.st = time.clock()\r\n \r\n if self.listbox_whichdir.curselection()[0] == 0:\r\n self.X_osci()\r\n if self.listbox_whichdir.curselection()[0] == 1:\r\n self.Y_osci()\r\n \r\n # -------------------------------------------------------\r\n \r\n def osciend(self):\r\n self.btn_osci_begin.config(relief = tk.RAISED, state = tk.NORMAL)\r\n self.btn_osci_end.config(state = tk.DISABLED)\r\n self.btn_B.config(state = tk.NORMAL)\r\n self.listbox_whichdir.config(state = tk.NORMAL)\r\n self.entry_freq.config(state = tk.NORMAL)\r\n self.entry_amp.config(state = tk.NORMAL)\r\n \r\n self.root.after_cancel(self.solve)\r\n \r\n self.task_X.WriteAnalogScalarF64(0,10.0,self.value_X,None)\r\n self.task_Y.WriteAnalogScalarF64(1,10.0,self.value_Y,None)\r\n\r\n # -------------------------------------------------------\r\n \r\n def isint(self, stuff):\r\n try:\r\n float(stuff)\r\n return True\r\n except ValueError:\r\n return False\r\n \r\n # -------------------------------------------------------\r\n \r\n def out(self):\r\n self.task_Y.StopTask()\r\n self.task_X.StopTask()\r\n self.root.destroy()\r\n \r\n# ------------------------------------------------------------\r\n \r\ngui = GUI(tk.Tk(), \"The Optical Tweezer Program - Galvo Version\")\r\ngui.root.mainloop()\r\n" ]
[ [ "numpy.sin" ] ]
lucaagozzino/Fantapalla_Forever_database
[ "8adf036924ceec6dc544e189c834937875a8646f" ]
[ "Algoritmo_rose/Utilities_fantapalla.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[367]:\n\n\nimport numpy as np\nfrom sympy.utilities.iterables import multiset_permutations\nimport random\nimport copy\nimport glob\nfrom IPython import display\nimport pandas as pd\nquotazioni = pd.read_csv ('Quotazioni_Fantacalcio.csv')\n\nfrom joblib import Parallel, delayed\nimport multiprocessing\n\nN_cores = multiprocessing.cpu_count()\n\n\n\nimport progressbar\n#pbar = progressbar.progressbar()\n\n# In[368]:\n\n######################################################## START DEFAULT PARAMETERS\nstruttura_rosa = np.array([3, 8, 8, 6])\n#this must contain all the allowed schemes\n\nFormazioni = {\n '352': [1, 3, 5, 2],\n '343': [1, 3, 4, 3],\n '442': [1, 4, 4, 2],\n '541': [1, 5, 4, 1],\n '532': [1, 5, 3, 2],\n '433': [1, 4, 3, 3]\n #aggiungere tutte le altre formazioni\n }\nFasce_goal = np.array([66, 70, 84, 88, 92, 96, 100])\nFasce_modificatore = np.array([6. , 6.5, 7. , 7.5, 8. ])\nValori_modificatore = np.array([1, 3, 5, 6, 8])\nrows_to_skip=[0,1,2,3,4]\n\nN_squadre = 8\n\n######################################################## END DEFAULT PARAMETERS\n# In[369]:\n\n\ndef names(num_squadre):\n team_names = []\n teams = {}\n for i in range(1,num_squadre+1):\n team_names.append(\"Team \" + str(i))\n teams[i] = \"Team \" + str(i)\n return [teams, team_names]\n\n\n# In[370]:\n\n\n#[teams, team_names] = names(N_squadre)\n\n\n# In[371]:\n\n\ndef fixture_gen(teams):\n temp = copy.deepcopy(teams)\n var = []\n while len(temp)>1:\n idx = list(temp)\n j,k = random.sample(idx,2)\n var.append((temp.pop(j),temp.pop(k)))\n return var\n\n\n# In[372]:\n\n\ndef genera_rose(struttura_rosa, num_squadre):\n \n giocatori = np.array(struttura_rosa)*num_squadre\n tot_giocatori = sum(giocatori)\n \n [p,d,c,a] = giocatori\n por = np.array(range(1,p+1))\n dif = np.array(range(1,d+1))+p\n cen = np.array(range(1,c+1))+p+d\n att = np.array(range(1,a+1))+p+d+c\n \n rosa_por=np.random.choice(por,[struttura_rosa[0],num_squadre],replace=False)\n rosa_dif=np.random.choice(dif,[struttura_rosa[1],num_squadre],replace=False)\n rosa_cen=np.random.choice(cen,[struttura_rosa[2],num_squadre],replace=False)\n rosa_att=np.random.choice(att,[struttura_rosa[3],num_squadre],replace=False)\n \n rosa = np.append(rosa_por,rosa_dif,axis=0)\n rosa = np.append(rosa,rosa_cen,axis=0)\n rosa = np.append(rosa,rosa_att,axis=0)\n \n return rosa\n\n\n# In[373]:\n\n\n#gives back a dataframe with the top 200 players\ndef top_players(struttura_rosa, quotazioni, num_squadre):\n players = {}\n j = 1\n [p,c,d,a]=struttura_rosa*num_squadre\n for k, element in quotazioni.iterrows(): \n if element['R'] == 'P' and j<=p:\n players[j] = [j, element['Id'],element['Nome'],element['Qt. A']]\n j+=1 \n elif element['R'] == 'D' and j<=p+d:\n players[j] = [j, element['Id'],element['Nome'],element['Qt. A']]\n j+=1 \n elif element['R'] == 'C' and j<=p+d+c:\n players[j] = [j, element['Id'],element['Nome'],element['Qt. A']]\n j+=1 \n elif element['R'] == 'A' and j<=p+d+c+a:\n players[j] = [j, element['Id'],element['Nome'],element['Qt. 
A']]\n j+=1\n players=pd.DataFrame(players).T\n\n players = players.rename(columns = {0:'My Id',1:'FC Id', 2:'Nome', 3:'Quotazione'})\n return players\n\n\n# In[374]:\n\n\n#assigns the dictionary grade to the specific team players\ndef assign_grade(rose, grades_dict):\n n,m = np.shape(rose)\n grades = np.zeros((n,m))\n for i in range(n):\n for j in range(m):\n if rose[i,j] in grades_dict:\n grades[i,j] = grades_dict[rose[i,j]]\n return grades \n\n\n# In[375]:\n\n\ndef modificatore(voti_dif, valori, fasce):\n temp = 0\n media = np.average(voti_dif)\n for i in range(len(fasce)):\n if media >= fasce[i]:\n temp = valori[i]\n return temp\n\n\n# In[376]:\n\n\n#for the top 200 players, creates a dictionary mapping 'My Id' to 'voto'\ndef all_grades_dict(struttura_rosa, quotazioni, voti_giornata, num_squadre):\n players = top_players(struttura_rosa, quotazioni, num_squadre)\n temp_votes = {}\n for k in range(len(voti_giornata['Cod.'])):\n if voti_giornata['Cod.'][k] == 'Cod.' or voti_giornata['Cod.'][k] == 'GENOA' or voti_giornata['Cod.'][k] == 'INTER' or voti_giornata['Cod.'][k] =='JUVENTUS'or voti_giornata['Cod.'][k] =='LAZIO'or voti_giornata['Cod.'][k] =='LECCE'or voti_giornata['Cod.'][k] =='MILAN'or voti_giornata['Cod.'][k] =='NAPOLI'or voti_giornata['Cod.'][k] =='PARMA' or voti_giornata['Cod.'][k] =='ROMA' or voti_giornata['Cod.'][k] =='FIORENTINA'or voti_giornata['Cod.'][k] =='SAMPDORIA'or voti_giornata['Cod.'][k] =='SASSUOLO'or voti_giornata['Cod.'][k] =='SPAL' or voti_giornata['Cod.'][k] =='TORINO'or voti_giornata['Cod.'][k] =='GENOA'or voti_giornata['Cod.'][k] =='UDINESE'or voti_giornata['Cod.'][k] =='VERONA'or voti_giornata['Cod.'][k] =='BOLOGNA'or voti_giornata['Cod.'][k] =='BRESCIA'or voti_giornata['Cod.'][k] =='CAGLIARI'or voti_giornata['Cod.'][k] =='ATALANTA':\n continue\n for j in range(1,len(players['My Id'])+1):\n if players['FC Id'][j] == np.float(voti_giornata['Cod.'][k]):\n #print(voti_giornata['Cod.'][k])\n temp_votes[j]=voti_giornata['Voto'][k]\n if temp_votes[j] == '6*':\n temp_votes[j]= '6'\n return temp_votes # struttura My Id: voto\n\n\n# In[377]:\n\n\n#formazioni and dict_voti_giornata (from all_grades_dict) must be dictionaries\ndef voti_max(rose, struttura_rosa, formazioni, dict_voti_giornata, teams, num_squadre, valori, fasce):\n [P,D,C,A]=struttura_rosa\n \n voti_rosa = assign_grade(rose, dict_voti_giornata)\n \n voti ={}\n for k in range(num_squadre):\n voto = 0\n for f in formazioni.items():\n #da aggiungere: modificatore difesa\n [n_p,n_d,n_c,n_a] = f[1]\n \n idx_p = (-voti_rosa[0:P,k]).argsort()[:n_p]\n idx_d = (-voti_rosa[0+P:P+D,k]).argsort()[:n_d]+P\n idx_c = (-voti_rosa[0+P+D:C+P+D,k]).argsort()[:n_c]+P+D\n idx_a = (-voti_rosa[0+P+D+C:P+D+C+A,k]).argsort()[:n_a]+P+D+C\n\n idx_all = np.hstack((idx_p,idx_d,idx_c,idx_a))\n \n extra = 0\n l_temp = copy.deepcopy(voti_rosa[idx_d,k].tolist())\n l_temp = np.sort(l_temp)\n \n if n_d >=4 and (l_temp >= 6).sum()>=4:\n voti_mod = np.append(l_temp[-3:],voti_rosa[idx_p,k])\n extra = modificatore(voti_mod, valori, fasce)\n \n voto = max(voto,np.sum(voti_rosa[idx_all,k]) + extra)\n \n voti[teams[k+1]] = voto\n return voti #per ogni combinazione di rose trova il voto massimo di squadra per la giornata\n\n\n# In[378]:\n\n\ndef goal_scored(voti_squadre, fasce_goal):\n team_goals={}\n for team, voto in voti_squadre.items():\n goals = 0\n for i in range(len(fasce_goal)):\n if voto >= fasce_goal[i]:\n goals = i+1\n team_goals[team] = goals\n return team_goals\n\n\n# In[379]:\n\n\ndef points(fixtures, voti_squadre, fasce_goal):\n 
goals = goal_scored(voti_squadre, fasce_goal)\n points_temp = {}\n matches = len(fixtures)\n for m in range(matches):\n teams = fixtures[m]\n if goals[teams[0]] == goals[teams[1]]:\n points_temp[teams[0]]=1\n points_temp[teams[1]]=1\n \n elif goals[teams[0]] > goals[teams[1]]:\n points_temp[teams[0]]=3\n points_temp[teams[1]]=0\n \n elif goals[teams[0]] < goals[teams[1]]:\n points_temp[teams[0]]=0\n points_temp[teams[1]]=3\n return points_temp\n \n\n\n# In[380]:\n\n\ndef id_toName(struttura_rosa, quotazioni, rose, num_squadre, team_names):\n topPlayers = top_players(struttura_rosa, quotazioni, num_squadre)\n rose_nomi=pd.DataFrame(columns=team_names, index=range(np.sum(struttura_rosa)))\n for team_name in team_names:\n temp_teams = []\n for Myid in rose[team_name]:\n temp_teams.append(topPlayers['Nome'][Myid])\n rose_nomi[team_name] = temp_teams\n return rose_nomi\n\n\n# In[381]:\n\n\ndef all_quot_dict(struttura_rosa, quotazioni, num_squadre):\n players = top_players(struttura_rosa, quotazioni, num_squadre)\n temp_quot={}\n for idx in players['My Id']:\n temp_quot[idx] = players['Quotazione'][idx]\n return temp_quot # struttura My Id: quotazione\n\n\n# In[382]:\n\n\ndef assign_quot(rose, quot_dict, team_names): \n n,m = np.shape(rose)\n quot = np.zeros((n,m))\n rose = np.array(rose)\n for i in range(n):\n for j in range(m):\n if rose[i,j] in quot_dict:\n quot[i,j] = quot_dict[rose[i,j]]\n quot_tot = pd.DataFrame(data=np.sum(quot,axis=0,keepdims=True),columns=team_names).T\n return quot_tot\n\n\n# In[383]:\n\n\n\ndef simula_campionato(struttura_rosa, team_names, teams, quotazioni, path, num_squadre, valori, fasce, fasce_goal, formazioni):\n rose = genera_rose(struttura_rosa, num_squadre)\n\n #voti_giornata is the imported dataframe which will be inserted in the loop\n all_points = pd.DataFrame(index = team_names)\n all_files = glob.glob(path + \"/*.xlsx\")\n i=1\n for filename in all_files:\n # this is to be read from file\n #print('Giornata attuale:' f'{i}\\r', end=\"\")\n i+=1\n voti_giornata = pd.read_excel(filename,sheet_name=0,skiprows=rows_to_skip)\n fixtures = fixture_gen(teams)\n dict_voti_giornata = all_grades_dict(struttura_rosa, quotazioni, voti_giornata, num_squadre)\n voti_squadre = voti_max(rose, struttura_rosa, formazioni, dict_voti_giornata, teams, num_squadre, valori, fasce)\n punti = pd.DataFrame.from_dict(points(fixtures, voti_squadre, fasce_goal),orient='index')\n all_points = pd.concat([all_points,punti],axis=1)\n #print(voti_squadre)\n total = pd.DataFrame(data= np.sum(np.array(all_points),axis=1,keepdims = True), index=team_names, columns =['tot'])\n rose_id=rose\n rose = pd.DataFrame(data=rose, columns = team_names)\n rose_nomi = id_toName(struttura_rosa, quotazioni, rose, num_squadre, team_names)\n return [total, rose_nomi, rose_id]\n\ndef calcola_giornata(filename, teams, struttura_rosa, quotazioni, num_squadre, fasce_goal, rose, formazioni, valori, fasce):\n voti_giornata = pd.read_excel(filename,sheet_name=0,skiprows=rows_to_skip)\n fixtures = fixture_gen(teams)\n dict_voti_giornata = all_grades_dict(struttura_rosa, quotazioni, voti_giornata, num_squadre)\n voti_squadre = voti_max(rose, struttura_rosa, formazioni, dict_voti_giornata, teams, num_squadre, valori, fasce)\n punti= pd.DataFrame.from_dict(points(fixtures, voti_squadre, fasce_goal), orient = 'index')\n return punti\n\ndef simula_campionato_Parallel(struttura_rosa, team_names, teams, quotazioni, path, num_squadre, valori, fasce, fasce_goal, formazioni):\n rose = genera_rose(struttura_rosa, 
num_squadre)\n\n #voti_giornata is the imported dataframe which will be inserted in the loop\n all_points = pd.DataFrame(index = team_names)\n all_files = glob.glob(path + \"/*.xlsx\")\n i=1\n full_season = Parallel(n_jobs = N_cores)(delayed(calcola_giornata)(file_name, teams, struttura_rosa, quotazioni, num_squadre, fasce_goal, rose, formazioni, valori, fasce) for file_name in all_files)\n standings = pd.concat(full_season, axis=1)\n total = pd.DataFrame(data= np.sum(np.array(standings),axis=1,keepdims = True), index=team_names, columns =['tot'])\n rose_id=rose\n rose = pd.DataFrame(data=rose, columns = team_names)\n rose_nomi = id_toName(struttura_rosa, quotazioni, rose, num_squadre, team_names)\n return [total, rose_nomi, rose_id]\n\n\n# In[384]:\n# if we want to extract data to train a network we need to parallelize this loop in order to save all the teams and the ranges\n\ndef main_model(n_campionati, struttura_rosa, team_names, teams, quotazioni, path, num_squadre = N_squadre, valori = Valori_modificatore, fasce = Fasce_modificatore, fasce_goal = Fasce_goal, formazioni = Formazioni):\n range_best = 100;\n q_range_best = 2000;\n for i in progressbar.progressbar(range(n_campionati)):\n #print('Campionato attuale:' f'{i+1}\\r', end=\"\")\n classifica, rose, rose_id= simula_campionato_Parallel(struttura_rosa, team_names, teams, quotazioni, path, num_squadre, valori, fasce, fasce_goal, formazioni)\n quot_dict = all_quot_dict(struttura_rosa, quotazioni, num_squadre)\n classifica_quot = assign_quot(rose_id, quot_dict, team_names)\n range_temp = np.float(classifica.max() - classifica.min())\n q_range_temp = np.float(classifica_quot.max() - classifica_quot.min())\n if (range_temp < range_best + 10 and q_range_temp < q_range_best):\n range_best = range_temp\n q_range_best = q_range_temp\n classifica_best = classifica\n classifica_q_best = classifica_quot\n rose_best = rose\n return [rose_best, classifica_best, classifica_q_best]\n\n\n# In[356]:\n\ndef styling_rows(x, struttura):\n [P,D,C,A] = struttura\n color_P = 'background-color: rgba(249,168,38,.5); color: black'\n color_D = 'background-color: rgba(46,125,51,.5); color: black'\n color_C = 'background-color: rgba(21,119,189,.5); color: black'\n color_A = 'background-color: rgba(198,40,39,.5); color: black'\n df_styler = pd.DataFrame('', index=x.index, columns=x.columns)\n col_idx = range(df_styler.shape[1])\n row_idx_P = P\n row_idx_D = D\n for idx in range(P):\n df_styler.iloc[idx, col_idx] = color_P\n for idx in range(P,P+D):\n df_styler.iloc[idx, col_idx] = color_D\n for idx in range(P+D,P+D+C):\n df_styler.iloc[idx, col_idx] = color_C\n for idx in range(P+D+C,P+D+C+A):\n df_styler.iloc[idx, col_idx] = color_A\n return df_styler\n\ndef ruoli(struttura_rosa):\n return struttura_rosa[0]*['P']+struttura_rosa[1]*['D']+struttura_rosa[2]*['C']+struttura_rosa[3]*['A']\n\n\n# In[ ]:\n\n\ndef FC_colors(dataframe, struttura_rosa):\n output = dataframe.style.apply(styling_rows, struttura = struttura_rosa, axis = None)\\\n .hide_index()\n return output" ]
[ [ "numpy.array", "numpy.random.choice", "numpy.zeros", "numpy.float", "pandas.concat", "pandas.DataFrame", "pandas.read_excel", "numpy.sum", "numpy.shape", "numpy.sort", "numpy.append", "numpy.average", "numpy.hstack", "pandas.read_csv" ] ]
Gerryflap/RL_project_common
[ "c143e3e98ab882188f511a4b4edcde8cd10fc9f4" ]
[ "environments/flappybird.py" ]
[ "import numpy as np\nimport pygame\nfrom ple import PLE\nimport ple.games\n\nfrom core import State, Action, FiniteActionEnvironment\n\n\"\"\"\n Flappy Bird Environment wrapper for PyGame Learning Environment's Flappy Bird\n https://github.com/ntasfi/PyGame-Learning-Environment\n\"\"\"\n\npygame.init()\n\n\nclass FlappyBirdState(State):\n \"\"\"\n Flappy Bird State\n \"\"\"\n\n def __init__(self, state: dict, terminal: bool):\n \"\"\"\n Create a new Flappy Bird State\n :param state: a dictionary containing an state\n :param terminal: a boolean indicating whether the environment state is terminal\n \"\"\"\n super().__init__(terminal)\n self.state = state\n\n def __str__(self) -> str:\n return str(self.state)\n\n\nclass FlappyBirdAction(Action):\n \"\"\"\n Flappy Bird Action that can be performed on the environment state\n \"\"\"\n\n def __init__(self, flap: bool):\n self._flap = flap\n\n def flap(self):\n return self._flap\n\n\nclass FlappyBird(FiniteActionEnvironment):\n \"\"\"\n FlappyBird Environment class\n \"\"\"\n\n FLAP = FlappyBirdAction(True)\n REST = FlappyBirdAction(False)\n ACTIONS = [REST, FLAP]\n\n def __init__(self, size: tuple = (48, 48)):\n \"\"\"\n Create a new Flappy Bird Environment\n :param size: Game window dimensions\n \"\"\"\n super().__init__()\n self.width, self.height = size\n self.game = ple.games.FlappyBird(width=self.width, height=self.height)\n self.game.screen = pygame.display.set_mode(self.game.getScreenDims(), 0, 32)\n self.game.clock = pygame.time.Clock()\n self.game.rng = np.random.RandomState(24)\n\n self.game.rewards['loss'] = -1\n self.game.rewards['win'] = 1\n\n self.ple = PLE(self.game)\n self.ple.init()\n\n self.terminal = False\n self.reset()\n\n @staticmethod\n def action_space() -> list:\n return list(FlappyBird.ACTIONS)\n\n @staticmethod\n def valid_actions_from(state) -> list:\n return FlappyBird.action_space()\n\n def step(self, action: FlappyBirdAction) -> tuple:\n \"\"\"\n Perform an action on the current environment state\n :param action: The action to be performed\n :return: A two-tuple of (state, reward)\n \"\"\"\n if self.terminal:\n raise Exception('Cannot perform action on terminal state!')\n\n if action.flap():\n key = pygame.K_w\n else:\n key = self.ple.NOOP\n reward = self.ple.act(key)\n state = self.game.getGameState()\n self.terminal = self.ple.game_over()\n pygame.display.update()\n return FlappyBirdState(state, self.terminal), reward\n\n def reset(self):\n \"\"\"\n Reset the environment state\n :return: A state containing the initial state\n \"\"\"\n self.ple.reset_game()\n state = self.game.getGameState()\n self.terminal = self.ple.game_over()\n return FlappyBirdState(state, self.terminal)\n\n def valid_actions(self) -> list:\n \"\"\"\n :return: A list of actions that can be performed on the current environment state\n \"\"\"\n return self.action_space()\n\n\nif __name__ == '__main__':\n import numpy as np\n import time\n\n _width, _height = _size = 288, 512\n _e = FlappyBird(_size)\n\n _s = _e.reset()\n while not _s.is_terminal():\n _s, _r = _e.step(np.random.choice(_e.valid_actions(), p=[0.9, 0.1]))\n print(_r)\n if _s.is_terminal():\n _s = _e.reset()\n time.sleep(0.1)\n" ]
[ [ "numpy.random.RandomState" ] ]
SoumyaShreeram/Locating_AGN_in_DM_halos
[ "1cfbee69b2c000faee4ecb199d65c3235afbed42" ]
[ "imported_files/plotting_aim03.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Plotting.py for notebook 02_AGN_incidence_in_Major_Mergers\n\nThis python file contains all the functions used for plotting graphs and maps in the 2nd notebook (.ipynb) of the repository: 02. Creating a Major Merger (MM) catalogue to study AGN incidence due to galaxy mergers\n\nScript written by: Soumya Shreeram \nProject supervised by Johan Comparat \nDate created: 30th March 2021\n\"\"\"\n# astropy modules\nimport astropy.units as u\nimport astropy.io.fits as fits\nfrom astropy.table import Table, Column\nfrom astropy.coordinates import SkyCoord\nfrom astropy.cosmology import FlatLambdaCDM, z_at_value\n\nimport numpy as np\nimport pandas as pd\n\n# scipy modules\nfrom scipy.spatial import KDTree\nfrom scipy import interpolate \n\nimport os\nimport importlib\n\n# plotting imports\nimport matplotlib\nfrom mpl_toolkits import axes_grid1\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d.axes3d import Axes3D\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom matplotlib import cm\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Rectangle\nimport seaborn as sns\n\nimport Modelling_AGN_fractions_from_literature as mafl\nimport Agn_incidence_from_Major_Mergers as aimm\nimport Comparison_simulation_with_literature_data as cswl\nimport All_sky as sky\n\n\ndef setLabel(ax, xlabel, ylabel, title='', xlim='default', ylim='default', legend=True):\n \"\"\"\n Function defining plot properties\n @param ax :: axes to be held\n @param xlabel, ylabel :: labels of the x-y axis\n @param title :: title of the plot\n @param xlim, ylim :: x-y limits for the axis\n \"\"\"\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n \n if xlim != 'default':\n ax.set_xlim(xlim)\n \n if ylim != 'default':\n ax.set_ylim(ylim)\n \n if legend:\n l = ax.legend(loc='best', fontsize=14, frameon=False)\n for legend_handle in l.legendHandles:\n legend_handle._legmarker.set_markersize(12)\n \n ax.set_title(title, fontsize=18)\n ax.grid(False)\n return\n\ndef saveFig(filename):\n plt.savefig('../figures/'+filename, facecolor='w', edgecolor='w', bbox_inches='tight')\n return\n\ndef plotNumberDensityVsRadius(num_pairs_all0, num_pairs_all1, title, plot_shell_vol=False):\n \"\"\"\n Function to plot the number density of pairs found as a function of the projected separation for a range of different mass bins\n \"\"\"\n # get shell volume and projected radius bins\n r_p, _, shell_volume = aimm.shellVolume()\n \n fig, ax = plt.subplots(1,1,figsize=(7,6))\n ax.plot(r_p[1:], num_pairs_all0, \"s\", color='k', label='DM halos', ms=9, mec='k')\n ax.plot(r_p[1:], num_pairs_all1, \"s\", color='b', label='AGNs', ms=9, mec='b')\n \n # errorbars\n ax.errorbar(r_p[1:], num_pairs_all[i], yerr=getError(num_pairs_all[i]), ecolor=pal[i], fmt='none', capsize=4.5)\n ax.errorbar(r_p[1:], num_pairs_all[i], yerr=getError(num_pairs_all[i]), ecolor=pal[i], fmt='none', capsize=4.5)\n if np.any(num_pairs_all[i]) != 0: ax.set_yscale(\"log\")\n \n # plot the shell volume\n if plot_shell_vol:\n ax.plot(r_p[1:], 1/shell_volume, \"grey\", marker=\".\", mfc='k', ls=\"--\", label='Shell Volume') \n \n setLabel(ax, r'Separation, $r$ [kpc/h]', r'$n_{\\rm pairs}}$ [$h^{3}/{\\rm kpc}^{-3}$]', title, [np.min(r_p[1:])-1, np.max(r_p[1:])+1], 'default', legend=True)\n ax.set_yscale(\"log\")\n return ax\n\ndef plotEffectOfTimeSinceMerger(num_pairs_dt_m, dt_m_arr, title, binsize=15):\n \"\"\"\n Function to plot the effect of time since merger of the number of pairs found\n \"\"\"\n pal_r 
= sns.color_palette(\"rocket\", len(dt_m_arr)).as_hex()\n labels = [r'$\\Delta t_{\\rm m}$ = %d Gyr'%dt for dt in dt_m_arr]\n \n fig, ax = plt.subplots(1,1,figsize=(7,6))\n \n n_p = [num_pairs_dt_m[0], num_pairs_dt_m[1], num_pairs_dt_m[2], num_pairs_dt_m[3]]\n ax.hist(n_p, bins=binsize, color=pal_r, label=labels)\n \n setLabel(ax, r'$n_{\\rm pairs}$ [kpc$^{-3}$]', r'Number of counts', title, 'default', 'default', legend=False)\n ax.legend(loc='upper right', fontsize=14)\n \n ax.set_yscale(\"log\")\n ax.set_xscale(\"log\")\n return \n\ndef plotTimeSinceMergerMassBins(dt_m_arr, num_pairs, title=\"DM Halos\"):\n \"\"\"\n Function to study the mass and merger dependence simultaneously\n \"\"\"\n # get shell volume and projected radius bins\n r_p, _, shell_volume = aimm.shellVolume()\n \n # initiating plot params\n color_palatte = sns.color_palette(\"magma\", len(dt_m_arr)).as_hex()\n fig, ax = plt.subplots(1,1,figsize=(7,6))\n \n for t, dt in enumerate(dt_m_arr):\n ax.plot(r_p[1:], num_pairs[t], \"s\", mfc=color_palatte[t], ms=9, mec='k', label= r'$\\Delta t_{\\rm m} = %d$ Gyr'%(dt))\n \n # plot the shell volume\n ax.plot(r_p[1:], 1/shell_volume, \"grey\", marker=\".\", mfc='k', ls=\"--\", label='Shell Volume')\n \n setLabel(ax, r'Separation, $r$ [kpc/h]', r'$n_{\\rm pairs}$ [kpc$^{-3}$]', title, [np.min(r_p[1:])-1, np.max(r_p[1:])+1], 'default', legend=True)\n ax.legend(loc='upper right', fontsize=14)\n \n ax.set_yscale(\"log\")\n return \n\ndef plotSatyapal(ax, Satyapal_14, r_p_err_S14, f_agn_err_S14, color_S14):\n \"Plot taken from Satyapal et al. 2014\"\n r_p_S14, f_agn_S14 = mafl.getXY(Satyapal_14)\n ax.plot(r_p_S14, f_agn_S14, 'o', label='Satyapal et al. 2014', color=color_S14, ms=9, mec='#8637b8', zorder=2)\n\n xerr, yerr = mafl.getErr(r_p_err_S14, r_p_S14, f_agn_err_S14, f_agn_S14)\n ax.errorbar(r_p_S14, f_agn_S14, yerr = yerr, fmt='none', ecolor='#210340', capsize=2, zorder=2)\n return ax, np.array([r_p_S14, f_agn_S14, xerr, yerr], dtype=object)\n\ndef plotLiu(ax, r_p_L12, f_agn_L12, Liu_12_err, color_E11):\n \"\"\"\n Plot taken from Liu et al 2012\n \"\"\"\n ax.plot(r_p_L12, f_agn_L12, 'd', label='Liu et al. 2012', color=color_E11, ms=9,\\\n mec='#487ab8', zorder=2)\n\n yerr = np.abs(np.transpose(Liu_12_err)-f_agn_L12)\n ax.errorbar(r_p_L12, f_agn_L12, yerr = yerr, fmt='none', ecolor='#759c00',\\\n capsize=2, zorder=2)\n return ax, np.array([r_p_L12, f_agn_L12, [1e-3*np.ones(len(yerr[0])), 1e-3*np.ones(len(yerr[0]))], yerr], dtype=object) \n\ndef plotSilverman(ax, Silverman_11, r_p_err_Sil11, f_agn_err_Sil11, color_Sil11, xmax=150):\n \"\"\"\n Plot taken from Silverman et al. 2011 \n \"\"\"\n r_p_Sil11, f_agn_Sil11 = mafl.getXY(Silverman_11)\n excess = ax.plot(r_p_Sil11, f_agn_Sil11, 'o', label='Silverman et al. 2011',\\\n color=color_Sil11, ms=9, mec='k', zorder=2)\n control = ax.hlines(0.05, 0, xmax, colors='k', linestyles=':', zorder=2)\n\n xerr, yerr = mafl.getErr(r_p_err_Sil11, r_p_Sil11, f_agn_err_Sil11, f_agn_Sil11)\n ax.errorbar(r_p_Sil11, f_agn_Sil11, yerr = yerr, xerr = xerr, fmt='none',\\\n ecolor='k', capsize=2, zorder=2) \n return ax, np.array([r_p_Sil11, f_agn_Sil11, xerr, yerr], dtype=object)\n\ndef plotEllison(ax, r_p_E11, f_agn_E11, r_p_err_E11, f_agn_err_E11, color_E11, xmax=150, mec_E11 = '#0b8700'):\n \"\"\"\n Plot taken from Ellison et al. 2011\n \"\"\"\n ax.plot(r_p_E11, f_agn_E11, 'o', label='Ellison et al. 
2011', color=color_E11, ms=9,\\\n mec=mec_E11, zorder=2)\n control = ax.hlines(0.0075, 0, xmax, colors=mec_E11, linestyles='--', zorder=2)\n \n # errorbars\n xerr, yerr = mafl.getErr(r_p_err_E11, r_p_E11, f_agn_err_E11, f_agn_E11)\n ax.errorbar(r_p_E11, f_agn_E11, yerr = yerr, xerr = xerr, fmt='none', ecolor=mec_E11, capsize=2)\n return ax, np.array([r_p_E11, f_agn_E11, xerr, yerr], dtype=object)\n\ndef plotAllLiteraturePlots(Satyapal_14, r_p_err_S14, f_agn_err_S14, r_p_L12, f_agn_L12,\\\n Liu_12_err, Silverman_11, r_p_err_Sil11, f_agn_err_Sil11, r_p_E11, f_agn_E11, r_p_err_E11,\\\n f_agn_err_E11, axs = None, xmax= 150,ymax = 0.22):\n \"\"\"\n Function plots all the data points obtained from literature \n \"\"\"\n if np.any(axs) == None:\n fig, axs = plt.subplots(1,2,figsize=(15,6))\n fig.patch.set_facecolor('white')\n ax, ax1 = axs[0], axs[1]\n \n ax.set_xticks(ticks=np.arange(0, xmax, step=10), minor=True)\n ax.set_yticks(ticks=np.arange(0, ymax, step=1e-2), minor=True)\n \n color_E11, color_S14, color_Sil11 ='#d5ff03', '#ff6803', '#ff0318'\n \n\n # Satyapal et al. 2014\n ax, Satyapal_14_all = plotSatyapal(ax, Satyapal_14, r_p_err_S14, f_agn_err_S14, color_S14)\n \n # Liu et al. 2012\n ax, Liu_12_all = plotLiu(ax, r_p_L12, f_agn_L12, Liu_12_err, color_E11)\n \n # Silverman et al 2011\n ax1, Silverman_11_all = plotSilverman(ax1, Silverman_11, r_p_err_Sil11, f_agn_err_Sil11, color_Sil11)\n \n # Ellison et al. 2011\n ax, Ellison_11_all = plotEllison(ax, r_p_E11, f_agn_E11, r_p_err_E11, f_agn_err_E11, color_E11)\n \n setLabel(ax, r'Projected separation, $r_{\\rm p}$ [kpc]', r'Fraction of AGNs, $f_{\\rm AGN}$', title='z<0.2', xlim=[0, xmax], ylim=[0, ymax])\n setLabel(ax1, r'Projected separation, $r_{\\rm p}$ [kpc]', '', title='z<1', xlim=[0, xmax], ylim=[0, ymax])\n plt.savefig('../figures/close_p_lit_combined.pdf', facecolor='w', edgecolor='w', bbox_inches='tight')\n return ax, np.array([Satyapal_14_all, Liu_12_all, Silverman_11_all, Ellison_11_all], dtype=object)\n\n\ndef plotChangesCatAGN(ax, g_cp, g_rand, redshift_limit=.2, c='r', label_idx = 3, num_rp_bins=12,frac_cp_agn=0.03):\n \"\"\"\n Function to see the changes in the new AGN cat wrt the old one \n @c :: color of the lines\n \"\"\"\n # get shell volume and projected radius bins\n r_p, shell_volume = aimm.shellVolume()\n r_p_half, shell_volume_half = aimm.shellVolume(num_bins=num_rp_bins)\n \n # pixel number from the simulation file\n pixel_no_cont_arr = sky.allPixelNames()\n\n halo_lens = np.load('../Data/all_sky_halo_lengths_z%.1f.npy'%redshift_limit)\n rand_agn, cp_agn = mafl.getAGNlengths(redshift_limit=redshift_limit, frac_cp_agn=frac_cp_agn, all=False)\n \n # get the total number of possible AGN-halo pairs\n data_dir = '../Data/pairs_z%.1f/Major_dv_pairs/'%1\n gamma_all = np.load(data_dir+'gamma_all_pixels.npy', allow_pickle=True)\n\n r_kpc, r_kpc_half = (1e3*r_p[1:]), (1e3*r_p_half[1:])\n \n \n label1 = r'$t_{\\rm MM}, \\tilde{X}_{\\rm off}$ assigned AGN-halo pairs'\n label2 = 'randomly assigned AGN-halo pairs'\n label3 = 'all halo-halo pairs'\n # --------- plot 1 -------------------------\n l1, = ax[0].plot(r_kpc, g_cp['Gamma_mean_CP'], '-', color=c, label=label1)\n l2, = ax[0].plot(r_kpc_half, g_rand['Gamma_mean_RAND'], '--', color=c, label=label2)\n l3, = ax[0].plot(r_kpc, gamma_all[0], ':', color=c, label=label3)\n\n ax[0].set_yscale('log')\n #xlim = [(1e3*np.min(r_p[1:])), (1e3*np.max(r_p[1:]))]\n \n xlabel, t = r'Separation, $r$ [kpc]', r'z<%.1f, $f_{\\rm cp AGN}$ = %.2f'%(redshift_limit,frac_cp_agn)\n ylabel = 
r'$\\Gamma_{\\rm t_{\\rm MM}; \\ \\tilde{X}_{\\rm off}}(m;\\ \\Delta v)$ [Mpc$^{-3}$]'\n \n if label_idx == 3: \n handles, labels = [l1, l2, l3], [label1, label2 ,label3 ]\n l = ax[0].legend(handles, labels, loc='best', fontsize=14, frameon=False) \n setLabel(ax[0], xlabel, ylabel, title=t, legend=True)\n\n # --------- plot 2 -------------------------\n # interpolate the halo-halo numberty to divide by old agn\n f = interpolate.interp1d(r_kpc, gamma_all[0])\n f_err = interpolate.interp1d(r_kpc, gamma_all[1])\n gamma_all_inter = np.array([f(r_kpc_half), f_err(r_kpc_half)])\n\n ax[1].plot(r_kpc, (g_cp['Gamma_mean_CP']/gamma_all[0]), '-', color=c)\n ax[1].plot(r_kpc_half, (g_rand['Gamma_mean_RAND']/gamma_all_inter[0]), '--', color=c)\n \n ylabel1 = r'$f_{\\rm AGN}$'\n ax[1].set_yscale('log')\n setLabel(ax[1], xlabel, ylabel1, title=t, legend=False)\n return gamma_all_inter\n\ndef getFracError(da, a, db, b):\n return a/b, (a/b)*np.sqrt( (da/a)**2 + (db/b)**2)\n\ndef plotErrors(ax, r_p, r_p_half, g_cp_z1 , g_rand_z1 ):\n # --- plot to the left ----------\n top, bottom = g_cp_z1['Gamma_mean_CP']+g_cp_z1['Gamma_std_CP']/2, g_cp_z1['Gamma_mean_CP']-g_cp_z1['Gamma_std_CP']/2\n ax[0].fill_between((1e3*r_p[1:]), top, bottom, color='#5b7c85', alpha=0.3)\n\n top_r, bottom_r = g_rand_z1['Gamma_mean_RAND']+g_rand_z1['Gamma_std_RAND']/2, g_rand_z1['Gamma_mean_RAND']-g_rand_z1['Gamma_std_RAND']/2\n ax[0].fill_between((1e3*r_p_half[1:]), top_r, bottom_r, color='#5b7c85', alpha=0.3)\n\n top_all, bottom_all = g_cp_z1['Gamma_meanALL']+g_cp_z1['Gamma_stdALL']/2, g_cp_z1['Gamma_meanALL']-g_cp_z1['Gamma_stdALL']/2\n ax[0].fill_between((1e3*r_p[1:]), top_all, bottom_all, color='#5b7c85', alpha=0.3)\n\n # get fractional errors\n f_cp, err_cp = getFracError(g_cp_z1['Gamma_std_CP'], g_cp_z1['Gamma_mean_CP'], g_cp_z1['Gamma_stdALL'], g_cp_z1['Gamma_meanALL'])\n f_rand, err_rand = getFracError(g_rand_z1['Gamma_std_RAND'], g_rand_z1['Gamma_mean_RAND'], g_rand_z1['Gamma_stdALL'], g_rand_z1['Gamma_meanALL'])\n \n t, b = f_cp+err_cp/2, f_cp-err_cp/2\n ax[1].fill_between((1e3*r_p[1:]), t, b, color='#5b7c85', alpha=0.3)\n\n t_r, b_r = f_rand+err_rand/2, f_rand-err_rand/2\n ax[1].fill_between((1e3*r_p_half[1:]),t_r, b_r, color='#5b7c85', alpha=0.3)\n\n col_0 = Column(data=err_cp, name='frac_std_CP')\n col_1 = Column(data=err_rand, name='frac_std_RAND')\n g_cp_z1.add_column(col_0)\n g_rand_z1.add_column(col_1)\n return g_cp_z1, g_rand_z1\n\ndef label(t, x, offset):\n label_tmm = r'$\\langle t_{\\rm MM}^{(%d)}$'%t\n label_xoff = r'$ + \\tilde{X}_{\\rm off}^{(%d)}\\rangle$'%x\n if offset == 0:\n label=label_tmm + label_xoff\n if offset != 0:\n label=label_tmm + label_xoff + ' + %.2f'%offset\n return label\n\n\ndef plotModels(axs, models, std, r_kpc, left=True, asymotote_value=[0.01, 0.05]):\n pal = sns.color_palette(\"Wistia\", models.shape[0]+1).as_hex()\n greys = sns.color_palette(\"Wistia\", models.shape[0]+1).as_hex()\n \n for i in range(models.shape[0]):\n if left:\n m, s = mafl.normalizeAsymptote(models[i], asymotote_value=asymotote_value[0]), std[i]\n \n axs[0].plot(r_kpc[1:], m[1:], color='k', lw=0.3, alpha=0.2, zorder=1)\n top, bottom = m.astype(None) + s.astype(None)/2, m.astype(None) - s.astype(None)/2\n axs[0].fill_between(r_kpc[1:], top[1:], bottom[1:], color=pal[i], alpha=0.09, zorder=1)\n\n else:\n m, s = mafl.normalizeAsymptote(models[i], asymotote_value=asymotote_value[1]), std[i]\n \n axs[1].plot(r_kpc[1:], m[1:], color='k', lw=0.4, alpha=0.2, zorder=1)\n top, bottom = m.astype(None) + s.astype(None)/2, 
m.astype(None) - s.astype(None)/2\n axs[1].fill_between(r_kpc[1:], top[1:], bottom[1:], color=greys[i], alpha=0.09, zorder=1)\n return axs\n\ndef plotAGNModelZ(axs, idx, r_p, g_cp_z0_2, ls='-', redshift_limit=2, c='b', if_cp=True):\n # fraction of agn line\n if if_cp:\n axs[idx].plot((1e3*r_p[1:]), g_cp_z0_2['Gamma_mean_CP']/g_cp_z0_2['Gamma_meanALL'], \\\n ls=ls, lw=2, color=c, label='AGN model z<%.1f'%redshift_limit)\n \n # fill the standard deviation\n m, std = g_cp_z0_2['Gamma_mean_CP']/g_cp_z0_2['Gamma_meanALL'], g_cp_z0_2['frac_std_CP']\n axs[idx].fill_between((1e3*r_p[1:]), m-std/2, m+std/2, color='#5b7c85', alpha=0.4 )\n else:\n axs[idx].plot((1e3*r_p[1:]), g_cp_z0_2['Gamma_mean_RAND']/g_cp_z0_2['Gamma_meanALL'], \\\n ls=ls, lw=2, color=c)\n \n # fill the standard deviation\n m, std = g_cp_z0_2['Gamma_mean_RAND']/g_cp_z0_2['Gamma_meanALL'], g_cp_z0_2['frac_std_RAND']\n axs[idx].fill_between((1e3*r_p[1:]), m-std/2, m+std/2, color='#5b7c85', alpha=0.4 )\n return axs\n\ndef plotMSEdist(ax, mse, pal, i, names, label):\n ax.plot(mse, lw=2, ls='--', color=pal[i], zorder=1)\n \n min_mse_idx = np.where(mse == np.min(mse))\n min_x = np.arange(len(mse))[min_mse_idx]\n min_model_name = names[i][1][min_mse_idx[0][0]]\n label = label[i]+min_model_name\n ax.plot(min_x, np.min(mse), 's', ms=12, mec='k', mew=.5, color=pal[i], label=label, zorder=2)\n return ax, min_mse_idx[0][0], label\n\n\ndef plotSelectedModels(axs, selected_A, selected_B, r_kpc):\n \"Function plots all the physical models\"\n # overplot with the data\n pt.plotModels(axs, select_E11_A[0], select_E11_A[1], r_kpc)\n pt.plotModels(axs, select_S14_A[0], select_S14_A[1], r_kpc)\n pt.plotModels(axs, select_L12_A[0], select_L12_A[1], r_kpc)\n pt.plotModels(axs, select_Sil11_A[0], select_Sil11_A[1], r_kpc, left=False)\n\n pt.plotModels(axs, select_E11_B[0], select_E11_B[1], r_kpc)\n pt.plotModels(axs, select_S14_B[0], select_S14_B[1], r_kpc)\n pt.plotModels(axs, select_L12_B[0], select_L12_B[1], r_kpc)\n pt.plotModels(axs, select_Sil11_B[0], select_Sil11_B[1], r_kpc, left=False)\n return\n\n\ndef plotMatParameterSpace2d(names_E11_A, mse_E11_A, title='A1 (E11)', model_z=2, second_min=False, row_pix=4):\n \"Function to plot the parameter space\"\n fig, ax = plt.subplots(1,1,figsize=(6,5))\n\n tmm_dec = np.load('../Data/pairs_z%.1f/t_mm_deciles.npy'%model_z)\n xoff_dec = np.load('../Data/pairs_z%.1f/xoff_deciles.npy'%model_z)\n \n tmm_dec = [i+(j-i)/2 for i, j in zip(tmm_dec[:-1], tmm_dec[1:])]\n xoff_dec = [i+(j-i)/2 for i, j in zip(xoff_dec[:-1], xoff_dec[1:])]\n \n tmm_dec, xoff_dec = ['%.1f'%t for t in tmm_dec], ['%.2f'%t for t in xoff_dec]\n\n mse_2d = mafl.makeMatrix2D(names_E11_A, mse_E11_A)\n\n df = pd.DataFrame(mse_2d,\n index=tmm_dec,\n columns=xoff_dec)\n\n ax = sns.heatmap(df, cbar_kws={'label': 'MSE values'}, cmap='ocean')\n \n # make frame visible\n for _, spine in ax.spines.items():\n spine.set_visible(True)\n\n row, col = np.where(mse_2d == np.min(mse_2d))[0][0], np.where(mse_2d == np.min(mse_2d))[1][0]\n ax.add_patch(Rectangle((col, row), 1, 1, edgecolor='#fc521e', fill=False, lw=3))\n\n if second_min:\n #second_try = np.min(mse_2d[mse_2d != np.min(mse_2d)])\n row2 = row_pix\n col2 = np.where(mse_2d == second_try)[1][0]\n print(row2, col2)\n \n ax.add_patch(Rectangle((col2, row2), 1, 1, edgecolor='#fc521e', ls='--', fill=False, lw=3))\n\n setLabel(ax, r'$\\tilde{X}_{\\rm off}$', r'$t_{\\rm MM}$ [Gyr]', 'Model '+title, legend=False)\n fig.patch.set_facecolor('white')\n \n saveFig(title+'parameter_space.png')\n return ax" ]
[ [ "numpy.max", "scipy.interpolate.interp1d", "numpy.array", "matplotlib.pyplot.savefig", "pandas.DataFrame", "numpy.load", "numpy.min", "matplotlib.pyplot.subplots", "numpy.any", "numpy.where", "numpy.transpose", "numpy.arange", "numpy.sqrt", "matplotlib.patches.Rectangle" ] ]
edosedgar/RLForSeg
[ "fc748d8e7d2f2a1e7ac0dddb3f268ec3025d40ca" ]
[ "agents/sac_obj_lvl_rew.py" ]
[ "import os\nimport torch\nimport elf\nimport numpy as np\nimport wandb\nfrom elf.segmentation.features import compute_rag\nfrom torch.nn import functional as F\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.utils.data import DataLoader\nfrom collections import namedtuple\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom multiprocessing import Process, Lock\nimport threading\n\nfrom environments.multicut_obj_lvl_rew import MulticutEmbeddingsEnv, State\nfrom data.spg_dset import SpgDset\nfrom models.agent_model_obj_lvl import Agent\nfrom models.feature_extractor import FeExtractor\nfrom utils.exploration_functions import RunningAverage\nfrom utils.general import soft_update_params, set_seed_everywhere, cluster_embeddings, pca_project, random_label_cmap\nfrom utils.replay_memory import TransitionData_ts\nfrom utils.distances import CosineDistance, L2Distance\nfrom utils.matching import matching\nfrom utils.yaml_conv_parser import dict_to_attrdict\nfrom utils.training_helpers import update_env_data, supervised_policy_pretraining, state_to_cpu, Forwarder\n# from timeit import default_timer as timer\n\n\nclass AgentSacTrainerObjLvlReward(object):\n\n def __init__(self, cfg, global_count):\n super(AgentSacTrainerObjLvlReward, self).__init__()\n assert torch.cuda.device_count() == 1\n self.device = torch.device(\"cuda:0\")\n torch.cuda.set_device(self.device)\n torch.set_default_tensor_type(torch.FloatTensor)\n\n self.cfg = cfg\n self.global_count = global_count\n self.memory = TransitionData_ts(capacity = self.cfg.mem_size)\n self.best_val_reward = -np.inf\n if self.cfg.distance == 'cosine':\n self.distance = CosineDistance()\n else:\n self.distance = L2Distance()\n\n self.fe_ext = FeExtractor(dict_to_attrdict(self.cfg.backbone), self.distance, cfg.fe_delta_dist, self.device)\n self.fe_ext.embed_model.load_state_dict(torch.load(self.cfg.fe_model_name))\n self.fe_ext.cuda(self.device)\n\n self.model = Agent(self.cfg, State, self.distance, self.device)\n wandb.watch(self.model)\n self.model.cuda(self.device)\n self.model_mtx = Lock()\n\n MovSumLosses = namedtuple('mov_avg_losses', ('actor', 'critic', 'temperature'))\n Scalers = namedtuple('Scalers', ('critic', 'actor'))\n OptimizerContainer = namedtuple('OptimizerContainer',\n ('actor', 'critic', 'temperature', 'actor_shed', 'critic_shed', 'temp_shed'))\n actor_optimizer = torch.optim.Adam(self.model.actor.parameters(), lr=self.cfg.actor_lr)\n critic_optimizer = torch.optim.Adam(self.model.critic.parameters(), lr=self.cfg.critic_lr)\n temp_optimizer = torch.optim.Adam([self.model.log_alpha], lr=self.cfg.alpha_lr)\n\n lr_sched_cfg = dict_to_attrdict(self.cfg.lr_sched)\n bw = lr_sched_cfg.mov_avg_bandwidth\n off = lr_sched_cfg.mov_avg_offset\n weights = np.linspace(lr_sched_cfg.weight_range[0], lr_sched_cfg.weight_range[1], bw)\n weights = weights / weights.sum() # make them sum up to one\n shed = lr_sched_cfg.torch_sched\n\n self.mov_sum_losses = MovSumLosses(RunningAverage(weights, band_width=bw, offset=off),\n RunningAverage(weights, band_width=bw, offset=off),\n RunningAverage(weights, band_width=bw, offset=off))\n self.optimizers = OptimizerContainer(actor_optimizer, critic_optimizer, temp_optimizer,\n *[ReduceLROnPlateau(opt, patience=shed.patience,\n threshold=shed.threshold, min_lr=shed.min_lr,\n factor=shed.factor) for opt in\n (actor_optimizer, critic_optimizer, temp_optimizer)])\n self.scalers = Scalers(torch.cuda.amp.GradScaler(), torch.cuda.amp.GradScaler())\n self.forwarder = Forwarder()\n\n if 
self.cfg.agent_model_name != \"\":\n self.model.load_state_dict(torch.load(self.cfg.agent_model_name))\n # if \"policy_warmup\" in self.cfg and self.cfg.agent_model_name == \"\":\n # supervised_policy_pretraining(self.model, self.env, self.cfg, device=self.device)\n # torch.save(self.model.state_dict(), os.path.join(wandb.run.dir, \"sv_pretrained_policy_agent.pth\"))\n\n # finished with prepping\n for param in self.fe_ext.parameters():\n param.requires_grad = False\n\n self.train_dset = SpgDset(self.cfg.data_dir, dict_to_attrdict(self.cfg.patch_manager), dict_to_attrdict(self.cfg.data_keys))\n self.val_dset = SpgDset(self.cfg.val_data_dir, dict_to_attrdict(self.cfg.patch_manager), dict_to_attrdict(self.cfg.data_keys))\n\n def validate(self):\n \"\"\"validates the prediction against the method of clustering the embedding space\"\"\"\n env = MulticutEmbeddingsEnv(self.fe_ext, self.cfg, self.device)\n if self.cfg.verbose:\n print(\"\\n\\n###### start validate ######\", end='')\n self.model.eval()\n n_examples = len(self.val_dset)\n taus = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n rl_scores, keys = [], None\n ex_raws, ex_sps, ex_gts, ex_mc_gts, ex_embeds, ex_rl = [], [], [], [], [], []\n dloader = iter(DataLoader(self.val_dset, batch_size=1, shuffle=True, pin_memory=True, num_workers=0))\n acc_reward = 0\n for it in range(len(self.val_dset)):\n update_env_data(env, dloader, self.val_dset, self.device, with_gt_edges=\"sub_graph_dice\" in self.cfg.reward_function)\n env.reset()\n state = env.get_state()\n\n self.model_mtx.acquire()\n try:\n distr, _ = self.forwarder.forward(self.model, state, State, self.device, grad=False, post_data=False)\n finally:\n self.model_mtx.release()\n action = torch.sigmoid(distr.loc)\n reward, state = env.execute_action(action, None, post_images=True, tau=0.0, train=False)\n acc_reward += reward[-1].item()\n if self.cfg.verbose:\n print(f\"\\nstep: {it}; mean_loc: {round(distr.loc.mean().item(), 5)}; mean reward: {round(reward[-1].item(), 5)}\", end='')\n\n embeddings = env.embeddings[0].cpu().numpy()\n gt_seg = env.gt_seg[0].cpu().numpy()\n gt_mc = cm.prism(env.gt_soln[0].cpu()/env.gt_soln[0].max().item()) if env.gt_edge_weights is not None else torch.zeros(env.raw.shape[-2:])\n rl_labels = env.current_soln.cpu().numpy()[0]\n\n if it < n_examples:\n ex_embeds.append(pca_project(embeddings, n_comps=3))\n ex_raws.append(env.raw[0].cpu().permute(1, 2, 0).squeeze())\n ex_sps.append(env.init_sp_seg[0].cpu())\n ex_mc_gts.append(gt_mc)\n ex_gts.append(gt_seg)\n ex_rl.append(rl_labels)\n\n _rl_scores = matching(gt_seg, rl_labels, thresh=taus, criterion='iou', report_matches=False)\n\n if it == 0:\n for tau_it in range(len(_rl_scores)):\n rl_scores.append(np.array(list(map(float, list(_rl_scores[tau_it]._asdict().values())[1:]))))\n keys = list(_rl_scores[0]._asdict().keys())[1:]\n else:\n for tau_it in range(len(_rl_scores)):\n rl_scores[tau_it] += np.array(list(map(float, list(_rl_scores[tau_it]._asdict().values())[1:])))\n\n div = np.ones_like(rl_scores[0])\n for i, key in enumerate(keys):\n if key not in ('fp', 'tp', 'fn'):\n div[i] = 10\n\n for tau_it in range(len(rl_scores)):\n rl_scores[tau_it] = dict(zip(keys, rl_scores[tau_it] / div))\n\n fig, axs = plt.subplots(1, 2, figsize=(10, 10))\n plt.subplots_adjust(hspace=.5)\n\n for m in ('precision', 'recall', 'accuracy', 'f1'):\n y = [s[m] for s in rl_scores]\n data = [[x, y] for (x, y) in zip(taus, y)]\n table = wandb.Table(data=data, columns=[\"IoU_threshold\", m])\n wandb.log({\"validation/\" + m: 
wandb.plot.line(table, \"IoU_threshold\", m, stroke=None, title=m)})\n axs[0].plot(taus, [s[m] for s in rl_scores], '.-', lw=2, label=m)\n axs[0].set_ylabel('Metric value')\n axs[0].grid()\n axs[0].legend(bbox_to_anchor=(.8, 1.65), loc='upper left', fontsize='xx-small')\n axs[0].set_title('RL method')\n axs[0].set_xlabel(r'IoU threshold $\\tau$')\n\n for m in ('fp', 'tp', 'fn'):\n y = [s[m] for s in rl_scores]\n data = [[x, y] for (x, y) in zip(taus, y)]\n table = wandb.Table(data=data, columns=[\"IoU_threshold\", m])\n wandb.log({\"validation/\" + m: wandb.plot.line(table, \"IoU_threshold\", m, stroke=None, title=m)})\n axs[1].plot(taus, [s[m] for s in rl_scores], '.-', lw=2, label=m)\n axs[1].set_ylabel('Number #')\n axs[1].grid()\n axs[1].legend(bbox_to_anchor=(.87, 1.6), loc='upper left', fontsize='xx-small');\n axs[1].set_title('RL method')\n axs[1].set_xlabel(r'IoU threshold $\\tau$')\n\n wandb.log({\"validation/metrics\": [wandb.Image(fig, caption=\"metrics\")]})\n wandb.log({\"validation_reward\": acc_reward})\n plt.close('all')\n if acc_reward > self.best_val_reward:\n self.best_val_reward = acc_reward\n wandb.run.summary[\"validation_reward\"] = acc_reward\n torch.save(self.model.state_dict(), os.path.join(wandb.run.dir, \"best_checkpoint_agent.pth\"))\n if self.cfg.verbose:\n print(\"\\n###### finish validate ######\\n\", end='')\n\n for i in range(n_examples):\n fig, axs = plt.subplots(2, 3, sharex='col', sharey='row', gridspec_kw={'hspace': 0, 'wspace': 0})\n axs[0, 0].imshow(ex_gts[i], cmap=random_label_cmap(), interpolation=\"none\")\n axs[0, 0].set_title('gt')\n axs[0, 0].axis('off')\n if ex_raws[i].ndim == 3:\n axs[0, 1].imshow(ex_raws[i][..., 0])\n else:\n axs[0, 1].imshow(ex_raws[i])\n axs[0, 1].set_title('raw image')\n axs[0, 1].axis('off')\n axs[0, 2].imshow(ex_sps[i], cmap=random_label_cmap(), interpolation=\"none\")\n axs[0, 2].set_title('superpixels')\n axs[0, 2].axis('off')\n axs[1, 0].imshow(ex_embeds[i])\n axs[1, 0].set_title('pc proj 1-3', y=-0.15)\n axs[1, 0].axis('off')\n if ex_raws[i].ndim == 3:\n if ex_raws[i].shape[-1] > 1:\n axs[1, 1].imshow(ex_raws[i][..., 1])\n else:\n axs[1, 1].imshow(ex_raws[i][..., 0])\n else:\n axs[1, 1].imshow(ex_raws[i])\n axs[1, 1].set_title('sp edge', y=-0.15)\n axs[1, 1].axis('off')\n axs[1, 2].imshow(ex_rl[i], cmap=random_label_cmap(), interpolation=\"none\")\n axs[1, 2].set_title('prediction', y=-0.15)\n axs[1, 2].axis('off')\n wandb.log({\"validation/samples\": [wandb.Image(fig, caption=\"sample images\")]})\n plt.close('all')\n\n def update_critic(self, obs, action, reward):\n self.optimizers.critic.zero_grad()\n with torch.cuda.amp.autocast(enabled=True):\n current_Q1, current_Q2 = self.forwarder.forward(self.model, obs, State, self.device, actions=action)\n\n target_Q = reward[0]\n target_Q = target_Q.detach()\n\n critic_loss = F.mse_loss(current_Q1.squeeze(1), target_Q) + F.mse_loss(current_Q2.squeeze(1), target_Q)\n\n self.scalers.critic.scale(critic_loss).backward()\n self.scalers.critic.step(self.optimizers.critic)\n self.scalers.critic.update()\n\n return critic_loss.item(), reward[0].mean()\n\n def update_actor_and_alpha(self, obs, reward, expl_action):\n self.optimizers.actor.zero_grad()\n self.optimizers.temperature.zero_grad()\n obj_edge_mask_actor = obs.obj_edge_mask_actor.to(self.device)\n with torch.cuda.amp.autocast(enabled=True):\n distribution, actor_Q1, actor_Q2, action, side_loss = self.forwarder.forward(self.model, obs, State, self.device,\n expl_action=expl_action, policy_opt=True)\n obj_n_edges = 
obj_edge_mask_actor.sum(1)\n log_prob = distribution.log_prob(action)\n actor_loss = torch.tensor([0.0], device=actor_Q1[0].device)\n alpha_loss = torch.tensor([0.0], device=actor_Q1[0].device)\n\n actor_Q = torch.min(actor_Q1, actor_Q2)\n obj_log_prob = (log_prob[None] * obj_edge_mask_actor[..., None]).sum(1)\n obj_entropy = ((1 / 2 * (1 + (2 * np.pi * distribution.scale ** 2).log()))[None] * obj_edge_mask_actor[..., None]).sum(1).squeeze(1)\n\n loss = (self.model.alpha.detach() * obj_log_prob - actor_Q).mean()\n actor_loss = actor_loss + loss\n\n actor_loss = actor_loss + self.cfg.side_loss_weight * side_loss\n\n min_entropy = (self.cfg.entropy_range[1] - self.cfg.entropy_range[0]) * ((1.5 - reward[0]) / 1.5) + self.cfg.entropy_range[0]\n\n min_entropy = min_entropy.to(self.model.alpha.device).squeeze()\n entropy = obj_entropy.detach() if self.cfg.use_closed_form_entropy else -obj_log_prob.detach()\n alpha_loss = alpha_loss + (self.model.alpha * (entropy - (obj_n_edges * min_entropy))).mean()\n\n self.scalers.actor.scale(actor_loss).backward()\n self.scalers.actor.scale(alpha_loss).backward()\n self.scalers.actor.step(self.optimizers.actor)\n self.scalers.actor.step(self.optimizers.temperature)\n self.scalers.actor.update()\n\n return actor_loss.item(), alpha_loss.item(), min_entropy.mean().item(), distribution.loc.mean().item()\n\n def _step(self, step):\n actor_loss, alpha_loss, min_entropy, loc_mean = None, None, None, None\n\n (obs, action, reward), sample_idx = self.memory.sample()\n action = action.to(self.device)\n for i in range(len(reward)):\n reward[i] = reward[i].to(self.device)\n critic_loss, mean_reward = self.update_critic(obs, action, reward)\n self.memory.report_sample_loss(critic_loss + mean_reward, sample_idx)\n self.mov_sum_losses.critic.apply(critic_loss)\n # self.optimizers.critic_shed.step(self.mov_sum_losses.critic.avg)\n wandb.log({\"loss/critic\": critic_loss})\n\n if self.cfg.actor_update_after < step and step % self.cfg.actor_update_frequency == 0:\n actor_loss, alpha_loss, min_entropy, loc_mean = self.update_actor_and_alpha(obs, reward, action)\n self.mov_sum_losses.actor.apply(actor_loss)\n self.mov_sum_losses.temperature.apply(alpha_loss)\n # self.optimizers.actor_shed.step(self.mov_sum_losses.actor.avg)\n # self.optimizers.temp_shed.step(self.mov_sum_losses.actor.avg)\n wandb.log({\"loss/actor\": actor_loss})\n wandb.log({\"loss/alpha\": alpha_loss})\n\n if step % self.cfg.post_stats_frequency == 0:\n if min_entropy != \"nl\":\n wandb.log({\"min_entropy\": min_entropy})\n wandb.log({\"mov_avg/critic\": self.mov_sum_losses.critic.avg})\n wandb.log({\"mov_avg/actor\": self.mov_sum_losses.actor.avg})\n wandb.log({\"mov_avg/temperature\": self.mov_sum_losses.temperature.avg})\n wandb.log({\"lr/critic\": self.optimizers.critic_shed.optimizer.param_groups[0]['lr']})\n wandb.log({\"lr/actor\": self.optimizers.actor_shed.optimizer.param_groups[0]['lr']})\n wandb.log({\"lr/temperature\": self.optimizers.temp_shed.optimizer.param_groups[0]['lr']})\n\n if step % self.cfg.critic_target_update_frequency == 0:\n soft_update_params(self.model.critic, self.model.critic_tgt, self.cfg.critic_tau)\n\n return [critic_loss, actor_loss, alpha_loss, loc_mean]\n\n def train_until_finished(self):\n while self.global_count.value() <= self.cfg.T_max + self.cfg.mem_size:\n self.model_mtx.acquire()\n try:\n stats = [[], [], [], []]\n for i in range(self.cfg.n_updates_per_step):\n _stats = self._step(self.global_count.value())\n [s.append(_s) for s, _s in zip(stats, _stats)]\n for j in 
range(len(stats)):\n if any([_s is None for _s in stats[j]]):\n stats[j] = \"nl\"\n else:\n stats[j] = round(sum(stats[j])/self.cfg.n_updates_per_step, 5)\n\n if self.cfg.verbose:\n print(f\"step: {self.global_count.value()}; mean_loc: {stats[-1]}; n_explorer_steps {self.memory.push_count}\", end=\"\")\n print(f\"; cl: {stats[0]}; acl: {stats[1]}; al: {stats[3]}\")\n finally:\n self.model_mtx.release()\n self.global_count.increment()\n self.memory.reset_push_count()\n if self.global_count.value() % self.cfg.validatoin_freq == 0:\n self.validate()\n\n\n # Acts and trains model\n def train_and_explore(self, rn):\n self.global_count.reset()\n\n set_seed_everywhere(rn)\n wandb.config.random_seed = rn\n if self.cfg.verbose:\n print('###### start training ######')\n print('Running on device: ', self.device)\n print('found ', self.train_dset.length, \" training data patches\")\n print('found ', self.val_dset.length, \"validation data patches\")\n print('training with seed: ' + str(rn))\n explorers = []\n for i in range(self.cfg.n_explorers):\n explorers.append(threading.Thread(target=self.explore))\n [explorer.start() for explorer in explorers]\n\n self.memory.is_full_event.wait()\n trainer = threading.Thread(target=self.train_until_finished)\n trainer.start()\n\n trainer.join()\n self.global_count.set(self.cfg.T_max + self.cfg.mem_size + 4)\n [explorer.join() for explorer in explorers]\n self.memory.clear()\n del self.memory\n torch.save(self.model.state_dict(), os.path.join(wandb.run.dir, \"last_checkpoint_agent.pth\"))\n if self.cfg.verbose:\n print('\\n\\n###### training finished ######')\n return\n\n def explore(self):\n env = MulticutEmbeddingsEnv(self.fe_ext, self.cfg, self.device)\n tau = 1\n while self.global_count.value() <= self.cfg.T_max + self.cfg.mem_size:\n dloader = iter(DataLoader(self.train_dset, batch_size=self.cfg.batch_size, shuffle=True, pin_memory=True, num_workers=0))\n for iteration in range((len(self.train_dset) // self.cfg.batch_size) * self.cfg.data_update_frequency):\n if iteration % self.cfg.data_update_frequency == 0:\n update_env_data(env, dloader, self.train_dset, self.device, with_gt_edges=\"sub_graph_dice\" in self.cfg.reward_function)\n env.reset()\n state = env.get_state()\n\n if not self.memory.is_full():\n action = torch.rand((env.edge_ids.shape[-1], 1), device=self.device)\n else:\n self.model_mtx.acquire()\n try:\n distr, action = self.forwarder.forward(self.model, state, State, self.device, grad=False)\n finally:\n self.model_mtx.release()\n reward, state = env.execute_action(action, tau=max(0, tau))\n for i in range(len(reward)):\n reward[i] = reward[i].cpu()\n\n self.memory.push(state_to_cpu(state, State), action.cpu(), reward)\n if self.global_count.value() > self.cfg.T_max + self.cfg.mem_size:\n break\n return\n" ]
[ [ "numpy.ones_like", "torch.cuda.amp.autocast", "torch.load", "torch.sigmoid", "matplotlib.pyplot.subplots", "torch.tensor", "torch.utils.data.DataLoader", "matplotlib.pyplot.subplots_adjust", "torch.zeros", "torch.device", "torch.min", "torch.set_default_tensor_type", "matplotlib.pyplot.close", "torch.cuda.device_count", "torch.cuda.set_device", "torch.cuda.amp.GradScaler", "torch.rand", "torch.optim.Adam", "torch.optim.lr_scheduler.ReduceLROnPlateau", "numpy.linspace" ] ]
Peter-Chou/cgec-initialized-with-plm
[ "f2e3615de99ca1044b247d3cc49e89d63b587f43", "f2e3615de99ca1044b247d3cc49e89d63b587f43" ]
[ "experiments/roberta_roberta_share/eval_roberta_roberta_share.py", "experiments/ernie_gpt2/eval_ernie_gpt2.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport sys\nfrom pathlib import Path\nimport os\nPROJECT_DIR = Path(__file__).resolve().parents[2]\nsys.path.append(os.fspath(PROJECT_DIR))\n\nimport torch\nfrom transformers import EncoderDecoderModel\n\nfrom experiments.eval_encoder_decoder_model import eval_test_result, get_test_dataloader\nfrom experiments.utils import (\n create_tokenizer,\n prepare_evaluate,\n set_eval_parser,\n update_parser_defaults,\n)\n\n\ndef main():\n hyparam_file = Path(__file__).resolve().parent / \"hyparams.json\"\n parser = set_eval_parser()\n update_parser_defaults(hyparam_file, parser, train=False)\n args = parser.parse_args()\n\n args.model_name = \"roberta_roberta_share\"\n args.project_dir = PROJECT_DIR\n ckpt_dir, output_file = prepare_evaluate(args)\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n encoder_tokenizer = create_tokenizer(\n vocab_path=PROJECT_DIR / \"vocabs\" / \"vocab.txt\",\n tokenizer_type=\"bert\",\n use_custom_sos_eos=False,\n )\n decoder_tokenizer = create_tokenizer(\n vocab_path=PROJECT_DIR / \"vocabs\" / \"vocab.txt\",\n tokenizer_type=\"bert\",\n use_custom_sos_eos=True,\n )\n\n test_dataloader = get_test_dataloader(\n PROJECT_DIR / \"data\" / \"test.txt\",\n encoder_tokenizer,\n args.batch_size,\n max_seq_length=args.max_seq_length,\n )\n\n encoder_decoder_model = EncoderDecoderModel.from_pretrained(ckpt_dir).to(\n device)\n\n eval_test_result(\n test_dataloader=test_dataloader,\n result_path=output_file,\n decoder_tokenizer=decoder_tokenizer,\n model=encoder_decoder_model,\n max_seq_length=args.max_seq_length,\n num_beams=args.num_beams,\n device=device,\n )\n\n\nif __name__ == '__main__':\n main()\n", "# -*- coding: utf-8 -*-\n\nimport sys\nfrom pathlib import Path\nimport os\nPROJECT_DIR = Path(__file__).resolve().parents[2]\nsys.path.append(os.fspath(PROJECT_DIR))\n\nimport torch\nfrom transformers import EncoderDecoderModel\n\nfrom experiments.eval_encoder_decoder_model import eval_test_result, get_test_dataloader\nfrom experiments.utils import (\n create_tokenizer,\n prepare_evaluate,\n set_eval_parser,\n update_parser_defaults,\n)\n\n\ndef main():\n hyparam_file = Path(__file__).resolve().parent / \"hyparams.json\"\n parser = set_eval_parser()\n update_parser_defaults(hyparam_file, parser, train=False)\n args = parser.parse_args()\n\n args.model_name = \"ernie_gpt2\"\n args.project_dir = PROJECT_DIR\n ckpt_dir, output_file = prepare_evaluate(args)\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n encoder_vocab_file = PROJECT_DIR / \"vocabs\" / \"ernie_vocab.txt\"\n decoder_vocab_file = PROJECT_DIR / \"vocabs\" / \"vocab.txt\"\n encoder_tokenizer = create_tokenizer(\n vocab_path=encoder_vocab_file,\n tokenizer_type=\"bert\",\n use_custom_sos_eos=False,\n )\n decoder_tokenizer = create_tokenizer(\n vocab_path=decoder_vocab_file,\n tokenizer_type=\"bert\",\n use_custom_sos_eos=False,\n )\n\n test_dataloader = get_test_dataloader(\n PROJECT_DIR / \"data\" / \"test.txt\",\n encoder_tokenizer,\n args.batch_size,\n max_seq_length=args.max_seq_length,\n )\n\n encoder_decoder_model = EncoderDecoderModel.from_pretrained(ckpt_dir).to(\n device)\n\n eval_test_result(\n test_dataloader=test_dataloader,\n result_path=output_file,\n decoder_tokenizer=decoder_tokenizer,\n model=encoder_decoder_model,\n max_seq_length=args.max_seq_length,\n num_beams=args.num_beams,\n device=device,\n )\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.cuda.is_available" ], [ "torch.cuda.is_available" ] ]
jeremylimconsulting/spiceai
[ "84b370dfce02dac03774a6c8cd74f72e39904ee7" ]
[ "ai/src/connector/stateful.py" ]
[ "from data import DataManager\nimport numpy as np\nimport pandas as pd\nimport copy\nfrom exception import LawInvalidException, DataSourceActionInvalidException\nfrom exec import somewhat_safe_exec, somewhat_safe_eval\n\n\nclass StatefulConnector:\n def __init__(\n self,\n data_manager: DataManager,\n action_effects: \"dict[str]\",\n ):\n self.action_effects = action_effects\n self.data_manager = data_manager\n\n def update_state(self, next_timestamp: pd.Timestamp, new_data: dict):\n new_series = dict()\n for data in new_data:\n new_series[data] = [new_data[data]]\n new_data_frame = pd.DataFrame(new_series, index={next_timestamp})\n\n self.data_manager.merge_data(new_data_frame)\n\n def apply_action(self, action: int, data_row: pd.DataFrame) -> bool:\n action_name = self.data_manager.action_names[action]\n\n if not action_name in self.action_effects:\n return True\n\n locals = dict()\n for key, value in data_row.items():\n locals[key] = value[-1]\n\n original_local = copy.deepcopy(locals)\n\n try:\n locals = somewhat_safe_exec(self.action_effects[action_name], locals)\n except Exception as ex:\n raise DataSourceActionInvalidException(repr(ex))\n\n try:\n for law in self.data_manager.laws:\n if not somewhat_safe_eval(law, locals):\n return False\n except Exception as ex:\n raise LawInvalidException(repr(ex))\n\n del_fields = []\n for field in locals:\n if original_local[field] == locals[field]:\n del_fields.append(field)\n\n for field in del_fields:\n del locals[field]\n\n if len(locals) == 0:\n return True\n\n next_timestamp = data_row.index[-1] + self.data_manager.granularity_secs\n self.update_state(next_timestamp, locals)\n\n return True\n" ]
[ [ "pandas.DataFrame" ] ]
ranahanocka/pytorch-a2c-ppo-acktr
[ "e3119e883861315885d22788d025c6b29f076018" ]
[ "envs.py" ]
[ "import os\n\nimport gym\nimport numpy as np\nfrom gym.spaces.box import Box\n\nfrom baselines import bench\nfrom baselines.common.atari_wrappers import make_atari, wrap_deepmind\n\ntry:\n import dm_control2gym\nexcept ImportError:\n pass\n\ntry:\n import roboschool\nexcept ImportError:\n pass\n\ntry:\n import pybullet_envs\nexcept ImportError:\n pass\n\n\ndef make_env(env_id, seed, rank, log_dir, add_timestep):\n def _thunk():\n if env_id.startswith(\"dm\"):\n _, domain, task = env_id.split('.')\n env = dm_control2gym.make(domain_name=domain, task_name=task)\n else:\n env = gym.make(env_id)\n is_atari = hasattr(gym.envs, 'atari') and isinstance(\n env.unwrapped, gym.envs.atari.atari_env.AtariEnv)\n if is_atari:\n env = make_atari(env_id)\n env.seed(seed + rank)\n\n obs_shape = env.observation_space.shape\n if add_timestep and len(\n obs_shape) == 1 and str(env).find('TimeLimit') > -1:\n env = AddTimestep(env)\n\n if log_dir is not None:\n env = bench.Monitor(env, os.path.join(log_dir, str(rank)))\n\n if is_atari:\n env = wrap_deepmind(env)\n\n # If the input has shape (W,H,3), wrap for PyTorch convolutions\n obs_shape = env.observation_space.shape\n if len(obs_shape) == 3 and obs_shape[2] in [1, 3]:\n env = WrapPyTorch(env)\n\n return env\n\n return _thunk\n\n\nclass AddTimestep(gym.ObservationWrapper):\n def __init__(self, env=None):\n super(AddTimestep, self).__init__(env)\n self.observation_space = Box(\n self.observation_space.low[0],\n self.observation_space.high[0],\n [self.observation_space.shape[0] + 1],\n dtype=self.observation_space.dtype)\n\n def observation(self, observation):\n return np.concatenate((observation, [self.env._elapsed_steps]))\n\n\nclass WrapPyTorch(gym.ObservationWrapper):\n def __init__(self, env=None):\n super(WrapPyTorch, self).__init__(env)\n obs_shape = self.observation_space.shape\n self.observation_space = Box(\n self.observation_space.low[0, 0, 0],\n self.observation_space.high[0, 0, 0],\n [obs_shape[2], obs_shape[1], obs_shape[0]],\n dtype=self.observation_space.dtype)\n\n def observation(self, observation):\n return observation.transpose(2, 0, 1)\n" ]
[ [ "numpy.concatenate" ] ]
PacktPublishing/Hands-on-NLP-with-NLTK-and-scikit-learn-
[ "8bb2095093a822363675368a4216d30d14cac501" ]
[ "Section 3/nlp-3-sentiment-analysis.py" ]
[ "import collections\nimport nltk\nimport os\nfrom sklearn import (\n datasets, model_selection, feature_extraction, linear_model\n)\n\n\ndef extract_features(corpus):\n '''Extract TF-IDF features from corpus'''\n # vectorize means we turn non-numerical data into an array of numbers\n count_vectorizer = feature_extraction.text.CountVectorizer(\n lowercase=True, # for demonstration, True by default\n tokenizer=nltk.word_tokenize, # use the NLTK tokenizer\n stop_words='english', # remove stop words\n min_df=1 # minimum document frequency, i.e. the word must appear more than once.\n )\n processed_corpus = count_vectorizer.fit_transform(corpus)\n processed_corpus = feature_extraction.text.TfidfTransformer().fit_transform(\n processed_corpus)\n\n return processed_corpus\n\n\ndata_directory = 'movie_reviews'\nmovie_sentiment_data = datasets.load_files(data_directory, shuffle=True)\nprint('{} files loaded.'.format(len(movie_sentiment_data.data)))\nprint('They contain the following classes: {}.'.format(\n movie_sentiment_data.target_names))\n\nmovie_tfidf = extract_features(movie_sentiment_data.data)\n\nX_train, X_test, y_train, y_test = model_selection.train_test_split(\n movie_tfidf, movie_sentiment_data.target, test_size=0.30, random_state=42)\n\n# similar to nltk.NaiveBayesClassifier.train()\nmodel = linear_model.LogisticRegression()\nmodel.fit(X_train, y_train)\nprint('Model performance: {}'.format(model.score(X_test, y_test)))\n\ny_pred = model.predict(X_test)\nfor i in range(5):\n print('Review:\\n{review}\\n-\\nCorrect label: {correct}; Predicted: {predict}'.format(\n review=X_test[i], correct=y_test[i], predict=y_pred[i]\n ))\n" ]
[ [ "sklearn.model_selection.train_test_split", "sklearn.linear_model.LogisticRegression", "sklearn.feature_extraction.text.CountVectorizer", "sklearn.datasets.load_files", "sklearn.feature_extraction.text.TfidfTransformer" ] ]
JoaoLages/ecco
[ "0ef61c51b76c48cd8c21c7807c20510565f307de" ]
[ "src/ecco/activations.py" ]
[ "import torch\nimport numpy as np\n\n\ndef reshape_hidden_states_to_3d(hidden_states):\n \"\"\"\n Turn hidden_states from (layer, batch, position, d_model)\n to a tensor (layer, d_model, batch + position).\n Args:\n hidden_states: the hidden states return by the language model. A list of tensors. Its shape:\n (layer, batch, position, d_model)\n returns:\n hidden_states: tensor in the shape (layer, d_model, batch + position)\n \"\"\"\n hs = hidden_states\n\n # Turn from a list of tensors into a tensor\n if isinstance(hs, tuple):\n hs = torch.stack(hs)\n\n # Merge the batch and position dimensions\n hs = hs.reshape((hs.shape[0], -1, hs.shape[-1]))\n\n return hs\n\n\ndef reshape_activations_to_3d(activations):\n \"\"\"\n Reshape the activations tensors into a shape where it's easier to compare\n activation vectors.\n Args:\n activations: activations tensor of LM. Shape:\n (batch, layer, neuron, position)\n returns:\n activations: activations tensor reshaped into:\n (layer, neuron, batch + position)\n \"\"\"\n\n # Swap axes from (0 batch, 1 layer, 2 neuron, 3 position)\n # to (0 layer, 1 neuron, 2 batch, 3 position)\n activations = np.moveaxis(activations, [0, 1, 2], [2, 0, 1])\n s = activations.shape\n acts = activations.reshape(s[0], s[1], -1)\n return acts\n\n\n\n" ]
[ [ "numpy.moveaxis", "torch.stack" ] ]
wvitzthum/DL_super_resolution
[ "59b2e68bad02579693d48f33ae64d524f08af5ff" ]
[ "src/trainer_esrgan.py" ]
[ "from datetime import datetime\nimport time\nimport copy\nimport torch\nfrom torch import nn\nfrom torch.optim.adam import Adam\nfrom torch.optim.lr_scheduler import MultiStepLR\n\nfrom .models import ESRGANDiscriminator, ESRGANGenerator\nfrom .trainer import Trainer\nfrom .utils import AverageMeter, PerceptualLoss\nfrom .utils.image_operations import convert_image\n\n\nclass ESRGANTrainer(Trainer):\n ## ESRGAN specific params\n model = 'ESRGAN'\n\n # generator\n in_channels = 3 \n out_channels = 3\n channels_g = 64\n blocks_g = 23\n gc = 32\n\n # discriminator\n blocks_d = 4\n\n # image size\n img_size = 1000\n\n # optimizer\n weight_decay = 1e-2\n b1 = 0.9\n b2 = 0.999\n\n # lr\n decay_iter = [50000, 100000, 200000, 300000]\n\n # loss weights\n adv_loss_weight = 1\n cont_loss_weight = 1\n perc_loss_weight = 1\n\n\n def train(self):\n adversarial_loss = nn.BCEWithLogitsLoss().to(self.device)\n content_loss = nn.L1Loss().to(self.device)\n perception_loss = PerceptualLoss().to(self.device)\n # Generator\n generator = ESRGANGenerator().to(self.device)\n\n # Discriminator\n discriminator = ESRGANDiscriminator().to(self.device)\n\n # Initialize discriminator's optimizer\n optimizer_g = Adam(\n generator.parameters(),\n lr=self.lr, betas=(self.b1, self.b2),\n weight_decay=self.weight_decay)\n optimizer_d = Adam(\n discriminator.parameters(),\n lr=self.lr, betas=(self.b1, self.b2),\n weight_decay=self.weight_decay)\n \n lr_scheduler_g = MultiStepLR(\n optimizer_g, self.decay_iter\n )\n lr_scheduler_d = MultiStepLR(\n optimizer_d, self.decay_iter\n )\n\n generator.train()\n discriminator.train()\n\n batch_time = AverageMeter()\n data_time = AverageMeter() \n losses_g = AverageMeter()\n losses_d = AverageMeter()\n\n # variables for early stopping\n best_model_d, best_model_g = None, None\n best_epoch = None\n best_optimizer_d, best_optimizer_g = None, None\n best_loss = None\n\n loss_counter = 0\n\n start_time = time.time()\n for epoch in range(self.start_epoch, self.epochs):\n epoch_start = time.time()\n\n for i, (lr_img, hr_img) in enumerate(self.training_loader):\n data_time.update(time.time() - epoch_start)\n\n # Move to default device\n lr_img = lr_img.to(self.device) # (batch_size (N), 3, 24, 24), imagenet-normed\n hr_img = hr_img.to(self.device) # (batch_size (N), 3, 96, 96), imagenet-normed\n\n ###############################\n # Generator update\n ###############################\n optimizer_g.zero_grad()\n\n # Generate\n sr_img = generator(lr_img) # (N, 3, 96, 96), in [-1, 1]\n sr_img = convert_image(sr_img, source='[-1, 1]', target='imagenet-norm') # (N, 3, 96, 96), imagenet-normed\n\n # discriminate sr images\n hr_disc = discriminator(hr_img)\n sr_disc = discriminator(sr_img)\n\n disc_rf = hr_disc - sr_disc.mean()\n disc_fr = sr_disc - hr_disc.mean()\n\n # Calculate the losses\n cont_loss = content_loss(sr_img, hr_img)\n adv_loss = (\n adversarial_loss(disc_rf, torch.ones_like(disc_rf)) +\\\n adversarial_loss(disc_fr, torch.zeros_like(disc_rf))\n ) / 2\n perc_loss = perception_loss(hr_img, sr_img)\n\n\n generator_loss = perc_loss * self.perc_loss_weight + \\\n adv_loss * self.adv_loss_weight + \\\n cont_loss * self.cont_loss_weight\n\n generator_loss.backward()\n\n # Generator step\n optimizer_g.step()\n\n ###############################\n # Discriminator\n ###############################\n\n optimizer_d.zero_grad()\n\n # Discriminate both hr and sr image\n hr_disc = discriminator(hr_img)\n sr_disc = discriminator(sr_img.detach())\n\n disc_rf = hr_disc - sr_disc.mean()\n disc_fr = 
sr_disc - hr_disc.mean()\n\n disc_adv_loss = (\n adversarial_loss(disc_rf, torch.ones_like(disc_rf)) +\\\n adversarial_loss(disc_fr, torch.zeros_like(disc_fr))\n ) / 2\n\n # Backpropagate\n disc_adv_loss.backward()\n optimizer_d.step()\n\n losses_d.update(disc_adv_loss.item())\n\n ## Step learning rate schedulers\n lr_scheduler_g.step()\n lr_scheduler_d.step()\n\n if generator_loss.item() < losses_g.min:\n self.log(f\"# New best model selected for loss {generator_loss.item():.4f} last {losses_g.min:.4f}\")\n best_model_g = copy.deepcopy(generator)\n best_model_d = copy.deepcopy(discriminator)\n best_epoch = epoch\n best_loss = generator_loss.item()\n best_optimizer_d = copy.deepcopy(optimizer_d)\n best_optimizer_g = copy.deepcopy(optimizer_g)\n loss_counter = 0\n elif loss_counter == self.early_stopping:\n self.log(\"Early stopping condition has been reached, selected model from epoch %s\" % (epoch))\n self.save_model(\n epoch=best_epoch, model=best_model_d, optimizer=best_optimizer_d,\n loss=best_loss, start_time=start_time, identifier='d'\n )\n self.save_model(\n epoch=best_epoch, model=best_model_g, optimizer=best_optimizer_g,\n loss=best_loss, start_time=start_time, identifier='g'\n )\n return\n else: loss_counter += 1\n\n losses_g.update(generator_loss.item())\n\n # track batch time\n batch_time.update(time.time() - epoch_start)\n\n if self.save_images:\n self.save_img(generator, epoch, i)\n\n # reset epoch time and log results of iteration\n epoch_start = time.time()\n if i % self.print_freq == 0:\n loss_msg = f'[G {losses_g.val:.4f}/{losses_g.avg:.4f}] [D {losses_d.val:.4f}/{losses_d.avg:.4f}] [C {loss_counter}]'\n self.log_loss_msg(i, epoch, loss_msg, batch_time.val, time.time()-start_time)\n\n\n\n # Save the model\n save_time = datetime.now().strftime(\"[%m-%d]%H%M\")\n\n # save generator model\n torch.save({'epoch': epoch,\n 'model_state_dict': generator.state_dict(),\n 'optimizer_state_dict': optimizer_g.state_dict(),\n 'loss': generator_loss.item()},\n f'./checkpoints/{save_time}_CP_esrgan_g_{epoch}.pth.tar')\n\n # save discriminator model\n torch.save({'epoch': epoch,\n 'model_state_dict': discriminator.state_dict(),\n 'optimizer_state_dict': optimizer_d.state_dict(),\n 'loss': disc_adv_loss.item()},\n f'./checkpoints/{save_time}_CP_esrgan_d_{epoch}.pth.tar')\n\n self.log(f'{save_time} Saved ESRGAN checkpoints at epoch {epoch}')\n self.log_end_msg(time.time()-start_time)\n" ]
[ [ "torch.nn.L1Loss", "torch.optim.lr_scheduler.MultiStepLR", "torch.ones_like", "torch.nn.BCEWithLogitsLoss", "torch.zeros_like" ] ]
evdcush/neorl
[ "a1af069072e752ab79e7279a88ad95d195a81821" ]
[ "neorl/rl/baselines/acer/acer_simple.py" ]
[ "import time\nimport warnings\n\nimport numpy as np\nimport tensorflow as tf\nfrom gym.spaces import Discrete, Box\nfrom collections import deque\n\nfrom neorl.rl.baselines.shared import logger\nfrom neorl.rl.baselines.shared.schedules import Scheduler\nfrom neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, \\\n check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger\nfrom neorl.rl.baselines.acer.buffer import Buffer\nfrom neorl.rl.baselines.shared import ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter\nfrom neorl.rl.baselines.shared.runners import AbstractEnvRunner\nfrom neorl.rl.baselines.shared.policies import ActorCriticPolicy, RecurrentActorCriticPolicy\n\n# Filter tensorflow version warnings\nimport os\n# https://stackoverflow.com/questions/40426502/is-there-a-way-to-suppress-the-messages-tensorflow-prints/40426709\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}\nimport warnings\n# https://stackoverflow.com/questions/15777951/how-to-suppress-pandas-future-warning\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.simplefilter(action='ignore', category=Warning)\nimport tensorflow as tf\ntf.get_logger().setLevel('INFO')\ntf.autograph.set_verbosity(0)\nimport logging\ntf.get_logger().setLevel(logging.ERROR)\n\n# For ACER\ndef get_by_index(input_tensor, idx):\n \"\"\"\n Return the input tensor, offset by a certain value\n\n :param input_tensor: (TensorFlow Tensor) The input tensor\n :param idx: (int) The index offset\n :return: (TensorFlow Tensor) the offset tensor\n \"\"\"\n assert len(input_tensor.get_shape()) == 2\n assert len(idx.get_shape()) == 1\n idx_flattened = tf.range(0, input_tensor.shape[0], dtype=tf.int64) * input_tensor.shape[1] + idx\n offset_tensor = tf.gather(tf.reshape(input_tensor, [-1]), # flatten input\n idx_flattened) # use flattened indices\n return offset_tensor\n\n\ndef strip(var, n_envs, n_steps, flat=False):\n \"\"\"\n Removes the last step in the batch\n\n :param var: (TensorFlow Tensor) The input Tensor\n :param n_envs: (int) The number of environments\n :param n_steps: (int) The number of steps to run for each environment\n :param flat: (bool) If the input Tensor is flat\n :return: (TensorFlow Tensor) the input tensor, without the last step in the batch\n \"\"\"\n out_vars = batch_to_seq(var, n_envs, n_steps + 1, flat)\n return seq_to_batch(out_vars[:-1], flat)\n\n\ndef q_retrace(rewards, dones, q_i, values, rho_i, n_envs, n_steps, gamma):\n \"\"\"\n Calculates the target Q-retrace\n\n :param rewards: ([TensorFlow Tensor]) The rewards\n :param dones: ([TensorFlow Tensor])\n :param q_i: ([TensorFlow Tensor]) The Q values for actions taken\n :param values: ([TensorFlow Tensor]) The output of the value functions\n :param rho_i: ([TensorFlow Tensor]) The importance weight for each action\n :param n_envs: (int) The number of environments\n :param n_steps: (int) The number of steps to run for each environment\n :param gamma: (float) The discount value\n :return: ([TensorFlow Tensor]) the target Q-retrace\n \"\"\"\n rho_bar = batch_to_seq(tf.minimum(1.0, rho_i), n_envs, n_steps, True) # list of len steps, shape [n_envs]\n reward_seq = batch_to_seq(rewards, n_envs, n_steps, True) # list of len steps, shape [n_envs]\n done_seq = batch_to_seq(dones, n_envs, n_steps, True) # list of len steps, shape [n_envs]\n q_is = batch_to_seq(q_i, n_envs, n_steps, True)\n value_sequence = batch_to_seq(values, n_envs, n_steps + 1, True)\n final_value = value_sequence[-1]\n qret 
= final_value\n qrets = []\n for i in range(n_steps - 1, -1, -1):\n check_shape([qret, done_seq[i], reward_seq[i], rho_bar[i], q_is[i], value_sequence[i]], [[n_envs]] * 6)\n qret = reward_seq[i] + gamma * qret * (1.0 - done_seq[i])\n qrets.append(qret)\n qret = (rho_bar[i] * (qret - q_is[i])) + value_sequence[i]\n qrets = qrets[::-1]\n qret = seq_to_batch(qrets, flat=True)\n return qret\n\n\nclass EpisodeStats:\n def __init__(self, n_steps, n_envs):\n \"\"\"\n Calculates the episode statistics\n\n :param n_steps: (int) The number of steps to run for each environment\n :param n_envs: (int) The number of environments\n \"\"\"\n self.episode_rewards = []\n for _ in range(n_envs):\n self.episode_rewards.append([])\n self.len_buffer = deque(maxlen=40) # rolling buffer for episode lengths\n self.rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards\n self.n_steps = n_steps\n self.n_envs = n_envs\n\n def feed(self, rewards, masks):\n \"\"\"\n Update the latest reward and mask\n\n :param rewards: ([float]) The new rewards for the new step\n :param masks: ([float]) The new masks for the new step\n \"\"\"\n rewards = np.reshape(rewards, [self.n_envs, self.n_steps])\n masks = np.reshape(masks, [self.n_envs, self.n_steps])\n for i in range(0, self.n_envs):\n for j in range(0, self.n_steps):\n self.episode_rewards[i].append(rewards[i][j])\n if masks[i][j]:\n reward_length = len(self.episode_rewards[i])\n reward_sum = sum(self.episode_rewards[i])\n self.len_buffer.append(reward_length)\n self.rewbuffer.append(reward_sum)\n self.episode_rewards[i] = []\n\n def mean_length(self):\n \"\"\"\n Returns the average length of each episode\n\n :return: (float)\n \"\"\"\n if self.len_buffer:\n return np.mean(self.len_buffer)\n else:\n return 0 # on the first params dump, no episodes are finished\n\n def mean_reward(self):\n \"\"\"\n Returns the average reward of each episode\n\n :return: (float)\n \"\"\"\n if self.rewbuffer:\n return np.mean(self.rewbuffer)\n else:\n return 0\n\n\nclass ACER(ActorCriticRLModel):\n \"\"\"\n The ACER (Actor-Critic with Experience Replay) model class\n\n :param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)\n :param env: (NEORL environment or Gym environment) The environment to learn with PPO, either use NEORL method ``CreateEnvironment`` (see **below**) or construct your custom Gym environment\n :param gamma: (float) The discount value\n :param n_steps: (int) The number of steps to run for each environment per update\n (i.e. 
batch size is n_steps * n_env where n_env is number of environment copies running in parallel)\n :param q_coef: (float) The weight for the loss on the Q value\n :param ent_coef: (float) The weight for the entropy loss\n :param max_grad_norm: (float) The clipping value for the maximum gradient\n :param learning_rate: (float) The initial learning rate for the RMS prop optimizer\n :param lr_schedule: (str) The type of scheduler for the learning rate update ('linear', 'constant',\n 'double_linear_con', 'middle_drop' or 'double_middle_drop')\n :param buffer_size: (int) The buffer size in number of steps\n :param replay_ratio: (float) The number of replay learning per on policy learning on average,\n using a poisson distribution\n :param replay_start: (int) The minimum number of steps in the buffer, before experience replay starts\n :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug\n :param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).\n If None (default), use random seed.\n \"\"\"\n #:param alpha: (float) The decay rate for the Exponential moving average of the parameters\n #:param correction_term: (float) Importance weight clipping factor (default: 10)\n #:param delta: (float) max KL divergence between the old policy and updated policy (default: 1)\n #:param trust_region: (bool) Whether or not algorithms estimates the gradient KL divergence\n # between the old and updated policy and uses it to determine step size (default: True) \n \n def __init__(self, policy, env, gamma=0.99, n_steps=20, q_coef=0.5, ent_coef=0.01, max_grad_norm=10,\n learning_rate=7e-4, lr_schedule='linear', buffer_size=5000,\n replay_ratio=4, replay_start=1000, verbose=0, seed=None, _init_setup_model=True):\n\n #if num_procs is not None:\n # warnings.warn(\"num_procs will be removed in a future version (v3.x.x) \"\n # \"use n_cpu_tf_sess instead\", DeprecationWarning)\n # n_cpu_tf_sess = num_procs\n\n self.n_steps = n_steps\n self.replay_ratio = replay_ratio\n self.buffer_size = buffer_size\n self.replay_start = replay_start\n self.gamma = gamma\n self.alpha = 0.99\n self.correction_term = 10.0\n self.q_coef = q_coef\n self.ent_coef = ent_coef\n self.trust_region = True\n self.delta = 1\n self.max_grad_norm = max_grad_norm\n self.rprop_alpha = 0.99\n self.rprop_epsilon = 1e-5\n self.learning_rate = learning_rate\n self.lr_schedule = lr_schedule\n self.tensorboard_log = None\n self.full_tensorboard_log = False\n \n policy_kwargs=None\n n_cpu_tf_sess=1\n\n self.action_ph = None\n self.done_ph = None\n self.reward_ph = None\n self.mu_ph = None\n self.learning_rate_ph = None\n self.polyak_model = None\n self.learning_rate_schedule = None\n self.run_ops = None\n self.names_ops = None\n self.train_model = None\n self.step_model = None\n self.proba_step = None\n self.n_act = None\n self.n_batch = None\n self.summary = None\n\n super(ACER, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True,\n _init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs,\n seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)\n\n if _init_setup_model:\n self.setup_model()\n\n def _make_runner(self) -> AbstractEnvRunner:\n return _Runner(env=self.env, model=self, n_steps=self.n_steps)\n\n def _get_pretrain_placeholders(self):\n policy = self.step_model\n action_ph = policy.pdtype.sample_placeholder([None])\n if isinstance(self.action_space, Discrete):\n return policy.obs_ph, action_ph, policy.policy\n raise NotImplementedError('Only discrete actions are 
supported for ACER for now')\n\n def set_env(self, env):\n if env is not None:\n assert self.n_envs == env.num_envs, \\\n \"Error: the environment passed must have the same number of environments as the model was trained on.\" \\\n \"This is due to ACER not being capable of changing the number of environments.\"\n\n super().set_env(env)\n\n def setup_model(self):\n with SetVerbosity(self.verbose):\n\n assert issubclass(self.policy, ActorCriticPolicy), \"Error: the input policy for the ACER model must be \" \\\n \"an instance of common.policies.ActorCriticPolicy.\"\n\n if isinstance(self.action_space, Discrete):\n self.n_act = self.action_space.n\n continuous = False\n elif isinstance(self.action_space, Box):\n # self.n_act = self.action_space.shape[-1]\n # continuous = True\n raise NotImplementedError(\"WIP: Acer does not support Continuous actions yet.\")\n else:\n raise ValueError(\"Error: ACER does not work with {} actions space.\".format(self.action_space))\n\n self.n_batch = self.n_envs * self.n_steps\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)\n self.set_random_seed(self.seed)\n n_batch_step = None\n if issubclass(self.policy, RecurrentActorCriticPolicy):\n n_batch_step = self.n_envs\n n_batch_train = self.n_envs * (self.n_steps + 1)\n\n step_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,\n n_batch_step, reuse=False, **self.policy_kwargs)\n\n self.params = tf_util.get_trainable_vars(\"model\")\n\n with tf.variable_scope(\"train_model\", reuse=True,\n custom_getter=tf_util.outer_scope_getter(\"train_model\")):\n train_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs,\n self.n_steps + 1, n_batch_train, reuse=True, **self.policy_kwargs)\n\n with tf.variable_scope(\"moving_average\"):\n # create averaged model\n ema = tf.train.ExponentialMovingAverage(self.alpha)\n ema_apply_op = ema.apply(self.params)\n\n def custom_getter(getter, name, *args, **kwargs):\n name = name.replace(\"polyak_model/\", \"\")\n val = ema.average(getter(name, *args, **kwargs))\n return val\n\n with tf.variable_scope(\"polyak_model\", reuse=True, custom_getter=custom_getter):\n self.polyak_model = polyak_model = self.policy(self.sess, self.observation_space, self.action_space,\n self.n_envs, self.n_steps + 1,\n self.n_envs * (self.n_steps + 1), reuse=True,\n **self.policy_kwargs)\n\n with tf.variable_scope(\"loss\", reuse=False):\n self.done_ph = tf.placeholder(tf.float32, [self.n_batch]) # dones\n self.reward_ph = tf.placeholder(tf.float32, [self.n_batch]) # rewards, not returns\n self.mu_ph = tf.placeholder(tf.float32, [self.n_batch, self.n_act]) # mu's\n self.action_ph = train_model.pdtype.sample_placeholder([self.n_batch])\n self.learning_rate_ph = tf.placeholder(tf.float32, [])\n eps = 1e-6\n\n # Notation: (var) = batch variable, (var)s = sequence variable,\n # (var)_i = variable index by action at step i\n # shape is [n_envs * (n_steps + 1)]\n if continuous:\n value = train_model.value_flat\n else:\n value = tf.reduce_sum(train_model.policy_proba * train_model.q_value, axis=-1)\n\n rho, rho_i_ = None, None\n if continuous:\n action_ = strip(train_model.proba_distribution.sample(), self.n_envs, self.n_steps)\n distribution_f = tf.contrib.distributions.MultivariateNormalDiag(\n loc=strip(train_model.proba_distribution.mean, self.n_envs, self.n_steps),\n scale_diag=strip(train_model.proba_distribution.logstd, self.n_envs, 
self.n_steps))\n f_polyak = tf.contrib.distributions.MultivariateNormalDiag(\n loc=strip(polyak_model.proba_distribution.mean, self.n_envs, self.n_steps),\n scale_diag=strip(polyak_model.proba_distribution.logstd, self.n_envs, self.n_steps))\n\n f_i = distribution_f.prob(self.action_ph)\n f_i_ = distribution_f.prob(action_)\n f_polyak_i = f_polyak.prob(self.action_ph)\n phi_i = strip(train_model.proba_distribution.mean, self.n_envs, self.n_steps)\n\n q_value = strip(train_model.value_fn, self.n_envs, self.n_steps)\n q_i = q_value[:, 0]\n\n rho_i = tf.reshape(f_i, [-1, 1]) / (self.mu_ph + eps)\n rho_i_ = tf.reshape(f_i_, [-1, 1]) / (self.mu_ph + eps)\n\n qret = q_retrace(self.reward_ph, self.done_ph, q_i, value, tf.pow(rho_i, 1 / self.n_act),\n self.n_envs, self.n_steps, self.gamma)\n else:\n # strip off last step\n # f is a distribution, chosen to be Gaussian distributions\n # with fixed diagonal covariance and mean \\phi(x)\n # in the paper\n distribution_f, f_polyak, q_value = \\\n map(lambda variables: strip(variables, self.n_envs, self.n_steps),\n [train_model.policy_proba, polyak_model.policy_proba, train_model.q_value])\n\n # Get pi and q values for actions taken\n f_i = get_by_index(distribution_f, self.action_ph)\n f_i_ = distribution_f\n phi_i = distribution_f\n f_polyak_i = f_polyak\n\n q_i = get_by_index(q_value, self.action_ph)\n\n # Compute ratios for importance truncation\n rho = distribution_f / (self.mu_ph + eps)\n rho_i = get_by_index(rho, self.action_ph)\n\n # Calculate Q_retrace targets\n qret = q_retrace(self.reward_ph, self.done_ph, q_i, value, rho_i, self.n_envs, self.n_steps,\n self.gamma)\n\n # Calculate losses\n # Entropy\n entropy = tf.reduce_sum(train_model.proba_distribution.entropy())\n\n # Policy Gradient loss, with truncated importance sampling & bias correction\n value = strip(value, self.n_envs, self.n_steps, True)\n # check_shape([qret, value, rho_i, f_i], [[self.n_envs * self.n_steps]] * 4)\n # check_shape([rho, distribution_f, q_value], [[self.n_envs * self.n_steps, self.n_act]] * 2)\n\n # Truncated importance sampling\n adv = qret - value\n log_f = tf.log(f_i + eps)\n # [n_envs * n_steps]\n gain_f = log_f * tf.stop_gradient(adv * tf.minimum(self.correction_term, rho_i))\n loss_f = -tf.reduce_mean(gain_f)\n\n # Bias correction for the truncation\n adv_bc = (q_value - tf.reshape(value, [self.n_envs * self.n_steps, 1])) # [n_envs * n_steps, n_act]\n\n # check_shape([adv_bc, log_f_bc], [[self.n_envs * self.n_steps, self.n_act]] * 2)\n if continuous:\n gain_bc = tf.stop_gradient(adv_bc *\n tf.nn.relu(1.0 - (self.correction_term / (rho_i_ + eps))) *\n f_i_)\n else:\n log_f_bc = tf.log(f_i_ + eps) # / (f_old + eps)\n gain_bc = tf.reduce_sum(log_f_bc *\n tf.stop_gradient(\n adv_bc *\n tf.nn.relu(1.0 - (self.correction_term / (rho + eps))) *\n f_i_),\n axis=1)\n # IMP: This is sum, as expectation wrt f\n loss_bc = -tf.reduce_mean(gain_bc)\n\n loss_policy = loss_f + loss_bc\n\n # Value/Q function loss, and explained variance\n check_shape([qret, q_i], [[self.n_envs * self.n_steps]] * 2)\n explained_variance = q_explained_variance(tf.reshape(q_i, [self.n_envs, self.n_steps]),\n tf.reshape(qret, [self.n_envs, self.n_steps]))\n loss_q = tf.reduce_mean(tf.square(tf.stop_gradient(qret) - q_i) * 0.5)\n\n # Net loss\n check_shape([loss_policy, loss_q, entropy], [[]] * 3)\n loss = loss_policy + self.q_coef * loss_q - self.ent_coef * entropy\n\n tf.summary.scalar('entropy_loss', entropy)\n tf.summary.scalar('policy_gradient_loss', loss_policy)\n 
tf.summary.scalar('value_function_loss', loss_q)\n tf.summary.scalar('loss', loss)\n\n norm_grads_q, norm_grads_policy, avg_norm_grads_f = None, None, None\n avg_norm_k, avg_norm_g, avg_norm_k_dot_g, avg_norm_adj = None, None, None, None\n if self.trust_region:\n # [n_envs * n_steps, n_act]\n grad = tf.gradients(- (loss_policy - self.ent_coef * entropy) * self.n_steps * self.n_envs,\n phi_i)\n # [n_envs * n_steps, n_act] # Directly computed gradient of KL divergence wrt f\n kl_grad = - f_polyak_i / (f_i_ + eps)\n k_dot_g = tf.reduce_sum(kl_grad * grad, axis=-1)\n adj = tf.maximum(0.0, (tf.reduce_sum(kl_grad * grad, axis=-1) - self.delta) / (\n tf.reduce_sum(tf.square(kl_grad), axis=-1) + eps)) # [n_envs * n_steps]\n\n # Calculate stats (before doing adjustment) for logging.\n avg_norm_k = avg_norm(kl_grad)\n avg_norm_g = avg_norm(grad)\n avg_norm_k_dot_g = tf.reduce_mean(tf.abs(k_dot_g))\n avg_norm_adj = tf.reduce_mean(tf.abs(adj))\n\n grad = grad - tf.reshape(adj, [self.n_envs * self.n_steps, 1]) * kl_grad\n # These are turst region adjusted gradients wrt f ie statistics of policy pi\n grads_f = -grad / (self.n_envs * self.n_steps)\n grads_policy = tf.gradients(f_i_, self.params, grads_f)\n grads_q = tf.gradients(loss_q * self.q_coef, self.params)\n grads = [gradient_add(g1, g2, param, verbose=self.verbose)\n for (g1, g2, param) in zip(grads_policy, grads_q, self.params)]\n\n avg_norm_grads_f = avg_norm(grads_f) * (self.n_steps * self.n_envs)\n norm_grads_q = tf.global_norm(grads_q)\n norm_grads_policy = tf.global_norm(grads_policy)\n else:\n grads = tf.gradients(loss, self.params)\n\n norm_grads = None\n if self.max_grad_norm is not None:\n grads, norm_grads = tf.clip_by_global_norm(grads, self.max_grad_norm)\n grads = list(zip(grads, self.params))\n\n with tf.variable_scope(\"input_info\", reuse=False):\n tf.summary.scalar('rewards', tf.reduce_mean(self.reward_ph))\n tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate))\n tf.summary.scalar('advantage', tf.reduce_mean(adv))\n tf.summary.scalar('action_probability', tf.reduce_mean(self.mu_ph))\n\n if self.full_tensorboard_log:\n tf.summary.histogram('rewards', self.reward_ph)\n tf.summary.histogram('learning_rate', self.learning_rate)\n tf.summary.histogram('advantage', adv)\n tf.summary.histogram('action_probability', self.mu_ph)\n if tf_util.is_image(self.observation_space):\n tf.summary.image('observation', train_model.obs_ph)\n else:\n tf.summary.histogram('observation', train_model.obs_ph)\n\n trainer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate_ph, decay=self.rprop_alpha,\n epsilon=self.rprop_epsilon)\n _opt_op = trainer.apply_gradients(grads)\n\n # so when you call _train, you first do the gradient step, then you apply ema\n with tf.control_dependencies([_opt_op]):\n _train = tf.group(ema_apply_op)\n\n # Ops/Summaries to run, and their names for logging\n assert norm_grads is not None\n run_ops = [_train, loss, loss_q, entropy, loss_policy, loss_f, loss_bc, explained_variance, norm_grads]\n names_ops = ['loss', 'loss_q', 'entropy', 'loss_policy', 'loss_f', 'loss_bc', 'explained_variance',\n 'norm_grads']\n if self.trust_region:\n self.run_ops = run_ops + [norm_grads_q, norm_grads_policy, avg_norm_grads_f, avg_norm_k, avg_norm_g,\n avg_norm_k_dot_g, avg_norm_adj]\n self.names_ops = names_ops + ['norm_grads_q', 'norm_grads_policy', 'avg_norm_grads_f', 'avg_norm_k',\n 'avg_norm_g', 'avg_norm_k_dot_g', 'avg_norm_adj']\n\n self.train_model = train_model\n self.step_model = step_model\n self.step = 
step_model.step\n self.proba_step = step_model.proba_step\n self.initial_state = step_model.initial_state\n\n tf.global_variables_initializer().run(session=self.sess)\n\n self.summary = tf.summary.merge_all()\n\n def _train_step(self, obs, actions, rewards, dones, mus, states, masks, steps, writer=None):\n \"\"\"\n applies a training step to the model\n\n :param obs: ([float]) The input observations\n :param actions: ([float]) The actions taken\n :param rewards: ([float]) The rewards from the environment\n :param dones: ([bool]) Whether or not the episode is over (aligned with reward, used for reward calculation)\n :param mus: ([float]) The logits values\n :param states: ([float]) The states (used for recurrent policies)\n :param masks: ([bool]) Whether or not the episode is over (used for recurrent policies)\n :param steps: (int) the number of steps done so far (can be None)\n :param writer: (TensorFlow Summary.writer) the writer for tensorboard\n :return: ([str], [float]) the list of update operation name, and the list of the results of the operations\n \"\"\"\n cur_lr = self.learning_rate_schedule.value_steps(steps)\n td_map = {self.train_model.obs_ph: obs, self.polyak_model.obs_ph: obs, self.action_ph: actions,\n self.reward_ph: rewards, self.done_ph: dones, self.mu_ph: mus, self.learning_rate_ph: cur_lr}\n\n if states is not None:\n td_map[self.train_model.states_ph] = states\n td_map[self.train_model.dones_ph] = masks\n td_map[self.polyak_model.states_ph] = states\n td_map[self.polyak_model.dones_ph] = masks\n\n if writer is not None:\n # run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...)\n if self.full_tensorboard_log and (1 + (steps / self.n_batch)) % 10 == 0:\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n step_return = self.sess.run([self.summary] + self.run_ops, td_map, options=run_options,\n run_metadata=run_metadata)\n writer.add_run_metadata(run_metadata, 'step%d' % steps)\n else:\n step_return = self.sess.run([self.summary] + self.run_ops, td_map)\n writer.add_summary(step_return[0], steps)\n step_return = step_return[1:]\n else:\n step_return = self.sess.run(self.run_ops, td_map)\n\n return self.names_ops, step_return[1:] # strip off _train\n\n def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name=\"ACER\",\n reset_num_timesteps=True):\n\n new_tb_log = self._init_num_timesteps(reset_num_timesteps)\n callback = self._init_callback(callback)\n\n with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \\\n as writer:\n self._setup_learn()\n\n self.learning_rate_schedule = Scheduler(initial_value=self.learning_rate, n_values=total_timesteps,\n schedule=self.lr_schedule)\n\n episode_stats = EpisodeStats(self.n_steps, self.n_envs)\n\n if self.replay_ratio > 0:\n buffer = Buffer(env=self.env, n_steps=self.n_steps, size=self.buffer_size)\n else:\n buffer = None\n\n t_start = time.time()\n callback.on_training_start(locals(), globals())\n\n # n_batch samples, 1 on_policy call and multiple off-policy calls\n for steps in range(0, total_timesteps, self.n_batch):\n\n callback.on_rollout_start()\n\n enc_obs, obs, actions, rewards, mus, dones, masks = self.runner.run(callback)\n callback.update_locals(locals())\n callback.on_rollout_end()\n\n # Early stopping due to the callback\n if not self.runner.continue_training:\n break\n\n episode_stats.feed(rewards, dones)\n\n if buffer is not None:\n 
buffer.put(enc_obs, actions, rewards, mus, dones, masks)\n\n if writer is not None:\n total_episode_reward_logger(self.episode_reward,\n rewards.reshape((self.n_envs, self.n_steps)),\n dones.reshape((self.n_envs, self.n_steps)),\n writer, self.num_timesteps)\n\n # reshape stuff correctly\n obs = obs.reshape(self.runner.batch_ob_shape)\n actions = actions.reshape([self.n_batch])\n rewards = rewards.reshape([self.n_batch])\n mus = mus.reshape([self.n_batch, self.n_act])\n dones = dones.reshape([self.n_batch])\n masks = masks.reshape([self.runner.batch_ob_shape[0]])\n\n names_ops, values_ops = self._train_step(obs, actions, rewards, dones, mus, self.initial_state, masks,\n self.num_timesteps, writer)\n\n if self.verbose >= 1 and (int(steps / self.n_batch) % log_interval == 0):\n logger.record_tabular(\"total_timesteps\", self.num_timesteps)\n logger.record_tabular(\"fps\", int(steps / (time.time() - t_start)))\n # IMP: In EpisodicLife env, during training, we get done=True at each loss of life,\n # not just at the terminal state. Thus, this is mean until end of life, not end of episode.\n # For true episode rewards, see the monitor files in the log folder.\n logger.record_tabular(\"mean_episode_length\", episode_stats.mean_length())\n logger.record_tabular(\"mean_episode_reward\", episode_stats.mean_reward())\n for name, val in zip(names_ops, values_ops):\n logger.record_tabular(name, float(val))\n logger.dump_tabular()\n\n if (self.replay_ratio > 0 and\n buffer is not None and\n buffer.has_atleast(self.replay_start)):\n samples_number = np.random.poisson(self.replay_ratio)\n for _ in range(samples_number):\n # get obs, actions, rewards, mus, dones from buffer.\n obs, actions, rewards, mus, dones, masks = buffer.get()\n\n # reshape stuff correctly\n obs = obs.reshape(self.runner.batch_ob_shape)\n actions = actions.reshape([self.n_batch])\n rewards = rewards.reshape([self.n_batch])\n mus = mus.reshape([self.n_batch, self.n_act])\n dones = dones.reshape([self.n_batch])\n masks = masks.reshape([self.runner.batch_ob_shape[0]])\n\n self._train_step(obs, actions, rewards, dones, mus, self.initial_state, masks,\n self.num_timesteps)\n\n callback.on_training_end()\n\n return self\n\n def save(self, save_path, cloudpickle=False):\n data = {\n \"gamma\": self.gamma,\n \"n_steps\": self.n_steps,\n \"q_coef\": self.q_coef,\n \"ent_coef\": self.ent_coef,\n \"max_grad_norm\": self.max_grad_norm,\n \"learning_rate\": self.learning_rate,\n \"lr_schedule\": self.lr_schedule,\n \"rprop_alpha\": self.rprop_alpha,\n \"rprop_epsilon\": self.rprop_epsilon,\n \"replay_ratio\": self.replay_ratio,\n \"replay_start\": self.replay_start,\n \"verbose\": self.verbose,\n \"policy\": self.policy,\n \"observation_space\": self.observation_space,\n \"action_space\": self.action_space,\n \"n_envs\": self.n_envs,\n 'n_cpu_tf_sess': self.n_cpu_tf_sess,\n 'seed': self.seed,\n \"_vectorize_action\": self._vectorize_action,\n \"policy_kwargs\": self.policy_kwargs\n }\n\n params_to_save = self.get_parameters()\n\n self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)\n\n\nclass _Runner(AbstractEnvRunner):\n def __init__(self, env, model, n_steps):\n \"\"\"\n A runner to learn the policy of an environment for a model\n\n :param env: (Gym environment) The environment to learn from\n :param model: (Model) The model to learn\n :param n_steps: (int) The number of steps to run for each environment\n \"\"\"\n\n super(_Runner, self).__init__(env=env, model=model, n_steps=n_steps)\n self.env = env\n 
self.model = model\n self.n_env = n_env = env.num_envs\n if isinstance(env.action_space, Discrete):\n self.n_act = env.action_space.n\n else:\n self.n_act = env.action_space.shape[-1]\n self.n_batch = n_env * n_steps\n\n if len(env.observation_space.shape) > 1:\n self.raw_pixels = True\n obs_height, obs_width, obs_num_channels = env.observation_space.shape\n self.batch_ob_shape = (n_env * (n_steps + 1), obs_height, obs_width, obs_num_channels)\n self.obs_dtype = np.uint8\n self.obs = np.zeros((n_env, obs_height, obs_width, obs_num_channels), dtype=self.obs_dtype)\n self.num_channels = obs_num_channels\n else:\n if len(env.observation_space.shape) == 1:\n self.obs_dim = env.observation_space.shape[0]\n else:\n self.obs_dim = 1\n self.raw_pixels = False\n if isinstance(self.env.observation_space, Discrete):\n self.batch_ob_shape = (n_env * (n_steps + 1),)\n else:\n self.batch_ob_shape = (n_env * (n_steps + 1), self.obs_dim)\n self.obs_dtype = np.float32\n\n self.n_steps = n_steps\n self.states = model.initial_state\n self.dones = [False for _ in range(n_env)]\n\n def _run(self):\n \"\"\"\n Run a learning step of the model by collecting a rollout from the environment\n\n :return: ([float], [float], [int64], [float], [float], [bool], [float])\n encoded observation, observations, actions, rewards, mus, dones, masks\n \"\"\"\n enc_obs = [self.obs]\n mb_obs, mb_actions, mb_mus, mb_dones, mb_rewards = [], [], [], [], []\n for _ in range(self.n_steps):\n actions, _, states, _ = self.model.step(self.obs, self.states, self.dones)\n mus = self.model.proba_step(self.obs, self.states, self.dones)\n mb_obs.append(np.copy(self.obs))\n mb_actions.append(actions)\n mb_mus.append(mus)\n mb_dones.append(self.dones)\n clipped_actions = actions\n # Clip the actions to avoid out of bound error\n if isinstance(self.env.action_space, Box):\n clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)\n obs, rewards, dones, _ = self.env.step(clipped_actions)\n\n self.model.num_timesteps += self.n_envs\n\n if self.callback is not None:\n # Abort training early\n self.callback.update_locals(locals())\n if self.callback.on_step() is False:\n self.continue_training = False\n # Return dummy values\n return [None] * 7\n\n # states information for stateful models like LSTM\n self.states = states\n self.dones = dones\n self.obs = obs\n mb_rewards.append(rewards)\n enc_obs.append(obs)\n mb_obs.append(np.copy(self.obs))\n mb_dones.append(self.dones)\n\n enc_obs = np.asarray(enc_obs, dtype=self.obs_dtype).swapaxes(1, 0)\n mb_obs = np.asarray(mb_obs, dtype=self.obs_dtype).swapaxes(1, 0)\n mb_actions = np.asarray(mb_actions, dtype=np.int64).swapaxes(1, 0)\n mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)\n mb_mus = np.asarray(mb_mus, dtype=np.float32).swapaxes(1, 0)\n mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)\n\n mb_masks = mb_dones # Used for stateful models like LSTMs to mask state when done\n mb_dones = mb_dones[:, 1:] # Used for calculating returns. The dones array is now aligned with rewards\n\n # shapes are now [nenv, nsteps, []]\n # When pulling from buffer, arrays will now be reshaped in place, preventing a deep copy.\n\n return enc_obs, mb_obs, mb_actions, mb_rewards, mb_mus, mb_dones, mb_masks\n" ]
[ [ "tensorflow.group", "numpy.copy", "tensorflow.reshape", "numpy.mean", "tensorflow.gradients", "tensorflow.control_dependencies", "tensorflow.global_variables_initializer", "tensorflow.square", "tensorflow.summary.histogram", "numpy.random.poisson", "tensorflow.variable_scope", "tensorflow.get_logger", "tensorflow.abs", "tensorflow.range", "tensorflow.minimum", "numpy.reshape", "numpy.zeros", "tensorflow.summary.scalar", "tensorflow.nn.relu", "tensorflow.autograph.set_verbosity", "tensorflow.train.RMSPropOptimizer", "tensorflow.log", "tensorflow.placeholder", "tensorflow.reduce_sum", "tensorflow.global_norm", "numpy.clip", "tensorflow.summary.merge_all", "tensorflow.RunOptions", "tensorflow.clip_by_global_norm", "tensorflow.summary.image", "numpy.asarray", "tensorflow.Graph", "tensorflow.RunMetadata", "tensorflow.train.ExponentialMovingAverage", "tensorflow.pow", "tensorflow.reduce_mean", "tensorflow.stop_gradient" ] ]
ChetanMadan/face_recognition
[ "fd1989ea536784baf86ec502fb5c05151bc99221" ]
[ "facerec_from_webcam_faster.py" ]
[ "import face_recognition\nimport cv2\nimport pandas as pd\n\n# This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the\n# other example, but it includes some basic performance tweaks to make things run a lot faster:\n# 1. Process each video frame at 1/4 resolution (though still display it at full resolution)\n# 2. Only detect faces in every other frame of video.\n\n# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.\n# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this\n# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.\n\n# Get a reference to webcam #0 (the default one)\nvideo_capture = cv2.VideoCapture(0)\n\n\n# Create arrays of known face encodings and their names\n\nknown_face_names=pd.read_csv(\"names\", header=None)\n\n\"\"\"\nknown_face_names = [\n \"Chetan Madan\",\n \"Joe Biden\"\n]\n\n\"\"\"\n# Load a sample picture and learn how to recognize it.\n#obama_image = face_recognition.load_image_file(\"known/Chetan Madan.jpg\")\nfor name in known_face_names:\n\n face_encoding=face_recognition.face_encodings(face_recognition.load_image_file(\"known/\"+name))[0]\n print(face_encoding)\n#obama_face_encoding = face_recognition.face_encodings(face_recognition.load_image_file(\"known/Chetan Madan.jpg\"))[0]\n\n# Load a second sample picture and learn how to recognize it.\nbiden_image = face_recognition.load_image_file(\"known/Joe Biden.jpg\")\nbiden_face_encoding = face_recognition.face_encodings(biden_image)[0]\n\n\nknown_face_encodings = [\n obama_face_encoding,\n biden_face_encoding\n]\n\n# Initialize some variables\nface_locations = []\nface_encodings = []\nface_names = []\nprocess_this_frame = True\n\nwhile True:\n # Grab a single frame of video\n ret, frame = video_capture.read()\n\n # Resize frame of video to 1/4 size for faster face recognition processing\n small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n\n # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n rgb_small_frame = small_frame[:, :, ::-1]\n\n # Only process every other frame of video to save time\n if process_this_frame:\n # Find all the faces and face encodings in the current frame of video\n face_locations = face_recognition.face_locations(rgb_small_frame)\n face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\n\n face_names = []\n for face_encoding in face_encodings:\n # See if the face is a match for the known face(s)\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\n name = \"Unknown\"\n\n # If a match was found in known_face_encodings, just use the first one.\n if True in matches:\n first_match_index = matches.index(True)\n name = known_face_names[first_match_index]\n\n face_names.append(name)\n\n process_this_frame = not process_this_frame\n\n\n # Display the results\n for (top, right, bottom, left), name in zip(face_locations, face_names):\n # Scale back up face locations since the frame we detected in was scaled to 1/4 size\n top *= 4\n right *= 4\n bottom *= 4\n left *= 4\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, name, (left + 
6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n\n # Display the resulting image\n cv2.imshow('Video', frame)\n\n # Hit 'q' on the keyboard to quit!\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# Release handle to the webcam\nvideo_capture.release()\ncv2.destroyAllWindows()\n" ]
[ [ "pandas.read_csv" ] ]