repo_name
stringlengths
6
130
hexsha
list
file_path
list
code
list
apis
list
puchake/market-teller
[ "368b8fd8cf151723edebaf5787c3d95c0731c72a" ]
[ "src/nn/config.py" ]
[ "import os\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass NnConst(object):\n # Global index of performed network run.\n RUN_INDEX = 33\n\n # Index of input data set, which is used for network\n # (basic_nn_set_0, 1, 2 or 3).\n SET_INDEX = 0\n\n # Available modes of the network. If it is fresh run (start), continuation\n # of previously started training from the last checkpoint (continue) or\n # usage run (use).\n START_MODE = \"start\"\n CONTINUE_MODE = \"continue\"\n USE_MODE = \"use\"\n\n # Mode of currently performed run.\n MODE = START_MODE\n\n # Network's training paths patterns.\n TRAINING_DATA_SET_DIR_PATTERN = \"../../data/set/basic_nn_set_{}/\"\n TRAIN_LOGS_DIR_PATTERN = \"../../data/logs/basic_nn/run_{}/train\"\n VALIDATION_LOGS_DIR_PATTERN = \"../../data/logs/basic_nn/run_{}/validation\"\n CHECKPOINTS_DIR_PATTERN = \"../../data/logs/basic_nn/run_{}/checkpoints\"\n BEST_MODEL_DIR_PATTERN = \"../../data/logs/basic_nn/run_{}/best_model\"\n\n # Paths used in usage runs.\n USAGE_DATA_SET_PATH = \"../../data/set/usage/data.npy\"\n USAGE_OUT_PATH = \"../../data/out/out\"\n USAGE_MODEL_PATH_PATTERN = \"../../data/logs/basic_nn/run_{}/best_model/\" \\\n \"best_model\"\n\n # Error message for logs override attempt.\n LOGS_OVERRIDE_ERROR = \"Attempted to override existing logs for run {}.\"\n\n # Maximum checkpoints to keep. This value will probably be never exceeded.\n MAX_CHECKPOINTS = 1000\n\n\nclass LstmConst(object):\n # Global index of performed network run.\n RUN_INDEX = 33\n\n # Index of input data set, which is used for network\n # (basic_nn_set_0, 1, 2 or 3).\n SET_INDEX = 0\n\n # Available modes of the network. 
If it is fresh run (start), continuation of\n # previously started training from the last checkpoint (continue) or usage run\n # (use).\n START_MODE = \"start\"\n CONTINUE_MODE = \"continue\"\n USE_MODE = \"use\"\n\n # Mode of currently performed run.\n MODE = START_MODE\n\n # Network's training paths patterns.\n TRAINING_DATA_SET_DIR_PATTERN = \"../../data/set/basic_nn_set_{}/\"\n TRAIN_LOGS_DIR_PATTERN = \"../../data/logs/basic_nn/run_{}/train\"\n VALIDATION_LOGS_DIR_PATTERN = \"../../data/logs/basic_nn/run_{}/validation\"\n CHECKPOINTS_DIR_PATTERN = \"../../data/logs/basic_nn/run_{}/checkpoints\"\n BEST_MODEL_DIR_PATTERN = \"../../data/logs/basic_nn/run_{}/best_model\"\n\n # Paths used in usage runs.\n USAGE_DATA_SET_PATH = \"../../data/set/usage/data.npy\"\n USAGE_OUT_PATH = \"../../data/out/out\"\n USAGE_MODEL_PATH_PATTERN = \"../../data/logs/basic_nn/run_{}/best_model/\" \\\n \"best_model\"\n\n # Error message for logs override attempt.\n LOGS_OVERRIDE_ERROR = \"Attempted to override existing logs for run {}.\"\n\n # Maximum checkpoints to keep. This value will probably be never exceeded.\n MAX_CHECKPOINTS = 1000\n\n\nclass NnConfig(object):\n TRAIN_BATCH_SIZE = 50000\n NO_DROPOUT = 0.0\n IN_SIZE = 75\n NUM_OF_LABELS = 2\n LAYER_SIZES = [1024, 512, 256, 128, 128, 2]\n DROPOUT = 0.25\n MAX_ITERATIONS = 1000\n VALIDATION_INTERVAL = 10\n LEARNING_RATE = 0.001\n LEARNING_RATE_DECAY = 0.8\n DECAY_INTERVAL = 100\n CHECKPOINT_INTERVAL = 10\n\n\nclass LstmConfig(object):\n UNFOLD_BATCH_SIZE = 50\n TRAIN_BATCH_SIZE = 50000\n NO_DROPOUT = 0.0\n IN_SIZE = 3\n NUM_OF_LABELS = 2\n HIDDEN_SIZE = 512\n FORGET_BIAS = 0.0\n ACTIVATION = tf.tanh\n DROPOUT = 0.25\n MAX_ITERATIONS = 10000\n VALIDATION_INTERVAL = 10\n LEARNING_RATE = 0.0003\n LEARNING_RATE_DECAY = 0.8\n DECAY_INTERVAL = 100\n CHECKPOINT_INTERVAL = 10\n\n\ndef setup_training_environment(const, run_index, set_index, mode):\n \"\"\"\n Creates directories necessary for neural network's run. 
If directories\n already exist, it raises an ValueError to stop user from overriding\n existing logs and models.\n\n :param run_index: index of the neural network's run\n :param set_index: index of the used data set\n :param mode: current run's mode\n :return: set of paths used by basic nn: data set, train logs,\n validation logs, checkpoints and best model directories\n \"\"\"\n\n data_set_dir = const.TRAINING_DATA_SET_DIR_PATTERN.format(set_index)\n train_logs_dir = const.TRAIN_LOGS_DIR_PATTERN.format(run_index)\n validation_logs_dir = const.VALIDATION_LOGS_DIR_PATTERN.format(run_index)\n checkpoints_dir = const.CHECKPOINTS_DIR_PATTERN.format(run_index)\n best_model_dir = const.BEST_MODEL_DIR_PATTERN.format(run_index)\n\n dirs_to_check = [\n train_logs_dir, validation_logs_dir, checkpoints_dir, best_model_dir\n ]\n\n for dir_path in dirs_to_check:\n\n # If one of the log directories already exists raise an error.\n # Else create that directory.\n if os.path.exists(dir_path):\n if mode == const.START_MODE:\n raise ValueError(const.LOGS_OVERRIDE_ERROR.format(run_index))\n else:\n os.makedirs(dir_path)\n\n return data_set_dir, train_logs_dir, validation_logs_dir, \\\n checkpoints_dir, best_model_dir\n\n\ndef load_training_data_set(data_set_dir):\n \"\"\"\n Load data, which will be used by nn in training, from a specified folder.\n\n :param data_set_dir: path to folder which contains training data\n :return: train, validation and test data and labels\n \"\"\"\n\n train_data = np.load(data_set_dir + \"train_data.npy\")\n train_labels = np.load(data_set_dir + \"train_labels.npy\")\n validation_data = np.load(data_set_dir + \"validation_data.npy\")\n validation_labels = np.load(data_set_dir + \"validation_labels.npy\")\n test_data = np.load(data_set_dir + \"test_data.npy\")\n test_labels = np.load(data_set_dir + \"test_labels.npy\")\n\n return train_data, train_labels, \\\n validation_data, validation_labels, \\\n test_data, test_labels\n" ]
[ [ "numpy.load" ] ]
owaisCS/TestHateSpeech
[ "6b72afa1ebafc43624676e7006795c8d99a305b6" ]
[ "bert/run_classifier.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport csv\nimport logging\nimport os\nimport random\nimport sys\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom torch.nn import CrossEntropyLoss, MSELoss\nfrom scipy.stats import pearsonr, spearmanr\nfrom sklearn.metrics import matthews_corrcoef, f1_score\n\nfrom .file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME\nfrom .modeling import BertForSequenceClassification, BertConfig\nfrom .tokenization import BertTokenizer\nfrom .optimization import BertAdam, WarmupLinearSchedule\n\nlogger = logging.getLogger(__name__)\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. 
The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, label_id):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\", encoding=\"utf-8\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n if sys.version_info[0] == 2:\n line = list(unicode(cell, 'utf-8') for cell in line)\n lines.append(line)\n return lines\n\n\nclass MrpcProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n logger.info(\"LOOKING AT {}\".format(os.path.join(data_dir, \"train.tsv\")))\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n 
self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n text_b = line[4]\n label = line[0]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass MnliProcessor(DataProcessor):\n \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")),\n \"dev_matched\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[8]\n text_b = line[9]\n label = line[-1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass MnliMismatchedProcessor(MnliProcessor):\n \"\"\"Processor for the MultiNLI Mismatched data set (GLUE version).\"\"\"\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev_mismatched.tsv\")),\n \"dev_matched\")\n\n\nclass ColaProcessor(DataProcessor):\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n 
return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n label = line[1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\nclass Sst2Processor(DataProcessor):\n \"\"\"Processor for the SST-2 data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[0]\n label = line[1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\nclass StsbProcessor(DataProcessor):\n \"\"\"Processor for the STS-B data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, 
\"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [None]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[7]\n text_b = line[8]\n label = line[-1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass QqpProcessor(DataProcessor):\n \"\"\"Processor for the QQP data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n try:\n text_a = line[3]\n text_b = line[4]\n label = line[5]\n except IndexError:\n continue\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass QnliProcessor(DataProcessor):\n \"\"\"Processor for the QNLI data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \n \"dev_matched\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"entailment\", 
\"not_entailment\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n label = line[-1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass RteProcessor(DataProcessor):\n \"\"\"Processor for the RTE data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"entailment\", \"not_entailment\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n label = line[-1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass WnliProcessor(DataProcessor):\n \"\"\"Processor for the WNLI data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = 
[]\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n label = line[-1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\ndef convert_examples_to_features(examples, label_list, max_seq_length,\n tokenizer, output_mode):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n label_map = {label : i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). 
This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if output_mode == \"classification\":\n label_id = label_map[example.label]\n elif output_mode == \"regression\":\n label_id = float(example.label)\n else:\n raise KeyError(output_mode)\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a 
simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef simple_accuracy(preds, labels):\n return (preds == labels).mean()\n\n\ndef acc_and_f1(preds, labels):\n acc = simple_accuracy(preds, labels)\n f1 = f1_score(y_true=labels, y_pred=preds)\n return {\n \"acc\": acc,\n \"f1\": f1,\n \"acc_and_f1\": (acc + f1) / 2,\n }\n\n\ndef pearson_and_spearman(preds, labels):\n pearson_corr = pearsonr(preds, labels)[0]\n spearman_corr = spearmanr(preds, labels)[0]\n return {\n \"pearson\": pearson_corr,\n \"spearmanr\": spearman_corr,\n \"corr\": (pearson_corr + spearman_corr) / 2,\n }\n\n\ndef compute_metrics(task_name, preds, labels):\n assert len(preds) == len(labels)\n if task_name == \"cola\":\n return {\"mcc\": matthews_corrcoef(labels, preds)}\n elif task_name == \"sst-2\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"mrpc\":\n return acc_and_f1(preds, labels)\n elif task_name == \"sts-b\":\n return pearson_and_spearman(preds, labels)\n elif task_name == \"qqp\":\n return acc_and_f1(preds, labels)\n elif task_name == \"mnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"mnli-mm\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"qnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"rte\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"wnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n else:\n raise KeyError(task_name)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n 
parser.add_argument(\"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument(\"--bert_model\", default=None, type=str, required=True,\n help=\"Bert pre-trained model selected in the list: bert-base-uncased, \"\n \"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, \"\n \"bert-base-multilingual-cased, bert-base-chinese.\")\n parser.add_argument(\"--task_name\",\n default=None,\n type=str,\n required=True,\n help=\"The name of the task to train.\")\n parser.add_argument(\"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\")\n parser.add_argument(\"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. 
\\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n parser.add_argument(\"--do_train\",\n action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\",\n action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_lower_case\",\n action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n parser.add_argument(\"--train_batch_size\",\n default=32,\n type=int,\n help=\"Total batch size for training.\")\n parser.add_argument(\"--eval_batch_size\",\n default=8,\n type=int,\n help=\"Total batch size for eval.\")\n parser.add_argument(\"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--num_train_epochs\",\n default=3.0,\n type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--warmup_proportion\",\n default=0.1,\n type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10%% of training.\")\n parser.add_argument(\"--no_cuda\",\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument(\"--local_rank\",\n type=int,\n default=-1,\n help=\"local_rank for distributed training on gpus\")\n parser.add_argument('--seed',\n type=int,\n default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--gradient_accumulation_steps',\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument('--fp16',\n action='store_true',\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\n parser.add_argument('--loss_scale',\n type=float, default=0,\n help=\"Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\\n\"\n \"0 (default value): dynamic loss scaling.\\n\"\n \"Positive power of 2: static loss scaling value.\\n\")\n parser.add_argument('--server_ip', type=str, default='', help=\"Can be used for distant debugging.\")\n parser.add_argument('--server_port', type=str, default='', help=\"Can be used for distant debugging.\")\n args = parser.parse_args()\n\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n processors = {\n \"cola\": ColaProcessor,\n \"mnli\": MnliProcessor,\n \"mnli-mm\": MnliMismatchedProcessor,\n \"mrpc\": MrpcProcessor,\n \"sst-2\": Sst2Processor,\n \"sts-b\": StsbProcessor,\n \"qqp\": QqpProcessor,\n \"qnli\": QnliProcessor,\n \"rte\": RteProcessor,\n \"wnli\": WnliProcessor,\n }\n\n output_modes = {\n \"cola\": \"classification\",\n \"mnli\": \"classification\",\n \"mrpc\": \"classification\",\n \"sst-2\": \"classification\",\n \"sts-b\": \"regression\",\n \"qqp\": \"classification\",\n \"qnli\": \"classification\",\n \"rte\": \"classification\",\n \"wnli\": \"classification\",\n }\n\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n\n logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n\n logger.info(\"device: {} n_gpu: {}, distributed 
training: {}, 16-bits training: {}\".format(\n device, n_gpu, bool(args.local_rank != -1), args.fp16))\n\n if args.gradient_accumulation_steps < 1:\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n args.gradient_accumulation_steps))\n\n args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n if not args.do_train and not args.do_eval:\n raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\n\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:\n raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(args.output_dir))\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n task_name = args.task_name.lower()\n\n if task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n processor = processors[task_name]()\n output_mode = output_modes[task_name]\n\n label_list = processor.get_labels()\n num_labels = len(label_list)\n\n tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)\n\n train_examples = None\n num_train_optimization_steps = None\n if args.do_train:\n train_examples = processor.get_train_examples(args.data_dir)\n num_train_optimization_steps = int(\n len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs\n if args.local_rank != -1:\n num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()\n\n # Prepare model\n cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))\n model = BertForSequenceClassification.from_pretrained(args.bert_model,\n cache_dir=cache_dir,\n num_labels=num_labels)\n if 
args.fp16:\n model.half()\n model.to(device)\n if args.local_rank != -1:\n try:\n from apex.parallel import DistributedDataParallel as DDP\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n model = DDP(model)\n elif n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Prepare optimizer\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n if args.fp16:\n try:\n from apex.optimizers import FP16_Optimizer\n from apex.optimizers import FusedAdam\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n optimizer = FusedAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n bias_correction=False,\n max_grad_norm=1.0)\n if args.loss_scale == 0:\n optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)\n else:\n optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)\n warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,\n t_total=num_train_optimization_steps)\n\n else:\n optimizer = BertAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n warmup=args.warmup_proportion,\n t_total=num_train_optimization_steps)\n\n global_step = 0\n nb_tr_steps = 0\n tr_loss = 0\n if args.do_train:\n train_features = convert_examples_to_features(\n train_examples, label_list, args.max_seq_length, tokenizer, output_mode)\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_examples))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n logger.info(\" Num steps = %d\", 
num_train_optimization_steps)\n all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)\n\n if output_mode == \"classification\":\n all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)\n\n train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n if args.local_rank == -1:\n train_sampler = RandomSampler(train_data)\n else:\n train_sampler = DistributedSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n\n model.train()\n for _ in trange(int(args.num_train_epochs), desc=\"Epoch\"):\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0\n for step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")):\n batch = tuple(t.to(device) for t in batch)\n input_ids, input_mask, segment_ids, label_ids = batch\n\n # define a new function to compute loss values for both output_modes\n logits = model(input_ids, segment_ids, input_mask, labels=None)\n\n if output_mode == \"classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))\n elif output_mode == \"regression\":\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), label_ids.view(-1))\n\n if n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n optimizer.backward(loss)\n else:\n loss.backward()\n\n tr_loss += loss.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n # modify learning 
rate with special warm up BERT uses\n # if args.fp16 is False, BertAdam is used that handles this automatically\n lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step/num_train_optimization_steps,\n args.warmup_proportion)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_this_step\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Save a trained model, configuration and tokenizer\n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)\n output_config_file = os.path.join(args.output_dir, CONFIG_NAME)\n\n torch.save(model_to_save.state_dict(), output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n tokenizer.save_vocabulary(args.output_dir)\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels)\n tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n else:\n model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)\n model.to(device)\n\n if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n eval_examples = processor.get_dev_examples(args.data_dir)\n eval_features = convert_examples_to_features(\n eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)\n logger.info(\"***** Running evaluation *****\")\n logger.info(\" Num examples = %d\", len(eval_examples))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n 
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n\n if output_mode == \"classification\":\n all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.float)\n\n eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n # Run prediction for full data\n eval_sampler = SequentialSampler(eval_data)\n eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n model.eval()\n eval_loss = 0\n nb_eval_steps = 0\n preds = []\n\n for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"):\n input_ids = input_ids.to(device)\n input_mask = input_mask.to(device)\n segment_ids = segment_ids.to(device)\n label_ids = label_ids.to(device)\n\n with torch.no_grad():\n logits = model(input_ids, segment_ids, input_mask, labels=None)\n\n # create eval loss and other metric required by the task\n if output_mode == \"classification\":\n loss_fct = CrossEntropyLoss()\n tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))\n elif output_mode == \"regression\":\n loss_fct = MSELoss()\n tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))\n \n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n if len(preds) == 0:\n preds.append(logits.detach().cpu().numpy())\n else:\n preds[0] = np.append(\n preds[0], logits.detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n preds = preds[0]\n if output_mode == \"classification\":\n preds = np.argmax(preds, axis=1)\n elif output_mode == \"regression\":\n preds = np.squeeze(preds)\n result = compute_metrics(task_name, preds, all_label_ids.numpy())\n loss = tr_loss/global_step if args.do_train else None\n\n result['eval_loss'] = eval_loss\n result['global_step'] = global_step\n result['loss'] = loss\n\n 
output_eval_file = os.path.join(args.output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n # hack for MNLI-MM\n if task_name == \"mnli\":\n task_name = \"mnli-mm\"\n processor = processors[task_name]()\n\n if os.path.exists(args.output_dir + '-MM') and os.listdir(args.output_dir + '-MM') and args.do_train:\n raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(args.output_dir))\n if not os.path.exists(args.output_dir + '-MM'):\n os.makedirs(args.output_dir + '-MM')\n\n eval_examples = processor.get_dev_examples(args.data_dir)\n eval_features = convert_examples_to_features(\n eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)\n logger.info(\"***** Running evaluation *****\")\n logger.info(\" Num examples = %d\", len(eval_examples))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)\n\n eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n # Run prediction for full data\n eval_sampler = SequentialSampler(eval_data)\n eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n model.eval()\n eval_loss = 0\n nb_eval_steps = 0\n preds = []\n\n for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"):\n input_ids = input_ids.to(device)\n input_mask = input_mask.to(device)\n segment_ids = segment_ids.to(device)\n label_ids = 
label_ids.to(device)\n\n with torch.no_grad():\n logits = model(input_ids, segment_ids, input_mask, labels=None)\n \n loss_fct = CrossEntropyLoss()\n tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))\n \n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n if len(preds) == 0:\n preds.append(logits.detach().cpu().numpy())\n else:\n preds[0] = np.append(\n preds[0], logits.detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n preds = preds[0]\n preds = np.argmax(preds, axis=1)\n result = compute_metrics(task_name, preds, all_label_ids.numpy())\n loss = tr_loss/global_step if args.do_train else None\n\n result['eval_loss'] = eval_loss\n result['global_step'] = global_step\n result['loss'] = loss\n\n output_eval_file = os.path.join(args.output_dir + '-MM', \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.distributed.get_world_size", "torch.utils.data.RandomSampler", "scipy.stats.pearsonr", "torch.cuda.is_available", "sklearn.metrics.f1_score", "torch.nn.CrossEntropyLoss", "torch.nn.DataParallel", "torch.distributed.init_process_group", "torch.manual_seed", "torch.tensor", "torch.utils.data.DataLoader", "numpy.argmax", "torch.distributed.get_rank", "torch.device", "torch.cuda.manual_seed_all", "sklearn.metrics.matthews_corrcoef", "torch.utils.data.SequentialSampler", "torch.cuda.device_count", "torch.cuda.set_device", "numpy.squeeze", "torch.utils.data.TensorDataset", "torch.nn.MSELoss", "numpy.random.seed", "torch.no_grad", "scipy.stats.spearmanr", "torch.utils.data.distributed.DistributedSampler" ] ]
lucyfan7266/Accelerator-Control
[ "9be80ce4dbe917712f35d96004ca920146e9c909" ]
[ "tests/test_mesh.py" ]
[ "import unittest\nfrom numpy.linalg import det\nimport numpy.random as npr\nimport numpy as np\n\nfrom DLA_Control import MZI, Layer, Mesh\n\nclass TestMesh(unittest.TestCase):\n \"\"\" Code for testing the MZI mesh\"\"\"\n\n def setUp(self):\n pass\n\n @staticmethod\n def is_unitary(M):\n M_dagger = M.conj().T\n np.testing.assert_array_almost_equal(np.dot(M, M_dagger), np.eye(M.shape[0]))\n\n def test_MZI(self):\n \"\"\" Tests an invidual MZI\"\"\"\n m = MZI()\n\n print(m)\n\n self.is_unitary(m.M)\n\n # make a 2x2 unitary matrix\n phi1 = 2*np.pi*npr.random()\n phi2 = 2*np.pi*npr.random()\n\n m = MZI(phi1, phi2)\n self.is_unitary(m.M)\n\n def test_layer(self):\n \"\"\" Tests a MZI layer\"\"\"\n N = 5\n L = Layer(N)\n m1 = MZI()\n m2 = MZI()\n\n L.embed_MZI(m1, offset=0)\n L.embed_MZI(m2, offset=2)\n\n print('\\n')\n print(L)\n\n # check the sparsity pattern of the matrix\n matrix_pattern = 1.*(np.abs(L.M)>0)\n np.testing.assert_array_almost_equal(matrix_pattern, np.array([[1, 1, 0, 0, 0],\n [1, 1, 0, 0, 0],\n [0, 0, 1, 1, 0],\n [0, 0, 1, 1, 0],\n [0, 0, 0, 0, 1]]))\n\n # make sure its still unitary\n self.is_unitary(L.M)\n\n # make sure resetting works\n L.reset_MZI(offset=2, phi1=0, phi2=0)\n\n np.testing.assert_array_almost_equal(L.M[2:4, 2:4], np.array(np.eye(2)))\n\n def test_mesh(self):\n \"\"\" prints out some meshes for debugging MZI mesh\"\"\"\n\n # triangular mesh\n print('')\n N = 10\n M = Mesh(N, mesh_type='triangular', initialization='random', M=None)\n print('Triangular, N = {}, M = None:'.format(N))\n print(M)\n\n # full clements mesh\n M = Mesh(N, mesh_type='clements', initialization='random', M=None)\n print('Clements, N = {}, M = None:'.format(N))\n print(M)\n\n # clements mesh with custom number of layers\n M_temp = 50\n M = Mesh(N-1, mesh_type='clements', initialization='random', M=M_temp)\n print('Clements, N = {}, M = {}:'.format(N-1, M_temp))\n\n # check if unitary with random initialization\n self.is_unitary(M.full_matrix)\n\n N = 4\n # 
check if identity matrix with zero initialization\n M = Mesh(N, mesh_type='clements', initialization='zeros', M=1)\n np.testing.assert_array_almost_equal(M.full_matrix, np.eye(N, dtype=np.complex64))\n\n\n def test_real(self):\n\n N = 10\n mesh = Mesh(N, mesh_type='clements', initialization='real', M=None)\n print(mesh)\n R = (np.random.random((N, 1)) - 0.5) #+ 1j*(np.random.random((N, 1)) - 0.5)\n\n mesh.input_couple(R)\n output_values = mesh.output_values\n print(output_values)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.eye", "numpy.abs", "numpy.random.random" ] ]
shashankg7/glove-tensorflow
[ "b44e01da2110424d8b31ef2854190151f6704b24" ]
[ "glove/train.py" ]
[ "\nfrom glove import glove\nfrom build_coocurence import generateCoocur as gen_coocur\nimport os\nimport json\nimport cPickle as pickle\nimport numpy as np\nimport argparse\nimport logging\nimport time\nimport pdb\n\nclass train_glove(object):\n\n def __init__(self):\n '''\n Define hyperparamters of the model. (Modify using arg parser.\n '''\n self.dim = 100\n self.n_epochs = 10\n self.minibatch_size = 64\n self.path = '../data/text8'\n self.lr = 0.05\n self.context_size = 10\n\n def arg_parser(self):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"path\", help=\"path to text file\")\n parser.add_argument(\n \"--dim\", help=\"dimension of word vectors\", type=int)\n parser.add_argument(\n \"--epochs\", help=\"no of epochs to run SGD\", type=int)\n parser.add_argument(\n \"--learning_rate\", help=\"learning rate for gradient descent\",\n type=float)\n parser.add_argument(\n \"--mini_batchsize\", help=\"size of mini-batch for training\",\n type=int)\n parser.add_argument(\"--context_size\",\n help=\"context size for constructing coocurence matrix\",\n type=int)\n args = parser.parse_args()\n if args.path:\n self.path = args.path\n if args.dim:\n self.dim = args.dim\n if args.epochs:\n self.epochs = args.epochs\n if args.learning_rate:\n self.lr = args.learning_rate\n if args.context_size:\n self.context_size = args.context_size\n\n def train_minibatch(self):\n '''\n Train gloVe model\n '''\n logger = logging.getLogger('glove')\n logger.setLevel(logging.INFO)\n logger.setLevel(logging.DEBUG)\n fh = logging.FileHandler('glove.log')\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n t = time.time()\n coocur = gen_coocur(self.path)\n if os.path.isfile('vocab.json'):\n f = open('vocab.json', 'r')\n coocur.vocab = json.load(f)\n else:\n coocur.gen_vocab()\n logger.info(\"Vocabulary constructed\")\n vocab_size = len(coocur.vocab)\n if os.path.isfile('coocurence.mat'):\n f = 
open('coocurence.mat', 'r')\n coocur.coocur_mat = pickle.load(f)\n else:\n coocur.gen_coocur(self.context_size)\n logger.info(\"Coocurence matrix constructed\")\n # vocab and coocurence matrix is loaded/generated\n nnz = coocur.coocur_mat.nonzero()\n model = glove(self.minibatch_size, vocab_size, self.dim, self.lr)\n # nnz has i,j indices of non-zero entries\n nz = np.zeros((nnz[0].shape[0], 2))\n nz[:, 0] = nnz[0]\n nz[:, 1] = nnz[1]\n logger.info(\"Training started\")\n try:\n for epoch in xrange(self.n_epochs):\n np.random.shuffle(nz)\n # Train only observed values in the co-occurence matrix\n for i in xrange(0, nnz[0].shape[0], self.minibatch_size):\n indw = np.asarray(nz[i:(i + self.minibatch_size), 0], dtype=np.int32)\n indw1 = np.asarray(nz[i:(i + self.minibatch_size), 1], dtype=np.int32)\n batch_size = indw.shape[0]\n X = np.asarray(coocur.coocur_mat[indw,\n indw1].todense(), dtype=np.float32).reshape(batch_size, )\n fX = np.zeros_like(X)\n for i in xrange(0, X.shape[0]):\n if X[i] > 100:\n fX[i] = (X[i] / float(100.0)) ** 0.75\n else:\n fX[i] = 1.\n X = np.log(X)\n if indw.shape[0] == self.minibatch_size:\n cost = model.sgd(indw, indw1, X, fX)\n logger.info(\"Cost in epoch %d is %f\" % (epoch, cost))\n else:\n logger.info(\"Current batch size less than user specified batch size, omitting\")\n except Exception as e:\n logger.debug(\"System encountered an error %s\" % e)\n\n logger.info(\"Training ended\")\n model.save_params()\n logger.info(\"parameters saved\")\n logger.info(\"Time to complete training is %f\" % (time.time() - t))\n\nif __name__ == \"__main__\":\n pdb.set_trace()\n model = train_glove()\n model.arg_parser()\n model.train_minibatch()\n" ]
[ [ "numpy.zeros_like", "numpy.asarray", "numpy.log", "numpy.zeros", "numpy.random.shuffle" ] ]
neuro-inc/ml-recipe-distributed-pytorch
[ "23666a026a08d80b57b288e8bdc62f52da859769" ]
[ "modules/model/utils/list_dataloader.py" ]
[ "import logging\nimport multiprocessing as mp\n\nimport numpy as np\n\nlogger = logging.getLogger(__file__)\n\n\nclass ListDalatoaderIterator:\n def __init__(self, processor):\n self.manager = mp.Manager()\n self.pool_queue = self.manager.Queue(processor.buffer_size)\n self.pool = mp.Pool(processor.n_jobs)\n\n self.processor = processor\n\n self.jobs = []\n\n self.num_done_jobs = 0\n\n def _job_done_callback(self, *args, **kwargs):\n self.num_done_jobs += 1\n\n def _job_error_callback(self, error):\n logger.error(error)\n self.pool.terminate()\n raise error\n\n @staticmethod\n def _worker_fun(dataset, idx, pool_queue):\n chunks = dataset[idx]\n for chunk in chunks:\n pool_queue.put(chunk)\n\n def __iter__(self):\n return self._generator()\n\n def _generator(self):\n try:\n idxs = np.arange(0, len(self.processor.dataset))\n if self.processor.shuffle:\n np.random.shuffle(idxs)\n\n for idx in idxs:\n self.jobs.append(self.pool.apply_async(ListDalatoaderIterator._worker_fun,\n (self.processor.dataset, idx, self.pool_queue),\n callback=self._job_done_callback,\n error_callback=self._job_error_callback))\n batch = []\n while True:\n chunk = self.pool_queue.get()\n batch.append(chunk)\n if len(batch) == self.processor.batch_size:\n yield self.processor.process_batch(batch)\n batch = []\n\n if self.pool_queue.empty() and self.num_done_jobs == len(idxs):\n break\n\n if len(batch):\n yield self.processor.process_batch(batch)\n\n except Exception as e:\n self._close_jobs()\n raise e\n\n def _close_jobs(self):\n self.pool.close()\n self.pool.join()\n\n self.manager.shutdown()\n\n def __del__(self):\n self._close_jobs()\n\n\nclass ListDataloader:\n def __init__(self, dataset, batch_size, *,\n n_jobs=4,\n collate_fun=None,\n buffer_size=1024,\n shuffle=False):\n self.dataset = dataset\n self.batch_size = batch_size\n\n self.collate_fun = collate_fun\n self.n_jobs = n_jobs\n\n self.buffer_size = buffer_size\n\n self.shuffle = shuffle\n\n def process_batch(self, batch):\n return 
self.collate_fun(batch) if self.collate_fun is not None else batch\n\n def __iter__(self):\n return iter(ListDalatoaderIterator(self))\n" ]
[ [ "numpy.random.shuffle" ] ]
jayant766/MIDAS-IIITD
[ "9a6085bff579a5846c58bac70264a736ed9da750" ]
[ "libraries/classification/inception/data/build_image_data.py" ]
[ "\"\"\"Converts image data to TFRecords file format with Example protos.\n\nThe image data set is expected to reside in JPEG files located in the\nfollowing directory structure.\n\n data_dir/label_0/image0.jpeg\n data_dir/label_0/image1.jpg\n ...\n data_dir/label_1/weird-image.jpeg\n data_dir/label_1/my-image.jpeg\n ...\n\nwhere the sub-directory is the unique label associated with these images.\n\nThis TensorFlow script converts the training and evaluation data into\na sharded data set consisting of TFRecord files\n\n train_directory/train-00000-of-01024\n train_directory/train-00001-of-01024\n ...\n train_directory/train-01023-of-01024\n\nand\n\n validation_directory/validation-00000-of-00128\n validation_directory/validation-00001-of-00128\n ...\n validation_directory/validation-00127-of-00128\n\nwhere we have selected 1024 and 128 shards for each data set. Each record\nwithin the TFRecord file is a serialized Example proto. The Example proto\ncontains the following fields:\n\n image/encoded: string containing JPEG encoded image in RGB colorspace\n image/height: integer, image height in pixels\n image/width: integer, image width in pixels\n image/colorspace: string, specifying the colorspace, always 'RGB'\n image/channels: integer, specifying the number of channels, always 3\n image/format: string, specifying the format, always 'JPEG'\n\n image/filename: string containing the basename of the image file\n e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'\n image/class/label: integer specifying the index in a classification layer.\n The label ranges from [0, num_labels] where 0 is unused and left as\n the background class.\n image/class/text: string specifying the human-readable version of the label\n e.g. 
'dog'\n\nIf your data set involves bounding boxes, please look at build_imagenet_data.py.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport os\nimport random\nimport sys\nimport threading\n\nimport numpy as np\nimport tensorflow as tf\n\ntf.app.flags.DEFINE_string('train_directory', '/tmp/',\n 'Training data directory')\ntf.app.flags.DEFINE_string('validation_directory', '/tmp/',\n 'Validation data directory')\ntf.app.flags.DEFINE_string('output_directory', '/tmp/',\n 'Output data directory')\n\ntf.app.flags.DEFINE_integer('train_shards', 2,\n 'Number of shards in training TFRecord files.')\ntf.app.flags.DEFINE_integer('validation_shards', 2,\n 'Number of shards in validation TFRecord files.')\n\ntf.app.flags.DEFINE_integer('num_threads', 2,\n 'Number of threads to preprocess the images.')\n\n# The labels file contains a list of valid labels are held in this file.\n# Assumes that the file contains entries as such:\n# dog\n# cat\n# flower\n# where each line corresponds to a label. 
We map each label contained in\n# the file to an integer corresponding to the line number starting from 0.\ntf.app.flags.DEFINE_string('labels_file', '', 'Labels file')\n\n\nFLAGS = tf.app.flags.FLAGS\n\n\ndef _int64_feature(value):\n \"\"\"Wrapper for inserting int64 features into Example proto.\"\"\"\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n\ndef _bytes_feature(value):\n \"\"\"Wrapper for inserting bytes features into Example proto.\"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _convert_to_example(filename, image_buffer, label, text, height, width):\n \"\"\"Build an Example proto for an example.\n\n Args:\n filename: string, path to an image file, e.g., '/path/to/example.JPG'\n image_buffer: string, JPEG encoding of RGB image\n label: integer, identifier for the ground truth for the network\n text: string, unique human-readable, e.g. 'dog'\n height: integer, image height in pixels\n width: integer, image width in pixels\n Returns:\n Example proto\n \"\"\"\n\n colorspace = 'RGB'\n channels = 3\n image_format = 'JPEG'\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': _int64_feature(height),\n 'image/width': _int64_feature(width),\n 'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),\n 'image/channels': _int64_feature(channels),\n 'image/class/label': _int64_feature(label),\n 'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),\n 'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),\n 'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),\n 'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))\n return example\n\n\nclass ImageCoder(object):\n \"\"\"Helper class that provides TensorFlow image coding utilities.\"\"\"\n\n def __init__(self):\n # Create a single Session to run all image coding calls.\n self._sess = tf.Session()\n\n # 
Initializes function that converts PNG to JPEG data.\n self._png_data = tf.placeholder(dtype=tf.string)\n image = tf.image.decode_png(self._png_data, channels=3)\n self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)\n\n # Initializes function that decodes RGB JPEG data.\n self._decode_jpeg_data = tf.placeholder(dtype=tf.string)\n self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)\n\n def png_to_jpeg(self, image_data):\n return self._sess.run(self._png_to_jpeg,\n feed_dict={self._png_data: image_data})\n\n def decode_jpeg(self, image_data):\n image = self._sess.run(self._decode_jpeg,\n feed_dict={self._decode_jpeg_data: image_data})\n assert len(image.shape) == 3\n assert image.shape[2] == 3\n return image\n\n\ndef _is_png(filename):\n \"\"\"Determine if a file contains a PNG format image.\n\n Args:\n filename: string, path of the image file.\n\n Returns:\n boolean indicating if the image is a PNG.\n \"\"\"\n return filename.endswith('.png')\n\n\ndef _process_image(filename, coder):\n \"\"\"Process a single image file.\n\n Args:\n filename: string, path to an image file e.g., '/path/to/example.JPG'.\n coder: instance of ImageCoder to provide TensorFlow image coding utils.\n Returns:\n image_buffer: string, JPEG encoding of RGB image.\n height: integer, image height in pixels.\n width: integer, image width in pixels.\n \"\"\"\n # Read the image file.\n with tf.gfile.FastGFile(filename, 'rb') as f:\n image_data = f.read()\n\n # Convert any PNG to JPEG's for consistency.\n if _is_png(filename):\n print('Converting PNG to JPEG for %s' % filename)\n image_data = coder.png_to_jpeg(image_data)\n\n # Decode the RGB JPEG.\n image = coder.decode_jpeg(image_data)\n\n # Check that image converted to RGB\n assert len(image.shape) == 3\n height = image.shape[0]\n width = image.shape[1]\n assert image.shape[2] == 3\n\n return image_data, height, width\n\n\ndef _process_image_files_batch(coder, thread_index, ranges, name, 
filenames,\n texts, labels, num_shards):\n \"\"\"Processes and saves list of images as TFRecord in 1 thread.\n\n Args:\n coder: instance of ImageCoder to provide TensorFlow image coding utils.\n thread_index: integer, unique batch to run index is within [0, len(ranges)).\n ranges: list of pairs of integers specifying ranges of each batches to\n analyze in parallel.\n name: string, unique identifier specifying the data set\n filenames: list of strings; each string is a path to an image file\n texts: list of strings; each string is human readable, e.g. 'dog'\n labels: list of integer; each integer identifies the ground truth\n num_shards: integer number of shards for this data set.\n \"\"\"\n # Each thread produces N shards where N = int(num_shards / num_threads).\n # For instance, if num_shards = 128, and the num_threads = 2, then the first\n # thread would produce shards [0, 64).\n num_threads = len(ranges)\n assert not num_shards % num_threads\n num_shards_per_batch = int(num_shards / num_threads)\n\n shard_ranges = np.linspace(ranges[thread_index][0],\n ranges[thread_index][1],\n num_shards_per_batch + 1).astype(int)\n num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]\n\n counter = 0\n for s in range(num_shards_per_batch):\n # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'\n shard = thread_index * num_shards_per_batch + s\n output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)\n output_file = os.path.join(FLAGS.output_directory, output_filename)\n writer = tf.python_io.TFRecordWriter(output_file)\n\n shard_counter = 0\n files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)\n for i in files_in_shard:\n filename = filenames[i]\n label = labels[i]\n text = texts[i]\n\n try:\n image_buffer, height, width = _process_image(filename, coder)\n except Exception as e:\n print(e)\n print('SKIPPED: Unexpected error while decoding %s.' 
% filename)\n continue\n\n example = _convert_to_example(filename, image_buffer, label,\n text, height, width)\n writer.write(example.SerializeToString())\n shard_counter += 1\n counter += 1\n\n if not counter % 1000:\n print('%s [thread %d]: Processed %d of %d images in thread batch.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()\n\n writer.close()\n print('%s [thread %d]: Wrote %d images to %s' %\n (datetime.now(), thread_index, shard_counter, output_file))\n sys.stdout.flush()\n shard_counter = 0\n print('%s [thread %d]: Wrote %d images to %d shards.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()\n\n\ndef _process_image_files(name, filenames, texts, labels, num_shards):\n \"\"\"Process and save list of images as TFRecord of Example protos.\n\n Args:\n name: string, unique identifier specifying the data set\n filenames: list of strings; each string is a path to an image file\n texts: list of strings; each string is human readable, e.g. 
'dog'\n labels: list of integer; each integer identifies the ground truth\n num_shards: integer number of shards for this data set.\n \"\"\"\n assert len(filenames) == len(texts)\n assert len(filenames) == len(labels)\n\n # Break all images into batches with a [ranges[i][0], ranges[i][1]].\n spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)\n ranges = []\n for i in range(len(spacing) - 1):\n ranges.append([spacing[i], spacing[i + 1]])\n\n # Launch a thread for each batch.\n print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))\n sys.stdout.flush()\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n\n # Create a generic TensorFlow-based utility for converting all image codings.\n coder = ImageCoder()\n\n threads = []\n for thread_index in range(len(ranges)):\n args = (coder, thread_index, ranges, name, filenames,\n texts, labels, num_shards)\n t = threading.Thread(target=_process_image_files_batch, args=args)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print('%s: Finished writing all %d images in data set.' %\n (datetime.now(), len(filenames)))\n sys.stdout.flush()\n\n\ndef _find_image_files(data_dir, labels_file):\n \"\"\"Build a list of all images files and labels in the data set.\n\n Args:\n data_dir: string, path to the root directory of images.\n\n Assumes that the image data set resides in JPEG files located in\n the following directory structure.\n\n data_dir/dog/another-image.JPEG\n data_dir/dog/my-image.jpg\n\n where 'dog' is the label associated with these images.\n\n labels_file: string, path to the labels file.\n\n The list of valid labels are held in this file. Assumes that the file\n contains entries as such:\n dog\n cat\n flower\n where each line corresponds to a label. 
We map each label contained in\n the file to an integer starting with the integer 0 corresponding to the\n label contained in the first line.\n\n Returns:\n filenames: list of strings; each string is a path to an image file.\n texts: list of strings; each string is the class, e.g. 'dog'\n labels: list of integer; each integer identifies the ground truth.\n \"\"\"\n print('Determining list of input files and labels from %s.' % data_dir)\n unique_labels = [l.strip() for l in tf.gfile.FastGFile(\n labels_file, 'r').readlines()]\n\n labels = []\n filenames = []\n texts = []\n\n # Leave label index 0 empty as a background class.\n label_index = 1\n\n # Construct the list of JPEG files and labels.\n for text in unique_labels:\n jpeg_file_path = '%s/%s/*' % (data_dir, text)\n matching_files = tf.gfile.Glob(jpeg_file_path)\n\n labels.extend([label_index] * len(matching_files))\n texts.extend([text] * len(matching_files))\n filenames.extend(matching_files)\n\n if not label_index % 100:\n print('Finished finding files in %d of %d classes.' % (\n label_index, len(labels)))\n label_index += 1\n\n # Shuffle the ordering of all image files in order to guarantee\n # random ordering of the images with respect to label in the\n # saved TFRecord files. Make the randomization repeatable.\n shuffled_index = list(range(len(filenames)))\n random.seed(12345)\n random.shuffle(shuffled_index)\n\n filenames = [filenames[i] for i in shuffled_index]\n texts = [texts[i] for i in shuffled_index]\n labels = [labels[i] for i in shuffled_index]\n\n print('Found %d JPEG files across %d labels inside %s.' 
%\n (len(filenames), len(unique_labels), data_dir))\n return filenames, texts, labels\n\n\ndef _process_dataset(name, directory, num_shards, labels_file):\n \"\"\"Process a complete data set and save it as a TFRecord.\n\n Args:\n name: string, unique identifier specifying the data set.\n directory: string, root path to the data set.\n num_shards: integer number of shards for this data set.\n labels_file: string, path to the labels file.\n \"\"\"\n filenames, texts, labels = _find_image_files(directory, labels_file)\n _process_image_files(name, filenames, texts, labels, num_shards)\n\n\ndef main(unused_argv):\n assert not FLAGS.train_shards % FLAGS.num_threads, (\n 'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')\n assert not FLAGS.validation_shards % FLAGS.num_threads, (\n 'Please make the FLAGS.num_threads commensurate with '\n 'FLAGS.validation_shards')\n print('Saving results to %s' % FLAGS.output_directory)\n\n # Run it!\n _process_dataset('validation', FLAGS.validation_directory,\n FLAGS.validation_shards, FLAGS.labels_file)\n _process_dataset('train', FLAGS.train_directory,\n FLAGS.train_shards, FLAGS.labels_file)\n\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "tensorflow.train.BytesList", "tensorflow.app.flags.DEFINE_integer", "tensorflow.train.Int64List", "tensorflow.train.Coordinator", "tensorflow.app.flags.DEFINE_string", "tensorflow.gfile.Glob", "tensorflow.compat.as_bytes", "tensorflow.Session", "tensorflow.image.encode_jpeg", "tensorflow.python_io.TFRecordWriter", "numpy.linspace", "tensorflow.image.decode_png", "tensorflow.placeholder", "tensorflow.gfile.FastGFile", "numpy.arange", "tensorflow.app.run", "tensorflow.image.decode_jpeg" ] ]
jlmayfield/Cirq
[ "dc1294f54118a9a4f92546ca13780b91615dd675" ]
[ "cirq/testing/consistent_protocols_test.py" ]
[ "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Sequence, Union\n\nimport pytest\n\nimport numpy as np\n\nimport cirq\nfrom cirq.type_workarounds import NotImplementedType\n\n\nclass GoodGate(cirq.SingleQubitGate):\n\n def __init__(self,\n *,\n phase_exponent: Union[float, cirq.Symbol],\n exponent: Union[float, cirq.Symbol] = 1.0) -> None:\n self.phase_exponent = cirq.canonicalize_half_turns(phase_exponent)\n self.exponent = exponent\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n if cirq.is_parameterized(self):\n return NotImplemented\n z = cirq.unitary(cirq.Z**self.phase_exponent)\n x = cirq.unitary(cirq.X**self.exponent)\n return np.dot(np.dot(z, x), np.conj(z))\n\n def _apply_unitary_(self, args: cirq.ApplyUnitaryArgs\n ) -> Union[np.ndarray, NotImplementedType]:\n if self.exponent != 1 or cirq.is_parameterized(self):\n return NotImplemented\n\n zero = cirq.slice_for_qubits_equal_to(args.axes, 0)\n one = cirq.slice_for_qubits_equal_to(args.axes, 1)\n c = np.exp(1j * np.pi * self.phase_exponent)\n\n args.target_tensor[one] *= c.conj()\n args.available_buffer[zero] = args.target_tensor[one]\n args.available_buffer[one] = args.target_tensor[zero]\n args.available_buffer[one] *= c\n\n return args.available_buffer\n\n def _decompose_(self, qubits: Sequence[cirq.QubitId]) -> cirq.OP_TREE:\n assert len(qubits) == 1\n q = qubits[0]\n z = cirq.Z(q)**self.phase_exponent\n x = 
cirq.X(q)**self.exponent\n if cirq.is_parameterized(z):\n # coverage: ignore\n return NotImplemented\n return z**-1, x, z\n\n def _phase_by_(self, phase_turns, qubit_index):\n assert qubit_index == 0\n return GoodGate(\n exponent=self.exponent,\n phase_exponent=self.phase_exponent + phase_turns * 2)\n\n def __pow__(self, exponent: Union[float, cirq.Symbol]) -> 'GoodGate':\n new_exponent = cirq.mul(self.exponent, exponent, NotImplemented)\n if new_exponent is NotImplemented:\n # coverage: ignore\n return NotImplemented\n return GoodGate(phase_exponent=self.phase_exponent,\n exponent=new_exponent)\n\n def __repr__(self):\n args = ['phase_exponent={!r}'.format(self.phase_exponent)]\n if self.exponent != 1:\n args.append('exponent={!r}'.format(self.exponent))\n return 'GoodGate({})'.format(', '.join(args))\n\n def _is_parameterized_(self) -> bool:\n return (isinstance(self.exponent, cirq.Symbol) or\n isinstance(self.phase_exponent, cirq.Symbol))\n\n def _identity_tuple(self):\n return (GoodGate,\n self.phase_exponent,\n self.exponent)\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n # coverage: ignore\n return NotImplemented\n return self._identity_tuple() == other._identity_tuple()\n\n\nclass BadGateApplyUnitaryToTensor(GoodGate):\n\n def _apply_unitary_(self, args: cirq.ApplyUnitaryArgs\n ) -> Union[np.ndarray, NotImplementedType]:\n if self.exponent != 1 or cirq.is_parameterized(self):\n # coverage: ignore\n return NotImplemented\n\n zero = cirq.slice_for_qubits_equal_to(args.axes, 0)\n one = cirq.slice_for_qubits_equal_to(args.axes, 1)\n c = np.exp(1j * np.pi * self.phase_exponent)\n\n args.target_tensor[one] *= c\n args.available_buffer[zero] = args.target_tensor[one]\n args.available_buffer[one] = args.target_tensor[zero]\n args.available_buffer[one] *= c\n\n return args.available_buffer\n\n\nclass BadGateDecompose(GoodGate):\n\n def _decompose_(self, qubits: Sequence[cirq.QubitId]) -> cirq.OP_TREE:\n assert len(qubits) == 1\n q = 
qubits[0]\n z = cirq.Z(q)**self.phase_exponent\n x = cirq.X(q)**(2*self.exponent)\n if cirq.is_parameterized(z):\n # coverage: ignore\n return NotImplemented\n return z**-1, x, z\n\n\nclass BadGatePhaseBy(GoodGate):\n\n def _phase_by_(self, phase_turns, qubit_index):\n assert qubit_index == 0\n return BadGatePhaseBy(\n exponent=self.exponent,\n phase_exponent=self.phase_exponent + phase_turns * 4)\n\n\nclass BadGateRepr(GoodGate):\n\n def __repr__(self):\n args = ['phase_exponent={!r}'.format(2*self.phase_exponent)]\n if self.exponent != 1:\n # coverage: ignore\n args.append('exponent={!r}'.format(self.exponent))\n return 'BadGateRepr({})'.format(', '.join(args))\n\n\nclass GoodEigenGate(cirq.EigenGate, cirq.SingleQubitGate):\n\n def _eigen_components(self):\n return [\n (0, np.diag([1, 0])),\n (1, np.diag([0, 1])),\n ]\n\n def __repr__(self):\n return ('GoodEigenGate'\n '(exponent={!r}, global_shift={!r})'.format(\n self._exponent, self._global_shift))\n\n\nclass BadEigenGate(GoodEigenGate):\n\n def _eigen_shifts(self):\n return [0, 0]\n\n def __repr__(self):\n return ('BadEigenGate'\n '(exponent={!r}, global_shift={!r})'.format(\n self._exponent, self._global_shift))\n\n\ndef test_assert_implements_consistent_protocols():\n cirq.testing.assert_implements_consistent_protocols(\n GoodGate(phase_exponent=0.0),\n global_vals={'GoodGate': GoodGate}\n )\n\n cirq.testing.assert_implements_consistent_protocols(\n GoodGate(phase_exponent=0.25),\n global_vals={'GoodGate': GoodGate}\n )\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(\n BadGateApplyUnitaryToTensor(phase_exponent=0.25)\n )\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(\n BadGateDecompose(phase_exponent=0.25)\n )\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_implements_consistent_protocols(\n BadGatePhaseBy(phase_exponent=0.25)\n )\n\n with pytest.raises(AssertionError):\n 
cirq.testing.assert_implements_consistent_protocols(\n BadGateRepr(phase_exponent=0.25),\n global_vals={'BadGateRepr': BadGateRepr}\n )\n\n\ndef test_assert_eigengate_implements_consistent_protocols():\n cirq.testing.assert_eigengate_implements_consistent_protocols(\n GoodEigenGate,\n global_vals={'GoodEigenGate': GoodEigenGate})\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_eigengate_implements_consistent_protocols(\n BadEigenGate,\n global_vals={'BadEigenGate': BadEigenGate})\n" ]
[ [ "numpy.diag", "numpy.dot", "numpy.conj", "numpy.exp" ] ]
zju3dv/rnin-vio
[ "b030ecc94f151159973a9086c8ba76e22bdbc56e" ]
[ "ronin_3d/source/data_glob_heading.py" ]
[ "import json\nimport random\nimport sys\nfrom os import path as osp\n\nimport h5py\nimport numpy as np\nimport quaternion\nfrom scipy.ndimage import gaussian_filter1d\nfrom torch.utils.data import Dataset\n\nfrom math_util import orientation_to_angles\nfrom data_glob_speed import GlobSpeedSequence\nfrom data_utils import load_cached_sequences\n\n\nclass HeadingSequence(GlobSpeedSequence):\n target_dim = 2\n aux_dim = 2 # velocity\n\n def __init__(self, data_path=None, **kwargs):\n super().__init__(data_path, **kwargs)\n\n def load(self, data_path):\n super().load(data_path)\n self.velocities = self.targets[:, :2]\n with open(osp.join(data_path, 'info.json')) as f:\n info = json.load(f)\n rot_tango_to_body = info['align_tango_to_body']\n start_frame = info.get('start_frame', 0)\n\n with h5py.File(osp.join(data_path, 'data.hdf5')) as f:\n tango_ori = f['pose/tango_ori']\n body_ori_q = quaternion.from_float_array(tango_ori) * quaternion.from_float_array(rot_tango_to_body).conj()\n body_heading = orientation_to_angles(body_ori_q)[start_frame:, 0]\n self.targets = np.stack([np.sin(body_heading), np.cos(body_heading)], axis=-1)\n\n def get_feature(self):\n return self.features\n\n def get_target(self):\n return self.targets\n\n def get_aux(self):\n return self.velocities\n\n def get_meta(self):\n return '{}: device: {}, ori_error ({}): {:.3f}'.format(\n self.info['path'], self.info['device'], self.info['ori_source'], self.info['source_ori_error'])\n\n\nclass HeadingDataset(Dataset):\n # Input -imu\n # Targets - heading\n # Aux - velocity\n def __init__(self, seq_type, root_dir, data_list, cache_path=None, step_size=10, window_size=1000,\n random_shift=0, transform=None, **kwargs):\n super(HeadingDataset, self).__init__()\n self.seq_type = seq_type\n self.feature_dim = seq_type.feature_dim\n self.target_dim = seq_type.target_dim\n self.aux_dim = seq_type.aux_dim\n self.window_size = window_size\n self.step_size = step_size\n self.random_shift = random_shift\n 
self.transform = transform\n\n self.data_path = [osp.join(root_dir, data) for data in data_list]\n self.index_map = []\n\n self.features, self.targets, self.velocities = load_cached_sequences(\n seq_type, root_dir, data_list, cache_path, **kwargs)\n\n # Optionally smooth the sequence\n feat_sigma = kwargs.get('feature_sigma,', -1)\n targ_sigma = kwargs.get('target_sigma,', -1)\n if feat_sigma > 0:\n self.features = [gaussian_filter1d(feat, sigma=feat_sigma, axis=0) for feat in self.features]\n if targ_sigma > 0:\n self.targets = [gaussian_filter1d(targ, sigma=targ_sigma, axis=0) for targ in self.targets]\n\n max_norm = kwargs.get('max_velocity_norm', 3.0)\n for i in range(len(data_list)):\n self.features[i] = self.features[i][:-1]\n self.targets[i] = self.targets[i][:-1]\n self.velocities[i] = self.velocities[i]\n\n velocity = np.linalg.norm(self.velocities[i], axis=1) # Remove outlier ground truth data\n bad_data = velocity > max_norm\n for j in range(window_size + random_shift, self.targets[i].shape[0], step_size):\n if not bad_data[j - window_size - random_shift:j + random_shift].any():\n self.index_map.append([i, j])\n\n if kwargs.get('shuffle', True):\n random.shuffle(self.index_map)\n\n def __getitem__(self, item):\n seq_id, frame_id = self.index_map[item][0], self.index_map[item][1]\n if self.random_shift > 0:\n frame_id += random.randrange(-self.random_shift, self.random_shift)\n frame_id = max(self.window_size, min(frame_id, self.targets[seq_id].shape[0] - 1))\n\n feat = np.copy(self.features[seq_id][frame_id - self.window_size:frame_id])\n targ = np.copy(self.targets[seq_id][frame_id - self.window_size:frame_id])\n vel = np.copy(self.velocities[seq_id][frame_id - self.window_size:frame_id])\n\n if self.transform is not None:\n feat, targ = self.transform(feat, targ)\n\n return feat.astype(np.float32), targ.astype(np.float32), vel.astype(np.float32), seq_id, frame_id\n\n def __len__(self):\n return len(self.index_map)\n\n def description(self):\n return 
{'features': self.feature_dim, 'target': self.target_dim, 'velocity': self.aux_dim}\n\n def get_test_seq(self, i):\n return self.features[i].astype(np.float32)[np.newaxis, ], self.targets[i].astype(np.float32)\n" ]
[ [ "numpy.sin", "numpy.linalg.norm", "numpy.copy", "scipy.ndimage.gaussian_filter1d", "numpy.cos" ] ]
GKNL/R2CNN_Faster-RCNN_Tensorflow-Improved
[ "a163520b63858df4bcd573980897b33d017288dd" ]
[ "data/io/read_tfrecord.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\nimport tensorflow as tf\nimport os\nfrom data.io import image_preprocess\nfrom libs.configs import cfgs\n\n\ndef read_single_example_and_decode(filename_queue):\n\n # tfrecord_options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)\n\n # reader = tf.TFRecordReader(options=tfrecord_options)\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n\n features = tf.parse_single_example(\n serialized=serialized_example,\n features={\n 'img_name': tf.FixedLenFeature([], tf.string),\n 'img_height': tf.FixedLenFeature([], tf.int64),\n 'img_width': tf.FixedLenFeature([], tf.int64),\n 'img': tf.FixedLenFeature([], tf.string),\n 'gtboxes_and_label': tf.FixedLenFeature([], tf.string),\n 'num_objects': tf.FixedLenFeature([], tf.int64)\n }\n )\n img_name = features['img_name']\n img_height = tf.cast(features['img_height'], tf.int32)\n img_width = tf.cast(features['img_width'], tf.int32)\n img = tf.decode_raw(features['img'], tf.uint8)\n\n img = tf.reshape(img, shape=[img_height, img_width, 3])\n\n gtboxes_and_label = tf.decode_raw(features['gtboxes_and_label'], tf.int32)\n gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 9])\n\n num_objects = tf.cast(features['num_objects'], tf.int32)\n return img_name, img, gtboxes_and_label, num_objects\n\n\ndef read_and_prepocess_single_img(filename_queue, shortside_len, is_training):\n \"\"\"\n 读取图片,并对图像进行处理与变换从而进行数据增强\n :param filename_queue: tf内部的queue类型,存放着全部的文件名\n :param shortside_len: 图像较短一边(宽)的长度\n :param is_training: 训练or测试\n :return:\n \"\"\"\n\n img_name, img, gtboxes_and_label, num_objects = read_single_example_and_decode(filename_queue)\n\n img = tf.cast(img, tf.float32)\n img = img - tf.constant(cfgs.PIXEL_MEAN)\n if is_training:\n img, gtboxes_and_label = image_preprocess.short_side_resize(img_tensor=img, 
gtboxes_and_label=gtboxes_and_label,\n target_shortside_len=shortside_len)\n img, gtboxes_and_label = image_preprocess.random_flip_left_right(img_tensor=img,\n gtboxes_and_label=gtboxes_and_label)\n\n else:\n img, gtboxes_and_label = image_preprocess.short_side_resize(img_tensor=img, gtboxes_and_label=gtboxes_and_label,\n target_shortside_len=shortside_len)\n\n return img_name, img, gtboxes_and_label, num_objects\n\n\ndef next_batch(dataset_name, batch_size, shortside_len, is_training):\n '''\n 读出tfrecords中的图片等信息,并分割为若干个batch\n :return:\n img_name_batch: shape(1, 1)\n img_batch: shape:(1, new_imgH, new_imgW, C)\n gtboxes_and_label_batch: shape(1, Num_Of_objects, 5) .each row is [x1, y1, x2, y2, label] (写错了这里?应该是[x1, y1, x2, y2, x3, y3, x4, y4, (label)])\n '''\n assert batch_size == 1, \"we only support batch_size is 1.We may support large batch_size in the future\"\n\n if dataset_name not in ['DOTA', 'ship', 'ICDAR2015', 'pascal', 'coco', 'DOTA_TOTAL', 'FDDB', 'HRSC2016']:\n raise ValueError('dataSet name must be in pascal, coco spacenet and ship')\n\n if is_training:\n pattern = os.path.join('../data/tfrecord', dataset_name + '_train*')\n else:\n pattern = os.path.join('../data/tfrecord', dataset_name + '_test*')\n\n print('tfrecord path is -->', os.path.abspath(pattern))\n\n filename_tensorlist = tf.train.match_filenames_once(pattern) # # 判断是否读取到文件\n\n # 使用tf.train.string_input_producer函数把我们需要的全部文件打包为一个tf内部的queue类型,之后tf开文件就从这个queue中取目录了(要注意一点的是这个函数的shuffle参数默认是True)\n filename_queue = tf.train.string_input_producer(filename_tensorlist)\n\n # 这里对图像进行处理与变换从而进行数据增强 ,返回的是[文件名,图片,坐标及标签,以及物体的个数]\n img_name, img, gtboxes_and_label, num_obs = read_and_prepocess_single_img(filename_queue, shortside_len,\n is_training=is_training)\n\n # 这里产生batch,队列最大等待数为1,单线程处理\n img_name_batch, img_batch, gtboxes_and_label_batch, num_obs_batch = \\\n tf.train.batch(\n [img_name, img, gtboxes_and_label, num_obs],\n batch_size=batch_size,\n capacity=1,\n num_threads=1,\n 
dynamic_pad=True)\n return img_name_batch, img_batch, gtboxes_and_label_batch, num_obs_batch\n\n\nif __name__ == '__main__':\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = '1'\n # img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch = \\\n # next_batch(dataset_name=cfgs.DATASET_NAME, # 'pascal', 'coco'\n # batch_size=cfgs.BATCH_SIZE,\n # shortside_len=cfgs.IMG_SHORT_SIDE_LEN,\n # is_training=True)\n # gtboxes_and_label = tf.reshape(gtboxes_and_label_batch, [-1, 9])\n #\n # init_op = tf.group(\n # tf.global_variables_initializer(),\n # tf.local_variables_initializer()\n # )\n #\n # config = tf.ConfigProto()\n # config.gpu_options.allow_growth = True\n #\n # with tf.Session(config=config) as sess:\n # sess.run(init_op)\n #\n # coord = tf.train.Coordinator()\n # threads = tf.train.start_queue_runners(sess, coord)\n #\n # img_name_batch_, img_batch_, gtboxes_and_label_batch_, num_objects_batch_ \\\n # = sess.run([img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch])\n #\n # print('debug')\n #\n # coord.request_stop()\n # coord.join(threads)\n img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch = \\\n next_batch(dataset_name=cfgs.DATASET_NAME,\n batch_size=cfgs.BATCH_SIZE,\n shortside_len=cfgs.IMG_SHORT_SIDE_LEN,\n is_training=True)\n\n with tf.Session() as sess:\n print(gtboxes_and_label_batch) # Tensor(\"batch:2\", shape=(1, ?, 9), dtype=int32)\n print(tf.squeeze(gtboxes_and_label_batch, 0)) # Tensor(\"Squeeze_1:0\", shape=(?, 9), dtype=int32)" ]
[ [ "tensorflow.decode_raw", "tensorflow.train.batch", "tensorflow.FixedLenFeature", "tensorflow.Session", "tensorflow.train.match_filenames_once", "tensorflow.reshape", "tensorflow.train.string_input_producer", "tensorflow.constant", "tensorflow.squeeze", "tensorflow.TFRecordReader", "tensorflow.cast" ] ]
PaccMann/paccmann_omics
[ "dbb2d2d67afd5a6ff58ba316f53c699406f5c655" ]
[ "paccmann_omics/utils/hyperparams.py" ]
[ "\"\"\"Factories to ingest string model parameters.\"\"\"\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nfrom .utils import gaussian_mixture, kl_divergence_loss\n\n# LSTM(10, 20, 2) -> input has 10 features, 20 hidden size and 2 layers.\n# NOTE: Make sure to set batch_first=True. Optionally set bidirectional=True\nRNN_CELL_FACTORY = {'lstm': nn.LSTM, 'gru': nn.GRU}\n\nOPTIMIZER_FACTORY = {\n 'Adadelta': optim.Adadelta,\n 'Adagrad': optim.Adagrad,\n 'Adam': optim.Adam,\n 'Adamax': optim.Adamax,\n 'RMSprop': optim.RMSprop,\n 'SGD': optim.SGD\n}\n\nACTIVATION_FN_FACTORY = {\n 'relu': nn.ReLU(),\n 'sigmoid': nn.Sigmoid(),\n 'selu': nn.SELU(),\n 'tanh': nn.Tanh(),\n 'lrelu': nn.LeakyReLU(),\n 'elu': nn.ELU(),\n 'celu': nn.CELU()\n}\nLOSS_FN_FACTORY = {\n 'mse': nn.MSELoss(reduction='sum'),\n 'l1': nn.L1Loss(),\n 'binary_cross_entropy': nn.BCELoss(),\n 'kld': kl_divergence_loss\n}\n\nAAE_DISTRIBUTION_FACTORY = {\n 'Gaussian': torch.randn,\n 'Uniform': torch.rand,\n 'Gaussian_Mixture': gaussian_mixture\n}\n" ]
[ [ "torch.nn.SELU", "torch.nn.CELU", "torch.nn.MSELoss", "torch.nn.Sigmoid", "torch.nn.Tanh", "torch.nn.LeakyReLU", "torch.nn.L1Loss", "torch.nn.ReLU", "torch.nn.BCELoss", "torch.nn.ELU" ] ]
Muraru-taketa/100_knocks
[ "d34c0157d15a0fda45ac60e41e93bd6b73cebb58" ]
[ "np23.py" ]
[ "#np23.py\nimport re\nimport pandas as pd\n#ここらは一緒です。\ndf = pd.read_json(\"jawiki-country.json\", lines=True)\nsec_txt = df.query('title==\"イギリス\"')['text'].values[0]\nans = r'^(={2,})\\s*(.+?)\\s*\\1$'#参照下\nfor line in sec_txt.split(\"\\n\"):#分割改行\n result = re.match(ans, line)\n if result is None: #resultがマッチしていれば返す\n #print(line)\n continue #このまま出してもまんまテキストでるだけそのためここで処理をスキップして\n #下の処理を行いセクションの抽出を行う\n print(result.group(2).strip(' ='), len(result.group(1)))#g2はレベルが==の数でg1は見出しの抽出 \n#^ 文字列の先頭\n#(={2,}) キャプチャ対象、2回以上の'='\n#\\s* 非キャプチャ、余分な0個以上の空白\n#(.+?) キャプチャ対象、任意の文字が1文字以上\n#\\s* 非キャプチャ、余分な0個以上の空白\n#\\1 後方参照、1番目のキャプチャ対象(={2,})と同じ内容\n#$ 行末" ]
[ [ "pandas.read_json" ] ]
ikoktz/super-mra
[ "822f79792bbafbcda1aeddfa74b32aade2808a48" ]
[ "models.py" ]
[ "import numpy as np\nfrom keras.layers import Conv2D, Concatenate, Conv3D, MaxPooling3D, Conv3DTranspose\nfrom keras.layers import UpSampling3D, Dropout, BatchNormalization, Activation\nfrom keras.models import Input, Model\n\n\n# 3D U-Net\ndef unet3d(img_shape, out_ch=1, start_ch=64, depth=4, inc_rate=2., activation='relu',\n dropout=0.5, batchnorm=False, maxpool=True, upconv=True, residual=False, zdim=8, true_unet=True, kzmax=3):\n \"\"\"\n ##################################\n # 3D U-Net\n ##################################\n \"\"\"\n i = Input(shape=img_shape)\n print(img_shape)\n o = level_block3d(i, start_ch, depth, inc_rate, activation, dropout, batchnorm, maxpool, upconv, residual, zdim,\n true_unet, kzmax)\n o = Conv3D(1, (1, 1, 1), padding='valid', activation='linear')(o) # 3D input and 3D output have the same size\n return Model(inputs=i, outputs=o)\n\n\n# convolution block for 3D U-Net\ndef conv_block3d(m, dim, acti, bn, res, zdim, kzmax=3, do=0):\n # kzmax should be odd integer, the maximum kernel size in z direction\n kernz = max(zdim, kzmax)\n kernz = (kernz // 2) * 2 + 1\n n = Conv3D(dim, (3, 3, kernz), activation=acti, padding='same')(m)\n n = BatchNormalization()(n) if bn else n\n n = Dropout(do)(n) if do else n\n n = Conv3D(dim, (3, 3, kernz), activation=acti, padding='same')(n)\n n = BatchNormalization()(n) if bn else n\n return Concatenate()([m, n]) if res else n\n\n\n# level block for 3D U-Net\ndef level_block3d(m, dim, depth, inc, acti, do, bn, mp, up, res, zdim, true_unet, kzmax):\n print('m', m)\n print('dim', dim)\n if depth > 0:\n n = conv_block3d(m, dim, acti, bn, res, zdim, kzmax)\n if zdim == 1:\n m = MaxPooling3D(pool_size=(2, 2, 1))(n) if mp else Conv3D(dim, 3, strides=2, padding='same')(n)\n zstride = 1\n zdim_next = 1\n elif zdim == 3:\n m = MaxPooling3D(pool_size=(2, 2, 3))(n) if mp else Conv3D(dim, 3, strides=2, padding='same')(n)\n zstride = 3 if true_unet else 1 # set zstride = 1 for expansion if Unet3Din2Dout is true\n 
zdim_next = 1\n else:\n zdim_next = zdim // 2\n zstride = np.int(zdim / zdim_next)\n m = MaxPooling3D(pool_size=(2, 2, zstride))(n) if mp else Conv3D(dim, 3, strides=2, padding='same')(n)\n zstride = np.int(\n zdim / zdim_next) if true_unet else 1 # set zstride to 1 for expansion if Unet3Din2Dout is true\n m = level_block3d(m, int(inc * dim), depth - 1, inc, acti, do, bn, mp, up, res, zdim_next, true_unet, kzmax)\n if up:\n m = UpSampling3D()(m)\n m = Conv3D(dim, 2, activation=acti, padding='same')(m)\n else:\n print(zstride)\n if zstride == 1:\n m = Conv3DTranspose(dim, 3, strides=(2, 2, 1), activation=acti, padding='same')(m)\n elif zstride == 2:\n m = Conv3DTranspose(dim, 3, strides=(2, 2, 2), activation=acti, padding='same')(m)\n elif zstride == 3:\n m = Conv3DTranspose(dim, 3, strides=(2, 2, 3), activation=acti, padding='same')(m)\n else:\n print(\"error in Unet3d ....\")\n return\n n = Concatenate()([n, m])\n m = conv_block3d(n, dim, acti, bn, res, zdim, kzmax)\n else:\n m = conv_block3d(m, dim, acti, bn, res, zdim, kzmax, do)\n return m\n\n\n# 3D 'resnet' (i.e. serial convolution + residual connection)\ndef scrc3d(input_shape, filters=64, filter_out=1, depth=20, activation='relu', dropout=0.5):\n \"\"\"\n ##################################\n # 3D ResNet (Serial Convolution + Residual Connection)\n ##################################\n \"\"\"\n in_ = Input(shape=input_shape)\n out_ = in_\n for i in range(depth - 1):\n if ((i != depth // 2) & (dropout > 0)) or dropout == 0:\n out_ = Conv3D(filters, 3, activation=activation, padding='same')(out_)\n else:\n out_ = Conv3D(filters, 3, activation=None, padding='same')(out_)\n out_ = Dropout(dropout)(out_)\n out_ = Activation(activation=activation)(out_)\n # it is said: As a rule of thumb, place the dropout after the activate function for all activation functions other than relu\n out_ = Conv3D(filter_out, 3, padding='same')(out_)\n return Model(inputs=in_, outputs=out_)\n\n\n# 3D 'resnet' (i.e. 
serial convolution + residual connection w/ variable 'filtersize')\ndef scrc3dflexfiltersize(input_shape, filters=64, filtersize=(3, 3, 3), filter_out=1, depth=20, activation='relu',\n dropout=0.5):\n \"\"\"\n ##################################\n # 3D ResNet (Serial Convolution + Residual Connection) w/ variable 'filtersize'\n ##################################\n \"\"\"\n in_ = Input(shape=input_shape)\n out_ = in_\n for i in range(depth - 1):\n if ((i != depth // 2) & (dropout > 0)) or dropout == 0:\n out_ = Conv3D(filters, filtersize, activation=activation, padding='same')(out_)\n else:\n out_ = Conv3D(filters, filtersize, activation=None, padding='same')(out_)\n out_ = Dropout(dropout)(out_)\n out_ = Activation(activation=activation)(out_)\n # it is said: As a rule of thumb, place the dropout after the activate function for all activation functions other than relu\n out_ = Conv3D(filter_out, filtersize, padding='same')(out_)\n return Model(inputs=in_, outputs=out_)\n\n\n# 2D 'resnet' (i.e. serial convolution + residual connection)\ndef scrc2d(input_shape, filters=64, filter_out=1, depth=20, activation='relu', dropout=0.5):\n \"\"\"\n ##################################\n # 2D ResNet (Serial Convolution + Residual Connection)\n ##################################\n \"\"\"\n in_ = Input(shape=input_shape)\n out_ = in_\n for i in range(depth - 1):\n if ((i != depth // 2) & (dropout > 0)) or dropout == 0:\n out_ = Conv2D(filters, 3, activation=activation, padding='same')(out_)\n else:\n out_ = Conv2D(filters, 3, activation=None, padding='same')(out_)\n out_ = Dropout(dropout)(out_)\n out_ = Activation(activation=activation)(out_)\n # it is said: As a rule of thumb, place the dropout after the activate function for all activation functions other than relu\n out_ = Conv2D(filter_out, 3, padding='same')(out_)\n return Model(inputs=in_, outputs=out_)\n" ]
[ [ "numpy.int" ] ]
KeepSilenceQP/tensorflow
[ "3a3d3591923501aa6fb03ef4da1d64400ad209d8" ]
[ "tensorflow/python/distribute/mirrored_variable_test.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test MirroredVariable in MirroredStrategy and MultiWorkerMirroredStrategy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.distribute import collective_all_reduce_strategy\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import distribution_strategy_context as ds_context\nfrom tensorflow.python.distribute import strategy_combinations\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import func_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import rnn\nfrom tensorflow.python.ops import rnn_cell_impl\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\n\n\ndef _replica_id():\n replica_id = 
ds_context.get_replica_context().replica_id_in_sync_group\n if not isinstance(replica_id, ops.Tensor):\n replica_id = constant_op.constant(replica_id)\n return replica_id\n\n\ndef _mimic_two_cpus():\n cpus = config.list_physical_devices(\"CPU\")\n\n config.set_logical_device_configuration(cpus[0], [\n context.LogicalDeviceConfiguration(),\n context.LogicalDeviceConfiguration(),\n ])\n\n\[email protected](\n combinations.combine(\n distribution=[\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n combinations.NamedDistribution(\n \"Collective2CPUs\",\n # pylint: disable=g-long-lambda\n lambda: collective_all_reduce_strategy.\n CollectiveAllReduceStrategy._from_local_devices((\n \"/device:CPU:0\", \"/device:CPU:1\")),\n required_gpus=0)\n ],\n mode=[\"graph\", \"eager\"]))\nclass MirroredVariableCreationTest(test.TestCase):\n \"\"\"Base class that tests mirrored variable creator.\n\n Currently it assumes all strategy objects have two replicas.\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n _mimic_two_cpus()\n\n def assertAllDifferent(self, objs):\n for i in range(len(objs)):\n for j in range(len(objs)):\n if i == j:\n continue\n self.assertIsNot(objs[i], objs[j])\n\n # TODO(priyag): Modify more tests to use this helper and check more\n # properties.\n def _test_mv_properties(self, var, name, strategy):\n self.assertIsInstance(var, values.MirroredVariable)\n self.assertEqual(name, var.name)\n self.assertIs(strategy, var.distribute_strategy)\n for i, d in enumerate(var._devices):\n self.assertEqual(d, strategy.experimental_local_results(var)[i].device)\n self.assertIs(\n strategy,\n strategy.experimental_local_results(var)[i]._distribute_strategy) # pylint: disable=protected-access\n\n def testVariableInFuncGraph(self, distribution):\n\n def model_fn():\n v = variable_scope.variable(2.0, name=\"bar\")\n ds_context.get_replica_context().merge_call(lambda _: _)\n return v\n\n with func_graph.FuncGraph(\"fg\").as_default(), distribution.scope():\n v1 = 
variable_scope.variable(1.0, name=\"foo\")\n v2 = distribution.extended.call_for_each_replica(model_fn)\n\n self._test_mv_properties(v1, \"foo:0\", distribution)\n self._test_mv_properties(v2, \"bar:0\", distribution)\n\n def testVariableWithTensorInitialValueInFunction(self, distribution):\n if not context.executing_eagerly():\n self.skipTest(\"`tf.function` is an eager-only feature\")\n\n v = [None]\n\n def model_fn():\n if v[0] is None:\n init_val = array_ops.zeros([])\n v[0] = variables.Variable(init_val)\n ds_context.get_replica_context().merge_call(lambda _: _)\n return v[0]\n\n @def_function.function(autograph=False)\n def make_v1():\n return distribution.experimental_local_results(\n distribution.extended.call_for_each_replica(model_fn))\n\n self.assertAllEqual([0, 0], make_v1())\n\n def testSingleVariable(self, distribution):\n\n def model_fn():\n # This variable should be created only once across the threads because of\n # special variable_creator functions used by\n # `distribution.extended.call_for_each_replica`.\n v = variable_scope.variable(1.0, name=\"foo\")\n ds_context.get_replica_context().merge_call(lambda _: _)\n return v\n\n with distribution.scope():\n result = distribution.extended.call_for_each_replica(model_fn)\n self._test_mv_properties(result, \"foo:0\", distribution)\n\n def testUnnamedVariable(self, distribution):\n\n def model_fn():\n v = variable_scope.variable(1.0)\n ds_context.get_replica_context().merge_call(lambda _: _)\n return v\n\n with distribution.scope():\n result = distribution.extended.call_for_each_replica(model_fn)\n self._test_mv_properties(result, \"Variable:0\", distribution)\n\n def testMultipleVariables(self, distribution):\n\n def model_fn():\n vs = []\n for i in range(5):\n vs.append(variable_scope.variable(1.0, name=\"foo\" + str(i)))\n ds_context.get_replica_context().merge_call(lambda _: _)\n return vs\n\n with distribution.scope():\n result = distribution.extended.call_for_each_replica(model_fn)\n for i, v in 
enumerate(result):\n self._test_mv_properties(v, \"foo\" + str(i) + \":0\", distribution)\n\n def testMultipleVariablesWithSameCanonicalName(self, distribution):\n\n def model_fn():\n vs = []\n vs.append(variable_scope.variable(1.0, name=\"foo/bar\"))\n vs.append(variable_scope.variable(1.0, name=\"foo_1/bar\"))\n vs.append(variable_scope.variable(1.0, name=\"foo_1/bar_1\"))\n vs.append(variable_scope.variable(1.0, name=\"foo/bar_1\"))\n ds_context.get_replica_context().merge_call(lambda _: _)\n return vs\n\n with distribution.scope():\n result = distribution.extended.call_for_each_replica(model_fn)\n for v in result:\n self.assertIsInstance(v, values.MirroredVariable)\n self.assertEqual(4, len(result))\n self.assertEqual(\"foo/bar:0\", result[0].name)\n self.assertEqual(\"foo_1/bar:0\", result[1].name)\n self.assertEqual(\"foo_1/bar_1:0\", result[2].name)\n self.assertEqual(\"foo/bar_1:0\", result[3].name)\n\n def testVariableWithSameCanonicalNameAcrossThreads(self, distribution):\n\n def model_fn():\n replica_id = self.evaluate(_replica_id())\n v = variable_scope.variable(1.0, name=\"foo_\" + str(replica_id))\n ds_context.get_replica_context().merge_call(lambda _: _)\n return v\n\n with distribution.scope():\n result = distribution.extended.call_for_each_replica(model_fn)\n self.assertIsInstance(result, values.MirroredVariable)\n # The resulting mirrored variable will use the name from the first device.\n self.assertEqual(\"foo_0:0\", result.name)\n\n def testWithVariableAndVariableScope(self, distribution):\n\n def model_fn():\n v0 = variable_scope.variable(1.0, name=\"var0\", aggregation=None)\n with variable_scope.variable_scope(\"common\"):\n v1 = variable_scope.variable(1.0, name=\"var1\")\n # This will pause the current thread, and execute the other thread.\n ds_context.get_replica_context().merge_call(lambda _: _)\n v2 = variable_scope.variable(\n 1.0,\n name=\"var2\",\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n 
aggregation=variable_scope.VariableAggregation.SUM)\n v3 = variable_scope.variable(\n 1.0,\n name=\"var3\",\n synchronization=variable_scope.VariableSynchronization.ON_WRITE,\n aggregation=variable_scope.VariableAggregation.MEAN)\n\n return v0, v1, v2, v3\n\n with distribution.scope():\n v = variable_scope.variable(1.0, name=\"var-main0\")\n self.assertEqual(\"var-main0:0\", v.name)\n\n result = distribution.extended.call_for_each_replica(model_fn)\n self.assertEqual(4, len(result))\n v0, v1, v2, v3 = result\n self.assertIsInstance(v0, values.MirroredVariable)\n self.assertEqual(\"var0:0\", v0.name)\n self.assertIsInstance(v1, values.MirroredVariable)\n self.assertEqual(\"common/var1:0\", v1.name)\n self.assertIsInstance(v2, values.SyncOnReadVariable)\n self.assertEqual(\"common/var2:0\", v2.name)\n self.assertEqual(variable_scope.VariableAggregation.SUM, v2.aggregation)\n self.assertIsInstance(v3, values.MirroredVariable)\n self.assertEqual(\"common/var3:0\", v3.name)\n self.assertEqual(variable_scope.VariableAggregation.MEAN, v3.aggregation)\n\n def testWithGetVariableAndVariableScope(self, distribution):\n\n def model_fn():\n v0 = variable_scope.get_variable(\"var0\", [1])\n with variable_scope.variable_scope(\"common\"):\n v1 = variable_scope.get_variable(\"var1\", [1])\n # This will pause the current thread, and execute the other thread.\n ds_context.get_replica_context().merge_call(lambda _: _)\n v2 = variable_scope.get_variable(\n \"var2\", [1],\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n aggregation=variable_scope.VariableAggregation.SUM)\n v3 = variable_scope.get_variable(\n \"var3\", [1],\n synchronization=variable_scope.VariableSynchronization.ON_WRITE,\n aggregation=variable_scope.VariableAggregation.MEAN)\n\n return v0, v1, v2, v3\n\n with distribution.scope():\n with variable_scope.variable_scope(\"main\"):\n v = variable_scope.get_variable(\"var-main0\", [1])\n self.assertEqual(\"main/var-main0:0\", v.name)\n\n result = 
distribution.extended.call_for_each_replica(model_fn)\n self.assertEqual(4, len(result))\n v0, v1, v2, v3 = result\n self.assertIsInstance(v0, values.MirroredVariable)\n self.assertEqual(\"main/var0:0\", v0.name)\n self.assertIsInstance(v1, values.MirroredVariable)\n self.assertEqual(\"main/common/var1:0\", v1.name)\n self.assertIsInstance(v2, values.SyncOnReadVariable)\n self.assertEqual(\"main/common/var2:0\", v2.name)\n self.assertEqual(variable_scope.VariableAggregation.SUM, v2.aggregation)\n self.assertIsInstance(v3, values.MirroredVariable)\n self.assertEqual(\"main/common/var3:0\", v3.name)\n self.assertEqual(variable_scope.VariableAggregation.MEAN,\n v3.aggregation)\n\n def testOnlyFirstReplicaUpdatesVariables(self, distribution):\n\n def create_fn():\n aggregation = variable_scope.VariableAggregation.ONLY_FIRST_REPLICA\n v0 = variable_scope.variable(\n 2.0,\n name=\"on_read\",\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n aggregation=aggregation)\n v1 = variable_scope.variable(\n 3.0,\n name=\"on_write\",\n synchronization=variable_scope.VariableSynchronization.ON_WRITE,\n aggregation=aggregation)\n return v0, v1\n\n with distribution.scope():\n v0, v1 = distribution.extended.call_for_each_replica(create_fn)\n self.evaluate(v0.initializer)\n self.assertEqual(\n 2.0, self.evaluate(distribution.experimental_local_results(v0)[0]))\n self.assertEqual(\n 2.0, self.evaluate(distribution.experimental_local_results(v0)[1]))\n self.assertEqual(2.0, self.evaluate(distribution.extended.read_var(v0)))\n self.evaluate(v1.initializer)\n self.assertEqual(\n 3.0, self.evaluate(distribution.experimental_local_results(v1)[0]))\n self.assertEqual(\n 3.0, self.evaluate(distribution.experimental_local_results(v1)[1]))\n self.assertEqual(3.0, self.evaluate(distribution.extended.read_var(v1)))\n\n def replica_id_plus_one():\n return math_ops.cast(_replica_id() + 1, dtype=dtypes.float32)\n\n # Update using the assign_add member function.\n def 
update_member_fn():\n update0 = v0.assign_add(5.0 * replica_id_plus_one())\n update1 = v1.assign_add(7.0 * replica_id_plus_one())\n return update0, update1\n\n update0a, update1a = distribution.extended.call_for_each_replica(\n update_member_fn)\n\n # Update \"sync on read\" variable.\n self.evaluate(distribution.group(update0a))\n local_results = self.evaluate(distribution.experimental_local_results(v0))\n self.assertEqual(2.0 + 5.0, local_results[0])\n # Writes are not synchronized for \"sync on read\" variables,\n # so device[1] can end up with a different value.\n self.assertEqual(2.0 + 2 * 5.0, local_results[1])\n # Always reads from device 0.\n self.assertEqual(2.0 + 5.0,\n self.evaluate(distribution.extended.read_var(v0)))\n\n # Update \"sync on write\" variable.\n self.evaluate(distribution.group(update1a))\n local_results1 = self.evaluate(\n distribution.experimental_local_results(v1))\n self.assertEqual(3.0 + 7.0, local_results1[0])\n # Writes are synchronized for v1, only the argument to assign_add on\n # device[0] is used.\n self.assertEqual(3.0 + 7.0, local_results1[1])\n self.assertEqual(3.0 + 7.0,\n self.evaluate(distribution.extended.read_var(v1)))\n\n # Update using state_ops.assign_add global function.\n def update_state_ops_fn():\n update0 = state_ops.assign_add(v0, 11.0 * replica_id_plus_one())\n update1 = state_ops.assign_add(v1, 13.0 * replica_id_plus_one())\n return update0, update1\n\n update0b, update1b = distribution.extended.call_for_each_replica(\n update_state_ops_fn)\n self.evaluate(distribution.group(update0b))\n\n # Update \"sync on read\" variable.\n local_results = self.evaluate(distribution.experimental_local_results(v0))\n self.assertEqual(2.0 + 5.0 + 11.0, local_results[0])\n self.assertEqual(2.0 + 2 * 5.0 + 2 * 11.0, local_results[1])\n self.assertEqual(2.0 + 5.0 + 11.0,\n self.evaluate(distribution.extended.read_var(v0)))\n\n # Update \"sync on write\" variable.\n self.evaluate(distribution.group(update1b))\n local_results1 = 
self.evaluate(\n distribution.experimental_local_results(v1))\n self.assertEqual(3.0 + 7.0 + 13.0, local_results1[0])\n self.assertEqual(3.0 + 7.0 + 13.0, local_results1[1])\n self.assertEqual(3.0 + 7.0 + 13.0,\n self.evaluate(distribution.extended.read_var(v1)))\n\n def testNoneSynchronizationWithGetVariable(self, distribution):\n with distribution.scope():\n with self.assertRaisesRegex(\n ValueError, \"`NONE` variable synchronization mode is not \"\n \"supported with \"):\n variable_scope.get_variable(\n \"v\", [1],\n synchronization=variable_scope.VariableSynchronization.NONE)\n\n def testNoneSynchronizationWithVariable(self, distribution):\n with distribution.scope():\n with self.assertRaisesRegex(\n ValueError, \"`NONE` variable synchronization mode is not \"\n \"supported with \"):\n variable_scope.variable(\n 1.0,\n name=\"v\",\n synchronization=variable_scope.VariableSynchronization.NONE)\n\n def testInvalidSynchronizationWithVariable(self, distribution):\n with distribution.scope():\n with self.assertRaisesRegex(\n ValueError, \"Invalid variable synchronization mode: Invalid for \"\n \"variable: v\"):\n variable_scope.variable(1.0, name=\"v\", synchronization=\"Invalid\")\n\n def testInvalidAggregationWithGetVariable(self, distribution):\n with distribution.scope():\n with self.assertRaisesRegex(\n ValueError, \"Invalid variable aggregation mode: invalid for \"\n \"variable: v\"):\n variable_scope.get_variable(\n \"v\", [1],\n synchronization=variable_scope.VariableSynchronization.ON_WRITE,\n aggregation=\"invalid\")\n\n def testInvalidAggregationWithVariable(self, distribution):\n with distribution.scope():\n with self.assertRaisesRegex(\n ValueError, \"Invalid variable aggregation mode: invalid for \"\n \"variable: v\"):\n variable_scope.variable(\n 1.0,\n name=\"v\",\n synchronization=variable_scope.VariableSynchronization.ON_WRITE,\n aggregation=\"invalid\")\n\n def testNonMatchingVariableCreation(self, distribution):\n\n def model_fn(name):\n v = 
variable_scope.variable(1.0, name=name)\n ds_context.get_replica_context().merge_call(lambda _: _)\n return v\n\n with distribution.scope():\n names = values.PerReplica((\"foo\", \"bar\"))\n with self.assertRaises(RuntimeError):\n _ = distribution.extended.call_for_each_replica(model_fn, args=(names,))\n\n def testSyncOnReadVariable(self, distribution):\n if context.executing_eagerly():\n self.skipTest(\"Skip the test due to b/137400477.\")\n\n all_v_sum = {}\n all_v_mean = {}\n components_sum = {}\n components_mean = {}\n\n def model_fn():\n replica_id = self.evaluate(_replica_id())\n v_sum = variable_scope.variable(\n 1.0,\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n aggregation=variable_scope.VariableAggregation.SUM)\n v_mean = variable_scope.variable(\n 4.0,\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n aggregation=variable_scope.VariableAggregation.MEAN)\n self.assertIsInstance(v_sum, values.SyncOnReadVariable)\n self.assertIsInstance(v_mean, values.SyncOnReadVariable)\n updates = [\n v_sum.assign_add(2.0 + replica_id),\n v_mean.assign(6.0 * replica_id)\n ]\n all_v_sum[replica_id] = v_sum\n all_v_mean[replica_id] = v_mean\n c_sum = v_sum._get()\n c_mean = v_mean._get()\n components_sum[replica_id] = c_sum\n components_mean[replica_id] = c_mean\n self.assertIsNot(v_sum, c_sum)\n self.assertIsNot(v_mean, c_mean)\n return updates, v_sum, v_mean, c_sum, c_mean\n\n with distribution.scope():\n # Create \"sum\" and \"mean\" versions of SyncOnReadVariables.\n ret_ops, ret_v_sum, ret_v_mean, regrouped_sum, regrouped_mean = (\n distribution.extended.call_for_each_replica(model_fn))\n # Should see the same wrapping instance in all replicas.\n self.assertIs(all_v_sum[0], ret_v_sum)\n self.assertIs(all_v_mean[0], ret_v_mean)\n self.assertIs(all_v_sum[0], all_v_sum[1])\n self.assertIs(all_v_mean[0], all_v_mean[1])\n\n # Regroup should recover the same wrapper.\n self.assertIs(ret_v_sum, regrouped_sum)\n self.assertIs(ret_v_mean, 
regrouped_mean)\n self.assertIsNot(components_sum[0], components_sum[1])\n self.assertIsNot(components_mean[0], components_mean[1])\n\n # Apply updates\n self.evaluate(variables.global_variables_initializer())\n self.evaluate([\n y for x in ret_ops # pylint: disable=g-complex-comprehension\n for y in distribution.experimental_local_results(x)\n ])\n expected_sum = 0.0\n expected_mean = 0.0\n for i, _ in enumerate(distribution.extended.worker_devices):\n # Should see different values on different devices.\n v_sum_value = self.evaluate(\n distribution.experimental_local_results(ret_v_sum)[i].read_value())\n v_mean_value = self.evaluate(\n distribution.experimental_local_results(ret_v_mean)[i].read_value())\n expected = i + 3.0\n self.assertEqual(expected, v_sum_value)\n expected_sum += expected\n expected = i * 6.0\n self.assertEqual(expected, v_mean_value)\n expected_mean += expected\n expected_mean /= len(distribution.extended.worker_devices)\n\n # Without get(device), should return the value you get by\n # applying the reduction across all replicas (whether you use\n # read_var(), get(), or nothing).\n self.assertEqual(expected_sum, self.evaluate(\n distribution.extended.read_var(ret_v_sum)))\n self.assertEqual(expected_mean, self.evaluate(\n distribution.extended.read_var(ret_v_mean)))\n self.assertEqual(expected_sum, self.evaluate(ret_v_sum._get()))\n self.assertEqual(expected_mean, self.evaluate(ret_v_mean._get()))\n self.assertEqual(expected_sum, self.evaluate(ret_v_sum))\n self.assertEqual(expected_mean, self.evaluate(ret_v_mean))\n\n # TODO(priyag): Update this test to work in eager mode as well.\n def testDynamicRnnVariables(self, distribution):\n\n def model_fn():\n inputs = constant_op.constant(2 * [2 * [[0.0, 1.0, 2.0, 3.0, 4.0]]])\n cell_fw = rnn_cell_impl.LSTMCell(300)\n cell_bw = rnn_cell_impl.LSTMCell(300)\n (outputs, _) = rnn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, inputs, dtype=dtypes.float32)\n return outputs\n\n with context.graph_mode(), 
distribution.scope():\n result = distribution.extended.call_for_each_replica(model_fn)\n # Two variables are created by the RNN layer.\n self.assertEqual(2, len(result))\n for v in result:\n self.assertIsInstance(v, values.DistributedValues)\n _, v1 = distribution.experimental_local_results(v)\n self.assertStartsWith(v1._op.name, \"replica_1/\")\n\n def testSyncOnReadVariableUpdate(self, distribution):\n if context.executing_eagerly():\n self.skipTest(\"Skip the test due to b/137400477.\")\n\n def model_fn():\n v_sum = variable_scope.variable(\n 1.0,\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n aggregation=variable_scope.VariableAggregation.SUM)\n self.assertIsInstance(v_sum, values.SyncOnReadVariable)\n return v_sum\n\n def update(var, value):\n return var.assign(value)\n\n with distribution.scope():\n ret_v_sum = distribution.extended.call_for_each_replica(model_fn)\n\n # Initialize variables.\n self.evaluate(variables.global_variables_initializer())\n # Assert that the aggregated value of the sync on read var is the sum\n # of the individual values before running the update ops.\n self.assertEqual(\n 1.0,\n self.evaluate(\n distribution.experimental_local_results(ret_v_sum)\n [0].read_value()))\n self.assertEqual(2.0, self.evaluate(ret_v_sum))\n\n # Apply updates.\n update_ops = distribution.extended.update(\n ret_v_sum, update, args=(5.0,), group=False)\n self.evaluate(update_ops)\n # Assert that the aggregated value of the sync on read vars is the sum\n # of the individual values after running the update ops.\n self.assertEqual(\n 5.0,\n self.evaluate(\n distribution.experimental_local_results(ret_v_sum)\n [0].read_value()))\n self.assertEqual(10.0, self.evaluate(ret_v_sum))\n\n def testVarDistributeStrategy(self, distribution):\n with distribution.scope():\n mirrored = variable_scope.variable(1.0)\n sync_on_read = variable_scope.variable(\n 1.0, synchronization=variable_scope.VariableSynchronization.ON_READ)\n 
self.assertIs(distribution, mirrored.distribute_strategy)\n self.assertIs(distribution, sync_on_read.distribute_strategy)\n\n\nif __name__ == \"__main__\":\n test.main()\n" ]
[ [ "tensorflow.python.ops.variables.Variable", "tensorflow.python.eager.context.graph_mode", "tensorflow.python.distribute.values.PerReplica", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.variable_scope.variable", "tensorflow.python.eager.context.LogicalDeviceConfiguration", "tensorflow.python.ops.rnn.bidirectional_dynamic_rnn", "tensorflow.python.distribute.distribution_strategy_context.get_replica_context", "tensorflow.python.eager.test.main", "tensorflow.python.framework.func_graph.FuncGraph", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.ops.rnn_cell_impl.LSTMCell", "tensorflow.python.framework.config.list_physical_devices", "tensorflow.python.eager.def_function.function", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.distribute.collective_all_reduce_strategy.CollectiveAllReduceStrategy._from_local_devices", "tensorflow.python.framework.constant_op.constant" ] ]
nachovizzo/pyLiDAR-SLAM
[ "fd8fcc23380abd0b2ef34388a9f177c4cd41575e" ]
[ "src/pylidar_slam/odometry/odometry_runner.py" ]
[ "import dataclasses\nimport logging\nfrom pathlib import Path\nfrom typing import Optional\nimport time\n\nimport os\nimport torch\n\nfrom abc import ABC\nimport numpy as np\nfrom omegaconf import OmegaConf\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport shutil\n\n# Hydra and OmegaConf imports\nfrom hydra.core.config_store import ConfigStore\nfrom hydra.conf import dataclass, MISSING, field\n\n# Project Imports\nfrom pylidar_slam.common.pose import Pose\nfrom pylidar_slam.common.torch_utils import collate_fun\nfrom pylidar_slam.common.utils import check_sizes, assert_debug, get_git_hash\nfrom pylidar_slam.dataset import DatasetLoader, DATASET\nfrom pylidar_slam.eval.eval_odometry import OdometryResults\nfrom pylidar_slam.dataset.configuration import DatasetConfig\n\nfrom pylidar_slam.slam import SLAMConfig, SLAM\n\n\n@dataclass\nclass SLAMRunnerConfig:\n \"\"\"The configuration dataclass\"\"\"\n\n # --------------------------------\n # SLAMConfig\n slam: SLAMConfig = MISSING\n dataset: DatasetConfig = MISSING\n\n # ------------------\n # Default parameters\n log_dir: str = field(default_factory=os.getcwd)\n num_workers: int = 2\n pin_memory: bool = True\n device: str = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n pose: str = \"euler\"\n\n fail_dir: str = field(default_factory=os.getcwd) # By default the fail_dir is the same directory\n move_if_fail: bool = False\n\n # ----------------\n # Debug parameters\n viz_num_pointclouds: int = 200\n debug: bool = True\n\n\n# -------------\n# HYDRA Feature\n# Automatically casts the config as a SLAMConfig object, and raises errors if it cannot do so\ncs = ConfigStore.instance()\ncs.store(name=\"slam_config\", node=SLAMRunnerConfig)\n\n\nclass SLAMRunner(ABC):\n \"\"\"\n A SLAMRunner runs a LiDAR SLAM algorithm on a set of pytorch datasets,\n And if the ground truth is present, it evaluates the performance of the algorithm and saved the results to disk\n \"\"\"\n\n def __init__(self, config: 
SLAMRunnerConfig):\n super().__init__()\n\n self.config: SLAMRunnerConfig = config\n\n # Pytorch parameters extracted\n self.num_workers = self.config.num_workers\n self.batch_size = 1\n self.pin_memory = self.config.pin_memory\n self.log_dir = self.config.log_dir\n self.device = torch.device(self.config.device)\n\n self.pose = Pose(self.config.pose)\n self.viz_num_pointclouds = self.config.viz_num_pointclouds\n\n # Dataset config\n dataset_config: DatasetConfig = self.config.dataset\n self.dataset_loader: DatasetLoader = DATASET.load(dataset_config)\n\n self.slam_config: SLAMConfig = self.config.slam\n\n def save_config(self):\n \"\"\"Saves the config to Disk\"\"\"\n with open(str(Path(self.log_dir) / \"config.yaml\"), \"w\") as config_file:\n # Add the git hash to improve tracking of modifications\n config_dict = self.config.__dict__\n\n git_hash = get_git_hash()\n if git_hash is not None:\n config_dict[\"git_hash\"] = git_hash\n config_dict[\"_working_dir\"] = os.getcwd()\n config_file.write(OmegaConf.to_yaml(config_dict))\n\n def handle_failure(self):\n \"\"\"Handles Failure cases of the SLAM runner\"\"\"\n # In case of failure move the current working directory and its content to another directory\n if self.config.move_if_fail:\n try:\n fail_dir = Path(self.config.fail_dir)\n assert_debug(fail_dir.exists(),\n f\"[SLAM] -- The `failure` directory {str(fail_dir)} does not exist on disk\")\n current_dir: Path = Path(os.getcwd())\n\n if fail_dir.absolute() == current_dir.absolute():\n logging.warning(\n \"The `fail_dir` variable points to the current working directory. 
It will not be moved.\")\n return\n\n destination_dir = fail_dir\n if not destination_dir.exists():\n destination_dir.mkdir()\n\n shutil.move(str(current_dir), str(destination_dir))\n assert_debug(not current_dir.exists(), \"Could not move current working directory\")\n except (Exception, AssertionError, KeyboardInterrupt):\n logging.warning(\"[PyLIDAR-SLAM] Could not move the directory\")\n\n def run_odometry(self):\n \"\"\"Runs the LiDAR Odometry algorithm on the different datasets\"\"\"\n try:\n # Load the Datasets\n datasets: list = self.load_datasets()\n # Load the Slam algorithm\n slam = self.load_slam_algorithm()\n self.save_config()\n except (KeyboardInterrupt, Exception) as e:\n self.handle_failure()\n raise\n\n for sequence_name, dataset in datasets:\n # Build dataloader\n dataloader = DataLoader(dataset,\n collate_fn=collate_fun,\n pin_memory=self.pin_memory,\n batch_size=self.batch_size,\n num_workers=self.num_workers)\n\n # Init the SLAM\n slam.init()\n\n elapsed = 0.0\n relative_ground_truth = self.ground_truth(sequence_name)\n\n def catch_exception():\n _relative_poses = slam.get_relative_poses()\n if _relative_poses is not None and len(_relative_poses) > 0:\n self.save_and_evaluate(sequence_name, _relative_poses, None)\n print(\"[ERRROR] running SLAM : the estimated trajectory was dumped\")\n self.handle_failure()\n\n try:\n for b_idx, data_dict in self._progress_bar(dataloader, desc=f\"Sequence {sequence_name}\"):\n data_dict = self._send_to_device(data_dict)\n start = time.time()\n\n # Process next frame\n slam.process_next_frame(data_dict)\n\n # Measure the time spent on the processing of the next frame\n elapsed_sec = time.time() - start\n elapsed += elapsed_sec\n except KeyboardInterrupt:\n catch_exception()\n raise\n except (KeyboardInterrupt, Exception, RuntimeError, AssertionError) as e:\n catch_exception()\n raise e\n\n # Dump trajectory constraints in case of loop closure\n slam.dump_all_constraints(str(Path(self.log_dir) / 
sequence_name))\n\n # Evaluate the SLAM if it has a ground truth\n relative_poses = slam.get_relative_poses()\n check_sizes(relative_poses, [-1, 4, 4])\n if relative_ground_truth is not None:\n check_sizes(relative_ground_truth, [relative_poses.shape[0], 4, 4])\n\n self.save_and_evaluate(sequence_name, relative_poses, relative_ground_truth, elapsed=elapsed)\n\n def save_and_evaluate(self, sequence_name: str,\n trajectory: np.ndarray,\n ground_truth: Optional[np.ndarray],\n elapsed: Optional[float] = None):\n \"\"\"Saves metrics and trajectory in a folder on disk\"\"\"\n\n odo_results = OdometryResults(str(Path(self.log_dir) / sequence_name))\n odo_results.add_sequence(sequence_name,\n trajectory,\n ground_truth,\n elapsed)\n odo_results.close()\n\n @staticmethod\n def _progress_bar(dataloader: DataLoader, desc: str = \"\"):\n return tqdm(enumerate(dataloader, 0),\n desc=desc,\n total=len(dataloader),\n ncols=120, ascii=True)\n\n def _send_to_device(self, data_dict: dict):\n output_dict: dict = {}\n for key, item in data_dict.items():\n if isinstance(item, torch.Tensor):\n output_dict[key] = item.to(device=self.device)\n else:\n output_dict[key] = item\n return output_dict\n\n def load_datasets(self) -> list:\n \"\"\"\n Loads the Datasets for which the odometry is evaluated\n\n Returns\n -------\n A list of pairs (sequence_name :str, dataset_config :Dataset)\n Where :\n sequence_name is the name of a sequence which will be constructed\n \"\"\"\n train_dataset, _, _, _ = self.dataset_loader.sequences()\n assert_debug(train_dataset is not None)\n pairs = [(train_dataset[1][idx], train_dataset[0][idx]) for idx in range(len(train_dataset[0]))]\n return pairs\n\n def load_slam_algorithm(self) -> SLAM:\n \"\"\"\n Returns the SLAM algorithm which will be run\n \"\"\"\n slam = SLAM(self.config.slam,\n projector=self.dataset_loader.projector(),\n pose=self.pose,\n device=self.device,\n viz_num_pointclouds=self.viz_num_pointclouds)\n return slam\n\n def ground_truth(self, 
sequence_name: str) -> Optional[np.ndarray]:\n \"\"\"\n Returns the ground truth associated with the sequence\n \"\"\"\n return self.dataset_loader.get_ground_truth(sequence_name)\n" ]
[ [ "torch.device", "torch.cuda.is_available", "torch.utils.data.DataLoader" ] ]
python-marketing-research/python-marketing-research-1ed
[ "9fa18b68f5b165149e0a4d7a89a403b8ad337fb6" ]
[ "python_marketing_research_functions/chapter7.py" ]
[ "\"\"\"\nThis module contains all function from Chapter 7 of Python for \nMarketing Research and Analytics\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom statsmodels.graphics import gofplots, regressionplots\n\n\ndef generate_satisfaction_scores(mean, std, halo,\n score_range=(0, 100)):\n \"\"\"Simulate satisfaction scores of a survey questions from normal\n distributions.\n \n arguments:\n mean: desired mean of satisfaction score distribution\n std: desired standard deviation of satisfaction score distribution\n halo: an array of individual-level effects, sets the size of returned array\n score_range: tuple of form (max, min), values outside range are clipped\n\n returns:\n scores: array of simulated satisfaction scores\n\"\"\"\n\n # Draw scores from a normal distribution\n scores = np.random.normal(loc=mean, scale=std, size=len(halo))\n \n # Add the halo\n scores = scores + halo\n \n # Floor the scores so that they are all integers and clip to limit range\n scores = np.floor(scores)\n scores = np.clip(scores, score_range[0], score_range[1])\n\n return scores\n\n\ndef plot_gof_figures(model):\n \"\"\"Plot a multipanel figure of goodness of fit plots\n \n arguments:\n model: a fitted ols() object from statsmodels.formula.api\n \n output:\n Prints a multipanel figure including:\n * Residual vs fitted value plot\n * Scale-location plot\n * Q-Q plot\n * Leverage vs normalized residual plot\n \"\"\"\n \n fig = plt.figure(figsize=(16,16))\n ax = plt.subplot(2,2,1)\n sns.residplot(model.fittedvalues, model.resid, lowess=True)\n plt.xlabel('Fitted values')\n plt.ylabel('Residuals')\n plt.title('Residuals vs Fitted')\n ax = plt.subplot(2,2,2)\n _=gofplots.qqplot(model.resid, fit=True, line='45', ax=ax)\n plt.title('Normal Q-Q')\n ax = plt.subplot(2,2,3)\n plt.scatter(model.fittedvalues, np.abs(model.resid)**.5)\n plt.xlabel('Fitted values')\n plt.ylabel('Square root of the standardized residuals')\n plt.title('Scale-Location')\n ax = 
plt.subplot(2,2,4)\n _ = regressionplots.plot_leverage_resid2(model, ax=ax)" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "numpy.floor", "numpy.clip", "matplotlib.pyplot.ylabel", "numpy.abs", "matplotlib.pyplot.subplot" ] ]
MarcTLaw/DRPR_Flowers_experiment
[ "81b4131ed56da914cace6dc83e1a9c9d918f165e" ]
[ "flowers_train_and_test.py" ]
[ "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nimport numpy as np\nimport sys\n\nbatch_size = 5000\nthreshold_compare = -35.0\n\ntest_on_test_dataset = True\nif test_on_test_dataset:\n test_dataset_string = \"test\"\nelse:\n test_dataset_string = \"val\"\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch Flowers zero shot learning experiment')\nparser.add_argument('--batch-size', type=int, default=batch_size, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--test-batch-size', type=int, default=10000, metavar='N',\n help='input batch size for testing (default: 1000)')\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\nparser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nmy_temperature = 50\n\n\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\nelse:\n torch.manual_seed(args.seed)\n\ndataset = 'flowers_data'\nuse_both_proba_and_target = False\n\n#loading training image data\n\nproba = np.load('%s/train_new_proba_10.npy' % dataset).astype(float)\nproba = torch.from_numpy(proba).float()\n\ndata = torch.from_numpy(np.load('%s/train_classes_data.npy' % 
dataset)).float()\n\ntrain_batch_size = 735\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\ntrain_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(data, proba),\n batch_size=train_batch_size, shuffle=True, **kwargs)\n\n\n#loading test (or validation) image data\n\ntest_labels = np.load('%s/%s_classes_onehot.npy' % (dataset, test_dataset_string)).astype(float)\ntest_labels = torch.from_numpy(test_labels).float()\n\ntest_data = torch.from_numpy(np.load('%s/%s_classes_data.npy' % (dataset, test_dataset_string))).float()\n\ntest_batch_size = 100\ntest_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(test_data, test_labels),\n batch_size=test_batch_size, shuffle=True, **kwargs)\n\n# loading attribute information\n\ntest_centers = torch.from_numpy(np.load('%s/%s_classes_centroids_l2normalized.npy' % (dataset, test_dataset_string))).float().cuda()\ntest_centers = Variable(test_centers)\n\ntrain_centers = torch.from_numpy(np.load('%s/train_classes_centroids_l2normalized.npy' % dataset)).float().cuda()\ntrain_centers = Variable(train_centers).cuda()\n\nnb_test_categories = 50\n\n#################################\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.hidden = nn.Linear(1024, 1024)\n self.hidden2 = nn.Linear(1024, 1024)\n self.out = nn.Linear(1024,512)\n\n def forward(self, x):\n x = F.tanh(self.hidden(x))\n x = F.tanh(self.hidden2(x))\n x = self.out(x)\n return x\n\nmodel = Net()\nmodel2 = Net()\nif args.cuda:\n model.cuda()\n model2.cuda()\n\n\noptimizer = optim.Adam(list(model.parameters()) + list(model2.parameters()), lr=0.00001, weight_decay=0)\n\n\n\ndef softmax_class(x_query, x_proto, weights=None, temperature=100):\n n_example = x_query.size(0)\n n_query = n_example\n n_class = x_proto.size(0)\n d = x_query.size(1)\n assert d == x_proto.size(1)\n \n y = torch.pow(x_proto.unsqueeze(0).expand(n_query, n_class, d) - x_query.unsqueeze(1).expand(n_query, n_class, 
d), 2).sum(2).squeeze()\n y = -y / temperature\n \n a = y.size()\n if len(a) > 1:\n [ymax,ymax_indices] = torch.max(y,1)\n else:\n [ymax,ymax_indices] = torch.max(y,0)\n thres_indices = ((torch.le(ymax,threshold_compare)).data).cpu().numpy() #threshold_vector\n nb_zero_indices = np.sum(thres_indices)\n cpt_index = -1\n if nb_zero_indices:\n ymax_indices = (ymax_indices.data).cpu().numpy()\n for i in range(n_example):\n cpt_index += 1\n if thres_indices[i]:\n qqqqq = ymax_indices[i]\n for j in range(n_class):\n if j == qqqqq:\n y[cpt_index,j] = 0\n else:\n y[cpt_index,j] = -10\n \n y = torch.exp(y)\n if weights is not None:\n y = y * weights.unsqueeze(0).expand_as(y)\n y = y / y.sum(1, keepdim=True).expand_as(y)\n return y\n\ndef kldivergence(y_target, y_pred):\n return (y_target * torch.log(((y_target) / (y_pred)))).sum()\n\ndef zeroshot_train(epoch):\n model.train()\n model2.train()\n enum_train = enumerate(train_loader)\n for batch_idx, (data, target) in enum_train:\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data), Variable(target)\n optimizer.zero_grad()\n z = model(data)\n pi = None\n mu = model2(train_centers)\n y_hat = softmax_class(z, mu, weights=pi,temperature=my_temperature)\n loss = kldivergence(target,y_hat)\n loss.backward()\n optimizer.step()\n\n\ndef my_test():\n model.eval()\n correct = 0\n accuracy_dict = {}\n category_dict = {}\n l2_normalize_data = False\n l2_normalize_centroids = False\n for data, target in test_loader:\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data, volatile=True), Variable(target)\n z = model(data)\n if l2_normalize_data:\n qn = torch.norm(z, p=2, dim=1).detach()\n z = z.div(qn.expand_as(z))\n test_c = model2(test_centers)\n if l2_normalize_centroids:\n qn = torch.norm(test_c, p=2, dim=1).detach()\n test_c = test_c.div(qn.expand_as(test_c))\n y_hat = softmax_class(z, test_c)\n [a1, a2] = torch.max(y_hat,1)\n [b1, b2] = torch.max(target,1)\n\n 
a3 = a2.size()\n for uu in range(a3[0]):\n true_category = b2[uu].cpu().data.numpy().astype(int)[0]\n predicted_category = a2[uu].cpu().data.numpy().astype(int)[0]\n correct_prediction = (predicted_category == true_category)\n correct += (correct_prediction)\n if true_category in category_dict:\n category_dict[true_category] += 1\n accuracy_dict[true_category] += int(correct_prediction)\n else:\n category_dict[true_category] = 1\n accuracy_dict[true_category] = int(correct_prediction)\n mean_accuracy = 0\n cpt = 0\n for (k,v) in category_dict.items():\n cpt += 1\n mean_accuracy += float(accuracy_dict[k]) / v\n mean_accuracy = mean_accuracy / cpt\n #print(\"test mean accuracy: %f percent\" % (100.0 * mean_accuracy))\n return (100.0 * mean_accuracy)\n\n\n\nnb_epochs = 1000\n\nscore_list = []\nepoch_list = []\n\nfor epoch in range(1, nb_epochs+1):\n zeroshot_train(epoch) \n if not(epoch % 3000):\n my_temperature = my_temperature * 0.9\n if not(epoch % 100): \n score = my_test() \n print(\"test mean accuracy %f : current epoch %d\" % (score,epoch))\n score_list.append(score)\n epoch_list.append(epoch)\n\n\nscores_file = open('%s_flowers_scores.txt' % test_dataset_string,'w')\nfor iscore in range(len(score_list)):\n scores_file.write(\"\\nepoch %f : %d\" % (score_list[iscore],epoch_list[iscore]))\nscores_file.close()\n" ]
[ [ "torch.nn.Linear", "torch.cuda.manual_seed", "torch.max", "torch.autograd.Variable", "numpy.sum", "torch.norm", "numpy.load", "torch.le", "torch.from_numpy", "torch.manual_seed", "torch.cuda.is_available", "torch.log", "torch.exp", "torch.utils.data.TensorDataset" ] ]
Bertinus/gene-graph-analysis
[ "a596987f075844f853e7ecd8306b62d068c4aac8" ]
[ "clinical_pipeline/adjacency_regularized_classification.py" ]
[ "import meta_dataloader.TCGA\nfrom torch.utils.data import DataLoader, TensorDataset\nimport numpy as np\nimport pandas as pd\nfrom torch import Tensor\nfrom torch import nn\nimport networkx as nx\nimport torch\nfrom torch.nn.parameter import Parameter\nfrom torch.nn import functional as F\nfrom torch.nn import init\nimport math\nimport os\nimport argparse\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import StratifiedKFold\nfrom data.gene_graphs import StringDBGraph, HetIOGraph, FunCoupGraph, HumanNetV2Graph, GeneManiaGraph, \\\n RegNetGraph\n\n\n########################################################################################################################\n# Define model\n########################################################################################################################\n\nclass MaskedNetwork(torch.nn.Module):\n \"\"\"\n One fully connected masked by adj matrix and then sigmoid and scalar product with the vector having 1 in every\n component\n \"\"\"\n def __init__(self, input_dim, output_dim, adjacency_matrix=None):\n super(MaskedNetwork, self).__init__()\n self.weight = Parameter(torch.Tensor(input_dim, input_dim))\n self.bias = None # Parameter(torch.Tensor(input_dim))\n self.adj = adjacency_matrix\n self.nonlin = torch.nn.ReLU() # torch.nn.Sigmoid()\n self.second_layer = nn.Linear(input_dim, 1, bias=True)\n self.reset_parameters()\n\n def reset_parameters(self):\n init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n if self.bias is not None:\n fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in)\n init.uniform_(self.bias, -bound, bound)\n\n def forward(self, x):\n if self.adj is not None:\n # return self.second_layer(self.sig(F.linear(x, self.weight * self.adj, self.bias)))\n return self.second_layer(self.nonlin(F.linear(x, self.weight * self.adj)))\n else:\n # return self.second_layer(self.sig(F.linear(x, self.weight, self.bias)))\n return 
self.second_layer(self.nonlin(F.linear(x, self.weight)))\n\n########################################################################################################################\n# Get adjacency matrix and corresponding data with right column ordering\n########################################################################################################################\n\n\ndef getdata(graph_index, datastore=\"data\",\n covered_genes=None):\n \"\"\"\n :param datastore: /Users/paul/Desktop/user1/PycharmProjects/gene-graph-conv/genegraphconv/data (local)\n /network/home/bertinpa/Documents/gene-graph-conv/genegraphconv/data (server)\n :return:\n \"\"\"\n if graph_names_list[graph_index] == \"landmark\":\n graph_name = graph_names_list[graph_index]\n print(\"Training with the\", graph_names_list[graph_index], \"graph\")\n landmarkgene_path = \"data/datastore/random_landmark_genes_seed0.npy\"\n landmark_genes = np.load(landmarkgene_path)\n is_landmark = [int(i in landmark_genes) for i in covered_genes]\n adj_matrix = np.array([is_landmark for i in covered_genes])\n adj_matrix = adj_matrix + adj_matrix.T + np.identity(len(covered_genes))\n adj_matrix = torch.Tensor(adj_matrix.astype(bool).astype(int))\n if torch.cuda.is_available():\n adj_matrix = adj_matrix.cuda()\n\n elif graph_names_list[graph_index]:\n print(\"Training with the\", graph_names_list[graph_index], \"graph\")\n graph = graph_initializer_list[graph_index](datastore=datastore)\n graph_name = graph_names_list[graph_index]\n\n # Restrict to covered genes only\n if covered_genes is None:\n graph_genes = list(graph.nx_graph.nodes)\n dataset_genes = task.gene_ids\n covered_genes = list(set(graph_genes).intersection(dataset_genes))\n\n # Get subggraph\n subgraph = graph.nx_graph.subgraph(covered_genes)\n # subgraph_genes = list(subgraph.nodes)\n\n # Get Adjacency Matrix of the subgraph\n adj_matrix = torch.Tensor(np.array(nx.adjacency_matrix(subgraph, nodelist=covered_genes).todense()))\n adj_matrix += 
torch.eye(adj_matrix.shape[0]) # add diagonal\n if torch.cuda.is_available():\n adj_matrix = adj_matrix.cuda()\n\n else:\n print(\"Training without graph\")\n graph_name = \"none\"\n adj_matrix = None\n\n # Get matrix with the columns in the same order as adjacency matrix\n X = pd.DataFrame(task._samples, columns=task.gene_ids)[covered_genes].to_numpy()\n y = np.array(task._labels)\n\n return graph_name, X, y, adj_matrix\n\n########################################################################################################################\n# Training pipeline\n########################################################################################################################\n\n\ndef train(savedir=\"results/prediction_pipeline_losses6\", fold=0,\n plot=False):\n train_loss_list = []\n test_loss_list = []\n test_pred_list = []\n cpt = 0\n\n for epoch in range(int(epochs)):\n print(\"\\t\\tEpoch\", epoch, \"over\", epochs)\n # train\n for i, (data, label) in enumerate(train_dataloader):\n optimizer.zero_grad()\n if torch.cuda.is_available():\n data = data.cuda()\n label = label.cuda()\n outputs = model(data)\n loss = criterion(outputs[:, 0], label)\n loss.backward()\n train_loss_list.append((cpt, loss.item()))\n cpt += 1\n optimizer.step()\n\n # test\n for i, (data, label) in enumerate(test_dataloader):\n # Only one batch\n optimizer.zero_grad()\n if torch.cuda.is_available():\n data = data.cuda()\n label = label.cuda()\n outputs = model(data)\n loss = criterion(outputs[:, 0], label)\n test_loss_list.append((cpt, loss.item()))\n label_pred = outputs.detach().cpu()\n test_pred_list.append((cpt, label.cpu(), label_pred.numpy()))\n # print(\"\\t\\t\", np.bincount(np.array(label_pred)[:, 0]))\n # test_acc_list.append((cpt, (accuracy_score(label.cpu(), label_pred))))\n\n train_loss_list = np.array(train_loss_list)\n test_loss_list = np.array(test_loss_list)\n # Save\n np.save(os.path.join(savedir, \"train_loss_list_\" + graph_name + \"_\" + str(fold)), 
train_loss_list)\n np.save(os.path.join(savedir, \"test_loss_list_\" + graph_name + \"_\" + str(fold)), test_loss_list)\n np.save(os.path.join(savedir, \"test_pred_list_\" + graph_name + \"_\" + str(fold)), test_pred_list)\n\n # Plot\n if plot:\n plt.ylim(0, 2)\n plt.plot(train_loss_list[:, 0], train_loss_list[:, 1], label=\"train\")\n plt.plot(test_loss_list[:, 0], test_loss_list[:, 1], label=\"test\")\n plt.legend()\n plt.show()\n\n########################################################################################################################\n# Main\n########################################################################################################################\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Arguments for prediction pipeline')\n parser.add_argument('--learning_rate', type=float, default=0.000001)\n parser.add_argument('--savedir', default=\"results/prediction_pipeline_losses_test\")\n\n args = parser.parse_args()\n\n batch_size = 32\n epochs = 100\n learning_rate = args.learning_rate\n savedir = args.savedir\n\n print(\"learning rate\", learning_rate, \"saving in\", savedir)\n\n ####################################################################################################################\n # Evaluate simple classification pipeline on a specific task\n ####################################################################################################################\n\n task = meta_dataloader.TCGA.TCGATask(('PAM50Call_RNAseq', 'BRCA'))\n covered_genes = np.load(\"data/covered_genes.npy\")\n\n ####################################################################################################################\n # List of graphs\n ####################################################################################################################\n\n graph_initializer_list = [StringDBGraph, HetIOGraph, FunCoupGraph, HumanNetV2Graph, GeneManiaGraph] # RegNetGraph]\n graph_names_list = 
[\"stringdb\", \"hetio\", \"funcoup\", \"humannet\", \"genemania\", \"landmark\", None] # \"regnet\"\n\n # graph_index = 0 # Chose a graph in the list ny its index\n\n for graph_index in range(7):\n\n ################################################################################################################\n # Get data\n ################################################################################################################\n\n graph_name, X, y, M = getdata(graph_index, covered_genes=covered_genes)\n\n print(\"data of shape\", X.shape)\n\n # import pdb\n # pdb.set_trace()\n\n ################################################################################################################\n # Prepare data + train test split\n ################################################################################################################\n\n # Turn it into a binary classification (all against type 2)\n y = np.array([int(i == 2) for i in y])\n\n # # Usual train test split\n # X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X,\n # y,\n # stratify=y,\n # train_size=0.8,\n # shuffle=True,\n # random_state=0)\n\n # Cross validation\n skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)\n fold = 0\n\n for train_index, test_index in skf.split(X, y):\n fold += 1\n print(\"\\tComputing fold\", fold)\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n model = MaskedNetwork(X.shape[1], X.shape[1], M)\n if torch.cuda.is_available():\n model = model.cuda()\n\n # Loss\n criterion = torch.nn.BCEWithLogitsLoss()\n\n # optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n train_set = TensorDataset(Tensor(X_train), Tensor(y_train))\n test_set = TensorDataset(Tensor(X_test), Tensor(y_test))\n\n train_dataloader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)\n 
test_dataloader = torch.utils.data.DataLoader(test_set, batch_size=len(test_set), shuffle=True)\n\n ############################################################################################################\n # Train\n ############################################################################################################\n\n train(savedir=savedir,\n fold=fold, plot=False)\n # /Users/paul/Desktop/user1/PycharmProjects/\n # /network/home/bertinpa/Documents/\n" ]
[ [ "torch.nn.Linear", "numpy.array", "sklearn.model_selection.StratifiedKFold", "torch.nn.init.uniform_", "pandas.DataFrame", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "numpy.load", "matplotlib.pyplot.legend", "torch.nn.ReLU", "torch.nn.init._calculate_fan_in_and_fan_out", "torch.nn.functional.linear", "torch.cuda.is_available", "torch.eye", "torch.utils.data.DataLoader", "torch.nn.BCEWithLogitsLoss", "matplotlib.pyplot.show", "torch.Tensor" ] ]
ZymHedy/CV_course
[ "989b9ddad1de83ad8108229bc7c5bef408a0e586" ]
[ "week4/logistic.py" ]
[ "import numpy as np\nfrom numpy.linalg import cholesky\nimport matplotlib.pyplot as plt\n\n\n# Make dataset\n\n# ground truth label: 0 or 1\n# predict probs: (0, 1)\n# logistic loss\n\ndef gen_sample_data():\n sampleNo = 1000\n mu = np.array([[1, 5]])\n sigma = np.array([[2, 0], [0, 3]])\n R = cholesky(sigma)\n s = np.dot(np.random.randn(sampleNo, 2), R) + mu\n x1 = np.hstack((s, np.ones((sampleNo, 1))))\n # plt.plot(s[:, 0], s[:, 1], \"+\")\n\n mu = np.array([[6, 0]])\n sigma = np.array([[2, 1], [1, 2]])\n R = cholesky(sigma)\n s = np.dot(np.random.randn(sampleNo, 2), R) + mu\n x2 = np.hstack((s, np.zeros((sampleNo, 1))))\n # plt.plot(s[:, 0], s[:, 1], \"x\")\n # plt.show()\n\n X = np.vstack((x1, x2))\n return X\n\n\n# inference from data to probs\ndef sigmoid(w1, w2, b, x):\n pred_y = 1 / (1 + np.exp(-(w1 * x[:, 0] +\n w2 * x[:, 1] + b)))\n return pred_y\n\n\n# cost function\ndef eval_loss(w1, w2, b, x, y):\n loss = -(y * np.log(sigmoid(w1, w2, b, x)) + \\\n (1 - y) * np.log(1 - sigmoid(w1, w2, b, x)))\n return np.mean(loss)\n\n\n# single sample's gradient\ndef gradient(pred_y, y, x):\n diff = pred_y - y\n dw1 = diff * x[:, 0]\n dw2 = diff * x[:, 1]\n db = diff\n return dw1, dw2, db\n\n\n# update w,b\ndef cal_step_gradient(batch_x, batch_y, w1, w2, b, lr):\n pred_y = sigmoid(w1, w2, b, batch_x)\n dw1, dw2, db = gradient(pred_y, batch_y, batch_x)\n w1 -= lr * np.mean(dw1)\n w2 -= lr * np.mean(dw2)\n b -= lr * np.mean(db)\n return w1, w2, b\n\n\ndef train(x, batch_size, lr, max_iter):\n w1 = w2 = b = 0\n x_axe = np.linspace(np.min(x[:, 0]), np.max(x[:, 0]), 1000)\n plt.ion()\n fig, ax = plt.subplots()\n for i in range(max_iter):\n batch_idxs = np.random.choice(len(x), batch_size, False)\n batch_x = np.array([x[j][:2] for j in batch_idxs])\n batch_y = np.array([x[j][2] for j in batch_idxs])\n w1, w2, b = cal_step_gradient(batch_x, batch_y, w1, w2, b, lr)\n print(f\"w1:{w1}, w2:{w2}, b:{b}\")\n print(f\"loss: {eval_loss(w1, w2, b, batch_x, batch_y)}\")\n\n 
plt.xlim(np.min(x[:, 0]) * 1.1, np.max(x[:, 0]) * 1.1)\n plt.ylim(np.min(x[:, 1]) * 1.1, np.max(x[:, 1]) * 1.1)\n plt.scatter(x[:, 0], x[:, 1], c=x[:, 2])\n # y_axe*w2 + x_axe*w1 +b = 0\n # Construct line with predict params w1, w2, b\n y_axe = (-b - x_axe * w1) / w2\n plt.plot(x_axe, y_axe, linewidth=2)\n plt.title(f\"LOGISTIC REGRESSION ITER: {i+1}\")\n plt.pause(0.5)\n if i != max_iter - 1:\n ax.cla()\n plt.ioff()\n plt.show()\n\n return w1, w2, b\n\n\nX = gen_sample_data()\ntrain(X, 100, 0.1, 50)\n" ]
[ [ "numpy.max", "numpy.array", "matplotlib.pyplot.ion", "numpy.zeros", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "numpy.min", "numpy.mean", "matplotlib.pyplot.subplots", "numpy.random.randn", "numpy.ones", "numpy.exp", "matplotlib.pyplot.show", "matplotlib.pyplot.scatter", "matplotlib.pyplot.pause", "numpy.linalg.cholesky", "matplotlib.pyplot.ioff", "numpy.vstack" ] ]
3neutronstar/ml_benchmark
[ "b5c689109ddffc75b206a8152d06c27d7e315c5c" ]
[ "Model/vgg.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as f\nimport torch.optim as optim\n\ncfg = {\n 'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\nclass VGG(nn.Module):\n def __init__(self, configs):\n super(VGG, self).__init__()\n final_out=configs['num_classes']\n self.features = self._make_layers(cfg[configs['model']])\n self.classifier = nn.Sequential(nn.Linear(7*7*512, 4096),\n nn.ReLU(inplace=True),\n nn.Linear(4096, 4096),\n nn.ReLU(inplace=True),\n nn.Linear(4096, final_out),\n )\n self.optim = optim.SGD(params=self.parameters(),\n momentum=configs['momentum'], lr=configs['lr'], nesterov=configs['nesterov'],weight_decay=configs['weight_decay'])\n self.loss=nn.CrossEntropyLoss()\n self.scheduler = optim.lr_scheduler.MultiStepLR(optimizer=self.optim, milestones=[\n 150, 225], gamma=0.1)\n\n #basic configs\n self.input_channels=3\n\n vgg_name=configs['model']\n\n def forward(self, x):\n out = self.features(x)\n out = out.view(out.size(0), -1)\n out = self.classifier(out)\n return out\n\n def _make_layers(self, cfg):\n layers = []\n in_channels = 3\n for x in cfg:\n if x == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),\n nn.BatchNorm2d(x),\n nn.ReLU(inplace=True)]\n in_channels = x\n layers += [nn.AdaptiveAvgPool2d(output_size=(7, 7))]\n\n return nn.Sequential(*layers)\n\ndef test():\n net = VGG('VGG11')\n x = torch.randn(2, 3, 32, 32)\n y = net(x)\n print(y.size())\n\n\n" ]
[ [ "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.BatchNorm2d", "torch.optim.lr_scheduler.MultiStepLR", "torch.nn.ReLU", "torch.randn", "torch.nn.Conv2d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.CrossEntropyLoss" ] ]
RyanC1681/RCAI1122
[ "c9683110b58c255a7a78d880ff73df7ff2329405" ]
[ "runner_competition_1126.py" ]
[ "import logging, warnings\nimport numpy as np\nimport time\nfrom ROAR_Sim.configurations.configuration import Configuration as CarlaConfig\nfrom ROAR.configurations.configuration import Configuration as AgentConfig\nfrom pathlib import Path\nfrom ROAR.agent_module.pure_pursuit_agent \\\n import PurePursuitAgent\nfrom ROAR_Sim.carla_client.carla_runner import CarlaRunner\nfrom typing import Tuple\nfrom prettytable import PrettyTable\n\n\n\n# old def compute_score(carla_runner: CarlaRunner, min_bounding_box = np.array([0,-2,30]), max_bounding_box = np.array([60,2,60])) -> Tuple[float, int, bool]:\n\nfrom ROAR.agent_module.pid_agent import PIDAgent\n\nfrom pit_stop import PitStop as PitStop\n\n\n#def compute_score(carla_runner: CarlaRunner) -> Tuple[float, int, int]:\n#def compute_score(carla_runner: CarlaRunner, min_bounding_box=np.array([5, -5, 0]),\n# max_bounding_box=np.array([13, 5, 50])) -> Tuple[float, int, int]:\ndef compute_score(carla_runner: CarlaRunner, min_bounding_box=np.array([-815, 20, -760]),\n max_bounding_box=np.array([-770, 120, -600])) -> Tuple[float, int, int]:\n \"\"\"\n Calculates the score of the vehicle upon completion of the track based on certain metrics\n Args:\n carla_runner ():\n min_bounding_box ():\n max_bounding_box ():\n\n Returns:\n time_elapsed:\n num_collision: number of collisions during simulation\n laps_completed: Number of laps completed\n\n \"\"\"\n time_elapsed: float = carla_runner.end_simulation_time - carla_runner.start_simulation_time\n num_collision: int = carla_runner.agent_collision_counter\n #laps_completed = 0 if carla_runner.completed_lap_count < 0 else carla_runner.completed_lap_count\n laps_completed = min(0, carla_runner.completed_lap_count)\n return time_elapsed, num_collision, laps_completed\n\n#def run(agent_class, agent_config_file_path: Path, carla_config_file_path: Path, num_laps: int = 10) -> Tuple[\n# float, int, bool]:\n\ndef run(agent_class, agent_config_file_path: Path, carla_config_file_path: 
Path,\n num_laps: int = 10) -> Tuple[float, int, int]:\n \"\"\"\n Run the agent along the track and produce a score based on certain metrics\n Args:\n num_laps: int number of laps that the agent should run\n agent_class: the participant's agent\n agent_config_file_path: agent configuration path\n carla_config_file_path: carla configuration path\n Returns:\n float between 0 - 1 representing scores\n \"\"\"\n\n agent_config: AgentConfig = AgentConfig.parse_file(agent_config_file_path)\n carla_config = CarlaConfig.parse_file(carla_config_file_path)\n #run_sim\n\t#\"\"\"\n #Pit Stop:\n # Use different kinds of 'set' functions at PitStop to tune/fix your own car!\n #\"\"\"\n pitstop = PitStop(carla_config, agent_config)\n\t#pitstop: object = PitStop(carla_config, agent_config)\n pitstop.set_carla_version(version = \"0.9.10\")\n pitstop.set_carla_sync_mode(True)\n pitstop.set_autopilot_mode(True)\n #pitstop.set_car_color(CarlaCarColor(r = 0,g = 0,b = 255,a = 255))\n pitstop.set_num_laps(num=10)\n pitstop.set_output_data_folder_path(\"./data/output\")\n pitstop.set_output_data_file_name(time.strftime(\"%Y%m%d-%H%M%S-\") + \"map-waypoints\")\n pitstop.set_max_speed(speed = 200)\n pitstop.set_target_speed(speed = 120)\n print(agent_config.target_speed, \" target speed\")\n #print(agent_config. 
, \" target speed\")\n #print(pitstop)\n pitstop.set_steering_boundary(boundary = (-1.0, 1.0))\n pitstop.set_throttle_boundary(boundary = (0, 1))\n\n pitstop.set_waypoints_look_ahead_values(values={\n \"60\": 5,\n \"80\": 10,\n \"100\": 20,\n \"120\": 50,\n \"150\": 55})\n pid_value = {\n \"longitudinal_controller\": {\n \"40\": {\n \"Kp\": 0.8,\n \"Kd\": 0.2,\n \"Ki\": 0\n },\n \"60\": {\n \"Kp\": 0.7,\n \"Kd\": 0.2,\n \"Ki\": 0\n },\n \"80\": {\n \"Kp\": 0.5,\n \"Kd\": 0.15,\n \"Ki\": 0.05\n },\n \"100\": {\n \"Kp\": 0.5,\n \"Kd\": 0.1,\n \"Ki\": 0\n },\n\t\t\t\t\t\t\"120\": {\n \t\t\t\"Kp\": 0.2,\n \t\t\t\"Kd\": 0.1,\n \t\t\t\"Ki\": 0.1\n \t}\n\t\t\t\t\t},\t\n \"latitudinal_controller\": {\n\t\t\t\n \"60\": {\n \"Kp\": 0.8,\n \"Kd\": 0.1,\n \"Ki\": 0.2\n },\n \"80\": {\n \"Kp\": 0.6,\n \"Kd\": 0.2,\n \"Ki\": 0.1\n },\n \"100\": {\n \"Kp\": 0.5,\n \"Kd\": 0.2,\n \"Ki\": 0.1\n },\n \"120\": {\n \"Kp\": 0.4,\n \"Kd\": 0.2,\n \"Ki\": 0.2\n }\n }\n }\n pitstop.set_pid_values(pid_value)\n\n \"\"\"Passing configurations to Carla and Agent\"\"\"\n #carla_runner = CarlaRunner(carla_settings=carla_config, # ROAR Academy: fine\n # agent_settings=agent_config, # ROAR Academy: fine\n # npc_agent_class=PurePursuitAgent) \n # hard code agent config such that it reflect competition requirements\n agent_config.num_laps = num_laps\n carla_runner = CarlaRunner(carla_settings=carla_config,\n agent_settings=agent_config,\n npc_agent_class=PurePursuitAgent,\n competition_mode=True,\n\t\t\t\t\t\t\t start_bbox=np.array([-815, 20, -760, -770, 120, -600]),\n lap_count=num_laps)\n try:\n my_vehicle = carla_runner.set_carla_world()\n agent = agent_class(vehicle=my_vehicle, agent_settings=agent_config)\n carla_runner.start_game_loop(agent=agent, use_manual_control=False)\n return compute_score(carla_runner)\n except Exception as e:\n print(f\"something bad happened during initialization: {e}\")\n carla_runner.on_finish()\n logging.error(f\"{e}. 
Might be a good idea to restart Server\")\n return 0, 0, False\n\n\ndef suppress_warnings():\n logging.basicConfig(format='%(levelname)s - %(asctime)s - %(name)s '\n '- %(message)s',\n level=logging.INFO)\n logging.getLogger(\"matplotlib\").setLevel(logging.WARNING)\n warnings.simplefilter(\"ignore\")\n np.set_printoptions(suppress=True)\n\n\ndef main():\n suppress_warnings()\n agent_class = PIDAgent\n num_trials = 3\n total_score = 0\n num_laps = 10\n table = PrettyTable()\n table.field_names = [\"time_elapsed (sec)\", \"num_collisions\", \"laps completed\"]\n for i in range(num_trials):\n scores = run(agent_class=agent_class,\n agent_config_file_path=Path(\"./ROAR/configurations/carla/carla_agent_configuration.json\"),\n\t\t\t\t\t #agent_config_file_path=Path(\"./ROAR/configurations/carla/agent_configuration.json\"),\n carla_config_file_path=Path(\"./ROAR/configurations/configuration.json\"),\n\t\t\t\t\t #agent_config_file_path=Path(\"./ROAR_Sim/configurations/agent_configuration.json\"),\n #carla_config_file_path=Path(\"./ROAR_Sim/configurations/configuration.json\"),\n num_laps=num_laps)\n table.add_row(scores)\n print(table)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.set_printoptions", "numpy.array" ] ]
PastelDew/RGB-D-Mask-R-CNN
[ "912a488f0dbe5cb7bca5b975d08f0f1cbf098a8d" ]
[ "PDStereo/Camera/Utils.py" ]
[ "import numpy as np\nimport cv2\nimport os\n\nply_header = '''ply\nformat ascii 1.0\nelement vertex %(vert_num)d\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\nproperty uchar green\nproperty uchar blue\nend_header\n'''\n\ndef changeAllToNumpy(lst, dtype=np.float32):\n if not isinstance(lst, list):\n return lst\n return np.array([changeAllToNumpy(item) for item in lst], dtype=dtype)\n\ndef write_ply(fn, verts, colors):\n verts = verts.reshape(-1, 3)\n colors = colors.reshape(-1, 3)\n verts = np.hstack([verts, colors])\n with open(fn, 'w') as f:\n f.write(ply_header % dict(vert_num=len(verts)))\n np.savetxt(f, verts, '%f %f %f %d %d %d')\n\ndef get_disparity_map(leftFrame, rightFrame,\n window_size=5, lmbda=80000, sigma=1.2, visual_multiplier=1.0,\n min_disp=16, num_disp=96): #num_disp = 112-min_disp\n \"\"\"left_matcher = cv2.StereoSGBM_create(\n minDisparity=0,\n numDisparities=160, # max_disp has to be dividable by 16 f. E. HH 192, 256\n blockSize=5,\n P1=8 * 3 * window_size ** 2, # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely\n P2=32 * 3 * window_size ** 2,\n disp12MaxDiff=1,\n uniquenessRatio=15,\n speckleWindowSize=0,\n speckleRange=2,\n preFilterCap=63,\n mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY\n )\"\"\"\n left_matcher = cv2.StereoSGBM_create(\n minDisparity=min_disp,\n numDisparities=num_disp, # max_disp has to be dividable by 16 f. E. 
HH 192, 256\n blockSize=5,\n P1=8 * 3 * window_size ** 2, # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely\n P2=32 * 3 * window_size ** 2,\n disp12MaxDiff=10,\n uniquenessRatio=1,\n speckleWindowSize=150,\n speckleRange=2,\n preFilterCap=4,\n mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY\n )\n \"\"\"\n # disparity range is tuned for 'aloe' image pair\n window_size = 3\n min_disp = 16\n num_disp = 112-min_disp\n stereo = cv2.StereoSGBM_create(minDisparity = min_disp,\n numDisparities = num_disp,\n blockSize = 16,\n P1 = 8*3*window_size**2,\n P2 = 32*3*window_size**2,\n disp12MaxDiff = 1,\n uniquenessRatio = 10,\n speckleWindowSize = 100,\n speckleRange = 32\n )\n\n disp = stereo.compute(leftFrame, rightFrame).astype(np.float32) / 16.0\n \"\"\"\n \n right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)\n\n wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)\n wls_filter.setLambda(lmbda)\n wls_filter.setSigmaColor(sigma)\n\n displ = left_matcher.compute(leftFrame, rightFrame)\n dispr = right_matcher.compute(rightFrame, leftFrame)\n displ = np.int16(displ)\n dispr = np.int16(dispr)\n filteredImg = wls_filter.filter(displ, leftFrame, None, dispr)\n filteredImg = cv2.normalize(src=filteredImg, dst=filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX)\n filteredImg = np.uint8(filteredImg)\n #visible_filteredImg = filteredImg\n #visible_filteredImg = cv2.normalize(src=filteredImg, dst=visible_filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX)\n #visible_filteredImg = np.uint8(visible_filteredImg)\n #cv2.imshow('Disparity Map', visible_filteredImg)\n return filteredImg #, visible_filteredImg\n #cv2.imshow('Disparity Map', np.uint8(cv2.normalize(src=filteredImg, dst=filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX)))\n \ndef savePointClound(filename, leftFrame, disparity, Q, imgSize):\n points_3D = cv2.reprojectImageTo3D(disparity, Q)\n colors = 
cv2.cvtColor(leftFrame, cv2.COLOR_BGR2RGB)\n mask = disparity > disparity.min()\n out_points = points_3D[mask]\n out_colors = colors[mask]\n\n out_fn = filename + '.ply'\n write_ply(out_fn, out_points, out_colors)\n\ndef saveRGBD(dir, filename, leftFrame, disparity):\n if not os.path.exists(dir):\n os.mkdir(dir)\n rgbd = np.dstack((leftFrame, disparity))\n cv2.imwrite('{}/color-{}.png'.format(dir, filename), leftFrame)\n cv2.imwrite('{}/depth-{}.png'.format(dir, filename), disparity)\n cv2.imwrite('{}/merged-{}.png'.format(dir, filename), rgbd)\n return rgbd" ]
[ [ "numpy.uint8", "numpy.savetxt", "numpy.dstack", "numpy.hstack", "numpy.int16" ] ]
TheEagerLearner/hacktoberfest2021-2
[ "6ac33dfdf34c97d07a303d855ce09769f6b448f4" ]
[ "Python/Linear Regression in Python/Random_Forest_Regression.py" ]
[ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndataset=pd.read_csv(\"Position_Salaries.csv\")\nX=dataset.iloc[:,1:-1].values\nY=dataset.iloc[:,-1].values\n\nfrom sklearn.ensemble import RandomForestRegressor\n\n\nregressor=RandomForestRegressor(n_estimators=10,random_state=0)\nregressor.fit(X,Y)\n\nX_grid=np.arange(min(X),max(X),0.1)\nX_grid=X_grid.reshape(len(X_grid),1)\n\nplt.scatter(X,Y,color=\"red\")\nplt.plot(X_grid,regressor.predict(X_grid))\nplt.title(\"Random Forest Regression\")\nplt.show()" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.show", "sklearn.ensemble.RandomForestRegressor", "matplotlib.pyplot.scatter", "pandas.read_csv" ] ]
ecomodeller/pytides
[ "78aa785d70cea5bf8c47e23d5d3e82ed4e028af6" ]
[ "pytides/astro.py" ]
[ "from collections import namedtuple\nimport numpy as np\n\n\n# Most of this is based around Meeus's Astronomical Algorithms, since it\n# presents reasonably good approximations of all the quantities we require in a\n# clear fashion. Reluctant to go all out and use VSOP87 unless it can be shown\n# to make a significant difference to the resulting accuracy of harmonic\n# analysis.\n\n# Convert a sexagesimal angle into decimal degrees\ndef s2d(degrees, arcmins = 0, arcsecs = 0, mas = 0, muas = 0):\n\treturn (\n\t\t\tdegrees\n\t\t\t+ (arcmins / 60.0)\n\t\t\t+ (arcsecs / (60.0*60.0))\n\t\t\t+ (mas\t / (60.0*60.0*1e3))\n\t\t\t+ (muas / (60.0*60.0*1e6))\n\t)\n\n# Evaluate a polynomial at argument\ndef polynomial(coefficients, argument):\n\treturn sum([c * (argument ** i) for i,c in enumerate(coefficients)])\n\n# Evaluate the first derivative of a polynomial at argument\ndef d_polynomial(coefficients, argument):\n\treturn sum([c * i * (argument ** (i-1)) for i,c in enumerate(coefficients)])\n\n# Meeus formula 11.1\ndef T(t):\n\treturn (JD(t) - 2451545.0)/36525\n\n# Meeus formula 7.1\ndef JD(t):\n\tY, M = t.year, t.month\n\tD = (\n\t\tt.day\n\t\t+ t.hour / (24.0)\n\t\t+ t.minute / (24.0*60.0)\n\t\t+ t.second / (24.0*60.0*60.0)\n\t\t+ t.microsecond / (24.0 * 60.0 * 60.0 * 1e6)\n\t)\n\tif M <= 2:\n\t\tY = Y - 1\n\t\tM = M + 12\n\tA = np.floor(Y / 100.0)\n\tB = 2 - A + np.floor(A / 4.0)\n\treturn np.floor(365.25*(Y+4716)) + np.floor(30.6001*(M+1)) + D + B - 1524.5\n\n# Meeus formula 21.3\nterrestrial_obliquity_coefficients = (\n\ts2d(23,26,21.448),\n\t-s2d(0,0,4680.93),\n\t-s2d(0,0,1.55),\n\ts2d(0,0,1999.25),\n\t-s2d(0,0,51.38),\n\t-s2d(0,0,249.67),\n\t-s2d(0,0,39.05),\n\ts2d(0,0,7.12),\n\ts2d(0,0,27.87),\n\ts2d(0,0,5.79),\n\ts2d(0,0,2.45)\n)\n\n# Adjust these coefficients for parameter T rather than U\nterrestrial_obliquity_coefficients = [\n\tc * (1e-2) ** i for i,c in enumerate(terrestrial_obliquity_coefficients)\n]\n\n# Not entirely sure about this interpretation, but 
this is the difference\n# between Meeus formulae 24.2 and 24.3 and seems to work\nsolar_perigee_coefficients = (\n\t280.46645 - 357.52910,\n\t36000.76932 - 35999.05030,\n\t0.0003032 + 0.0001559,\n\t0.00000048\n)\n\n# Meeus formula 24.2\nsolar_longitude_coefficients = (\n\t280.46645,\n\t36000.76983,\n\t0.0003032\n)\n\n# This value is taken from JPL Horizon and is essentially constant\nlunar_inclination_coefficients = (\n\t5.145,\n)\n\n# Meeus formula 45.1\nlunar_longitude_coefficients = (\n\t218.3164591,\n\t481267.88134236,\n\t-0.0013268,\n\t1/538841.0\n\t-1/65194000.0\n)\n\n# Meeus formula 45.7\nlunar_node_coefficients = (\n\t125.0445550,\n\t-1934.1361849,\n\t0.0020762,\n\t1/467410.0,\n\t-1/60616000.0\n)\n\n# Meeus, unnumbered formula directly preceded by 45.7\nlunar_perigee_coefficients = (\n\t83.3532430,\n\t4069.0137111,\n\t-0.0103238,\n\t-1/80053.0,\n\t1/18999000.0\n)\n\n# Now follow some useful auxiliary values, we won't need their speed.\n# See notes on Table 6 in Schureman for I, nu, xi, nu', 2nu''\ndef _I(N, i, omega):\n\tN, i, omega = np.radians(N), np.radians(i), np.radians(omega)\n\tcosI = np.cos(i)*np.cos(omega)-np.sin(i)*np.sin(omega)*np.cos(N)\n\treturn np.degrees(np.arccos(cosI))\n\ndef _xi(N, i, omega):\n\tN, i, omega = np.radians(N), np.radians(i), np.radians(omega)\n\te1 = np.cos(0.5*(omega-i))/np.cos(0.5*(omega+i)) * np.tan(0.5*N)\n\te2 = np.sin(0.5*(omega-i))/np.sin(0.5*(omega+i)) * np.tan(0.5*N)\n\te1, e2 = np.arctan(e1), np.arctan(e2)\n\te1, e2 = e1 - 0.5*N, e2 - 0.5*N\n\treturn np.degrees(-(e1 + e2))\n\ndef _nu(N, i, omega):\n\tN, i, omega = np.radians(N), np.radians(i), np.radians(omega)\n\te1 = np.cos(0.5*(omega-i))/np.cos(0.5*(omega+i)) * np.tan(0.5*N)\n\te2 = np.sin(0.5*(omega-i))/np.sin(0.5*(omega+i)) * np.tan(0.5*N)\n\te1, e2 = np.arctan(e1), np.arctan(e2)\n\te1, e2 = e1 - 0.5*N, e2 - 0.5*N\n\treturn np.degrees((e1 - e2))\n\n# Schureman equation 224\n# Can we be more precise than B \"the solar coefficient\" = 0.1681?\ndef _nup(N, i, 
omega):\n\tI = np.radians(_I(N, i, omega))\n\tnu = np.radians(_nu(N, i, omega))\n\treturn np.degrees(np.arctan(np.sin(2*I)*np.sin(nu)/(np.sin(2*I)*np.cos(nu)+0.3347)))\n\n# Schureman equation 232\ndef _nupp(N, i, omega):\n\tI = np.radians(_I(N, i, omega))\n\tnu = np.radians(_nu(N, i, omega))\n\ttan2nupp = (np.sin(I)**2*np.sin(2*nu))/(np.sin(I)**2*np.cos(2*nu)+0.0727)\n\treturn np.degrees(0.5 * np.arctan(tan2nupp))\n\nAstronomicalParameter = namedtuple('AstronomicalParameter', ['value', 'speed'])\n\ndef astro(t):\n\ta = {}\n\t# We can use polynomial fits from Meeus to obtain good approximations to\n\t# some astronomical values (and therefore speeds).\n\tpolynomials = {\n\t\t\t's': lunar_longitude_coefficients,\n\t\t\t'h': solar_longitude_coefficients,\n\t\t\t'p': lunar_perigee_coefficients,\n\t\t\t'N': lunar_node_coefficients,\n\t\t\t'pp': solar_perigee_coefficients,\n\t\t\t'90': (90.0,),\n\t\t\t'omega': terrestrial_obliquity_coefficients,\n\t\t\t'i': lunar_inclination_coefficients\n\t}\n\t# Polynomials are in T, that is Julian Centuries; we want our speeds to be\n\t# in the more convenient unit of degrees per hour.\n\tdT_dHour = 1 / (24 * 365.25 * 100)\n\tfor name, coefficients in list(polynomials.items()):\n\t\ta[name] = AstronomicalParameter(\n\t\t\t\tnp.mod(polynomial(coefficients, T(t)), 360.0),\n\t\t\t\td_polynomial(coefficients, T(t)) * dT_dHour\n\t\t)\n\n\t# Some other parameters defined by Schureman which are dependent on the\n\t# parameters N, i, omega for use in node factor calculations. 
We don't need\n\t# their speeds.\n\targs = list(each.value for each in [a['N'], a['i'], a['omega']])\n\tfor name, function in list({\n\t\t'I': _I,\n\t\t'xi': _xi,\n\t\t'nu': _nu,\n\t\t'nup': _nup,\n\t\t'nupp': _nupp\n\t}.items()):\n\t\ta[name] = AstronomicalParameter(np.mod(function(*args), 360.0), None)\n\n\t# We don't work directly with the T (hours) parameter, instead our spanning\n\t# set for equilibrium arguments # is given by T+h-s, s, h, p, N, pp, 90.\n\t# This is in line with convention.\n\thour = AstronomicalParameter((JD(t) - np.floor(JD(t))) * 360.0, 15.0)\n\ta['T+h-s'] = AstronomicalParameter(\n\t\thour.value + a['h'].value - a['s'].value,\n\t\thour.speed + a['h'].speed - a['s'].speed\n\t)\n\t# It is convenient to calculate Schureman's P here since several node\n\t# factors need it, although it could be argued that these\n\t# (along with I, xi, nu etc) belong somewhere else.\n\ta['P'] = AstronomicalParameter(\n\t\tnp.mod(a['p'].value -a['xi'].value,360.0),\n\t\tNone\n\t)\n\treturn a\n" ]
[ [ "numpy.sin", "numpy.arccos", "numpy.tan", "numpy.degrees", "numpy.radians", "numpy.arctan", "numpy.cos", "numpy.mod", "numpy.floor" ] ]
panzheyi/AutoSTG
[ "4e53146863b1a0353af810765ab25aab06b7cdf7" ]
[ "src/train.py" ]
[ "import logging\n\nlogging.basicConfig(level=logging.INFO)\n\nimport random\nimport numpy as np\nimport torch\nfrom setting import config as cfg\nfrom data.dataset import TrafficDataset\nfrom model.nas import AutoSTG\nfrom run_manager import RunManager\n\n\ndef system_init():\n \"\"\" Initialize random seed. \"\"\"\n random.seed(cfg.sys.seed)\n np.random.seed(cfg.sys.seed)\n torch.manual_seed(cfg.sys.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.set_printoptions(formatter={'float': '{: 0.3f}'.format})\n\n\ndef main(num_epoch):\n system_init()\n\n # load data\n dataset = TrafficDataset(\n path=cfg.data.path,\n train_prop=cfg.data.train_prop,\n valid_prop=cfg.data.valid_prop,\n num_sensors=cfg.data.num_sensors,\n in_length=cfg.data.in_length,\n out_length=cfg.data.out_length,\n batch_size_per_gpu=cfg.data.batch_size_per_gpu,\n num_gpus=1\n )\n\n net = AutoSTG(\n in_length=cfg.data.in_length,\n out_length=cfg.data.out_length,\n node_hiddens=[dataset.node_fts.shape[1], ] + cfg.model.node_hiddens,\n edge_hiddens=[dataset.adj_mats.shape[2], ] + cfg.model.edge_hiddens,\n in_channels=cfg.data.in_channels,\n out_channels=cfg.data.out_channels,\n hidden_channels=cfg.model.hidden_channels,\n skip_channels=cfg.model.skip_channels,\n end_channels=cfg.model.end_channels,\n layer_names=cfg.model.layer_names,\n num_mixed_ops=cfg.model.num_mixed_ops,\n candidate_op_profiles=cfg.model.candidate_op_profiles\n )\n\n run_manager = RunManager(\n name=cfg.model.name,\n net=net,\n dataset=dataset,\n\n arch_lr=cfg.trainer.arch_lr,\n arch_lr_decay_milestones=cfg.trainer.arch_lr_decay_milestones,\n arch_lr_decay_ratio=cfg.trainer.arch_lr_decay_ratio,\n arch_decay=cfg.trainer.arch_decay,\n arch_clip_gradient=cfg.trainer.arch_clip_gradient,\n\n weight_lr=cfg.trainer.weight_lr,\n weight_lr_decay_milestones=[20, 40, 60, 80], # cfg.trainer.weight_lr_decay_milestones,\n weight_lr_decay_ratio=cfg.trainer.weight_lr_decay_ratio,\n 
weight_decay=cfg.trainer.weight_decay,\n weight_clip_gradient=cfg.trainer.weight_clip_gradient,\n\n num_search_iterations=cfg.trainer.num_search_iterations,\n num_search_arch_samples=cfg.trainer.num_search_arch_samples,\n num_train_iterations=cfg.trainer.num_train_iterations,\n\n criterion=cfg.trainer.criterion,\n metric_names=cfg.trainer.metric_names,\n metric_indexes=cfg.trainer.metric_indexes,\n print_frequency=cfg.trainer.print_frequency,\n\n device_ids=[0]\n )\n\n run_manager.load(mode='search')\n run_manager.clear_records()\n run_manager.initialize()\n print('# of params', run_manager._net.num_weight_parameters())\n run_manager.train(num_epoch)\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', type=str)\n parser.add_argument('--epoch', type=int)\n args = parser.parse_args()\n\n cfg.load_config(args.config)\n main(args.epoch)\n" ]
[ [ "numpy.random.seed", "numpy.set_printoptions", "torch.manual_seed" ] ]
JasonJerome/Pointer-meter-reading-algorithm-by-Python
[ "3cb2b260301875e4b4bb07dc8d7a86d8affc88a5" ]
[ "train.py" ]
[ "\"\"\"\r\nRetrain the YOLO model for your own dataset.\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport keras.backend as K\r\nfrom keras.layers import Input, Lambda\r\nfrom keras.models import Model\r\nfrom keras.optimizers import Adam\r\nfrom keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\r\n\r\nfrom yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss\r\nfrom yolo3.utils import get_random_data\r\n\r\n\r\ndef _main():\r\n annotation_path = 'train.txt'\r\n log_dir = 'logs/000/'\r\n classes_path = 'model_data/voc_classes.txt'\r\n anchors_path = 'model_data/yolo_anchors.txt'\r\n class_names = get_classes(classes_path)\r\n num_classes = len(class_names)\r\n anchors = get_anchors(anchors_path)\r\n\r\n input_shape = (416,416) # multiple of 32, hw\r\n\r\n is_tiny_version = len(anchors)==6 # default setting\r\n if is_tiny_version:\r\n model = create_tiny_model(input_shape, anchors, num_classes,\r\n freeze_body=2, weights_path='logs/000/trained_weights_final.h5')\r\n else:\r\n model = create_model(input_shape, anchors, num_classes,\r\n freeze_body=2, weights_path='logs/000/trained_weights_final.h5') # make sure you know what you freeze\r\n\r\n logging = TensorBoard(log_dir=log_dir)\r\n checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',\r\n monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)\r\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)\r\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)\r\n\r\n val_split = 0.1\r\n with open(annotation_path) as f:\r\n lines = f.readlines()\r\n np.random.seed(10101)\r\n np.random.shuffle(lines)\r\n np.random.seed(None)\r\n num_val = int(len(lines)*val_split)\r\n num_train = len(lines) - num_val\r\n\r\n # Train with frozen layers first, to get a stable loss.\r\n # Adjust num epochs to your dataset. 
This step is enough to obtain a not bad model.\r\n if True:\r\n model.compile(optimizer=Adam(lr=1e-6), loss={\r\n # use custom yolo_loss Lambda layer.\r\n 'yolo_loss': lambda y_true, y_pred: y_pred})\r\n\r\n batch_size = 4\r\n print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))\r\n model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),\r\n steps_per_epoch=max(1, num_train//batch_size),\r\n validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),\r\n validation_steps=max(1, num_val//batch_size),\r\n epochs=0,\r\n initial_epoch=0,\r\n callbacks=[logging, checkpoint])\r\n model.save(log_dir + 'trained_weights_stage_1.h5')\r\n\r\n # Unfreeze and continue training, to fine-tune.\r\n # Train longer if the result is not good.\r\n if True:\r\n for i in range(len(model.layers)):\r\n model.layers[i].trainable = True\r\n model.compile(optimizer=Adam(lr=1e-6), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change\r\n print('Unfreeze all of the layers.')\r\n\r\n batch_size = 4 # note that more GPU memory is required after unfreezing the body\r\n print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))\r\n model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),\r\n steps_per_epoch=max(1, num_train//batch_size),\r\n validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),\r\n validation_steps=max(1, num_val//batch_size),\r\n epochs=10,\r\n initial_epoch=0,\r\n callbacks=[logging, checkpoint, reduce_lr, early_stopping])\r\n model.save(log_dir + 'trained_weights_final.h5')\r\n\r\n # Further training if needed.\r\n\r\ndef get_classes(classes_path):\r\n '''loads the classes'''\r\n with open(classes_path) as f:\r\n class_names = f.readlines()\r\n 
class_names = [c.strip() for c in class_names]\r\n return class_names\r\n\r\ndef get_anchors(anchors_path):\r\n '''loads the anchors from a file'''\r\n with open(anchors_path) as f:\r\n anchors = f.readline()\r\n anchors = [float(x) for x in anchors.split(',')]\r\n return np.array(anchors).reshape(-1, 2)\r\n\r\n\r\ndef create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,\r\n weights_path='logs/000/trained_weights_final.h5'):\r\n '''create the training model'''\r\n K.clear_session() # get a new session\r\n image_input = Input(shape=(None, None, 3))\r\n h, w = input_shape\r\n num_anchors = len(anchors)\r\n\r\n y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \\\r\n num_anchors//3, num_classes+5)) for l in range(3)]\r\n\r\n model_body = yolo_body(image_input, num_anchors//3, num_classes)\r\n print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))\r\n\r\n if load_pretrained:\r\n model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)\r\n print('Load weights {}.'.format(weights_path))\r\n if freeze_body in [1, 2]:\r\n # Freeze darknet53 body or freeze all but 3 output layers.\r\n num = (185, len(model_body.layers)-3)[freeze_body-1]\r\n for i in range(num): model_body.layers[i].trainable = False\r\n print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))\r\n\r\n model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',\r\n arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(\r\n [*model_body.output, *y_true])\r\n model = Model([model_body.input, *y_true], model_loss)\r\n\r\n return model\r\n\r\ndef create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,\r\n weights_path='logs/000/trained_weights_final.h5'):\r\n '''create the training model, for Tiny YOLOv3'''\r\n K.clear_session() # get a new session\r\n image_input = Input(shape=(None, None, 3))\r\n h, w 
= input_shape\r\n num_anchors = len(anchors)\r\n\r\n y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \\\r\n num_anchors//2, num_classes+5)) for l in range(2)]\r\n\r\n model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)\r\n print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))\r\n\r\n if load_pretrained:\r\n model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)\r\n print('Load weights {}.'.format(weights_path))\r\n if freeze_body in [1, 2]:\r\n # Freeze the darknet body or freeze all but 2 output layers.\r\n num = (20, len(model_body.layers)-2)[freeze_body-1]\r\n for i in range(num): model_body.layers[i].trainable = False\r\n print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))\r\n\r\n model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',\r\n arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(\r\n [*model_body.output, *y_true])\r\n model = Model([model_body.input, *y_true], model_loss)\r\n\r\n return model\r\n\r\ndef data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):\r\n '''data generator for fit_generator'''\r\n n = len(annotation_lines)\r\n i = 0\r\n while True:\r\n image_data = []\r\n box_data = []\r\n for b in range(batch_size):\r\n if i==0:\r\n np.random.shuffle(annotation_lines)\r\n image, box = get_random_data(annotation_lines[i], input_shape, random=True)\r\n image_data.append(image)\r\n box_data.append(box)\r\n i = (i+1) % n\r\n image_data = np.array(image_data)\r\n box_data = np.array(box_data)\r\n y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)\r\n yield [image_data, *y_true], np.zeros(batch_size)\r\n\r\ndef data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):\r\n n = len(annotation_lines)\r\n if n==0 or batch_size<=0: return None\r\n return data_generator(annotation_lines, batch_size, 
input_shape, anchors, num_classes)\r\n\r\nif __name__ == '__main__':\r\n _main()\r\n" ]
[ [ "numpy.random.seed", "numpy.array", "numpy.zeros", "numpy.random.shuffle" ] ]
Dogacel/mmfashion
[ "e49613245c8501042edd7aeeaa8fb93e5ea13238" ]
[ "demo/test_retriever.py" ]
[ "from __future__ import division\nimport argparse\n\nimport torch\nfrom mmcv import Config\nfrom mmcv.runner import load_checkpoint\n\nfrom mmfashion.core import ClothesRetriever\nfrom mmfashion.datasets import build_dataloader, build_dataset\nfrom mmfashion.models import build_retriever\nfrom mmfashion.utils import get_img_tensor\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='MMFashion In-shop Clothes Retriever Demo')\n parser.add_argument(\n '--input',\n type=str,\n help='input image path',\n default='demo/imgs/06_1_front.jpg')\n parser.add_argument(\n '--topk', type=int, default=5, help='retrieve topk items')\n parser.add_argument(\n '--config',\n help='train config file path',\n default='configs/retriever_in_shop/global_retriever_vgg_loss_id.py')\n parser.add_argument(\n '--checkpoint',\n type=str,\n default='checkpoint/Retrieve/vgg/global/epoch_100.pth',\n help='the checkpoint file to resume from')\n parser.add_argument(\n '--use_cuda', type=bool, default=True, help='use gpu or not')\n args = parser.parse_args()\n return args\n\n\ndef _process_embeds(dataset, model, cfg, use_cuda=True):\n data_loader = build_dataloader(\n dataset,\n cfg.data.imgs_per_gpu,\n cfg.data.workers_per_gpu,\n len(cfg.gpus.test),\n dist=False,\n shuffle=False)\n\n print(cfg.data)\n embeds = []\n with torch.no_grad():\n i = 0\n print(\"Data loader size: \" + str(len(data_loader)))\n for data in data_loader:\n print(i)\n i += 1\n img = data['img']\n if use_cuda:\n img = data['img'].cuda()\n embed = model(img, landmark=data['landmark'], return_loss=False)\n embeds.append(embed)\n\n embeds = torch.cat(embeds)\n embeds = embeds.data.cpu().numpy()\n return embeds\n\n\ndef main():\n seed = 0\n\n torch.manual_seed(seed)\n args = parse_args()\n if args.use_cuda and torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed)\n\n cfg = Config.fromfile(args.config)\n\n model = build_retriever(cfg.model)\n load_checkpoint(model, args.checkpoint, 
map_location=torch.device('cuda:0'))\n print('load checkpoint from {}'.format(args.checkpoint))\n\n if args.use_cuda:\n model.cuda()\n model.eval()\n\n print('Model evaled')\n img_tensor = get_img_tensor(args.input, args.use_cuda)\n print('Image tensor got.')\n query_feat = model(img_tensor, landmark=None, return_loss=False)\n print('Query feat 1')\n query_feat = query_feat.data.cpu().numpy()\n print('Query feat 2')\n gallery_set = build_dataset(cfg.data.gallery)\n print('Gallery set')\n gallery_embeds = _process_embeds(gallery_set, model, cfg)\n print('Gallery embeds')\n retriever = ClothesRetriever(cfg.data.gallery.img_file, cfg.data_root,\n cfg.data.gallery.img_path)\n print('Retriever')\n results = retriever.show_retrieved_images(query_feat, gallery_embeds)\n print('Show retriever')\n for result in results:\n print(result)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.device", "torch.cat", "torch.cuda.manual_seed_all", "torch.no_grad", "torch.manual_seed", "torch.cuda.is_available" ] ]
gupta-abhay/pytorch-frn
[ "cc86a984fcbae61431ed41f37695e78f5e4b196e" ]
[ "frn.py" ]
[ "import torch\nimport torch.nn as nn\n\n\n__all__ = ['FilterResponseNorm1d', 'FilterResponseNorm2d',\n 'FilterResponseNorm3d']\n\n\nclass FilterResponseNormNd(nn.Module):\n \n def __init__(self, ndim, num_features, eps=1e-6,\n learnable_eps=False):\n \"\"\"\n Input Variables:\n ----------------\n ndim: An integer indicating the number of dimensions of the expected input tensor.\n num_features: An integer indicating the number of input feature dimensions.\n eps: A scalar constant or learnable variable.\n learnable_eps: A bool value indicating whether the eps is learnable.\n \"\"\"\n assert ndim in [3, 4, 5], \\\n 'FilterResponseNorm only supports 3d, 4d or 5d inputs.'\n super(FilterResponseNormNd, self).__init__()\n shape = (1, num_features) + (1, ) * (ndim - 2)\n self.eps = nn.Parameter(torch.ones(*shape) * eps)\n if not learnable_eps:\n self.eps.requires_grad_(False)\n self.gamma = nn.Parameter(torch.Tensor(*shape))\n self.beta = nn.Parameter(torch.Tensor(*shape))\n self.tau = nn.Parameter(torch.Tensor(*shape))\n self.reset_parameters()\n \n def forward(self, x):\n avg_dims = tuple(range(2, x.dim()))\n nu2 = torch.pow(x, 2).mean(dim=avg_dims, keepdim=True)\n x = x * torch.rsqrt(nu2 + torch.abs(self.eps))\n return torch.max(self.gamma * x + self.beta, self.tau)\n\n def reset_parameters(self):\n nn.init.ones_(self.gamma)\n nn.init.zeros_(self.beta)\n nn.init.zeros_(self.tau)\n\n\nclass FilterResponseNorm1d(FilterResponseNormNd):\n\n def __init__(self, num_features, eps=1e-6, learnable_eps=False):\n super(FilterResponseNorm1d, self).__init__(\n 3, num_features, eps=eps, learnable_eps=learnable_eps)\n\n\nclass FilterResponseNorm2d(FilterResponseNormNd):\n\n def __init__(self, num_features, eps=1e-6, learnable_eps=False):\n super(FilterResponseNorm2d, self).__init__(\n 4, num_features, eps=eps, learnable_eps=learnable_eps)\n\n\nclass FilterResponseNorm3d(FilterResponseNormNd):\n\n def __init__(self, num_features, eps=1e-6, learnable_eps=False):\n 
super(FilterResponseNorm3d, self).__init__(\n 5, num_features, eps=eps, learnable_eps=learnable_eps)\n" ]
[ [ "torch.max", "torch.nn.init.ones_", "torch.ones", "torch.abs", "torch.nn.init.zeros_", "torch.Tensor", "torch.pow" ] ]
cosmozhang-lab/motion-illusion-model
[ "32a5ccab920095818b220642bae491429ff71f27" ]
[ "largescale/src/neuron/neuron/neuron.py" ]
[ "# Neuron base\r\n\r\nimport numpy as np\r\nimport largescale.src.support.cl_support as clspt\r\nfrom largescale.src.support.common import CommonConfig\r\nfrom program import chain2\r\n\r\nT_EXCITATORY = 1\r\nT_INHIBITORY = 2\r\nT_EXC = T_EXCITATORY\r\nT_E = T_EXCITATORY\r\nT_INH = T_INHIBITORY\r\nT_I = T_INHIBITORY\r\n\r\nT_ON = 3\r\nT_OFF = 4\r\nT_O = T_ON\r\nT_F = T_OFF\r\n\r\nclass NeuronGroup:\r\n \"\"\"\r\n config:\r\n coor: a tuple that contains (x,y,z) coordinates, each as a np.array\r\n types: neuron types\r\n v_reset: reset voltage after spike\r\n v_thre: spike voltage threshold\r\n t_ref: refactory time\r\n \"\"\"\r\n def __init__(self, config = CommonConfig()):\r\n self.nshape = config.nshape\r\n self.nneurons = int(np.prod(self.nshape)) # Number of neurons\r\n self.shape = self.nshape\r\n self.size = self.nneurons\r\n if config.coor:\r\n coor = config.coor\r\n if len(coor) > 0: self._x = clspt.Variable( np.array(coor[0]).astype(np.float32), read_only = True )\r\n if len(coor) > 1: self._y = clspt.Variable( np.array(coor[1]).astype(np.float32), read_only = True )\r\n if len(coor) > 2: self._z = clspt.Variable( np.array(coor[2]).astype(np.float32), read_only = True )\r\n\r\n self._temps = {}\r\n \r\n self.types = clspt.Variable( np.array(config.types).astype(np.int32), read_only = True ) if (not config.types is None) else None\r\n self.v = clspt.Variable( np.zeros(self.shape).astype(np.float32) )\r\n self.v_reset = config.fetch(\"v_reset\", 0.0)\r\n self.v_thre = config.fetch(\"v_thre\", 0.0)\r\n\r\n trefs = np.zeros(self.shape).astype(np.float32) + config.fetch(\"t_ref\", 0.0)\r\n self.trefs = clspt.Variable( trefs, read_only=True )\r\n\r\n # for rk2 voltage evolving\r\n self.alpha0 = clspt.Variable( np.zeros(self.shape).astype(np.float32) )\r\n self.beta0 = clspt.Variable( np.zeros(self.shape).astype(np.float32) )\r\n self.alpha1 = clspt.Variable( np.zeros(self.shape).astype(np.float32) )\r\n self.beta1 = clspt.Variable( 
np.zeros(self.shape).astype(np.float32) )\r\n \r\n \"\"\"\r\n Recording spikes.\r\n Each neuron will spike once at most in each time bin, the probability\r\n of spiking is firing_rate*dt. So each group will spike nneurons times\r\n at most. So we use nneurons-sized buffers to record the spikes.\r\n Remarks: As we need to inspect the spikes step-by-step, we cannot\r\n accelerate this operation with OpenCL, so we do not need Variable here.\r\n \"\"\"\r\n # number of spikes in the time bin\r\n self.nspikes = 0 \r\n # Time of spikes\r\n # In iteration, this is used to record the time of spike of each neuron.\r\n # In this case the index is the neuron index.\r\n # After iteration, we rearrange this so that the spikes are arranged in\r\n # time sequence. In this case the index is the spike index.\r\n self.tspikes = np.zeros((self.nneurons,)).astype(np.float32)\r\n self.tspikes = clspt.Variable( self.tspikes )\r\n # Which neuron spiked\r\n self.ispikes = np.zeros((self.nneurons,)).astype(np.int32)\r\n self.ispikes = clspt.Variable( self.ispikes )\r\n\r\n def __getattr__(self, name):\r\n if name[0:4] == \"temp\":\r\n idx = name[4:]\r\n if not idx in self._temps: self._temps[idx] = clspt.Variable( shape=self.shape, dtype=np.float32 )\r\n return self._temps[idx]\r\n return object.__getattr__(self, name)\r\n\r\n def step(self, t, dt):\r\n pass\r\n" ]
[ [ "numpy.array", "numpy.prod", "numpy.zeros" ] ]
MartinThoma/clan
[ "f7645c7483d4476ffe7d5ee56f746e0ee392a082" ]
[ "clana/clustering.py" ]
[ "\"\"\"Everything about clustering classes of a confusion matrix.\"\"\"\n\n# Core Library\nimport logging\nimport random\nfrom typing import List, TypeVar, Union, cast\n\n# Third party\nimport numpy as np\nimport numpy.typing as npt\n\n# First party\nimport clana.utils\n\ncfg = clana.utils.load_cfg()\nlogger = logging.getLogger(__name__)\n\n\nT = TypeVar(\"T\")\n\n\ndef apply_grouping(labels: List[T], grouping: List[int]) -> List[List[T]]:\n \"\"\"\n Return list of grouped labels.\n\n Parameters\n ----------\n labels : List[T]\n grouping : List[int]\n\n Returns\n -------\n grouped_labels : List[List[T]]\n\n Examples\n --------\n >>> labels = ['de', 'en', 'fr']\n >>> grouping = [False, True]\n >>> apply_grouping(labels, grouping)\n [['de', 'en'], ['fr']]\n \"\"\"\n groups = []\n current_group = [labels[0]]\n for label, cut in zip(labels[1:], grouping):\n if cut:\n groups.append(current_group)\n current_group = [label]\n else:\n current_group.append(label)\n groups.append(current_group)\n return groups\n\n\ndef _remove_single_element_groups(hierarchy: List[List[T]]) -> List[Union[T, List[T]]]:\n \"\"\"\n Flatten sub-lists of length 1.\n\n Parameters\n ----------\n hierarchy : List[List]\n\n Returns\n -------\n hierarchy : list of el / lists\n\n Examples\n --------\n >>> hierarchy = [[0], [1, 2]]\n >>> _remove_single_element_groups(hierarchy)\n [0, [1, 2]]\n \"\"\"\n h_new: List[Union[T, List[T]]] = []\n for el in hierarchy:\n if len(el) > 1:\n h_new.append(el)\n else:\n h_new.append(el[0])\n return h_new\n\n\ndef extract_clusters(\n cm: npt.NDArray,\n labels: List[str],\n steps: int = 10**4,\n lambda_: float = 0.013,\n method: str = \"local-connectivity\",\n interactive: bool = False,\n) -> List[int]:\n \"\"\"\n Find clusters in cm.\n\n Idea:\n mininmize lambda (error between clusters) - (count of clusters)\n s.t.: Each inter-cluster accuracy has to be lower than the overall\n accuracy\n\n Parameters\n ----------\n cm : npt.NDArray\n labels : List[str]\n steps : 
int\n lambda_ : float\n The closer to 0, the more groups\n The bigger, the bigger groups\n method : {'local-connectivity', 'energy'}\n interactive : bool\n\n Returns\n -------\n clustes : List[int]\n \"\"\"\n if method == \"energy\":\n n = len(cm)\n grouping = [0 for _ in range(n - 1)]\n minimal_score = get_score(cm, grouping, lambda_)\n best_grouping = grouping[:]\n for _ in range(steps):\n pos = random.randint(0, n - 2)\n grouping = best_grouping[:]\n grouping[pos] = bool((grouping[pos] + 1) % 2)\n current_score = get_score(cm, grouping, lambda_)\n if current_score < minimal_score:\n best_grouping = grouping\n minimal_score = current_score\n logger.info(f\"Best grouping: {grouping} (score: {minimal_score})\")\n elif method == \"local-connectivity\":\n if interactive:\n thres: Union[float, int] = find_thres_interactive(cm, labels)\n else:\n thres = find_thres(cm, cfg[\"visualize\"][\"threshold\"])\n logger.info(f\"Found threshold for local connection: {thres}\")\n best_grouping = split_at_con_thres(cm, thres, labels, interactive=interactive)\n else:\n raise NotImplementedError(f\"method='{method}'\")\n logger.info(f\"Found {sum(best_grouping) + 1} clusters\")\n return best_grouping\n\n\ndef create_weight_matrix(grouping: List[int]) -> npt.NDArray:\n \"\"\"\n Create a matrix which contains the distance to the diagonal.\n\n Parameters\n ----------\n grouping : List[int]\n\n Returns\n -------\n weight_matrix : npt.NDArray\n A symmetric matrix\n \"\"\"\n n = len(grouping) + 1\n weight_matrix = np.zeros((n, n))\n for i in range(n):\n seen_1 = False\n for j in range(i + 1, n):\n if seen_1:\n weight_matrix[i][j] = 1\n elif grouping[j - 1] == 1:\n seen_1 = True\n weight_matrix[i][j] = 1\n return weight_matrix + weight_matrix.transpose()\n\n\ndef get_score(cm: npt.NDArray, grouping: List[int], lambda_: float) -> float:\n \"\"\"\n Get the score of a confusion matrix.\n\n Parameters\n ----------\n cm : npt.NDArray\n grouping : List[int]\n lambda_ : float\n\n Returns\n 
-------\n score : float\n \"\"\"\n # First party\n from clana.visualize_cm import calculate_score\n\n inter_cluster_err = 0.0\n weights = create_weight_matrix(grouping)\n inter_cluster_err = calculate_score(cm, weights)\n return lambda_ * inter_cluster_err - sum(grouping)\n\n\ndef find_thres(cm: npt.NDArray, percentage: float) -> float:\n \"\"\"\n Find a threshold for grouping.\n\n Parameters\n ----------\n cm : npt.NDArray\n percentage : float\n Probability that two neighboring classes belong togehter\n\n Returns\n -------\n connectivity : float\n \"\"\"\n n = int(len(cm) * (1.0 - percentage)) - 1\n con = sorted(get_neighboring_connectivity(cm))\n return con[n]\n\n\ndef find_thres_interactive(cm: npt.NDArray, labels: List[str]) -> float:\n \"\"\"\n Find a threshold for grouping.\n\n The threshold is the minimum connection strength for two classes to be\n within the same cluster.\n\n Parameters\n ----------\n cm : npt.NDArray\n labels : List[str]\n\n Returns\n -------\n pos_str : float\n \"\"\"\n n = len(cm)\n con = sorted(zip(get_neighboring_connectivity(cm), zip(range(n - 1), range(1, n))))\n pos_str = None\n\n # Lowest position from which we know that they are connected\n pos_up = n - 1\n\n # Highest position from which we know that they are not connected\n neg_low = 0\n while pos_up - 1 > neg_low:\n print(f\"pos_up={pos_up}, neg_low={neg_low}, pos_str={pos_str}\")\n pos = int((pos_up + neg_low) / 2)\n con_str, (i1, i2) = con[pos]\n should_be_conn = input(\n f\"Should {labels[i1]} and {labels[i2]} be in one cluster? (y/n): \"\n )\n if should_be_conn == \"n\":\n neg_low = pos\n elif should_be_conn == \"y\":\n pos_up = pos\n pos_str = con_str\n else:\n print(f\"Please type only 'y' or 'n'. 
You typed {should_be_conn}.\")\n pos_str = cast(float, pos_str)\n return pos_str\n\n\ndef get_neighboring_connectivity(cm: npt.NDArray) -> List[float]:\n \"\"\"\n Get how strong neighboring classes are connected.\n\n Parameters\n ----------\n cm : npt.NDArray\n\n Returns\n -------\n con : List[float]\n \"\"\"\n con = []\n n = len(cm)\n for i in range(n - 1):\n con.append(cm[i][i + 1] + cm[i + 1][i])\n return con\n\n\ndef split_at_con_thres(\n cm: npt.NDArray, thres: float, labels: List[str], interactive: bool\n) -> List[int]:\n \"\"\"\n Two classes are not in the same group if they are not connected strong.\n\n Minimum connection strength is thres. The bigger this value, the more\n clusters / the smaller clusters you will get.\n \"\"\"\n con = get_neighboring_connectivity(cm)\n grouping = []\n for i, el in enumerate(con):\n if el == thres and interactive:\n should_conn = \"-\"\n while should_conn not in [\"y\", \"n\"]:\n should_conn = input(\n f\"Should {labels[i]} and {labels[i + 1]} be in one \"\n \"cluster? (y/n): \"\n )\n if should_conn == \"y\":\n grouping.append(0)\n elif should_conn == \"n\":\n grouping.append(1)\n else:\n print(\"please type either 'y' or 'n'\")\n else:\n grouping.append(el < thres)\n return grouping\n" ]
[ [ "numpy.zeros" ] ]
sotirios4/xgboost
[ "7f101d1b331caae99c0ba685bf6e426b7e68b9ad" ]
[ "python-package/xgboost/core.py" ]
[ "# coding: utf-8\n# pylint: disable=too-many-arguments, too-many-branches, invalid-name\n# pylint: disable=too-many-lines, too-many-locals\n\"\"\"Core XGBoost Library.\"\"\"\nimport collections\n# pylint: disable=no-name-in-module,import-error\nfrom collections.abc import Mapping\n# pylint: enable=no-name-in-module,import-error\nimport ctypes\nimport os\nimport re\nimport sys\nimport json\nimport warnings\n\nimport numpy as np\nimport scipy.sparse\n\nfrom .compat import (STRING_TYPES, DataFrame, py_str, PANDAS_INSTALLED,\n lazy_isinstance)\nfrom .libpath import find_lib_path\n\n# c_bst_ulong corresponds to bst_ulong defined in xgboost/c_api.h\nc_bst_ulong = ctypes.c_uint64\n\n\nclass XGBoostError(ValueError):\n \"\"\"Error thrown by xgboost trainer.\"\"\"\n\n\nclass EarlyStopException(Exception):\n \"\"\"Exception to signal early stopping.\n\n Parameters\n ----------\n best_iteration : int\n The best iteration stopped.\n \"\"\"\n\n def __init__(self, best_iteration):\n super().__init__()\n self.best_iteration = best_iteration\n\n\n# Callback environment used by callbacks\nCallbackEnv = collections.namedtuple(\n \"XGBoostCallbackEnv\",\n [\"model\",\n \"cvfolds\",\n \"iteration\",\n \"begin_iteration\",\n \"end_iteration\",\n \"rank\",\n \"evaluation_result_list\"])\n\n\ndef from_pystr_to_cstr(data):\n \"\"\"Convert a list of Python str to C pointer\n\n Parameters\n ----------\n data : list\n list of str\n \"\"\"\n\n if not isinstance(data, list):\n raise NotImplementedError\n pointers = (ctypes.c_char_p * len(data))()\n data = [bytes(d, 'utf-8') for d in data]\n pointers[:] = data\n return pointers\n\n\ndef from_cstr_to_pystr(data, length):\n \"\"\"Revert C pointer to Python str\n\n Parameters\n ----------\n data : ctypes pointer\n pointer to data\n length : ctypes pointer\n pointer to length of data\n \"\"\"\n res = []\n for i in range(length.value):\n try:\n res.append(str(data[i].decode('ascii')))\n except UnicodeDecodeError:\n 
res.append(str(data[i].decode('utf-8')))\n return res\n\n\ndef _expect(expectations, got):\n \"\"\"Translate input error into string.\n\n Parameters\n ----------\n expectations: sequence\n a list of expected value.\n got:\n actual input\n\n Returns\n -------\n msg: str\n \"\"\"\n msg = 'Expecting '\n for t in range(len(expectations) - 1):\n msg += str(expectations[t])\n msg += ' or '\n msg += str(expectations[-1])\n msg += '. Got ' + str(got)\n return msg\n\n\ndef _log_callback(msg):\n \"\"\"Redirect logs from native library into Python console\"\"\"\n print(\"{0:s}\".format(py_str(msg)))\n\n\ndef _get_log_callback_func():\n \"\"\"Wrap log_callback() method in ctypes callback type\"\"\"\n # pylint: disable=invalid-name\n CALLBACK = ctypes.CFUNCTYPE(None, ctypes.c_char_p)\n return CALLBACK(_log_callback)\n\n\ndef _load_lib():\n \"\"\"Load xgboost Library.\"\"\"\n lib_paths = find_lib_path()\n if not lib_paths:\n return None\n try:\n pathBackup = os.environ['PATH'].split(os.pathsep)\n except KeyError:\n pathBackup = []\n lib_success = False\n os_error_list = []\n for lib_path in lib_paths:\n try:\n # needed when the lib is linked with non-system-available\n # dependencies\n os.environ['PATH'] = os.pathsep.join(\n pathBackup + [os.path.dirname(lib_path)])\n lib = ctypes.cdll.LoadLibrary(lib_path)\n lib_success = True\n except OSError as e:\n os_error_list.append(str(e))\n continue\n finally:\n os.environ['PATH'] = os.pathsep.join(pathBackup)\n if not lib_success:\n libname = os.path.basename(lib_paths[0])\n raise XGBoostError(\n 'XGBoost Library ({}) could not be loaded.\\n'.format(libname) +\n 'Likely causes:\\n' +\n ' * OpenMP runtime is not installed ' +\n '(vcomp140.dll or libgomp-1.dll for Windows, libomp.dylib for Mac OSX, ' +\n 'libgomp.so for Linux and other UNIX-like OSes). 
Mac OSX users: Run ' +\n '`brew install libomp` to install OpenMP runtime.\\n' +\n ' * You are running 32-bit Python on a 64-bit OS\\n' +\n 'Error message(s): {}\\n'.format(os_error_list))\n lib.XGBGetLastError.restype = ctypes.c_char_p\n lib.callback = _get_log_callback_func()\n if lib.XGBRegisterLogCallback(lib.callback) != 0:\n raise XGBoostError(lib.XGBGetLastError())\n return lib\n\n\n# load the XGBoost library globally\n_LIB = _load_lib()\n\n\ndef _check_call(ret):\n \"\"\"Check the return value of C API call\n\n This function will raise exception when error occurs.\n Wrap every API call with this function\n\n Parameters\n ----------\n ret : int\n return value from API calls\n \"\"\"\n if ret != 0:\n raise XGBoostError(py_str(_LIB.XGBGetLastError()))\n\n\ndef ctypes2numpy(cptr, length, dtype):\n \"\"\"Convert a ctypes pointer array to a numpy array.\"\"\"\n NUMPY_TO_CTYPES_MAPPING = {\n np.float32: ctypes.c_float,\n np.uint32: ctypes.c_uint,\n }\n if dtype not in NUMPY_TO_CTYPES_MAPPING:\n raise RuntimeError('Supported types: {}'.format(\n NUMPY_TO_CTYPES_MAPPING.keys()))\n ctype = NUMPY_TO_CTYPES_MAPPING[dtype]\n if not isinstance(cptr, ctypes.POINTER(ctype)):\n raise RuntimeError('expected {} pointer'.format(ctype))\n res = np.zeros(length, dtype=dtype)\n if not ctypes.memmove(res.ctypes.data, cptr, length * res.strides[0]):\n raise RuntimeError('memmove failed')\n return res\n\n\ndef ctypes2cupy(cptr, length, dtype):\n \"\"\"Convert a ctypes pointer array to a cupy array.\"\"\"\n # pylint: disable=import-error\n import cupy\n from cupy.cuda.memory import MemoryPointer\n from cupy.cuda.memory import UnownedMemory\n CUPY_TO_CTYPES_MAPPING = {\n cupy.float32: ctypes.c_float,\n cupy.uint32: ctypes.c_uint\n }\n if dtype not in CUPY_TO_CTYPES_MAPPING.keys():\n raise RuntimeError('Supported types: {}'.format(\n CUPY_TO_CTYPES_MAPPING.keys()\n ))\n addr = ctypes.cast(cptr, ctypes.c_void_p).value\n # pylint: disable=c-extension-no-member,no-member\n device = 
cupy.cuda.runtime.pointerGetAttributes(addr).device\n # The owner field is just used to keep the memory alive with ref count. As\n # unowned's life time is scoped within this function we don't need that.\n unownd = UnownedMemory(\n addr, length.value * ctypes.sizeof(CUPY_TO_CTYPES_MAPPING[dtype]),\n owner=None)\n memptr = MemoryPointer(unownd, 0)\n # pylint: disable=unexpected-keyword-arg\n mem = cupy.ndarray((length.value, ), dtype=dtype, memptr=memptr)\n assert mem.device.id == device\n arr = cupy.array(mem, copy=True)\n return arr\n\n\ndef ctypes2buffer(cptr, length):\n \"\"\"Convert ctypes pointer to buffer type.\"\"\"\n if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):\n raise RuntimeError('expected char pointer')\n res = bytearray(length)\n rptr = (ctypes.c_char * length).from_buffer(res)\n if not ctypes.memmove(rptr, cptr, length):\n raise RuntimeError('memmove failed')\n return res\n\n\ndef c_str(string):\n \"\"\"Convert a python string to cstring.\"\"\"\n return ctypes.c_char_p(string.encode('utf-8'))\n\n\ndef c_array(ctype, values):\n \"\"\"Convert a python string to c array.\"\"\"\n if (isinstance(values, np.ndarray)\n and values.dtype.itemsize == ctypes.sizeof(ctype)):\n return (ctype * len(values)).from_buffer_copy(values)\n return (ctype * len(values))(*values)\n\n\ndef _convert_unknown_data(data, meta=None, meta_type=None):\n if meta is not None:\n try:\n data = np.array(data, dtype=meta_type)\n except Exception as e:\n raise TypeError('Can not handle data from {}'.format(\n type(data).__name__)) from e\n else:\n warnings.warn(\n 'Unknown data type: ' + str(type(data)) +\n ', coverting it to csr_matrix')\n try:\n data = scipy.sparse.csr_matrix(data)\n except Exception as e:\n raise TypeError('Can not initialize DMatrix from'\n ' {}'.format(type(data).__name__)) from e\n return data\n\n\nclass DataIter:\n '''The interface for user defined data iterator. 
Currently is only\n supported by Device DMatrix.\n\n Parameters\n ----------\n\n rows : int\n Total number of rows combining all batches.\n cols : int\n Number of columns for each batch.\n '''\n def __init__(self):\n proxy_handle = ctypes.c_void_p()\n _check_call(_LIB.XGProxyDMatrixCreate(ctypes.byref(proxy_handle)))\n self._handle = DeviceQuantileDMatrix(proxy_handle)\n self.exception = None\n\n @property\n def proxy(self):\n '''Handler of DMatrix proxy.'''\n return self._handle\n\n def reset_wrapper(self, this): # pylint: disable=unused-argument\n '''A wrapper for user defined `reset` function.'''\n self.reset()\n\n def next_wrapper(self, this): # pylint: disable=unused-argument\n '''A wrapper for user defined `next` function.\n\n `this` is not used in Python. ctypes can handle `self` of a Python\n member function automatically when converting it to c function\n pointer.\n\n '''\n if self.exception is not None:\n return 0\n\n def data_handle(data, label=None, weight=None, base_margin=None,\n group=None,\n label_lower_bound=None, label_upper_bound=None,\n feature_names=None, feature_types=None,\n feature_weights=None):\n from .data import dispatch_device_quantile_dmatrix_set_data\n from .data import _device_quantile_transform\n data, feature_names, feature_types = _device_quantile_transform(\n data, feature_names, feature_types\n )\n dispatch_device_quantile_dmatrix_set_data(self.proxy, data)\n self.proxy.set_info(label=label, weight=weight,\n base_margin=base_margin,\n group=group,\n label_lower_bound=label_lower_bound,\n label_upper_bound=label_upper_bound,\n feature_names=feature_names,\n feature_types=feature_types,\n feature_weights=feature_weights)\n try:\n # Differ the exception in order to return 0 and stop the iteration.\n # Exception inside a ctype callback function has no effect except\n # for printing to stderr (doesn't stop the execution).\n ret = self.next(data_handle) # pylint: disable=not-callable\n except Exception as e: # pylint: 
disable=broad-except\n tb = sys.exc_info()[2]\n # On dask the worker is restarted and somehow the information is\n # lost.\n self.exception = e.with_traceback(tb)\n return 0\n return ret\n\n def reset(self):\n '''Reset the data iterator. Prototype for user defined function.'''\n raise NotImplementedError()\n\n def next(self, input_data):\n '''Set the next batch of data.\n\n Parameters\n ----------\n\n data_handle: callable\n A function with same data fields like `data`, `label` with\n `xgboost.DMatrix`.\n\n Returns\n -------\n 0 if there's no more batch, otherwise 1.\n\n '''\n raise NotImplementedError()\n\n\nclass DMatrix: # pylint: disable=too-many-instance-attributes\n \"\"\"Data Matrix used in XGBoost.\n\n DMatrix is a internal data structure that used by XGBoost\n which is optimized for both memory efficiency and training speed.\n You can construct DMatrix from multiple different sources of data.\n \"\"\"\n\n def __init__(self, data, label=None, weight=None, base_margin=None,\n missing=None,\n silent=False,\n feature_names=None,\n feature_types=None,\n nthread=None,\n enable_categorical=False):\n \"\"\"Parameters\n ----------\n data : os.PathLike/string/numpy.array/scipy.sparse/pd.DataFrame/\n dt.Frame/cudf.DataFrame/cupy.array/dlpack\n Data source of DMatrix.\n When data is string or os.PathLike type, it represents the path\n libsvm format txt file, csv file (by specifying uri parameter\n 'path_to_csv?format=csv'), or binary file that xgboost can read\n from.\n label : list, numpy 1-D array or cudf.DataFrame, optional\n Label of the training data.\n missing : float, optional\n Value in the input data which needs to be present as a missing\n value. If None, defaults to np.nan.\n weight : list, numpy 1-D array or cudf.DataFrame , optional\n Weight for each instance.\n\n .. note:: For ranking task, weights are per-group.\n\n In ranking task, one weight is assigned to each group (not each\n data point). 
This is because we only care about the relative\n ordering of data points within each group, so it doesn't make\n sense to assign weights to individual data points.\n\n silent : boolean, optional\n Whether print messages during construction\n feature_names : list, optional\n Set names for features.\n feature_types : list, optional\n Set types for features.\n nthread : integer, optional\n Number of threads to use for loading data when parallelization is\n applicable. If -1, uses maximum threads available on the system.\n\n enable_categorical: boolean, optional\n\n .. versionadded:: 1.3.0\n\n Experimental support of specializing for categorical features. Do\n not set to True unless you are interested in development.\n Currently it's only available for `gpu_hist` tree method with 1 vs\n rest (one hot) categorical split. Also, JSON serialization format,\n `gpu_predictor` and pandas input are required.\n\n \"\"\"\n if isinstance(data, list):\n raise TypeError('Input data can not be a list.')\n\n self.missing = missing if missing is not None else np.nan\n self.nthread = nthread if nthread is not None else -1\n self.silent = silent\n\n # force into void_p, mac need to pass things in as void_p\n if data is None:\n self.handle = None\n return\n\n from .data import dispatch_data_backend\n handle, feature_names, feature_types = dispatch_data_backend(\n data, missing=self.missing,\n threads=self.nthread,\n feature_names=feature_names,\n feature_types=feature_types,\n enable_categorical=enable_categorical)\n assert handle is not None\n self.handle = handle\n\n self.set_info(label=label, weight=weight, base_margin=base_margin)\n\n self.feature_names = feature_names\n self.feature_types = feature_types\n\n def __del__(self):\n if hasattr(self, \"handle\") and self.handle:\n _check_call(_LIB.XGDMatrixFree(self.handle))\n self.handle = None\n\n def set_info(self,\n label=None, weight=None, base_margin=None,\n group=None,\n label_lower_bound=None,\n label_upper_bound=None,\n 
feature_names=None,\n feature_types=None,\n feature_weights=None):\n '''Set meta info for DMatrix.'''\n if label is not None:\n self.set_label(label)\n if weight is not None:\n self.set_weight(weight)\n if base_margin is not None:\n self.set_base_margin(base_margin)\n if group is not None:\n self.set_group(group)\n if label_lower_bound is not None:\n self.set_float_info('label_lower_bound', label_lower_bound)\n if label_upper_bound is not None:\n self.set_float_info('label_upper_bound', label_upper_bound)\n if feature_names is not None:\n self.feature_names = feature_names\n if feature_types is not None:\n self.feature_types = feature_types\n if feature_weights is not None:\n from .data import dispatch_meta_backend\n dispatch_meta_backend(matrix=self, data=feature_weights,\n name='feature_weights')\n\n def get_float_info(self, field):\n \"\"\"Get float property from the DMatrix.\n\n Parameters\n ----------\n field: str\n The field name of the information\n\n Returns\n -------\n info : array\n a numpy array of float information of the data\n \"\"\"\n length = c_bst_ulong()\n ret = ctypes.POINTER(ctypes.c_float)()\n _check_call(_LIB.XGDMatrixGetFloatInfo(self.handle,\n c_str(field),\n ctypes.byref(length),\n ctypes.byref(ret)))\n return ctypes2numpy(ret, length.value, np.float32)\n\n def get_uint_info(self, field):\n \"\"\"Get unsigned integer property from the DMatrix.\n\n Parameters\n ----------\n field: str\n The field name of the information\n\n Returns\n -------\n info : array\n a numpy array of unsigned integer information of the data\n \"\"\"\n length = c_bst_ulong()\n ret = ctypes.POINTER(ctypes.c_uint)()\n _check_call(_LIB.XGDMatrixGetUIntInfo(self.handle,\n c_str(field),\n ctypes.byref(length),\n ctypes.byref(ret)))\n return ctypes2numpy(ret, length.value, np.uint32)\n\n def set_float_info(self, field, data):\n \"\"\"Set float type property into the DMatrix.\n\n Parameters\n ----------\n field: str\n The field name of the information\n\n data: numpy array\n 
The array of data to be set\n \"\"\"\n from .data import dispatch_meta_backend\n dispatch_meta_backend(self, data, field, 'float')\n\n def set_float_info_npy2d(self, field, data):\n \"\"\"Set float type property into the DMatrix\n for numpy 2d array input\n\n Parameters\n ----------\n field: str\n The field name of the information\n\n data: numpy array\n The array of data to be set\n \"\"\"\n from .data import dispatch_meta_backend\n dispatch_meta_backend(self, data, field, 'float')\n\n def set_uint_info(self, field, data):\n \"\"\"Set uint type property into the DMatrix.\n\n Parameters\n ----------\n field: str\n The field name of the information\n\n data: numpy array\n The array of data to be set\n \"\"\"\n from .data import dispatch_meta_backend\n dispatch_meta_backend(self, data, field, 'uint32')\n\n def save_binary(self, fname, silent=True):\n \"\"\"Save DMatrix to an XGBoost buffer. Saved binary can be later loaded\n by providing the path to :py:func:`xgboost.DMatrix` as input.\n\n Parameters\n ----------\n fname : string or os.PathLike\n Name of the output buffer file.\n silent : bool (optional; default: True)\n If set, the output is suppressed.\n \"\"\"\n _check_call(_LIB.XGDMatrixSaveBinary(self.handle,\n c_str(os.fspath(fname)),\n ctypes.c_int(silent)))\n\n def set_label(self, label):\n \"\"\"Set label of dmatrix\n\n Parameters\n ----------\n label: array like\n The label information to be set into DMatrix\n \"\"\"\n from .data import dispatch_meta_backend\n dispatch_meta_backend(self, label, 'label', 'float')\n\n def set_weight(self, weight):\n \"\"\"Set weight of each instance.\n\n Parameters\n ----------\n weight : array like\n Weight for each data point\n\n .. note:: For ranking task, weights are per-group.\n\n In ranking task, one weight is assigned to each group (not each\n data point). 
This is because we only care about the relative\n ordering of data points within each group, so it doesn't make\n sense to assign weights to individual data points.\n\n \"\"\"\n from .data import dispatch_meta_backend\n dispatch_meta_backend(self, weight, 'weight', 'float')\n\n def set_base_margin(self, margin):\n \"\"\"Set base margin of booster to start from.\n\n This can be used to specify a prediction value of existing model to be\n base_margin However, remember margin is needed, instead of transformed\n prediction e.g. for logistic regression: need to put in value before\n logistic transformation see also example/demo.py\n\n Parameters\n ----------\n margin: array like\n Prediction margin of each datapoint\n\n \"\"\"\n from .data import dispatch_meta_backend\n dispatch_meta_backend(self, margin, 'base_margin', 'float')\n\n def set_group(self, group):\n \"\"\"Set group size of DMatrix (used for ranking).\n\n Parameters\n ----------\n group : array like\n Group size of each group\n \"\"\"\n from .data import dispatch_meta_backend\n dispatch_meta_backend(self, group, 'group', 'uint32')\n\n def get_label(self):\n \"\"\"Get the label of the DMatrix.\n\n Returns\n -------\n label : array\n \"\"\"\n return self.get_float_info('label')\n\n def get_weight(self):\n \"\"\"Get the weight of the DMatrix.\n\n Returns\n -------\n weight : array\n \"\"\"\n return self.get_float_info('weight')\n\n def get_base_margin(self):\n \"\"\"Get the base margin of the DMatrix.\n\n Returns\n -------\n base_margin : float\n \"\"\"\n return self.get_float_info('base_margin')\n\n def num_row(self):\n \"\"\"Get the number of rows in the DMatrix.\n\n Returns\n -------\n number of rows : int\n \"\"\"\n ret = c_bst_ulong()\n _check_call(_LIB.XGDMatrixNumRow(self.handle,\n ctypes.byref(ret)))\n return ret.value\n\n def num_col(self):\n \"\"\"Get the number of columns (features) in the DMatrix.\n\n Returns\n -------\n number of columns : int\n \"\"\"\n ret = c_bst_ulong()\n 
_check_call(_LIB.XGDMatrixNumCol(self.handle,\n ctypes.byref(ret)))\n return ret.value\n\n def slice(self, rindex, allow_groups=False):\n \"\"\"Slice the DMatrix and return a new DMatrix that only contains `rindex`.\n\n Parameters\n ----------\n rindex : list\n List of indices to be selected.\n allow_groups : boolean\n Allow slicing of a matrix with a groups attribute\n\n Returns\n -------\n res : DMatrix\n A new DMatrix containing only selected indices.\n \"\"\"\n res = DMatrix(None)\n res.handle = ctypes.c_void_p()\n _check_call(_LIB.XGDMatrixSliceDMatrixEx(\n self.handle,\n c_array(ctypes.c_int, rindex),\n c_bst_ulong(len(rindex)),\n ctypes.byref(res.handle),\n ctypes.c_int(1 if allow_groups else 0)))\n res.feature_names = self.feature_names\n res.feature_types = self.feature_types\n return res\n\n @property\n def feature_names(self):\n \"\"\"Get feature names (column labels).\n\n Returns\n -------\n feature_names : list or None\n \"\"\"\n length = c_bst_ulong()\n sarr = ctypes.POINTER(ctypes.c_char_p)()\n _check_call(_LIB.XGDMatrixGetStrFeatureInfo(self.handle,\n c_str('feature_name'),\n ctypes.byref(length),\n ctypes.byref(sarr)))\n feature_names = from_cstr_to_pystr(sarr, length)\n if not feature_names:\n feature_names = ['f{0}'.format(i)\n for i in range(self.num_col())]\n return feature_names\n\n @feature_names.setter\n def feature_names(self, feature_names):\n \"\"\"Set feature names (column labels).\n\n Parameters\n ----------\n feature_names : list or None\n Labels for features. 
None will reset existing feature names\n \"\"\"\n if feature_names is not None:\n # validate feature name\n try:\n if not isinstance(feature_names, str):\n feature_names = list(feature_names)\n else:\n feature_names = [feature_names]\n except TypeError:\n feature_names = [feature_names]\n\n if len(feature_names) != len(set(feature_names)):\n raise ValueError('feature_names must be unique')\n if len(feature_names) != self.num_col() and self.num_col() != 0:\n msg = 'feature_names must have the same length as data'\n raise ValueError(msg)\n # prohibit to use symbols may affect to parse. e.g. []<\n if not all(isinstance(f, STRING_TYPES) and\n not any(x in f for x in set(('[', ']', '<')))\n for f in feature_names):\n raise ValueError('feature_names must be string, and may not contain [, ] or <')\n c_feature_names = [bytes(f, encoding='utf-8')\n for f in feature_names]\n c_feature_names = (ctypes.c_char_p *\n len(c_feature_names))(*c_feature_names)\n _check_call(_LIB.XGDMatrixSetStrFeatureInfo(\n self.handle, c_str('feature_name'),\n c_feature_names,\n c_bst_ulong(len(feature_names))))\n else:\n # reset feature_types also\n _check_call(_LIB.XGDMatrixSetStrFeatureInfo(\n self.handle,\n c_str('feature_name'),\n None,\n c_bst_ulong(0)))\n self.feature_types = None\n\n @property\n def feature_types(self):\n \"\"\"Get feature types (column types).\n\n Returns\n -------\n feature_types : list or None\n \"\"\"\n length = c_bst_ulong()\n sarr = ctypes.POINTER(ctypes.c_char_p)()\n _check_call(_LIB.XGDMatrixGetStrFeatureInfo(self.handle,\n c_str('feature_type'),\n ctypes.byref(length),\n ctypes.byref(sarr)))\n res = from_cstr_to_pystr(sarr, length)\n if not res:\n return None\n return res\n\n @feature_types.setter\n def feature_types(self, feature_types):\n \"\"\"Set feature types (column types).\n\n This is for displaying the results and unrelated\n to the learning process.\n\n Parameters\n ----------\n feature_types : list or None\n Labels for features. 
None will reset existing feature names\n \"\"\"\n if feature_types is not None:\n if not isinstance(feature_types, (list, str)):\n raise TypeError(\n 'feature_types must be string or list of strings')\n if isinstance(feature_types, STRING_TYPES):\n # single string will be applied to all columns\n feature_types = [feature_types] * self.num_col()\n try:\n if not isinstance(feature_types, str):\n feature_types = list(feature_types)\n else:\n feature_types = [feature_types]\n except TypeError:\n feature_types = [feature_types]\n c_feature_types = [bytes(f, encoding='utf-8')\n for f in feature_types]\n c_feature_types = (ctypes.c_char_p *\n len(c_feature_types))(*c_feature_types)\n _check_call(_LIB.XGDMatrixSetStrFeatureInfo(\n self.handle, c_str('feature_type'),\n c_feature_types,\n c_bst_ulong(len(feature_types))))\n\n if len(feature_types) != self.num_col():\n msg = 'feature_types must have the same length as data'\n raise ValueError(msg)\n else:\n # Reset.\n _check_call(_LIB.XGDMatrixSetStrFeatureInfo(\n self.handle,\n c_str('feature_type'),\n None,\n c_bst_ulong(0)))\n\n\nclass DeviceQuantileDMatrix(DMatrix):\n \"\"\"Device memory Data Matrix used in XGBoost for training with\n tree_method='gpu_hist'. Do not use this for test/validation tasks as some\n information may be lost in quantisation. This DMatrix is primarily designed\n to save memory in training from device memory inputs by avoiding\n intermediate storage. Set max_bin to control the number of bins during\n quantisation.\n\n You can construct DeviceQuantileDMatrix from cupy/cudf/dlpack.\n\n .. 
versionadded:: 1.1.0\n \"\"\"\n\n def __init__(self, data, label=None, weight=None, # pylint: disable=W0231\n base_margin=None,\n missing=None,\n silent=False,\n feature_names=None,\n feature_types=None,\n nthread=None, max_bin=256):\n self.max_bin = max_bin\n self.missing = missing if missing is not None else np.nan\n self.nthread = nthread if nthread is not None else 1\n\n if isinstance(data, ctypes.c_void_p):\n self.handle = data\n return\n from .data import init_device_quantile_dmatrix\n handle, feature_names, feature_types = init_device_quantile_dmatrix(\n data, missing=self.missing, threads=self.nthread,\n max_bin=self.max_bin,\n label=label, weight=weight,\n base_margin=base_margin,\n group=None,\n label_lower_bound=None,\n label_upper_bound=None,\n feature_names=feature_names,\n feature_types=feature_types)\n self.handle = handle\n\n self.feature_names = feature_names\n self.feature_types = feature_types\n\n def _set_data_from_cuda_interface(self, data):\n '''Set data from CUDA array interface.'''\n interface = data.__cuda_array_interface__\n interface_str = bytes(json.dumps(interface, indent=2), 'utf-8')\n _check_call(\n _LIB.XGDeviceQuantileDMatrixSetDataCudaArrayInterface(\n self.handle,\n interface_str\n )\n )\n\n def _set_data_from_cuda_columnar(self, data):\n '''Set data from CUDA columnar format.1'''\n from .data import _cudf_array_interfaces\n interfaces_str = _cudf_array_interfaces(data)\n _check_call(\n _LIB.XGDeviceQuantileDMatrixSetDataCudaColumnar(\n self.handle,\n interfaces_str\n )\n )\n\n\nclass Booster(object):\n # pylint: disable=too-many-public-methods\n \"\"\"A Booster of XGBoost.\n\n Booster is the model of xgboost, that contains low level routines for\n training, prediction and evaluation.\n \"\"\"\n\n feature_names = None\n\n def __init__(self, params=None, cache=(), model_file=None):\n # pylint: disable=invalid-name\n \"\"\"\n Parameters\n ----------\n params : dict\n Parameters for boosters.\n cache : list\n List of cache items.\n 
model_file : string/os.PathLike/Booster/bytearray\n Path to the model file if it's string or PathLike.\n \"\"\"\n for d in cache:\n if not isinstance(d, DMatrix):\n raise TypeError('invalid cache item: {}'.format(type(d).__name__), cache)\n self._validate_features(d)\n\n dmats = c_array(ctypes.c_void_p, [d.handle for d in cache])\n self.handle = ctypes.c_void_p()\n _check_call(_LIB.XGBoosterCreate(dmats, c_bst_ulong(len(cache)),\n ctypes.byref(self.handle)))\n params = params or {}\n if isinstance(params, list):\n params.append(('validate_parameters', True))\n else:\n params['validate_parameters'] = True\n\n self.set_param(params or {})\n if (params is not None) and ('booster' in params):\n self.booster = params['booster']\n else:\n self.booster = 'gbtree'\n if isinstance(model_file, Booster):\n assert self.handle is not None\n # We use the pickle interface for getting memory snapshot from\n # another model, and load the snapshot with this booster.\n state = model_file.__getstate__()\n handle = state['handle']\n del state['handle']\n ptr = (ctypes.c_char * len(handle)).from_buffer(handle)\n length = c_bst_ulong(len(handle))\n _check_call(\n _LIB.XGBoosterUnserializeFromBuffer(self.handle, ptr, length))\n self.__dict__.update(state)\n elif isinstance(model_file, (STRING_TYPES, os.PathLike, bytearray)):\n self.load_model(model_file)\n elif model_file is None:\n pass\n else:\n raise TypeError('Unknown type:', model_file)\n\n def __del__(self):\n if hasattr(self, 'handle') and self.handle is not None:\n _check_call(_LIB.XGBoosterFree(self.handle))\n self.handle = None\n\n def __getstate__(self):\n # can't pickle ctypes pointers, put model content in bytearray\n this = self.__dict__.copy()\n handle = this['handle']\n if handle is not None:\n length = c_bst_ulong()\n cptr = ctypes.POINTER(ctypes.c_char)()\n _check_call(_LIB.XGBoosterSerializeToBuffer(self.handle,\n ctypes.byref(length),\n ctypes.byref(cptr)))\n buf = ctypes2buffer(cptr, length.value)\n this[\"handle\"] = 
buf\n return this\n\n def __setstate__(self, state):\n # reconstruct handle from raw data\n handle = state['handle']\n if handle is not None:\n buf = handle\n dmats = c_array(ctypes.c_void_p, [])\n handle = ctypes.c_void_p()\n _check_call(_LIB.XGBoosterCreate(\n dmats, c_bst_ulong(0), ctypes.byref(handle)))\n length = c_bst_ulong(len(buf))\n ptr = (ctypes.c_char * len(buf)).from_buffer(buf)\n _check_call(\n _LIB.XGBoosterUnserializeFromBuffer(handle, ptr, length))\n state['handle'] = handle\n self.__dict__.update(state)\n\n def __getitem__(self, val):\n if isinstance(val, int):\n val = slice(val, val+1)\n if isinstance(val, tuple):\n raise ValueError('Only supports slicing through 1 dimension.')\n if not isinstance(val, slice):\n msg = _expect((int, slice), type(val))\n raise TypeError(msg)\n if isinstance(val.start, type(Ellipsis)) or val.start is None:\n start = 0\n else:\n start = val.start\n if isinstance(val.stop, type(Ellipsis)) or val.stop is None:\n stop = 0\n else:\n stop = val.stop\n if stop < start:\n raise ValueError('Invalid slice', val)\n\n step = val.step if val.step is not None else 1\n\n start = ctypes.c_int(start)\n stop = ctypes.c_int(stop)\n step = ctypes.c_int(step)\n\n sliced_handle = ctypes.c_void_p()\n status = _LIB.XGBoosterSlice(self.handle, start, stop, step,\n ctypes.byref(sliced_handle))\n if status == -2:\n raise IndexError('Layer index out of range')\n _check_call(status)\n\n sliced = Booster()\n _check_call(_LIB.XGBoosterFree(sliced.handle))\n sliced.handle = sliced_handle\n return sliced\n\n def save_config(self):\n '''Output internal parameter configuration of Booster as a JSON\n string.\n\n .. 
versionadded:: 1.0.0\n '''\n json_string = ctypes.c_char_p()\n length = c_bst_ulong()\n _check_call(_LIB.XGBoosterSaveJsonConfig(\n self.handle,\n ctypes.byref(length),\n ctypes.byref(json_string)))\n json_string = json_string.value.decode()\n return json_string\n\n def load_config(self, config):\n '''Load configuration returned by `save_config`.\n\n .. versionadded:: 1.0.0\n '''\n assert isinstance(config, str)\n _check_call(_LIB.XGBoosterLoadJsonConfig(\n self.handle,\n c_str(config)))\n\n def __copy__(self):\n return self.__deepcopy__(None)\n\n def __deepcopy__(self, _):\n '''Return a copy of booster.'''\n return Booster(model_file=self)\n\n def copy(self):\n \"\"\"Copy the booster object.\n\n Returns\n -------\n booster: `Booster`\n a copied booster model\n \"\"\"\n return self.__copy__()\n\n def load_rabit_checkpoint(self):\n \"\"\"Initialize the model by load from rabit checkpoint.\n\n Returns\n -------\n version: integer\n The version number of the model.\n \"\"\"\n version = ctypes.c_int()\n _check_call(_LIB.XGBoosterLoadRabitCheckpoint(\n self.handle, ctypes.byref(version)))\n return version.value\n\n def save_rabit_checkpoint(self):\n \"\"\"Save the current booster to rabit checkpoint.\"\"\"\n _check_call(_LIB.XGBoosterSaveRabitCheckpoint(self.handle))\n\n def attr(self, key):\n \"\"\"Get attribute string from the Booster.\n\n Parameters\n ----------\n key : str\n The key to get attribute from.\n\n Returns\n -------\n value : str\n The attribute value of the key, returns None if attribute do not exist.\n \"\"\"\n ret = ctypes.c_char_p()\n success = ctypes.c_int()\n _check_call(_LIB.XGBoosterGetAttr(\n self.handle, c_str(key), ctypes.byref(ret), ctypes.byref(success)))\n if success.value != 0:\n return py_str(ret.value)\n return None\n\n def attributes(self):\n \"\"\"Get attributes stored in the Booster as a dictionary.\n\n Returns\n -------\n result : dictionary of attribute_name: attribute_value pairs of strings.\n Returns an empty dict if there's no 
attributes.\n \"\"\"\n length = c_bst_ulong()\n sarr = ctypes.POINTER(ctypes.c_char_p)()\n _check_call(_LIB.XGBoosterGetAttrNames(self.handle,\n ctypes.byref(length),\n ctypes.byref(sarr)))\n attr_names = from_cstr_to_pystr(sarr, length)\n return {n: self.attr(n) for n in attr_names}\n\n def set_attr(self, **kwargs):\n \"\"\"Set the attribute of the Booster.\n\n Parameters\n ----------\n **kwargs\n The attributes to set. Setting a value to None deletes an attribute.\n \"\"\"\n for key, value in kwargs.items():\n if value is not None:\n if not isinstance(value, STRING_TYPES):\n raise ValueError(\"Set Attr only accepts string values\")\n value = c_str(str(value))\n _check_call(_LIB.XGBoosterSetAttr(\n self.handle, c_str(key), value))\n\n def set_param(self, params, value=None):\n \"\"\"Set parameters into the Booster.\n\n Parameters\n ----------\n params: dict/list/str\n list of key,value pairs, dict of key to value or simply str key\n value: optional\n value of the specified parameter, when params is str key\n \"\"\"\n if isinstance(params, Mapping):\n params = params.items()\n elif isinstance(params, STRING_TYPES) and value is not None:\n params = [(params, value)]\n for key, val in params:\n if val is not None:\n _check_call(_LIB.XGBoosterSetParam(self.handle, c_str(key),\n c_str(str(val))))\n\n def update(self, dtrain, iteration, fobj=None):\n \"\"\"Update for one iteration, with objective function calculated\n internally. 
This function should not be called directly by users.\n\n Parameters\n ----------\n dtrain : DMatrix\n Training data.\n iteration : int\n Current iteration number.\n fobj : function\n Customized objective function.\n\n \"\"\"\n if not isinstance(dtrain, DMatrix):\n raise TypeError('invalid training matrix: {}'.format(\n type(dtrain).__name__))\n self._validate_features(dtrain)\n\n if fobj is None:\n _check_call(_LIB.XGBoosterUpdateOneIter(self.handle,\n ctypes.c_int(iteration),\n dtrain.handle))\n else:\n pred = self.predict(dtrain, output_margin=True, training=True)\n grad, hess = fobj(pred, dtrain)\n self.boost(dtrain, grad, hess)\n\n def boost(self, dtrain, grad, hess):\n \"\"\"Boost the booster for one iteration, with customized gradient\n statistics. Like :func:`xgboost.core.Booster.update`, this\n function should not be called directly by users.\n\n Parameters\n ----------\n dtrain : DMatrix\n The training DMatrix.\n grad : list\n The first order of gradient.\n hess : list\n The second order of gradient.\n\n \"\"\"\n if len(grad) != len(hess):\n raise ValueError('grad / hess length mismatch: {} / {}'.format(len(grad), len(hess)))\n if not isinstance(dtrain, DMatrix):\n raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))\n self._validate_features(dtrain)\n\n _check_call(_LIB.XGBoosterBoostOneIter(self.handle, dtrain.handle,\n c_array(ctypes.c_float, grad),\n c_array(ctypes.c_float, hess),\n c_bst_ulong(len(grad))))\n\n def eval_set(self, evals, iteration=0, feval=None):\n # pylint: disable=invalid-name\n \"\"\"Evaluate a set of data.\n\n Parameters\n ----------\n evals : list of tuples (DMatrix, string)\n List of items to be evaluated.\n iteration : int\n Current iteration.\n feval : function\n Custom evaluation function.\n\n Returns\n -------\n result: str\n Evaluation result string.\n \"\"\"\n for d in evals:\n if not isinstance(d[0], DMatrix):\n raise TypeError('expected DMatrix, got {}'.format(\n type(d[0]).__name__))\n if not 
isinstance(d[1], STRING_TYPES):\n raise TypeError('expected string, got {}'.format(\n type(d[1]).__name__))\n self._validate_features(d[0])\n\n dmats = c_array(ctypes.c_void_p, [d[0].handle for d in evals])\n evnames = c_array(ctypes.c_char_p, [c_str(d[1]) for d in evals])\n msg = ctypes.c_char_p()\n _check_call(_LIB.XGBoosterEvalOneIter(self.handle,\n ctypes.c_int(iteration),\n dmats, evnames,\n c_bst_ulong(len(evals)),\n ctypes.byref(msg)))\n res = msg.value.decode()\n if feval is not None:\n for dmat, evname in evals:\n feval_ret = feval(self.predict(dmat, training=False,\n output_margin=True), dmat)\n if isinstance(feval_ret, list):\n for name, val in feval_ret:\n res += '\\t%s-%s:%f' % (evname, name, val)\n else:\n name, val = feval_ret\n res += '\\t%s-%s:%f' % (evname, name, val)\n return res\n\n def eval(self, data, name='eval', iteration=0):\n \"\"\"Evaluate the model on mat.\n\n Parameters\n ----------\n data : DMatrix\n The dmatrix storing the input.\n\n name : str, optional\n The name of the dataset.\n\n iteration : int, optional\n The current iteration number.\n\n Returns\n -------\n result: str\n Evaluation result string.\n \"\"\"\n self._validate_features(data)\n return self.eval_set([(data, name)], iteration)\n\n # pylint: disable=too-many-function-args\n def predict(self,\n data,\n output_margin=False,\n ntree_limit=0,\n pred_leaf=False,\n pred_contribs=False,\n approx_contribs=False,\n pred_interactions=False,\n validate_features=True,\n training=False):\n \"\"\"Predict with data.\n\n .. note:: This function is not thread safe except for ``gbtree``\n booster.\n\n For ``gbtree`` booster, the thread safety is guaranteed by locks.\n For lock free prediction use ``inplace_predict`` instead. Also, the\n safety does not hold when used in conjunction with other methods.\n\n When using booster other than ``gbtree``, predict can only be called\n from one thread. 
If you want to run prediction using multiple\n thread, call ``bst.copy()`` to make copies of model object and then\n call ``predict()``.\n\n Parameters\n ----------\n data : DMatrix\n The dmatrix storing the input.\n\n output_margin : bool\n Whether to output the raw untransformed margin value.\n\n ntree_limit : int\n Limit number of trees in the prediction; defaults to 0 (use all\n trees).\n\n pred_leaf : bool\n When this option is on, the output will be a matrix of (nsample,\n ntrees) with each record indicating the predicted leaf index of\n each sample in each tree. Note that the leaf index of a tree is\n unique per tree, so you may find leaf 1 in both tree 1 and tree 0.\n\n pred_contribs : bool\n When this is True the output will be a matrix of size (nsample,\n nfeats + 1) with each record indicating the feature contributions\n (SHAP values) for that prediction. The sum of all feature\n contributions is equal to the raw untransformed margin value of the\n prediction. Note the final column is the bias term.\n\n approx_contribs : bool\n Approximate the contributions of each feature\n\n pred_interactions : bool\n When this is True the output will be a matrix of size (nsample,\n nfeats + 1, nfeats + 1) indicating the SHAP interaction values for\n each pair of features. The sum of each row (or column) of the\n interaction values equals the corresponding SHAP value (from\n pred_contribs), and the sum of the entire matrix equals the raw\n untransformed margin value of the prediction. Note the last row and\n column correspond to the bias term.\n\n validate_features : bool\n When this is True, validate that the Booster's and data's\n feature_names are identical. Otherwise, it is assumed that the\n feature_names are the same.\n\n training : bool\n Whether the prediction value is used for training. This can effect\n `dart` booster, which performs dropouts during training iterations.\n\n .. versionadded:: 1.0.0\n\n .. 
note:: Using ``predict()`` with DART booster\n\n If the booster object is DART type, ``predict()`` will not perform\n dropouts, i.e. all the trees will be evaluated. If you want to\n obtain result with dropouts, provide `training=True`.\n\n Returns\n -------\n prediction : numpy array\n\n \"\"\"\n option_mask = 0x00\n if output_margin:\n option_mask |= 0x01\n if pred_leaf:\n option_mask |= 0x02\n if pred_contribs:\n option_mask |= 0x04\n if approx_contribs:\n option_mask |= 0x08\n if pred_interactions:\n option_mask |= 0x10\n\n if not isinstance(data, DMatrix):\n raise TypeError('Expecting data to be a DMatrix object, got: ',\n type(data))\n\n if validate_features:\n self._validate_features(data)\n\n length = c_bst_ulong()\n preds = ctypes.POINTER(ctypes.c_float)()\n _check_call(_LIB.XGBoosterPredict(self.handle, data.handle,\n ctypes.c_int(option_mask),\n ctypes.c_uint(ntree_limit),\n ctypes.c_int(training),\n ctypes.byref(length),\n ctypes.byref(preds)))\n preds = ctypes2numpy(preds, length.value, np.float32)\n if pred_leaf:\n preds = preds.astype(np.int32)\n nrow = data.num_row()\n if preds.size != nrow and preds.size % nrow == 0:\n chunk_size = int(preds.size / nrow)\n\n if pred_interactions:\n ngroup = int(chunk_size / ((data.num_col() + 1) *\n (data.num_col() + 1)))\n if ngroup == 1:\n preds = preds.reshape(nrow,\n data.num_col() + 1,\n data.num_col() + 1)\n else:\n preds = preds.reshape(nrow, ngroup,\n data.num_col() + 1,\n data.num_col() + 1)\n elif pred_contribs:\n ngroup = int(chunk_size / (data.num_col() + 1))\n if ngroup == 1:\n preds = preds.reshape(nrow, data.num_col() + 1)\n else:\n preds = preds.reshape(nrow, ngroup, data.num_col() + 1)\n else:\n preds = preds.reshape(nrow, chunk_size)\n return preds\n\n def inplace_predict(self, data, iteration_range=(0, 0),\n predict_type='value', missing=np.nan):\n '''Run prediction in-place, Unlike ``predict`` method, inplace prediction does\n not cache the prediction result.\n\n Calling only ``inplace_predict`` 
in multiple threads is safe and lock\n free. But the safety does not hold when used in conjunction with other\n methods. E.g. you can't train the booster in one thread and perform\n prediction in the other.\n\n .. code-block:: python\n\n booster.set_param({'predictor': 'gpu_predictor'})\n booster.inplace_predict(cupy_array)\n\n booster.set_param({'predictor': 'cpu_predictor})\n booster.inplace_predict(numpy_array)\n\n .. versionadded:: 1.1.0\n\n Parameters\n ----------\n data : numpy.ndarray/scipy.sparse.csr_matrix/cupy.ndarray/\n cudf.DataFrame/pd.DataFrame\n The input data, must not be a view for numpy array. Set\n ``predictor`` to ``gpu_predictor`` for running prediction on CuPy\n array or CuDF DataFrame.\n iteration_range : tuple\n Specifies which layer of trees are used in prediction. For\n example, if a random forest is trained with 100 rounds. Specifying\n `iteration_range=(10, 20)`, then only the forests built during [10,\n 20) (open set) rounds are used in this prediction.\n predict_type : str\n * `value` Output model prediction values.\n * `margin` Output the raw untransformed margin value.\n missing : float\n Value in the input data which needs to be present as a missing\n value.\n\n Returns\n -------\n prediction : numpy.ndarray/cupy.ndarray\n The prediction result. 
When input data is on GPU, prediction\n result is stored in a cupy array.\n\n '''\n\n def reshape_output(predt, rows):\n '''Reshape for multi-output prediction.'''\n if predt.size != rows and predt.size % rows == 0:\n cols = int(predt.size / rows)\n predt = predt.reshape(rows, cols)\n return predt\n return predt\n\n length = c_bst_ulong()\n preds = ctypes.POINTER(ctypes.c_float)()\n iteration_range = (ctypes.c_uint(iteration_range[0]),\n ctypes.c_uint(iteration_range[1]))\n\n # once caching is supported, we can pass id(data) as cache id.\n try:\n import pandas as pd\n if isinstance(data, pd.DataFrame):\n data = data.values\n except ImportError:\n pass\n if isinstance(data, np.ndarray):\n assert data.flags.c_contiguous\n arr = np.array(data.reshape(data.size), copy=False,\n dtype=np.float32)\n _check_call(_LIB.XGBoosterPredictFromDense(\n self.handle,\n arr.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),\n c_bst_ulong(data.shape[0]),\n c_bst_ulong(data.shape[1]),\n ctypes.c_float(missing),\n iteration_range[0],\n iteration_range[1],\n c_str(predict_type),\n c_bst_ulong(0),\n ctypes.byref(length),\n ctypes.byref(preds)\n ))\n preds = ctypes2numpy(preds, length.value, np.float32)\n rows = data.shape[0]\n return reshape_output(preds, rows)\n if isinstance(data, scipy.sparse.csr_matrix):\n csr = data\n _check_call(_LIB.XGBoosterPredictFromCSR(\n self.handle,\n c_array(ctypes.c_size_t, csr.indptr),\n c_array(ctypes.c_uint, csr.indices),\n c_array(ctypes.c_float, csr.data),\n ctypes.c_size_t(len(csr.indptr)),\n ctypes.c_size_t(len(csr.data)),\n ctypes.c_size_t(csr.shape[1]),\n ctypes.c_float(missing),\n iteration_range[0],\n iteration_range[1],\n c_str(predict_type),\n c_bst_ulong(0),\n ctypes.byref(length),\n ctypes.byref(preds)))\n preds = ctypes2numpy(preds, length.value, np.float32)\n rows = data.shape[0]\n return reshape_output(preds, rows)\n if lazy_isinstance(data, 'cupy.core.core', 'ndarray'):\n assert data.flags.c_contiguous\n interface = 
data.__cuda_array_interface__\n if 'mask' in interface:\n interface['mask'] = interface['mask'].__cuda_array_interface__\n interface_str = bytes(json.dumps(interface, indent=2), 'utf-8')\n _check_call(_LIB.XGBoosterPredictFromArrayInterface(\n self.handle,\n interface_str,\n ctypes.c_float(missing),\n iteration_range[0],\n iteration_range[1],\n c_str(predict_type),\n c_bst_ulong(0),\n ctypes.byref(length),\n ctypes.byref(preds)))\n mem = ctypes2cupy(preds, length, np.float32)\n rows = data.shape[0]\n return reshape_output(mem, rows)\n if lazy_isinstance(data, 'cudf.core.dataframe', 'DataFrame'):\n from .data import _cudf_array_interfaces\n interfaces_str = _cudf_array_interfaces(data)\n _check_call(_LIB.XGBoosterPredictFromArrayInterfaceColumns(\n self.handle,\n interfaces_str,\n ctypes.c_float(missing),\n iteration_range[0],\n iteration_range[1],\n c_str(predict_type),\n c_bst_ulong(0),\n ctypes.byref(length),\n ctypes.byref(preds)))\n mem = ctypes2cupy(preds, length, np.float32)\n rows = data.shape[0]\n predt = reshape_output(mem, rows)\n return predt\n\n raise TypeError('Data type:' + str(type(data)) +\n ' not supported by inplace prediction.')\n\n def save_model(self, fname):\n \"\"\"Save the model to a file.\n\n The model is saved in an XGBoost internal format which is universal\n among the various XGBoost interfaces. Auxiliary attributes of the\n Python Booster object (such as feature_names) will not be saved. 
See:\n\n https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html\n\n for more info.\n\n Parameters\n ----------\n fname : string or os.PathLike\n Output file name\n\n \"\"\"\n if isinstance(fname, (STRING_TYPES, os.PathLike)): # assume file name\n _check_call(_LIB.XGBoosterSaveModel(\n self.handle, c_str(os.fspath(fname))))\n else:\n raise TypeError(\"fname must be a string or os PathLike\")\n\n def save_raw(self):\n \"\"\"Save the model to a in memory buffer representation instead of file.\n\n Returns\n -------\n a in memory buffer representation of the model\n \"\"\"\n length = c_bst_ulong()\n cptr = ctypes.POINTER(ctypes.c_char)()\n _check_call(_LIB.XGBoosterGetModelRaw(self.handle,\n ctypes.byref(length),\n ctypes.byref(cptr)))\n return ctypes2buffer(cptr, length.value)\n\n def load_model(self, fname):\n \"\"\"Load the model from a file or bytearray. Path to file can be local\n or as an URI.\n\n The model is loaded from an XGBoost format which is universal among the\n various XGBoost interfaces. Auxiliary attributes of the Python Booster\n object (such as feature_names) will not be loaded. See:\n\n https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html\n\n for more info.\n\n Parameters\n ----------\n fname : string, os.PathLike, or a memory buffer\n Input file name or memory buffer(see also save_raw)\n\n \"\"\"\n if isinstance(fname, (STRING_TYPES, os.PathLike)):\n # assume file name, cannot use os.path.exist to check, file can be\n # from URL.\n _check_call(_LIB.XGBoosterLoadModel(\n self.handle, c_str(os.fspath(fname))))\n elif isinstance(fname, bytearray):\n buf = fname\n length = c_bst_ulong(len(buf))\n ptr = (ctypes.c_char * len(buf)).from_buffer(buf)\n _check_call(_LIB.XGBoosterLoadModelFromBuffer(self.handle, ptr,\n length))\n else:\n raise TypeError('Unknown file type: ', fname)\n\n def dump_model(self, fout, fmap='', with_stats=False, dump_format=\"text\"):\n \"\"\"Dump model into a text or JSON file. 
Unlike `save_model`, the\n output format is primarily used for visualization or interpretation,\n hence it's more human readable but cannot be loaded back to XGBoost.\n\n Parameters\n ----------\n fout : string or os.PathLike\n Output file name.\n fmap : string or os.PathLike, optional\n Name of the file containing feature map names.\n with_stats : bool, optional\n Controls whether the split statistics are output.\n dump_format : string, optional\n Format of model dump file. Can be 'text' or 'json'.\n \"\"\"\n if isinstance(fout, (STRING_TYPES, os.PathLike)):\n fout = open(os.fspath(fout), 'w')\n need_close = True\n else:\n need_close = False\n ret = self.get_dump(fmap, with_stats, dump_format)\n if dump_format == 'json':\n fout.write('[\\n')\n for i, _ in enumerate(ret):\n fout.write(ret[i])\n if i < len(ret) - 1:\n fout.write(\",\\n\")\n fout.write('\\n]')\n else:\n for i, _ in enumerate(ret):\n fout.write('booster[{}]:\\n'.format(i))\n fout.write(ret[i])\n if need_close:\n fout.close()\n\n def get_dump(self, fmap='', with_stats=False, dump_format=\"text\"):\n \"\"\"Returns the model dump as a list of strings. Unlike `save_model`, the\n output format is primarily used for visualization or interpretation,\n hence it's more human readable but cannot be loaded back to XGBoost.\n\n Parameters\n ----------\n fmap : string or os.PathLike, optional\n Name of the file containing feature map names.\n with_stats : bool, optional\n Controls whether the split statistics are output.\n dump_format : string, optional\n Format of model dump. 
Can be 'text', 'json' or 'dot'.\n\n \"\"\"\n fmap = os.fspath(fmap)\n length = c_bst_ulong()\n sarr = ctypes.POINTER(ctypes.c_char_p)()\n if self.feature_names is not None and fmap == '':\n flen = len(self.feature_names)\n\n fname = from_pystr_to_cstr(self.feature_names)\n\n if self.feature_types is None:\n # use quantitative as default\n # {'q': quantitative, 'i': indicator}\n ftype = from_pystr_to_cstr(['q'] * flen)\n else:\n ftype = from_pystr_to_cstr(self.feature_types)\n _check_call(_LIB.XGBoosterDumpModelExWithFeatures(\n self.handle,\n ctypes.c_int(flen),\n fname,\n ftype,\n ctypes.c_int(with_stats),\n c_str(dump_format),\n ctypes.byref(length),\n ctypes.byref(sarr)))\n else:\n if fmap != '' and not os.path.exists(fmap):\n raise ValueError(\"No such file: {0}\".format(fmap))\n _check_call(_LIB.XGBoosterDumpModelEx(self.handle,\n c_str(fmap),\n ctypes.c_int(with_stats),\n c_str(dump_format),\n ctypes.byref(length),\n ctypes.byref(sarr)))\n res = from_cstr_to_pystr(sarr, length)\n return res\n\n def get_fscore(self, fmap=''):\n \"\"\"Get feature importance of each feature.\n\n .. note:: Feature importance is defined only for tree boosters\n\n Feature importance is only defined when the decision tree model is chosen as base\n learner (`booster=gbtree`). It is not defined for other base learner types, such\n as linear learners (`booster=gblinear`).\n\n .. 
note:: Zero-importance features will not be included\n\n Keep in mind that this function does not include zero-importance feature, i.e.\n those features that have not been used in any split conditions.\n\n Parameters\n ----------\n fmap: str or os.PathLike (optional)\n The name of feature map file\n \"\"\"\n\n return self.get_score(fmap, importance_type='weight')\n\n def get_score(self, fmap='', importance_type='weight'):\n \"\"\"Get feature importance of each feature.\n Importance type can be defined as:\n\n * 'weight': the number of times a feature is used to split the data across all trees.\n * 'gain': the average gain across all splits the feature is used in.\n * 'cover': the average coverage across all splits the feature is used in.\n * 'total_gain': the total gain across all splits the feature is used in.\n * 'total_cover': the total coverage across all splits the feature is used in.\n\n .. note:: Feature importance is defined only for tree boosters\n\n Feature importance is only defined when the decision tree model is chosen as base\n learner (`booster=gbtree`). 
It is not defined for other base learner types, such\n as linear learners (`booster=gblinear`).\n\n Parameters\n ----------\n fmap: str or os.PathLike (optional)\n The name of feature map file.\n importance_type: str, default 'weight'\n One of the importance types defined above.\n \"\"\"\n fmap = os.fspath(fmap)\n if getattr(self, 'booster', None) is not None and self.booster not in {'gbtree', 'dart'}:\n raise ValueError('Feature importance is not defined for Booster type {}'\n .format(self.booster))\n\n allowed_importance_types = ['weight', 'gain', 'cover', 'total_gain', 'total_cover']\n if importance_type not in allowed_importance_types:\n msg = (\"importance_type mismatch, got '{}', expected one of \" +\n repr(allowed_importance_types))\n raise ValueError(msg.format(importance_type))\n\n # if it's weight, then omap stores the number of missing values\n if importance_type == 'weight':\n # do a simpler tree dump to save time\n trees = self.get_dump(fmap, with_stats=False)\n fmap = {}\n for tree in trees:\n for line in tree.split('\\n'):\n # look for the opening square bracket\n arr = line.split('[')\n # if no opening bracket (leaf node), ignore this line\n if len(arr) == 1:\n continue\n\n # extract feature name from string between []\n fid = arr[1].split(']')[0].split('<')[0]\n\n if fid not in fmap:\n # if the feature hasn't been seen yet\n fmap[fid] = 1\n else:\n fmap[fid] += 1\n\n return fmap\n\n average_over_splits = True\n if importance_type == 'total_gain':\n importance_type = 'gain'\n average_over_splits = False\n elif importance_type == 'total_cover':\n importance_type = 'cover'\n average_over_splits = False\n\n trees = self.get_dump(fmap, with_stats=True)\n\n importance_type += '='\n fmap = {}\n gmap = {}\n for tree in trees:\n for line in tree.split('\\n'):\n # look for the opening square bracket\n arr = line.split('[')\n # if no opening bracket (leaf node), ignore this line\n if len(arr) == 1:\n continue\n\n # look for the closing bracket, extract only 
info within that bracket\n fid = arr[1].split(']')\n\n # extract gain or cover from string after closing bracket\n g = float(fid[1].split(importance_type)[1].split(',')[0])\n\n # extract feature name from string before closing bracket\n fid = fid[0].split('<')[0]\n\n if fid not in fmap:\n # if the feature hasn't been seen yet\n fmap[fid] = 1\n gmap[fid] = g\n else:\n fmap[fid] += 1\n gmap[fid] += g\n\n # calculate average value (gain/cover) for each feature\n if average_over_splits:\n for fid in gmap:\n gmap[fid] = gmap[fid] / fmap[fid]\n\n return gmap\n\n def trees_to_dataframe(self, fmap=''):\n \"\"\"Parse a boosted tree model text dump into a pandas DataFrame structure.\n\n This feature is only defined when the decision tree model is chosen as base\n learner (`booster in {gbtree, dart}`). It is not defined for other base learner\n types, such as linear learners (`booster=gblinear`).\n\n Parameters\n ----------\n fmap: str or os.PathLike (optional)\n The name of feature map file.\n \"\"\"\n # pylint: disable=too-many-locals\n fmap = os.fspath(fmap)\n if not PANDAS_INSTALLED:\n raise Exception(('pandas must be available to use this method.'\n 'Install pandas before calling again.'))\n\n if getattr(self, 'booster', None) is not None and self.booster not in {'gbtree', 'dart'}:\n raise ValueError('This method is not defined for Booster type {}'\n .format(self.booster))\n\n tree_ids = []\n node_ids = []\n fids = []\n splits = []\n y_directs = []\n n_directs = []\n missings = []\n gains = []\n covers = []\n\n trees = self.get_dump(fmap, with_stats=True)\n for i, tree in enumerate(trees):\n for line in tree.split('\\n'):\n arr = line.split('[')\n # Leaf node\n if len(arr) == 1:\n # Last element of line.split is an empy string\n if arr == ['']:\n continue\n # parse string\n parse = arr[0].split(':')\n stats = re.split('=|,', parse[1])\n\n # append to lists\n tree_ids.append(i)\n node_ids.append(int(re.findall(r'\\b\\d+\\b', parse[0])[0]))\n fids.append('Leaf')\n 
splits.append(float('NAN'))\n y_directs.append(float('NAN'))\n n_directs.append(float('NAN'))\n missings.append(float('NAN'))\n gains.append(float(stats[1]))\n covers.append(float(stats[3]))\n # Not a Leaf Node\n else:\n # parse string\n fid = arr[1].split(']')\n parse = fid[0].split('<')\n stats = re.split('=|,', fid[1])\n\n # append to lists\n tree_ids.append(i)\n node_ids.append(int(re.findall(r'\\b\\d+\\b', arr[0])[0]))\n fids.append(parse[0])\n splits.append(float(parse[1]))\n str_i = str(i)\n y_directs.append(str_i + '-' + stats[1])\n n_directs.append(str_i + '-' + stats[3])\n missings.append(str_i + '-' + stats[5])\n gains.append(float(stats[7]))\n covers.append(float(stats[9]))\n\n ids = [str(t_id) + '-' + str(n_id) for t_id, n_id in zip(tree_ids, node_ids)]\n df = DataFrame({'Tree': tree_ids, 'Node': node_ids, 'ID': ids,\n 'Feature': fids, 'Split': splits, 'Yes': y_directs,\n 'No': n_directs, 'Missing': missings, 'Gain': gains,\n 'Cover': covers})\n\n if callable(getattr(df, 'sort_values', None)):\n # pylint: disable=no-member\n return df.sort_values(['Tree', 'Node']).reset_index(drop=True)\n # pylint: disable=no-member\n return df.sort(['Tree', 'Node']).reset_index(drop=True)\n\n def _validate_features(self, data):\n \"\"\"\n Validate Booster and data's feature_names are identical.\n Set feature_names and feature_types from DMatrix\n \"\"\"\n if self.feature_names is None:\n self.feature_names = data.feature_names\n self.feature_types = data.feature_types\n else:\n # Booster can't accept data with different feature names\n if self.feature_names != data.feature_names:\n dat_missing = set(self.feature_names) - set(data.feature_names)\n my_missing = set(data.feature_names) - set(self.feature_names)\n\n msg = 'feature_names mismatch: {0} {1}'\n\n if dat_missing:\n msg += ('\\nexpected ' + ', '.join(\n str(s) for s in dat_missing) + ' in input data')\n\n if my_missing:\n msg += ('\\ntraining data did not have the following fields: ' +\n ', '.join(str(s) for s 
in my_missing))\n\n raise ValueError(msg.format(self.feature_names,\n data.feature_names))\n\n def get_split_value_histogram(self, feature, fmap='', bins=None,\n as_pandas=True):\n \"\"\"Get split value histogram of a feature\n\n Parameters\n ----------\n feature: str\n The name of the feature.\n fmap: str or os.PathLike (optional)\n The name of feature map file.\n bin: int, default None\n The maximum number of bins.\n Number of bins equals number of unique split values n_unique,\n if bins == None or bins > n_unique.\n as_pandas: bool, default True\n Return pd.DataFrame when pandas is installed.\n If False or pandas is not installed, return numpy ndarray.\n\n Returns\n -------\n a histogram of used splitting values for the specified feature\n either as numpy array or pandas DataFrame.\n \"\"\"\n xgdump = self.get_dump(fmap=fmap)\n values = []\n regexp = re.compile(r\"\\[{0}<([\\d.Ee+-]+)\\]\".format(feature))\n for i, _ in enumerate(xgdump):\n m = re.findall(regexp, xgdump[i])\n values.extend([float(x) for x in m])\n\n n_unique = len(np.unique(values))\n bins = max(min(n_unique, bins) if bins is not None else n_unique, 1)\n\n nph = np.histogram(values, bins=bins)\n nph = np.column_stack((nph[1][1:], nph[0]))\n nph = nph[nph[:, 1] > 0]\n\n if as_pandas and PANDAS_INSTALLED:\n return DataFrame(nph, columns=['SplitValue', 'Count'])\n if as_pandas and not PANDAS_INSTALLED:\n sys.stderr.write(\n \"Returning histogram as ndarray (as_pandas == True, but pandas is not installed).\")\n return nph\n" ]
[ [ "numpy.histogram", "numpy.array", "numpy.zeros", "numpy.column_stack", "numpy.unique" ] ]
nsina/datashader
[ "7db4112d57589fb23c2a70ea8d88ca57ba954eba" ]
[ "datashader/tests/test_pandas.py" ]
[ "from __future__ import absolute_import\nfrom collections import OrderedDict\nimport os\nfrom numpy import nan\n\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\n\nimport datashader as ds\n\nimport pytest\n\nfrom datashader.datatypes import RaggedDtype\n\ndf_pd = pd.DataFrame({'x': np.array(([0.] * 10 + [1] * 10)),\n 'y': np.array(([0.] * 5 + [1] * 5 + [0] * 5 + [1] * 5)),\n 'log_x': np.array(([1.] * 10 + [10] * 10)),\n 'log_y': np.array(([1.] * 5 + [10] * 5 + [1] * 5 + [10] * 5)),\n 'i32': np.arange(20, dtype='i4'),\n 'i64': np.arange(20, dtype='i8'),\n 'f32': np.arange(20, dtype='f4'),\n 'f64': np.arange(20, dtype='f8'),\n 'empty_bin': np.array([0.] * 15 + [np.nan] * 5),\n 'cat': ['a']*5 + ['b']*5 + ['c']*5 + ['d']*5})\ndf_pd.cat = df_pd.cat.astype('category')\ndf_pd.at[2,'f32'] = nan\ndf_pd.at[2,'f64'] = nan\ndfs_pd = [df_pd]\n\nif \"DATASHADER_TEST_GPU\" in os.environ:\n test_gpu = bool(int(os.environ[\"DATASHADER_TEST_GPU\"]))\nelse:\n test_gpu = None\n\n\ntry:\n import spatialpandas as sp\n from spatialpandas.geometry import LineDtype\nexcept ImportError:\n LineDtype = None\n sp = None\n\n\ndef pd_DataFrame(*args, **kwargs):\n if kwargs.pop(\"geo\", False):\n return sp.GeoDataFrame(*args, **kwargs)\n else:\n return pd.DataFrame(*args, **kwargs)\n\n\ntry:\n import cudf\n import cupy\n\n if not test_gpu:\n # GPU testing disabled even though cudf/cupy are available\n raise ImportError\n\n def cudf_DataFrame(*args, **kwargs):\n assert not kwargs.pop(\"geo\", False)\n return cudf.DataFrame.from_pandas(\n pd.DataFrame(*args, **kwargs), nan_as_null=False\n )\n df_cuda = cudf_DataFrame(df_pd)\n dfs = [df_pd, df_cuda]\n DataFrames = [pd_DataFrame, cudf_DataFrame]\nexcept ImportError:\n cudf = cupy = None\n dfs = [df_pd]\n DataFrames = [pd_DataFrame]\n\n\nc = ds.Canvas(plot_width=2, plot_height=2, x_range=(0, 1), y_range=(0, 1))\nc_logx = ds.Canvas(plot_width=2, plot_height=2, x_range=(1, 10),\n y_range=(0, 1), x_axis_type='log')\nc_logy = 
ds.Canvas(plot_width=2, plot_height=2, x_range=(0, 1),\n y_range=(1, 10), y_axis_type='log')\nc_logxy = ds.Canvas(plot_width=2, plot_height=2, x_range=(1, 10),\n y_range=(1, 10), x_axis_type='log', y_axis_type='log')\n\naxis = ds.core.LinearAxis()\nlincoords = axis.compute_index(axis.compute_scale_and_translate((0, 1), 2), 2)\ncoords = OrderedDict([('x', lincoords), ('y', lincoords)])\ndims = ['y', 'x']\n\n\ndef assert_eq_xr(agg, b, close=False):\n \"\"\"Assert that two xarray DataArrays are equal, handling the possibility\n that the two DataArrays may be backed by ndarrays of different types\"\"\"\n if cupy:\n if isinstance(agg.data, cupy.ndarray):\n agg = xr.DataArray(\n cupy.asnumpy(agg.data), coords=agg.coords, dims=agg.dims\n )\n if isinstance(b.data, cupy.ndarray):\n b = xr.DataArray(\n cupy.asnumpy(b.data), coords=b.coords, dims=b.dims\n )\n if close:\n xr.testing.assert_allclose(agg, b)\n else:\n xr.testing.assert_equal(agg, b)\n\ndef assert_eq_ndarray(data, b):\n \"\"\"Assert that two ndarrays are equal, handling the possibility that the\n ndarrays are of different types\"\"\"\n if cupy and isinstance(data, cupy.ndarray):\n data = cupy.asnumpy(data)\n np.testing.assert_equal(data, b)\n\n\ndef floats(n):\n \"\"\"Returns contiguous list of floats from initial point\"\"\"\n while True:\n yield n\n n = n + np.spacing(n)\n\n\ndef values(s):\n \"\"\"Get numpy array of values from pandas-like Series, handling Series\n of different types\"\"\"\n if cudf and isinstance(s, cudf.Series):\n return s.to_array(fillna=np.nan)\n else:\n return s.values\n\n\ndef test_gpu_dependencies():\n if test_gpu is True and cudf is None:\n pytest.fail(\"cudf and/or cupy not available and DATASHADER_TEST_GPU=1\")\n\n\[email protected]('df', dfs)\ndef test_count(df):\n out = xr.DataArray(np.array([[5, 5], [5, 5]], dtype='i4'),\n coords=coords, dims=dims)\n assert_eq_xr(c.points(df, 'x', 'y', ds.count('i32')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.count('i64')), out)\n 
assert_eq_xr(c.points(df, 'x', 'y', ds.count()), out)\n out = xr.DataArray(np.array([[4, 5], [5, 5]], dtype='i4'),\n coords=coords, dims=dims)\n assert_eq_xr(c.points(df, 'x', 'y', ds.count('f32')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.count('f64')), out)\n\n\[email protected]('df', dfs)\ndef test_any(df):\n out = xr.DataArray(np.array([[True, True], [True, True]]),\n coords=coords, dims=dims)\n assert_eq_xr(c.points(df, 'x', 'y', ds.any('i64')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.any('f64')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.any()), out)\n out = xr.DataArray(np.array([[True, True], [True, False]]),\n coords=coords, dims=dims)\n assert_eq_xr(c.points(df, 'x', 'y', ds.any('empty_bin')), out)\n\n\[email protected]('df', dfs)\ndef test_sum(df):\n out = xr.DataArray(values(df.i32).reshape((2, 2, 5)).sum(axis=2, dtype='f8').T,\n coords=coords, dims=dims)\n assert_eq_xr(c.points(df, 'x', 'y', ds.sum('i32')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.sum('i64')), out)\n out = xr.DataArray(np.nansum(values(df.f64).reshape((2, 2, 5)), axis=2).T,\n coords=coords, dims=dims)\n assert_eq_xr(c.points(df, 'x', 'y', ds.sum('f32')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.sum('f64')), out)\n\n\[email protected]('df', dfs)\ndef test_min(df):\n out = xr.DataArray(values(df.i64).reshape((2, 2, 5)).min(axis=2).astype('f8').T,\n coords=coords, dims=dims)\n assert_eq_xr(c.points(df, 'x', 'y', ds.min('i32')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.min('i64')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.min('f32')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.min('f64')), out)\n\n\[email protected]('df', dfs)\ndef test_max(df):\n out = xr.DataArray(values(df.i64).reshape((2, 2, 5)).max(axis=2).astype('f8').T,\n coords=coords, dims=dims)\n assert_eq_xr(c.points(df, 'x', 'y', ds.max('i32')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.max('i64')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.max('f32')), out)\n 
assert_eq_xr(c.points(df, 'x', 'y', ds.max('f64')), out)\n\n\[email protected]('df', dfs)\ndef test_mean(df):\n out = xr.DataArray(values(df.i32).reshape((2, 2, 5)).mean(axis=2, dtype='f8').T,\n coords=coords, dims=dims)\n assert_eq_xr(c.points(df, 'x', 'y', ds.mean('i32')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.mean('i64')), out)\n out = xr.DataArray(np.nanmean(values(df.f64).reshape((2, 2, 5)), axis=2).T,\n coords=coords, dims=dims)\n assert_eq_xr(c.points(df, 'x', 'y', ds.mean('f32')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.mean('f64')), out)\n\n\[email protected]('df', [df_pd])\ndef test_var(df):\n out = xr.DataArray(values(df.i32).reshape((2, 2, 5)).var(axis=2, dtype='f8').T,\n coords=coords, dims=dims)\n assert_eq_xr(c.points(df, 'x', 'y', ds.var('i32')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.var('i64')), out)\n out = xr.DataArray(np.nanvar(values(df.f64).reshape((2, 2, 5)), axis=2).T,\n coords=coords, dims=dims)\n assert_eq_xr(c.points(df, 'x', 'y', ds.var('f32')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.var('f64')), out)\n\n\[email protected]('df', [df_pd])\ndef test_std(df):\n out = xr.DataArray(values(df.i32).reshape((2, 2, 5)).std(axis=2, dtype='f8').T,\n coords=coords, dims=dims)\n assert_eq_xr(c.points(df, 'x', 'y', ds.std('i32')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.std('i64')), out)\n out = xr.DataArray(np.nanstd(values(df.f64).reshape((2, 2, 5)), axis=2).T,\n coords=coords, dims=dims)\n assert_eq_xr(c.points(df, 'x', 'y', ds.std('f32')), out)\n assert_eq_xr(c.points(df, 'x', 'y', ds.std('f64')), out)\n\n\[email protected]('df', dfs)\ndef test_count_cat(df):\n sol = np.array([[[5, 0, 0, 0],\n [0, 0, 5, 0]],\n [[0, 5, 0, 0],\n [0, 0, 0, 5]]])\n out = xr.DataArray(\n sol,\n coords=OrderedDict(coords, cat=['a', 'b', 'c', 'd']),\n dims=(dims + ['cat']))\n agg = c.points(df, 'x', 'y', ds.count_cat('cat'))\n assert_eq_xr(agg, out)\n\[email protected]('df', dfs)\ndef test_categorical_count(df):\n sol = np.array([[[5, 
0, 0, 0],\n [0, 0, 5, 0]],\n [[0, 5, 0, 0],\n [0, 0, 0, 5]]])\n out = xr.DataArray(\n sol,\n coords=OrderedDict(coords, cat=['a', 'b', 'c', 'd']),\n dims=(dims + ['cat']))\n agg = c.points(df, 'x', 'y', ds.by('cat', ds.count('i32')))\n assert_eq_xr(agg, out)\n\[email protected]('df', dfs)\ndef test_categorical_sum(df):\n sol = np.array([[[ 10, nan, nan, nan],\n [nan, nan, 60, nan]],\n [[nan, 35, nan, nan],\n [nan, nan, nan, 85]]])\n out = xr.DataArray(\n sol,\n coords=OrderedDict(coords, cat=['a', 'b', 'c', 'd']),\n dims=(dims + ['cat']))\n agg = c.points(df, 'x', 'y', ds.by('cat', ds.sum('i32')))\n assert_eq_xr(agg, out)\n\n agg = c.points(df, 'x', 'y', ds.by('cat', ds.sum('i64')))\n assert_eq_xr(agg, out)\n\n sol = np.array([[[8.0, nan, nan, nan],\n [nan, nan, 60.0, nan]],\n [[nan, 35.0, nan, nan],\n [nan, nan, nan, 85.0]]])\n out = xr.DataArray(\n sol,\n coords=OrderedDict(coords, cat=['a', 'b', 'c', 'd']),\n dims=(dims + ['cat']))\n agg = c.points(df, 'x', 'y', ds.by('cat', ds.sum('f32')))\n assert_eq_xr(agg, out)\n\n agg = c.points(df, 'x', 'y', ds.by('cat', ds.sum('f64')))\n assert_eq_xr(agg, out)\n\[email protected]('df', dfs)\ndef test_categorical_max(df):\n sol = np.array([[[ 4, nan, nan, nan],\n [nan, nan, 14, nan]],\n [[nan, 9, nan, nan],\n [nan, nan, nan, 19]]])\n out = xr.DataArray(\n sol,\n coords=OrderedDict(coords, cat=['a', 'b', 'c', 'd']),\n dims=(dims + ['cat']))\n agg = c.points(df, 'x', 'y', ds.by('cat', ds.max('i32')))\n assert_eq_xr(agg, out)\n\[email protected]('df', dfs)\ndef test_categorical_mean(df):\n sol = np.array([[[ 2, nan, nan, nan],\n [nan, nan, 12, nan]],\n [[nan, 7, nan, nan],\n [nan, nan, nan, 17]]])\n out = xr.DataArray(\n sol,\n coords=OrderedDict(coords, cat=['a', 'b', 'c', 'd']),\n dims=(dims + ['cat']))\n\n agg = c.points(df, 'x', 'y', ds.by('cat', ds.mean('f32')))\n assert_eq_xr(agg, out)\n\n agg = c.points(df, 'x', 'y', ds.by('cat', ds.mean('f64')))\n assert_eq_xr(agg, out)\n\[email protected]('df', dfs)\ndef 
test_categorical_var(df):\n if cudf and isinstance(df, cudf.DataFrame):\n pytest.skip(\n \"The 'var' reduction is yet supported on the GPU\"\n )\n\n sol = np.array([[[ 2.5, nan, nan, nan],\n [ nan, nan, 2., nan]],\n [[ nan, 2., nan, nan],\n [ nan, nan, nan, 2.]]])\n out = xr.DataArray(\n sol,\n coords=OrderedDict(coords, cat=['a', 'b', 'c', 'd']),\n dims=(dims + ['cat']))\n\n agg = c.points(df, 'x', 'y', ds.by('cat', ds.var('f32')))\n assert_eq_xr(agg, out, True)\n\n agg = c.points(df, 'x', 'y', ds.by('cat', ds.var('f64')))\n assert_eq_xr(agg, out, True)\n\[email protected]('df', dfs)\ndef test_categorical_std(df):\n if cudf and isinstance(df, cudf.DataFrame):\n pytest.skip(\n \"The 'std' reduction is yet supported on the GPU\"\n )\n\n sol = np.sqrt(np.array([\n [[ 2.5, nan, nan, nan],\n [ nan, nan, 2., nan]],\n [[ nan, 2., nan, nan],\n [ nan, nan, nan, 2.]]])\n )\n out = xr.DataArray(\n sol,\n coords=OrderedDict(coords, cat=['a', 'b', 'c', 'd']),\n dims=(dims + ['cat']))\n\n agg = c.points(df, 'x', 'y', ds.by('cat', ds.std('f32')))\n assert_eq_xr(agg, out, True)\n\n agg = c.points(df, 'x', 'y', ds.by('cat', ds.std('f64')))\n assert_eq_xr(agg, out, True)\n\[email protected]('df', dfs)\ndef test_multiple_aggregates(df):\n agg = c.points(df, 'x', 'y',\n ds.summary(f64_mean=ds.mean('f64'),\n i32_sum=ds.sum('i32'),\n i32_count=ds.count('i32')))\n\n f = lambda x: xr.DataArray(x, coords=coords, dims=dims)\n assert_eq_xr(agg.f64_mean, f(np.nanmean(values(df.f64).reshape((2, 2, 5)), axis=2).T))\n assert_eq_xr(agg.i32_sum, f(values(df.i32).reshape((2, 2, 5)).sum(axis=2, dtype='f8').T))\n assert_eq_xr(agg.i32_count, f(np.array([[5, 5], [5, 5]], dtype='i4')))\n\n\[email protected]('DataFrame', DataFrames)\ndef test_auto_range_points(DataFrame):\n n = 10\n data = np.arange(n, dtype='i4')\n df = DataFrame({'time': np.arange(n),\n 'x': data,\n 'y': data})\n\n cvs = ds.Canvas(plot_width=n, plot_height=n)\n agg = cvs.points(df, 'x', 'y', ds.count('time'))\n sol = np.zeros((n, n), 
int)\n np.fill_diagonal(sol, 1)\n assert_eq_ndarray(agg.data, sol)\n\n cvs = ds.Canvas(plot_width=n+1, plot_height=n+1)\n agg = cvs.points(df, 'x', 'y', ds.count('time'))\n sol = np.zeros((n+1, n+1), int)\n np.fill_diagonal(sol, 1)\n sol[5, 5] = 0\n assert_eq_ndarray(agg.data, sol)\n\n n = 4\n data = np.arange(n, dtype='i4')\n df = DataFrame({'time': np.arange(n),\n 'x': data,\n 'y': data})\n\n cvs = ds.Canvas(plot_width=2*n, plot_height=2*n)\n agg = cvs.points(df, 'x', 'y', ds.count('time'))\n sol = np.zeros((2*n, 2*n), int)\n np.fill_diagonal(sol, 1)\n sol[np.array([tuple(range(1, 4, 2))])] = 0\n sol[np.array([tuple(range(4, 8, 2))])] = 0\n assert_eq_ndarray(agg.data, sol)\n\n cvs = ds.Canvas(plot_width=2*n+1, plot_height=2*n+1)\n agg = cvs.points(df, 'x', 'y', ds.count('time'))\n sol = np.zeros((2*n+1, 2*n+1), int)\n sol[0, 0] = 1\n sol[3, 3] = 1\n sol[6, 6] = 1\n sol[8, 8] = 1\n assert_eq_ndarray(agg.data, sol)\n\n\ndef test_uniform_points():\n n = 101\n df = pd.DataFrame({'time': np.ones(2*n, dtype='i4'),\n 'x': np.concatenate((np.arange(n, dtype='f8'),\n np.arange(n, dtype='f8'))),\n 'y': np.concatenate(([0.] * n, [1.] 
* n))})\n\n cvs = ds.Canvas(plot_width=10, plot_height=2, y_range=(0, 1))\n agg = cvs.points(df, 'x', 'y', ds.count('time'))\n sol = np.array([[10] * 9 + [11], [10] * 9 + [11]], dtype='i4')\n assert_eq_ndarray(agg.data, sol)\n\n\[email protected]('high', [9, 10, 99, 100])\[email protected]('low', [0])\ndef test_uniform_diagonal_points(low, high):\n bounds = (low, high)\n x_range, y_range = bounds, bounds\n\n width = x_range[1] - x_range[0]\n height = y_range[1] - y_range[0]\n n = width * height\n df = pd.DataFrame({'time': np.ones(n, dtype='i4'),\n 'x': np.array([np.arange(*x_range, dtype='f8')] * width).flatten(),\n 'y': np.array([np.arange(*y_range, dtype='f8')] * height).flatten()})\n\n cvs = ds.Canvas(plot_width=2, plot_height=2, x_range=x_range, y_range=y_range)\n agg = cvs.points(df, 'x', 'y', ds.count('time'))\n\n diagonal = agg.data.diagonal(0)\n assert sum(diagonal) == n\n assert abs(bounds[1] - bounds[0]) % 2 == abs(diagonal[1] / high - diagonal[0] / high)\n\n\[email protected]('df', dfs)\ndef test_log_axis_points(df):\n axis = ds.core.LogAxis()\n logcoords = axis.compute_index(axis.compute_scale_and_translate((1, 10), 2), 2)\n\n axis = ds.core.LinearAxis()\n lincoords = axis.compute_index(axis.compute_scale_and_translate((0, 1), 2), 2)\n\n sol = np.array([[5, 5], [5, 5]], dtype='i4')\n out = xr.DataArray(sol, coords=[lincoords, logcoords],\n dims=['y', 'log_x'])\n assert_eq_xr(c_logx.points(df, 'log_x', 'y', ds.count('i32')), out)\n out = xr.DataArray(sol, coords=[logcoords, lincoords],\n dims=['log_y', 'x'])\n assert_eq_xr(c_logy.points(df, 'x', 'log_y', ds.count('i32')), out)\n out = xr.DataArray(sol, coords=[logcoords, logcoords],\n dims=['log_y', 'log_x'])\n assert_eq_xr(c_logxy.points(df, 'log_x', 'log_y', ds.count('i32')), out)\n\n\[email protected](not sp, reason=\"spatialpandas not installed\")\ndef test_points_geometry_point():\n axis = ds.core.LinearAxis()\n lincoords = axis.compute_index(axis.compute_scale_and_translate((0., 2.), 3), 3)\n\n df 
= sp.GeoDataFrame({\n 'geom': pd.array(\n [[0, 0], [0, 1], [1, 1], [0, 2], [1, 2], [2, 2]], dtype='Point[float64]'),\n 'v': [1, 2, 2, 3, 3, 3]\n })\n\n cvs = ds.Canvas(plot_width=3, plot_height=3)\n agg = cvs.points(df, geometry='geom', agg=ds.sum('v'))\n sol = np.array([[1, nan, nan],\n [2, 2, nan],\n [3, 3, 3]], dtype='float64')\n out = xr.DataArray(sol, coords=[lincoords, lincoords],\n dims=['y', 'x'])\n assert_eq_xr(agg, out)\n\n # Aggregation should not have triggered calculation of spatial index\n assert df.geom.array._sindex is None\n\n # Generate spatial index and check that we get the same result\n df.geom.array.sindex\n agg = cvs.points(df, geometry='geom', agg=ds.sum('v'))\n assert_eq_xr(agg, out)\n\n\[email protected](not sp, reason=\"spatialpandas not installed\")\ndef test_points_geometry_multipoint():\n axis = ds.core.LinearAxis()\n lincoords = axis.compute_index(axis.compute_scale_and_translate((0., 2.), 3), 3)\n\n df = sp.GeoDataFrame({\n 'geom': pd.array(\n [[0, 0], [0, 1, 1, 1], [0, 2, 1, 2, 2, 2]], dtype='MultiPoint[float64]'),\n 'v': [1, 2, 3]\n })\n\n cvs = ds.Canvas(plot_width=3, plot_height=3)\n agg = cvs.points(df, geometry='geom', agg=ds.sum('v'))\n sol = np.array([[1, nan, nan],\n [2, 2, nan],\n [3, 3, 3]], dtype='float64')\n out = xr.DataArray(sol, coords=[lincoords, lincoords],\n dims=['y', 'x'])\n assert_eq_xr(agg, out)\n\n # Aggregation should not have triggered calculation of spatial index\n assert df.geom.array._sindex is None\n\n # Generate spatial index and check that we get the same result\n df.geom.array.sindex\n agg = cvs.points(df, geometry='geom', agg=ds.sum('v'))\n assert_eq_xr(agg, out)\n\n\ndef test_line():\n axis = ds.core.LinearAxis()\n lincoords = axis.compute_index(axis.compute_scale_and_translate((-3., 3.), 7), 7)\n\n df = pd.DataFrame({'x': [4, 0, -4, -3, -2, -1.9, 0, 10, 10, 0, 4],\n 'y': [0, -4, 0, 1, 2, 2.1, 4, 20, 30, 4, 0]})\n cvs = ds.Canvas(plot_width=7, plot_height=7,\n x_range=(-3, 3), y_range=(-3, 3))\n agg 
= cvs.line(df, 'x', 'y', ds.count())\n sol = np.array([[0, 0, 1, 0, 1, 0, 0],\n [0, 1, 0, 0, 0, 1, 0],\n [1, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 1],\n [0, 2, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 0, 0]], dtype='i4')\n out = xr.DataArray(sol, coords=[lincoords, lincoords],\n dims=['y', 'x'])\n assert_eq_xr(agg, out)\n\n\ndef test_points_on_edge():\n df = pd.DataFrame(dict(x=[0, 0.5, 1.1, 1.5, 2.2, 3, 3, 0],\n y=[0, 0, 0, 0, 0, 0, 3, 3]))\n\n canvas = ds.Canvas(plot_width=3, plot_height=3,\n x_range=(0, 3), y_range=(0, 3))\n\n agg = canvas.points(df, 'x', 'y', agg=ds.count())\n\n sol = np.array([[2, 2, 2],\n [0, 0, 0],\n [1, 0, 1]], dtype='int32')\n out = xr.DataArray(sol,\n coords=[('x', [0.5, 1.5, 2.5]),\n ('y', [0.5, 1.5, 2.5])],\n dims=['y', 'x'])\n\n assert_eq_xr(agg, out)\n\n\ndef test_lines_on_edge():\n df = pd.DataFrame(dict(x=[0, 3, 3, 0],\n y=[0, 0, 3, 3]))\n\n canvas = ds.Canvas(plot_width=3, plot_height=3,\n x_range=(0, 3), y_range=(0, 3))\n\n agg = canvas.line(df, 'x', 'y', agg=ds.count())\n\n sol = np.array([[1, 1, 1],\n [0, 0, 1],\n [1, 1, 1]], dtype='int32')\n out = xr.DataArray(sol,\n coords=[('x', [0.5, 1.5, 2.5]),\n ('y', [0.5, 1.5, 2.5])],\n dims=['y', 'x'])\n\n assert_eq_xr(agg, out)\n\n\[email protected]('df', dfs_pd)\ndef test_log_axis_line(df):\n axis = ds.core.LogAxis()\n logcoords = axis.compute_index(axis.compute_scale_and_translate((1, 10), 2), 2)\n\n axis = ds.core.LinearAxis()\n lincoords = axis.compute_index(axis.compute_scale_and_translate((0, 1), 2), 2)\n\n sol = np.array([[4, 5], [5, 5]], dtype='i4')\n out = xr.DataArray(sol, coords=[lincoords, logcoords],\n dims=['y', 'log_x'])\n assert_eq_xr(c_logx.line(df, 'log_x', 'y', ds.count('i32')), out)\n out = xr.DataArray(sol, coords=[logcoords, lincoords],\n dims=['log_y', 'x'])\n assert_eq_xr(c_logy.line(df, 'x', 'log_y', ds.count('i32')), out)\n out = xr.DataArray(sol, coords=[logcoords, logcoords],\n dims=['log_y', 'log_x'])\n assert_eq_xr(c_logxy.line(df, 
'log_x', 'log_y', ds.count('i32')), out)\n\n\ndef test_subpixel_line_start():\n cvs = ds.Canvas(plot_width=5, plot_height=5, x_range=(1, 3), y_range=(0, 1))\n\n df = pd.DataFrame(dict(x=[1, 2, 3], y0=[0.0, 0.0, 0.0], y1=[0.0, 0.08, 0.04]))\n agg = cvs.line(df, 'x', ['y0', 'y1'], agg=ds.count(), axis=1)\n xcoords = axis.compute_index(axis.compute_scale_and_translate((1., 3), 5), 5)\n ycoords = axis.compute_index(axis.compute_scale_and_translate((0, 1), 5), 5)\n sol = np.array([\n [1, 0, 1, 0, 1],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]\n ], dtype='i4')\n out = xr.DataArray(\n sol, coords=[ycoords, xcoords], dims=['y', 'x']\n )\n assert_eq_xr(agg, out)\n\n\ndef test_auto_range_line():\n axis = ds.core.LinearAxis()\n lincoords = axis.compute_index(axis.compute_scale_and_translate((-10., 10.), 5), 5)\n\n df = pd.DataFrame({'x': [-10, 0, 10, 0, -10],\n 'y': [ 0, 10, 0, -10, 0]})\n cvs = ds.Canvas(plot_width=5, plot_height=5)\n agg = cvs.line(df, 'x', 'y', ds.count())\n sol = np.array([[0, 0, 1, 0, 0],\n [0, 1, 0, 1, 0],\n [2, 0, 0, 0, 1],\n [0, 1, 0, 1, 0],\n [0, 0, 1, 0, 0]], dtype='i4')\n out = xr.DataArray(sol, coords=[lincoords, lincoords],\n dims=['y', 'x'])\n assert_eq_xr(agg, out)\n\n\[email protected](not sp, reason=\"spatialpandas not installed\")\[email protected]('geom_data,geom_type', [\n ([0, 0, 1, 1, 2, 0, 0, 0], 'line'),\n ([[0, 0, 1, 1, 2, 0, 0, 0]], 'multiline'),\n ([0, 0, 1, 1, 2, 0, 0, 0], 'ring'),\n ([[0, 0, 1, 1, 2, 0, 0, 0]], 'polygon'),\n ([[[0, 0, 1, 1, 2, 0, 0, 0]]], 'multipolygon'),\n])\ndef test_closed_ring_line(geom_data, geom_type):\n gdf = sp.GeoDataFrame(\n {'geometry': sp.GeoSeries([geom_data], dtype=geom_type)}\n )\n cvs = ds.Canvas(plot_width=4, plot_height=4)\n agg = cvs.line(gdf, geometry='geometry', agg=ds.count())\n\n coords_x = axis.compute_index(axis.compute_scale_and_translate((0., 2), 4), 4)\n coords_y = axis.compute_index(axis.compute_scale_and_translate((0., 1), 4), 4)\n sol = np.array([\n 
[1, 1, 1, 1],\n [0, 1, 0, 1],\n [0, 1, 1, 0],\n [0, 0, 1, 0],\n ])\n out = xr.DataArray(sol, coords=[coords_y, coords_x], dims=['y', 'x'])\n\n if geom_type.endswith(\"line\"):\n # Closed rings represented as line/multiLine arrays will double count the\n # starting pixel\n out[0, 0] = 2\n\n assert_eq_xr(agg, out)\n\n\ndef test_trimesh_no_double_edge():\n \"\"\"Assert that when two triangles share an edge that would normally get\n double-drawn, the edge is only drawn for the rightmost (or bottommost)\n triangle.\n \"\"\"\n # Test left/right edge shared\n verts = pd.DataFrame({'x': [4, 1, 5, 5, 5, 4],\n 'y': [4, 5, 5, 5, 4, 4]})\n tris = pd.DataFrame({'v0': [0, 3], 'v1': [1, 4], 'v2': [2, 5], 'val': [1, 2]})\n # Plot dims and x/y ranges need to be set such that the edge is drawn twice:\n cvs = ds.Canvas(plot_width=20, plot_height=20, x_range=(0, 5), y_range=(0, 5))\n agg = cvs.trimesh(verts, tris)\n sol = np.array([\n [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ], dtype='i4')\n np.testing.assert_array_equal(np.flipud(agg.fillna(0).astype('i4').values)[:5], sol)\n\n # Test top/bottom edge shared\n verts = pd.DataFrame({'x': [3, 3, 1, 1, 3, 3],\n 'y': [4, 1, 4, 4, 5, 4]})\n tris = pd.DataFrame({'v0': [0, 3], 'v1': [1, 4], 'v2': [2, 5], 'val': [3, 1]})\n # Plot dims and x/y ranges need to be set such that the edge is drawn twice:\n cvs = ds.Canvas(plot_width=22, plot_height=22, x_range=(0, 10), y_range=(0, 10))\n agg = cvs.trimesh(verts, tris)\n sol = np.array([\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0],\n [0, 0, 0, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ], dtype='i4')\n np.testing.assert_array_equal(np.flipud(agg.fillna(0).astype('i4').values)[10:20, :20], sol)\n\ndef test_trimesh_interp():\n \"\"\"Assert that triangles are interpolated when vertex values are provided.\n \"\"\"\n verts = pd.DataFrame({'x': [0, 5, 10],\n 'y': [0, 10, 0]})\n tris = pd.DataFrame({'v0': [0], 'v1': [1], 'v2': [2],\n 'val': [1]})\n cvs = ds.Canvas(plot_width=10, plot_height=10, x_range=(0, 10), y_range=(0, 10))\n agg = cvs.trimesh(verts, tris)\n sol = np.array([\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 1, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ], dtype='i4')\n np.testing.assert_array_equal(np.flipud(agg.fillna(0).astype('i4').values), sol)\n\n verts = pd.DataFrame({'x': [0, 5, 10],\n 'y': [0, 10, 0],\n 'z': [1, 5, 3]})\n cvs = ds.Canvas(plot_width=10, plot_height=10, x_range=(0, 10), y_range=(0, 10))\n agg = cvs.trimesh(verts, tris)\n sol = np.array([\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 4, 0, 0, 0, 0],\n [0, 0, 0, 0, 4, 4, 0, 0, 0, 0],\n [0, 0, 0, 0, 3, 4, 4, 0, 0, 0],\n [0, 0, 0, 3, 3, 3, 3, 0, 0, 0],\n [0, 0, 0, 2, 3, 3, 3, 3, 0, 0],\n [0, 0, 2, 2, 2, 3, 3, 3, 0, 0],\n [0, 0, 2, 2, 2, 2, 2, 3, 3, 0],\n [0, 1, 1, 1, 2, 2, 2, 2, 3, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ], dtype='i4')\n np.testing.assert_array_equal(np.flipud(agg.fillna(0).astype('i4').values), sol)\n\ndef 
test_trimesh_simplex_weights():\n \"\"\"Assert that weighting the simplices works as expected.\n \"\"\"\n # val is float\n verts = pd.DataFrame({'x': [4, 1, 5, 5, 5, 4],\n 'y': [4, 5, 5, 5, 4, 4]})\n tris = pd.DataFrame({'v0': [0, 3], 'v1': [1, 4], 'v2': [2, 5], 'val': [2., 4.]}) # floats\n cvs = ds.Canvas(plot_width=20, plot_height=20, x_range=(0, 5), y_range=(0, 5))\n agg = cvs.trimesh(verts, tris)\n sol = np.array([\n [0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 4, 4, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ], dtype='i4')\n np.testing.assert_array_equal(np.flipud(agg.fillna(0).astype('i4').values)[:5], sol)\n\n # val is int\n verts = pd.DataFrame({'x': [4, 1, 5, 5, 5, 4],\n 'y': [4, 5, 5, 5, 4, 4]})\n tris = pd.DataFrame({'v0': [0, 3], 'v1': [1, 4], 'v2': [2, 5], 'val': [3, 4]})\n cvs = ds.Canvas(plot_width=20, plot_height=20, x_range=(0, 5), y_range=(0, 5))\n agg = cvs.trimesh(verts, tris)\n sol = np.array([\n [0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 4, 4, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ], dtype='i4')\n np.testing.assert_array_equal(np.flipud(agg.fillna(0).astype('i4').values)[:5], sol)\n\ndef test_trimesh_vertex_weights():\n \"\"\"Assert that weighting the vertices works as expected.\n \"\"\"\n # z is float\n verts = pd.DataFrame({'x': [4, 1, 5, 5, 5, 4],\n 'y': [4, 5, 5, 5, 4, 4],\n 'z': [1., 1., 1., 2., 2., 2.]}, columns=['x', 'y', 'z'])\n tris = pd.DataFrame({'v0': [0, 3], 'v1': [1, 4], 'v2': [2, 5]})\n cvs = ds.Canvas(plot_width=20, plot_height=20, x_range=(0, 5), y_range=(0, 5))\n agg = 
cvs.trimesh(verts, tris)\n sol = np.array([\n [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ], dtype='f8')\n np.testing.assert_array_equal(np.flipud(agg.fillna(0.).values)[:5], sol)\n\n # val is int\n verts = pd.DataFrame({'x': [4, 1, 5, 5, 5, 4],\n 'y': [4, 5, 5, 5, 4, 4],\n 'val': [2, 2, 2, 3, 3, 3]}, columns=['x', 'y', 'val'])\n tris = pd.DataFrame({'v0': [0, 3], 'v1': [1, 4], 'v2': [2, 5]})\n cvs = ds.Canvas(plot_width=20, plot_height=20, x_range=(0, 5), y_range=(0, 5))\n agg = cvs.trimesh(verts, tris)\n sol = np.array([\n [0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 3, 3, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ], dtype='i4')\n np.testing.assert_array_equal(np.flipud(agg.fillna(0).astype('i4').values)[:5], sol)\n\ndef test_trimesh_winding_detect():\n \"\"\"Assert that CCW windings get converted to CW.\n \"\"\"\n # val is int, winding is CCW\n verts = pd.DataFrame({'x': [4, 1, 5, 5, 5, 4],\n 'y': [4, 5, 5, 5, 4, 4]})\n tris = pd.DataFrame({'v0': [0, 3], 'v1': [2, 5], 'v2': [1, 4], 'val': [3, 4]})\n cvs = ds.Canvas(plot_width=20, plot_height=20, x_range=(0, 5), y_range=(0, 5))\n agg = cvs.trimesh(verts, tris)\n sol = np.array([\n [0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 4, 4, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ], dtype='i4')\n 
np.testing.assert_array_equal(np.flipud(agg.fillna(0).astype('i4').values)[:5], sol)\n\n # val is float, winding is CCW\n verts = pd.DataFrame({'x': [4, 1, 5, 5, 5, 4],\n 'y': [4, 5, 5, 5, 4, 4]})\n tris = pd.DataFrame({'v0': [0, 3], 'v1': [2, 5], 'v2': [1, 4], 'val': [3., 4.]}) # floats\n cvs = ds.Canvas(plot_width=20, plot_height=20, x_range=(0, 5), y_range=(0, 5))\n agg = cvs.trimesh(verts, tris)\n sol = np.array([\n [0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 4, 4, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ], dtype='i4')\n np.testing.assert_array_equal(np.flipud(agg.fillna(0).astype('i4').values)[:5], sol)\n\ndef test_trimesh_mesharg():\n \"\"\"Assert that the ``mesh`` argument results in the same rasterization,\n despite the ``vertices`` and ``simplices`` arguments changing.\n \"\"\"\n # z is float\n verts = pd.DataFrame({'x': [4, 1, 5, 5, 5, 4],\n 'y': [4, 5, 5, 5, 4, 4],\n 'z': [1., 1., 1., 2., 2., 2.]}, columns=['x', 'y', 'z'])\n tris = pd.DataFrame({'v0': [0, 3], 'v1': [1, 4], 'v2': [2, 5]})\n cvs = ds.Canvas(plot_width=20, plot_height=20, x_range=(0, 5), y_range=(0, 5))\n agg = cvs.trimesh(verts, tris)\n sol = np.array([\n [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ], dtype='f8')\n np.testing.assert_array_equal(np.flipud(agg.fillna(0.).values)[:5], sol)\n\n mesh = ds.utils.mesh(verts, tris)\n cvs = ds.Canvas(plot_width=20, plot_height=20, x_range=(0, 5), y_range=(0, 5))\n agg = cvs.trimesh(verts[:1], tris[:1], mesh=mesh)\n 
np.testing.assert_array_equal(np.flipud(agg.fillna(0.).values)[:5], sol)\n\ndef test_trimesh_agg_api():\n \"\"\"Assert that the trimesh aggregation API properly handles weights on the simplices.\"\"\"\n pts = pd.DataFrame({'x': [1, 3, 4, 3, 3],\n 'y': [2, 1, 2, 1, 4]},\n columns=['x', 'y'])\n tris = pd.DataFrame({'n1': [4, 1],\n 'n2': [1, 4],\n 'n3': [2, 0],\n 'weight': [0.83231525, 1.3053126]},\n columns=['n1', 'n2', 'n3', 'weight'])\n cvs = ds.Canvas(x_range=(0, 10), y_range=(0, 10))\n agg = cvs.trimesh(pts, tris, agg=ds.mean('weight'))\n assert agg.shape == (600, 600)\n\n\ndef test_bug_570():\n # See https://github.com/holoviz/datashader/issues/570\n df = pd.DataFrame({\n 'Time': [1456353642.2053893, 1456353642.2917893],\n 'data': [-59.4948743433377, 506.4847376716022],\n }, columns=['Time', 'data'])\n\n x_range = (1456323293.9859753, 1456374687.0009754)\n y_range = (-228.56721300380943, 460.4042291124646)\n\n cvs = ds.Canvas(x_range=x_range, y_range=y_range,\n plot_height=300, plot_width=1000)\n agg = cvs.line(df, 'Time', 'data', agg=ds.count())\n\n # Check location of line\n yi, xi = np.where(agg.values == 1)\n assert np.array_equal(yi, np.arange(73, 300))\n assert np.array_equal(xi, np.array([590] * len(yi)))\n\n\n# # Line tests\nline_manual_range_params = [\n # axis1 none constant\n ([{\n 'x0': [4, -4],\n 'x1': [0, 0],\n 'x2': [-4, 4],\n 'y0': [0, 0],\n 'y1': [-4, 4],\n 'y2': [0, 0]\n }], dict(x=['x0', 'x1', 'x2'], y=['y0', 'y1', 'y2'], axis=1)),\n\n # axis1 x constant\n ([{\n 'y0': [0, 0],\n 'y1': [-4, 4],\n 'y2': [0, 0]\n }], dict(x=np.array([-4, 0, 4]), y=['y0', 'y1', 'y2'], axis=1)),\n\n # axis1 y constant\n ([{\n 'x0': [0, 0],\n 'x1': [-4, 4],\n 'x2': [0, 0]\n }], dict(x=['x0', 'x1', 'x2'], y=np.array([-4, 0, 4]), axis=1)),\n\n # axis0 single\n ([{\n 'x': [0, -4, 0, np.nan, 0, 4, 0],\n 'y': [-4, 0, 4, np.nan, -4, 0, 4],\n }], dict(x='x', y='y', axis=0)),\n\n # axis0 multi\n ([{\n 'x0': [0, -4, 0],\n 'x1': [0, 4, 0],\n 'y0': [-4, 0, 4],\n 'y1': [-4, 0, 
4],\n }], dict(x=['x0', 'x1'], y=['y0', 'y1'], axis=0)),\n\n # axis0 multi with string\n ([{\n 'x0': [0, -4, 0],\n 'x1': [0, 4, 0],\n 'y0': [-4, 0, 4],\n 'y1': [-4, 0, 4],\n }], dict(x=['x0', 'x1'], y='y0', axis=0)),\n\n # axis1 ragged arrays\n ([{\n 'x': pd.array([[4, 0], [0, -4, 0, 4]], dtype='Ragged[float32]'),\n 'y': pd.array([[0, -4], [-4, 0, 4, 0]], dtype='Ragged[float32]')\n }], dict(x='x', y='y', axis=1)),\n]\nif sp:\n line_manual_range_params.append(\n # geometry\n ([{\n 'geom': pd.array(\n [[4, 0, 0, -4], [0, -4, -4, 0, 0, 4, 4, 0]], dtype='Line[float32]'\n ),\n }], dict(geometry='geom'))\n )\[email protected]('DataFrame', DataFrames)\[email protected]('df_args,cvs_kwargs', line_manual_range_params)\ndef test_line_manual_range(DataFrame, df_args, cvs_kwargs):\n if cudf and DataFrame is cudf_DataFrame:\n if (isinstance(getattr(df_args[0].get('x', []), 'dtype', ''), RaggedDtype) or\n sp and isinstance(\n getattr(df_args[0].get('geom', []), 'dtype', ''), LineDtype\n )\n ):\n pytest.skip(\"cudf DataFrames do not support extension types\")\n\n df = DataFrame(geo='geometry' in cvs_kwargs, *df_args)\n\n axis = ds.core.LinearAxis()\n lincoords = axis.compute_index(\n axis.compute_scale_and_translate((-3., 3.), 7), 7)\n\n cvs = ds.Canvas(plot_width=7, plot_height=7,\n x_range=(-3, 3), y_range=(-3, 3))\n\n agg = cvs.line(df, agg=ds.count(), **cvs_kwargs)\n\n sol = np.array([[0, 0, 1, 0, 1, 0, 0],\n [0, 1, 0, 0, 0, 1, 0],\n [1, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 1],\n [0, 1, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 0, 0]], dtype='i4')\n\n out = xr.DataArray(sol, coords=[lincoords, lincoords],\n dims=['y', 'x'])\n assert_eq_xr(agg, out)\n\n\nline_autorange_params = [\n # axis1 none constant\n ([{\n 'x0': [0, 0],\n 'x1': [-4, 4],\n 'x2': [0, 0],\n 'y0': [-4, -4],\n 'y1': [0, 0],\n 'y2': [4, 4]\n }], dict(x=['x0', 'x1', 'x2'], y=['y0', 'y1', 'y2'], axis=1)),\n\n # axis1 y constant\n ([{\n 'x0': [0, 0],\n 'x1': [-4, 4],\n 'x2': [0, 0]\n }], 
dict(x=['x0', 'x1', 'x2'], y=np.array([-4, 0, 4]), axis=1)),\n\n # axis0 single\n ([{\n 'x': [0, -4, 0, np.nan, 0, 4, 0],\n 'y': [-4, 0, 4, np.nan, -4, 0, 4],\n }], dict(x='x', y='y', axis=0)),\n\n # axis0 multi\n ([{\n 'x0': [0, -4, 0],\n 'x1': [0, 4, 0],\n 'y0': [-4, 0, 4],\n 'y1': [-4, 0, 4],\n }], dict(x=['x0', 'x1'], y=['y0', 'y1'], axis=0)),\n\n # axis0 multi with string\n ([{\n 'x0': [0, -4, 0],\n 'x1': [0, 4, 0],\n 'y0': [-4, 0, 4],\n 'y1': [-4, 0, 4],\n }], dict(x=['x0', 'x1'], y='y0', axis=0)),\n\n # axis1 ragged arrays\n ([{\n 'x': pd.array([[0, -4, 0], [0, 4, 0]], dtype='Ragged[float32]'),\n 'y': pd.array([[-4, 0, 4], [-4, 0, 4]], dtype='Ragged[float32]')\n }], dict(x='x', y='y', axis=1)),\n]\nif sp:\n line_autorange_params.append(\n # geometry\n ([{\n 'geom': pd.array(\n [[0, -4, -4, 0, 0, 4], [0, -4, 4, 0, 0, 4]], dtype='Line[float32]'\n ),\n }], dict(geometry='geom'))\n )\[email protected]('DataFrame', DataFrames)\[email protected]('df_args,cvs_kwargs', line_autorange_params)\ndef test_line_autorange(DataFrame, df_args, cvs_kwargs):\n if cudf and DataFrame is cudf_DataFrame:\n if (isinstance(getattr(df_args[0].get('x', []), 'dtype', ''), RaggedDtype) or\n sp and isinstance(\n getattr(df_args[0].get('geom', []), 'dtype', ''), LineDtype\n )\n ):\n pytest.skip(\"cudf DataFrames do not support extension types\")\n\n df = DataFrame(geo='geometry' in cvs_kwargs, *df_args)\n\n axis = ds.core.LinearAxis()\n lincoords = axis.compute_index(\n axis.compute_scale_and_translate((-4., 4.), 9), 9)\n\n cvs = ds.Canvas(plot_width=9, plot_height=9)\n\n agg = cvs.line(df, agg=ds.count(), **cvs_kwargs)\n\n sol = np.array([[0, 0, 0, 0, 2, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 1, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 1, 0],\n [1, 0, 0, 0, 0, 0, 0, 0, 1],\n [0, 1, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 2, 0, 0, 0, 0]], dtype='i4')\n\n out = xr.DataArray(sol, coords=[lincoords, lincoords],\n 
dims=['y', 'x'])\n assert_eq_xr(agg, out)\n\n\[email protected]('DataFrame', DataFrames)\ndef test_line_autorange_axis1_x_constant(DataFrame):\n axis = ds.core.LinearAxis()\n lincoords = axis.compute_index(\n axis.compute_scale_and_translate((-4., 4.), 9), 9)\n\n xs = np.array([-4, 0, 4])\n df = DataFrame({\n 'y0': [0, 0],\n 'y1': [-4, 4],\n 'y2': [0, 0]\n })\n\n cvs = ds.Canvas(plot_width=9, plot_height=9)\n\n agg = cvs.line(df,\n xs,\n ['y0', 'y1', 'y2'],\n ds.count(),\n axis=1)\n\n sol = np.array([[0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 1, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 1, 0],\n [2, 0, 0, 0, 0, 0, 0, 0, 2],\n [0, 1, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0]], dtype='i4')\n\n out = xr.DataArray(sol, coords=[lincoords, lincoords],\n dims=['y', 'x'])\n assert_eq_xr(agg, out)\n\n\n# Sum aggregate\[email protected]('DataFrame', DataFrames)\ndef test_line_agg_sum_axis1_none_constant(DataFrame):\n axis = ds.core.LinearAxis()\n lincoords = axis.compute_index(axis.compute_scale_and_translate((-3., 3.), 7), 7)\n\n df = DataFrame({\n 'x0': [4, -4],\n 'x1': [0, 0],\n 'x2': [-4, 4],\n 'y0': [0, 0],\n 'y1': [-4, 4],\n 'y2': [0, 0],\n 'v': [7, 9]\n })\n\n cvs = ds.Canvas(plot_width=7, plot_height=7,\n x_range=(-3, 3), y_range=(-3, 3))\n\n agg = cvs.line(df,\n ['x0', 'x1', 'x2'],\n ['y0', 'y1', 'y2'],\n ds.sum('v'),\n axis=1)\n nan = np.nan\n sol = np.array([[nan, nan, 7, nan, 7, nan, nan],\n [nan, 7, nan, nan, nan, 7, nan],\n [7, nan, nan, nan, nan, nan, 7],\n [nan, nan, nan, nan, nan, nan, nan],\n [9, nan, nan, nan, nan, nan, 9],\n [nan, 9, nan, nan, nan, 9, nan],\n [nan, nan, 9, nan, 9, nan, nan]], dtype='float32')\n\n out = xr.DataArray(sol, coords=[lincoords, lincoords],\n dims=['y', 'x'])\n assert_eq_xr(agg, out)\n\n\ndef test_line_autorange_axis1_ragged():\n axis = ds.core.LinearAxis()\n lincoords = axis.compute_index(\n axis.compute_scale_and_translate((-4., 
4.), 9), 9)\n\n df = pd.DataFrame({\n 'x': pd.array([[4, 0], [0, -4, 0, 4]], dtype='Ragged[float32]'),\n 'y': pd.array([[0, -4], [-4, 0, 4, 0]], dtype='Ragged[float32]')\n })\n\n cvs = ds.Canvas(plot_width=9, plot_height=9)\n\n agg = cvs.line(df,\n 'x',\n 'y',\n ds.count(),\n axis=1)\n\n sol = np.array([[0, 0, 0, 0, 2, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 1, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 1, 0],\n [1, 0, 0, 0, 0, 0, 0, 0, 2],\n [0, 1, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0]], dtype='i4')\n\n out = xr.DataArray(sol, coords=[lincoords, lincoords],\n dims=['y', 'x'])\n assert_eq_xr(agg, out)\n\n\[email protected]('DataFrame', DataFrames)\[email protected]('df_kwargs,cvs_kwargs', [\n # axis1 none constant\n (dict(data={\n 'x0': [-4, np.nan],\n 'x1': [-2, 2],\n 'x2': [0, 4],\n 'y0': [0, np.nan],\n 'y1': [-4, 4],\n 'y2': [0, 0]\n }, dtype='float32'), dict(x=['x0', 'x1', 'x2'], y=['y0', 'y1', 'y2'], axis=1)),\n\n # axis0 single\n (dict(data={\n 'x': [-4, -2, 0, np.nan, 2, 4],\n 'y': [0, -4, 0, np.nan, 4, 0],\n }), dict(x='x', y='y', axis=0)),\n\n # axis0 multi\n (dict(data={\n 'x0': [-4., -2., 0],\n 'x1': [np.nan, 2, 4],\n 'y0': [0, -4, 0],\n 'y1': [np.nan, 4, 0],\n }, dtype='float32'), dict(x=['x0', 'x1'], y=['y0', 'y1'], axis=0)),\n\n # axis1 ragged arrays\n (dict(data={\n 'x': pd.array([[-4, -2, 0], [2, 4]], dtype='Ragged[float32]'),\n 'y': pd.array([[0, -4, 0], [4, 0]], dtype='Ragged[float32]')\n }), dict(x='x', y='y', axis=1))\n])\ndef test_area_to_zero_fixedrange(DataFrame, df_kwargs, cvs_kwargs):\n if cudf and DataFrame is cudf_DataFrame:\n if isinstance(getattr(df_kwargs['data'].get('x', []), 'dtype', ''), RaggedDtype):\n pytest.skip(\"cudf DataFrames do not support extension types\")\n\n df = DataFrame(**df_kwargs)\n\n axis = ds.core.LinearAxis()\n lincoords_y = axis.compute_index(\n axis.compute_scale_and_translate((-2.25, 2.25), 5), 5)\n\n lincoords_x = 
axis.compute_index(\n axis.compute_scale_and_translate((-3.75, 3.75), 9), 9)\n\n cvs = ds.Canvas(plot_width=9, plot_height=5,\n x_range=[-3.75, 3.75], y_range=[-2.25, 2.25])\n\n agg = cvs.area(df, agg=ds.count(), **cvs_kwargs)\n\n sol = np.array([[0, 1, 1, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 1, 1, 0]],\n dtype='i4')\n\n out = xr.DataArray(sol, coords=[lincoords_y, lincoords_x],\n dims=['y', 'x'])\n assert_eq_xr(agg, out)\n\n\[email protected]('DataFrame', DataFrames)\[email protected]('df_kwargs,cvs_kwargs', [\n # axis1 none constant\n (dict(data={\n 'x0': [-4, 0],\n 'x1': [-2, 2],\n 'x2': [0, 4],\n 'y0': [0, 0],\n 'y1': [-4, -4],\n 'y2': [0, 0]\n }, dtype='float32'), dict(x=['x0', 'x1', 'x2'], y=['y0', 'y1', 'y2'], axis=1)),\n\n # axis1 y constant\n (dict(data={\n 'x0': [-4, 0],\n 'x1': [-2, 2],\n 'x2': [0, 4],\n }, dtype='float32'),\n dict(x=['x0', 'x1', 'x2'], y=np.array([0, -4, 0], dtype='float32'), axis=1)),\n\n # axis0 single\n (dict(data={\n 'x': [-4, -2, 0, 0, 2, 4],\n 'y': [0, -4, 0, 0, -4, 0],\n }), dict(x='x', y='y', axis=0)),\n\n # axis0 multi\n (dict(data={\n 'x0': [-4, -2, 0],\n 'x1': [0, 2, 4],\n 'y0': [0, -4, 0],\n 'y1': [0, -4, 0],\n }, dtype='float32'), dict(x=['x0', 'x1'], y=['y0', 'y1'], axis=0)),\n\n # axis0 multi, y string\n (dict(data={\n 'x0': [-4, -2, 0],\n 'x1': [0, 2, 4],\n 'y0': [0, -4, 0],\n }, dtype='float32'), dict(x=['x0', 'x1'], y='y0', axis=0)),\n\n # axis1 ragged arrays\n (dict(data={\n 'x': pd.array([[-4, -2, 0], [0, 2, 4]], dtype='Ragged[float32]'),\n 'y': pd.array([[0, -4, 0], [0, -4, 0]], dtype='Ragged[float32]')\n }), dict(x='x', y='y', axis=1))\n])\ndef test_area_to_zero_autorange(DataFrame, df_kwargs, cvs_kwargs):\n if cudf and DataFrame is cudf_DataFrame:\n if isinstance(getattr(df_kwargs['data'].get('x', []), 'dtype', ''), RaggedDtype):\n pytest.skip(\"cudf DataFrames do not support extension types\")\n\n df = 
DataFrame(**df_kwargs)\n\n axis = ds.core.LinearAxis()\n lincoords_y = axis.compute_index(\n axis.compute_scale_and_translate((-4., 0.), 7), 7)\n lincoords_x = axis.compute_index(\n axis.compute_scale_and_translate((-4., 4.), 13), 13)\n\n cvs = ds.Canvas(plot_width=13, plot_height=7)\n\n agg = cvs.area(df, agg=ds.count(), **cvs_kwargs)\n\n sol = np.array([[0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1]],\n dtype='i4')\n\n out = xr.DataArray(sol, coords=[lincoords_y, lincoords_x],\n dims=['y', 'x'])\n assert_eq_xr(agg, out)\n\n\[email protected]('DataFrame', DataFrames)\[email protected]('df_kwargs,cvs_kwargs', [\n # axis1 none constant\n (dict(data={\n 'x0': [-4, np.nan],\n # 'x0': [-4, 0],\n 'x1': [-2, 2],\n 'x2': [0, 4],\n 'y0': [0, np.nan],\n # 'y0': [0, 1],\n 'y1': [-4, 4],\n 'y2': [0, 0]\n }, dtype='float32'), dict(x=['x0', 'x1', 'x2'], y=['y0', 'y1', 'y2'], axis=1)),\n\n # axis0 single\n (dict(data={\n 'x': [-4, -2, 0, np.nan, 2, 4],\n 'y': [0, -4, 0, np.nan, 4, 0],\n }), dict(x='x', y='y', axis=0)),\n\n # axis0 multi\n (dict(data={\n 'x0': [-4, -2, 0],\n 'x1': [np.nan, 2, 4],\n 'y0': [0, -4, 0],\n 'y1': [np.nan, 4, 0],\n }, dtype='float32'), dict(x=['x0', 'x1'], y=['y0', 'y1'], axis=0)),\n\n # axis1 ragged arrays\n (dict(data={\n 'x': pd.array([[-4, -2, 0], [2, 4]], dtype='Ragged[float32]'),\n 'y': pd.array([[0, -4, 0], [4, 0]], dtype='Ragged[float32]')\n }), dict(x='x', y='y', axis=1))\n])\ndef test_area_to_zero_autorange_gap(DataFrame, df_kwargs, cvs_kwargs):\n if cudf and DataFrame is cudf_DataFrame:\n if isinstance(getattr(df_kwargs['data'].get('x', []), 'dtype', ''), RaggedDtype):\n pytest.skip(\"cudf DataFrames do not support extension types\")\n\n df = DataFrame(**df_kwargs)\n\n axis = 
ds.core.LinearAxis()\n lincoords_y = axis.compute_index(\n axis.compute_scale_and_translate((-4., 4.), 7), 7)\n lincoords_x = axis.compute_index(\n axis.compute_scale_and_translate((-4., 4.), 13), 13)\n\n cvs = ds.Canvas(plot_width=13, plot_height=7)\n\n agg = cvs.area(df, agg=ds.count(), **cvs_kwargs)\n\n sol = np.array([[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]],\n dtype='i4')\n\n out = xr.DataArray(sol, coords=[lincoords_y, lincoords_x],\n dims=['y', 'x'])\n assert_eq_xr(agg, out)\n\n\[email protected]('DataFrame', DataFrames)\[email protected]('df_kwargs,cvs_kwargs', [\n # axis1 none constant\n (dict(data={\n 'x0': [-4, 0],\n 'x1': [-2, 2],\n 'x2': [0, 4],\n 'y0': [0, 0],\n 'y1': [-4, -4],\n 'y2': [0, 0],\n 'y3': [0, 0],\n 'y4': [-2, -2],\n 'y5': [0, 0],\n }, dtype='float32'),\n dict(x=['x0', 'x1', 'x2'], y=['y0', 'y1', 'y2'],\n y_stack=['y3', 'y4', 'y5'], axis=1)),\n\n # axis1 y constant\n (dict(data={\n 'x0': [-4, 0],\n 'x1': [-2, 2],\n 'x2': [0, 4],\n }, dtype='float32'),\n dict(x=['x0', 'x1', 'x2'], y=np.array([0, -4, 0]),\n y_stack=np.array([0, -2, 0], dtype='float32'), axis=1)),\n\n # axis0 single\n (dict(data={\n 'x': [-4, -2, 0, 0, 2, 4],\n 'y': [0, -4, 0, 0, -4, 0],\n 'y_stack': [0, -2, 0, 0, -2, 0],\n }), dict(x='x', y='y', y_stack='y_stack', axis=0)),\n\n # axis0 multi\n (dict(data={\n 'x0': [-4, -2, 0],\n 'x1': [0, 2, 4],\n 'y0': [0, -4, 0],\n 'y1': [0, -4, 0],\n 'y2': [0, -2, 0],\n 'y3': [0, -2, 0],\n }, dtype='float32'), dict(x=['x0', 'x1'], y=['y0', 'y1'],\n y_stack=['y2', 'y3'], axis=0)),\n\n # axis0 multi, y string\n (dict(data={\n 'x0': [-4, -2, 0],\n 'x1': [0, 2, 4],\n 'y0': [0, -4, 0],\n 'y2': [0, -2, 0],\n }, dtype='float32'), dict(x=['x0', 'x1'], y='y0', y_stack='y2', axis=0)),\n\n # 
axis1 ragged arrays\n (dict(data={\n 'x': pd.array([[-4, -2, 0], [0, 2, 4]], dtype='Ragged[float32]'),\n 'y': pd.array([[0, -4, 0], [0, -4, 0]], dtype='Ragged[float32]'),\n 'y_stack': pd.array([[0, -2, 0], [0, -2, 0]], dtype='Ragged[float32]')\n }), dict(x='x', y='y', y_stack='y_stack', axis=1))\n])\ndef test_area_to_line_autorange(DataFrame, df_kwargs, cvs_kwargs):\n if cudf and DataFrame is cudf_DataFrame:\n if isinstance(getattr(df_kwargs['data'].get('x', []), 'dtype', ''), RaggedDtype):\n pytest.skip(\"cudf DataFrames do not support extension types\")\n\n df = DataFrame(**df_kwargs)\n\n axis = ds.core.LinearAxis()\n lincoords_y = axis.compute_index(\n axis.compute_scale_and_translate((-4., 0.), 7), 7)\n lincoords_x = axis.compute_index(\n axis.compute_scale_and_translate((-4., 4.), 13), 13)\n\n cvs = ds.Canvas(plot_width=13, plot_height=7)\n\n agg = cvs.area(df, agg=ds.count(), **cvs_kwargs)\n\n sol = np.array([[0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0],\n [0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],\n dtype='i4')\n\n out = xr.DataArray(sol, coords=[lincoords_y, lincoords_x],\n dims=['y', 'x'])\n assert_eq_xr(agg, out)\n\n\ndef test_area_to_line_autorange_gap():\n axis = ds.core.LinearAxis()\n lincoords_y = axis.compute_index(\n axis.compute_scale_and_translate((-4., 4.), 7), 7)\n lincoords_x = axis.compute_index(\n axis.compute_scale_and_translate((-4., 4.), 13), 13)\n\n cvs = ds.Canvas(plot_width=13, plot_height=7)\n\n df = pd.DataFrame({\n 'x': [-4, -2, 0, np.nan, 2, 4],\n 'y0': [0, -4, 0, np.nan, 4, 0],\n 'y1': [0, 0, 0, np.nan, 0, 0],\n })\n\n # When a line is specified to fill to, this line is not included in\n # the fill. 
So we expect the y=0 line to not be filled.\n agg = cvs.area(df, 'x', 'y0', ds.count(), y_stack='y1')\n\n sol = np.array([[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]],\n dtype='i4')\n\n out = xr.DataArray(sol, coords=[lincoords_y, lincoords_x],\n dims=['y0', 'x'])\n assert_eq_xr(agg, out)\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.fill_diagonal", "numpy.zeros", "numpy.testing.assert_equal", "pandas.DataFrame", "pandas.array", "numpy.ones", "numpy.where", "numpy.arange", "numpy.spacing" ] ]
XiangboGaoBarry/TexasHoldemAI
[ "91776498fafd464040eca2b1fe454333991bc04e" ]
[ "examples/limit_holdem_dqn.py" ]
[ "''' An example of learning a Deep-Q Agent on Texas Limit Holdem\n'''\n\nimport tensorflow as tf\nimport os\n\nimport rlcard\nfrom rlcard.agents import DQNAgent\nfrom rlcard.agents import RandomAgent\nfrom rlcard.utils import set_global_seed, tournament\nfrom rlcard.utils import Logger\n\n# Make environment\nenv = rlcard.make('limit-holdem', config={'seed': 0})\neval_env = rlcard.make('limit-holdem', config={'seed': 0})\n\n# Set the iterations numbers and how frequently we evaluate the performance\nevaluate_every = 100\nevaluate_num = 1000\nepisode_num = 5000\n\n# The intial memory size\nmemory_init_size = 1000\n\n# Train the agent every X steps\ntrain_every = 1\n\n# The paths for saving the logs and learning curves\nlog_dir = './experiments/limit_holdem_dqn_result/'\n\n# Set a global seed\nset_global_seed(0)\n\nwith tf.Session() as sess:\n\n # Initialize a global step\n global_step = tf.Variable(0, name='global_step', trainable=False)\n\n # Set up the agents\n agent = DQNAgent(sess,\n scope='dqn',\n action_num=env.action_num,\n replay_memory_init_size=memory_init_size,\n train_every=train_every,\n state_shape=env.state_shape,\n mlp_layers=[512,512])\n random_agent = RandomAgent(action_num=eval_env.action_num)\n env.set_agents([agent, random_agent])\n eval_env.set_agents([agent, random_agent])\n\n # Initialize global variables\n sess.run(tf.global_variables_initializer())\n\n # Init a Logger to plot the learning curve\n logger = Logger(log_dir)\n\n for episode in range(episode_num):\n\n # Generate data from the environment\n trajectories, _ = env.run(is_training=True)\n\n # Feed transitions into agent memory, and train the agent\n for ts in trajectories[0]:\n agent.feed(ts)\n\n # Evaluate the performance. 
Play with random agents.\n if episode % evaluate_every == 0:\n logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])\n\n # Close files in the logger\n logger.close_files()\n\n # Plot the learning curve\n logger.plot('DQN')\n \n # Save model\n save_dir = 'models/limit_holdem_dqn'\n # if not os.path.exists(save_dir):\n # os.makedirs(save_dir)\n # saver = tf.train.Saver()\n # saver.save(sess, os.path.join(save_dir, 'model'))\n \n" ]
[ [ "tensorflow.Session", "tensorflow.Variable", "tensorflow.global_variables_initializer" ] ]
KTH-SML/geomtwo
[ "d529c1745eacd27aaca1dfd484c7b561e99f966b" ]
[ "nodes/pose_plotter.py" ]
[ "#!/usr/bin/env python\nimport rospy as rp\nimport geomtwo.msg as gms\nimport geomtwo.impl as gmi\nimport threading as thd\nimport matplotlib.pyplot as plt\n\n\nrp.init_node(name=\"pose_plotter\")\nFREQUENCY = 3e1\nRATE = rp.Rate(FREQUENCY)\nTIME_STEP = 1/FREQUENCY\n\nLOCK = thd.Lock()\n\npose = None\nartists = None\n\ndef callback(msg):\n global pose\n LOCK.acquire()\n pose = gmi.Pose(msg)\n LOCK.release()\n\nrp.Subscriber(name=\"pose\", data_class=gms.Pose, callback=callback)\n\n\n\nplt.ion()\nplt.figure()\nplt.grid(True)\nplt.axis([-5,5,-5,5])\n\nwhile not rp.is_shutdown():\n LOCK.acquire()\n if not pose is None:\n if not artists is None:\n for artist in artists:\n artist.remove()\n artists = pose.draw()\n pose = None\n LOCK.release()\n plt.draw()\n RATE.sleep()\n" ]
[ [ "matplotlib.pyplot.ion", "matplotlib.pyplot.grid", "matplotlib.pyplot.figure", "matplotlib.pyplot.draw", "matplotlib.pyplot.axis" ] ]
phillips96/similarity
[ "3794f288f17f47f1f90b5368e5c0eeac1e81e10d" ]
[ "tensorflow_similarity/models/similarity_model.py" ]
[ "# Copyright 2021 The TensorFlow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import defaultdict\nfrom copy import copy\nfrom pathlib import Path\nfrom typing import DefaultDict, Dict, List, MutableMapping, MutableSequence\nfrom typing import Optional, Sequence, Union\nimport numpy as np\nfrom tabulate import tabulate\nimport tensorflow as tf\nfrom tensorflow.keras.optimizers import Optimizer\nfrom tensorflow.keras.metrics import Metric\nfrom tensorflow.keras.losses import Loss\nfrom tqdm.auto import tqdm\n\nfrom tensorflow_similarity.classification_metrics import ClassificationMetric\nfrom tensorflow_similarity.classification_metrics import make_classification_metric # noqa\nfrom tensorflow_similarity.distances import Distance\nfrom tensorflow_similarity.distances import distance_canonicalizer\nfrom tensorflow_similarity.training_metrics import DistanceMetric\nfrom tensorflow_similarity.evaluators.evaluator import Evaluator\nfrom tensorflow_similarity.indexer import Indexer\nfrom tensorflow_similarity.losses import MetricLoss\nfrom tensorflow_similarity.matchers import ClassificationMatch\nfrom tensorflow_similarity.retrieval_metrics import RetrievalMetric\nfrom tensorflow_similarity.stores import Store\nfrom tensorflow_similarity.search import Search\nfrom tensorflow_similarity.types import FloatTensor, Lookup, IntTensor, Tensor\nfrom tensorflow_similarity.types import PandasDataFrame, CalibrationResults\n\n\[email 
protected]_keras_serializable(package=\"Similarity\")\nclass SimilarityModel(tf.keras.Model):\n \"\"\"Specialized Keras.Model which implement the core features needed for\n metric learning. In particular, `SimilarityModel()` supports indexing,\n searching and saving the embeddings predicted by the network.\n\n All Similarity models classes derive from this class to benefits from those\n core features.\n \"\"\"\n\n # @property\n # def _index(self):\n # if not hasattr(self, '_index'):\n # ValueError(\"Index doesn't exist: index data before quering it\")\n # return self._index\n\n # @index.setter\n # def _index(self, index):\n # self._index: Indexer = index\n\n def compile(\n self,\n optimizer: Union[Optimizer, str, Dict, List] = 'rmsprop', # noqa\n distance: Union[Distance, str] = 'auto',\n loss: Union[Loss, MetricLoss, str, Dict, List] = None,\n metrics: Union[Metric, DistanceMetric, str, Dict, List] = None,\n embedding_output: int = None,\n kv_store: Union[Store, str] = 'memory',\n search: Union[Search, str] = 'nmslib',\n evaluator: Union[Evaluator, str] = 'memory',\n stat_buffer_size: int = 1000,\n loss_weights: List = None,\n weighted_metrics: List = None,\n run_eagerly: bool = False,\n steps_per_execution: int = 1,\n **kwargs):\n \"\"\"Configures the model for training.\n\n Args:\n\n optimizer: String (name of optimizer) or optimizer instance. See\n [tf.keras.optimizers](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers).\n\n loss: String (name of objective function), objective function,\n any `tensorflow_similarity.loss.*` instance or a\n `tf.keras.losses.Loss` instance. See the [Losses\n documentation](../losses.md) for a list of metric learning\n specifics loss offered by TensorFlow Similairy and\n [tf.keras.losses](https://www.tensorflow.org/api_docs/python/tf/keras/losses)\n for the losses available directly in TensorFlow.\n\n\n metrics: List of metrics to be evaluated by the model during\n training and testing. 
Each of those can be a string,\n a function or a [tensorflow_similairty.metrics.*](../metrics.md)\n instance. Note that the metrics used for some type of\n metric-learning such as distance learning (e.g via triplet loss)\n have a different prototype than the metrics used in\n standard models and you can't use the `tf.keras.metrics` for those\n type of learning.\n\n Additionally many distance metrics are computed based of the\n [Indexer()](../indexer.md) performance. E.g Matching Top 1\n accuracy. For technical and performance reasons, indexing data at\n each training batch to compute those is impractical so\n those metrics are computed at epoch end via\n the [EvalCallback](../callbacks.md)\n\n See [Evaluation Metrics](../eval_metrics.md) for a list of\n available metrics.\n\n For multi-output models you can specify different metrics for\n different outputs by passing a dictionary, such as\n `metrics={'similarity': 'min_neg_gap', 'other': ['accuracy',\n 'mse']}`. You can also pass a list (len = len(outputs)) of lists\n of metrics such as `metrics=[['min_neg_gap'], ['accuracy', 'mse']]`\n or `metrics=['min_neg_gap', ['accuracy', 'mse']]`. For outputs\n which are not related to metrics learning, you can use any of the\n standard `tf.keras.metrics`.\n\n loss_weights: Optional list or dictionary specifying scalar\n coefficients (Python floats) to weight the loss contributions of\n different model outputs. The loss value that will be minimized\n by the model will then be the *weighted sum* of all individual\n losses, weighted by the `loss_weights` coefficients.\n If a list, it is expected to have a 1:1 mapping to the model's\n outputs. If a dict, it is expected to map output names (strings)\n to scalar coefficients.\n\n weighted_metrics: List of metrics to be evaluated and weighted by\n sample_weight or class_weight during training and testing.\n\n\n run_eagerly: Bool. Defaults to `False`. If `True`, this `Model`'s\n logic will not be wrapped in a `tf.function`. 
Recommended to leave\n this as `None` unless your `Model` cannot be run inside a\n `tf.function`.\n\n steps_per_execution: Int. Defaults to 1. The number of batches to\n run during each `tf.function` call. Running multiple batches\n inside a single `tf.function` call can greatly improve performance\n on TPUs or small models with a large Python overhead.\n At most, one full epoch will be run each execution. If a number\n larger than the size of the epoch is passed, the execution will be\n truncated to the size of the epoch.\n Note that if `steps_per_execution` is set to `N`,\n `Callback.on_batch_begin` and `Callback.on_batch_end` methods will\n only be called every `N` batches (i.e. before/after each\n `tf.function` execution).\n\n Raises:\n ValueError: In case of invalid arguments for\n `optimizer`, `loss` or `metrics`.\n \"\"\"\n # Fetching the distance used from the first loss if auto\n if distance == 'auto':\n if isinstance(loss, list):\n metric_loss = loss[0]\n else:\n metric_loss = loss\n\n try:\n distance = metric_loss.distance\n except: # noqa\n msg = \"distance='auto' only works if the first loss is a\\\n metric loss\"\n\n raise ValueError(msg)\n print(\"Distance metric automatically set to\", distance,\n \"use the distance arg to override.\")\n else:\n distance = distance_canonicalizer(distance)\n\n # check if we we need to set the embedding head\n num_outputs = len(self.output_names)\n if embedding_output and embedding_output > num_outputs:\n raise Exception(\"Embedding_output value exceed number of model \"\n \"outputs\")\n\n if not embedding_output and num_outputs > 1:\n print(\"Embedding output set to be model output 0\",\n \"Use the embedding_output arg to override this\")\n embedding_output = 0\n\n # fetch embedding size as some ANN libs requires it for init\n if num_outputs > 1:\n self.embedding_size = self.outputs[embedding_output].shape[1]\n else:\n self.embedding_size = self.outputs[0].shape[1]\n\n # init index\n self._index = 
Indexer(embedding_size=self.embedding_size,\n distance=distance,\n search=search,\n kv_store=kv_store,\n evaluator=evaluator,\n embedding_output=embedding_output,\n stat_buffer_size=stat_buffer_size)\n\n # call underlying keras method\n super().compile(optimizer=optimizer,\n loss=loss,\n metrics=metrics,\n loss_weights=loss_weights,\n weighted_metrics=weighted_metrics,\n run_eagerly=run_eagerly,\n steps_per_execution=steps_per_execution,\n **kwargs)\n\n def index(self,\n x: Tensor,\n y: IntTensor = None,\n data: Optional[Tensor] = None,\n build: bool = True,\n verbose: int = 1):\n \"\"\"Index data.\n\n Args:\n x: Samples to index.\n\n y: class ids associated with the data if any. Defaults to None.\n\n store_data: store the data associated with the samples in the key\n value store. Defaults to True.\n\n build: Rebuild the index after indexing. This is needed to make the\n new samples searchable. Set it to false to save processing time\n when calling indexing repeatidly without the need to search between\n the indexing requests. Defaults to True.\n\n verbose: Output indexing progress info. Defaults to 1.\n \"\"\"\n\n if not self._index:\n raise Exception('You need to compile the model with a valid'\n 'distance to be able to use the indexing')\n if verbose:\n print('[Indexing %d points]' % len(x))\n print('|-Computing embeddings')\n predictions = self.predict(x)\n self._index.batch_add(predictions=predictions,\n labels=y,\n data=data,\n build=build,\n verbose=verbose)\n\n def index_single(self,\n x: Tensor,\n y: IntTensor = None,\n data: Optional[Tensor] = None,\n build: bool = True,\n verbose: int = 1):\n \"\"\"Index data.\n\n Args:\n x: Sample to index.\n\n y: class id associated with the data if any. Defaults to None.\n\n data: store the data associated with the samples in the key\n value store. Defaults to None.\n\n build: Rebuild the index after indexing. This is needed to make the\n new samples searchable. 
Set it to false to save processing time\n when calling indexing repeatidly without the need to search between\n the indexing requests. Defaults to True.\n\n verbose: Output indexing progress info. Defaults to 1.\n \"\"\"\n\n if not self._index:\n raise Exception('You need to compile the model with a valid'\n 'distance to be able to use the indexing')\n if verbose:\n print('[Indexing 1 point]')\n print('|-Computing embeddings')\n\n x = tf.expand_dims(x, axis=0)\n prediction = self.predict(x)\n self._index.add(prediction=prediction,\n label=y,\n data=data,\n build=build,\n verbose=verbose)\n\n def lookup(self,\n x: Tensor,\n k: int = 5,\n verbose: int = 1) -> List[List[Lookup]]:\n \"\"\"Find the k closest matches in the index for a set of samples.\n\n Args:\n x: Samples to match.\n\n k: Number of nearest neighboors to lookup. Defaults to 5.\n\n verbose: display progress. Default to 1.\n\n Returns\n list of list of k nearest neighboors:\n List[List[Lookup]]\n \"\"\"\n predictions = self.predict(x)\n return self._index.batch_lookup(predictions=predictions,\n k=k,\n verbose=verbose)\n\n def single_lookup(self, x: Tensor, k: int = 5) -> List[Lookup]:\n \"\"\"Find the k closest matches in the index for a given sample.\n\n Args:\n x: Sample to match.\n\n k: Number of nearest neighboors to lookup. 
Defaults to 5.\n\n Returns\n list of the k nearest neigboors info:\n List[Lookup]\n \"\"\"\n x = tf.expand_dims(x, axis=0)\n prediction = self.predict(x)\n return self._index.single_lookup(prediction=prediction, k=k)\n\n def index_summary(self):\n \"Display index info summary.\"\n self._index.print_stats()\n\n def calibrate(\n self,\n x: FloatTensor,\n y: IntTensor,\n thresholds_targets: MutableMapping[str, float] = {},\n k: int = 1,\n calibration_metric: Union[str, ClassificationMetric] = \"f1\",\n matcher: Union[str, ClassificationMatch] = 'match_nearest',\n extra_metrics: MutableSequence[Union[str, ClassificationMetric]] = [\n 'precision', 'recall'\n ], # noqa\n rounding: int = 2,\n verbose: int = 1) -> CalibrationResults:\n \"\"\"Calibrate model thresholds using a test dataset.\n\n TODO: more detailed explaination.\n\n Args:\n\n x: examples to use for the calibration.\n\n y: labels associated with the calibration examples.\n\n thresholds_targets: Dict of performance targets to (if possible)\n meet with respect to the `calibration_metric`.\n\n calibration_metric:\n [ClassificationMetric()](classification_metrics/overview.md) used\n to evaluate the performance of the index.\n\n k: How many neighboors to use during the calibration.\n Defaults to 1.\n\n matcher: {'match_nearest', 'match_majority_vote'} or\n ClassificationMatch object. Defines the classification matching,\n e.g., match_nearest will count a True Positive if the query_label\n is equal to the label of the nearest neighbor and the distance is\n less than or equal to the distance threshold. Defaults to\n 'match_nearest'.\n\n extra_metrics: List of additional\n `tf.similarity.classification_metrics.ClassificationMetric()`\n to compute and report. Defaults to ['precision', 'recall'].\n\n\n rounding: Metric rounding. 
Default to 2 digits.\n\n verbose: Be verbose and display calibration results.\n Defaults to 1.\n\n Returns:\n CalibrationResults containing the thresholds and cutpoints Dicts.\n \"\"\"\n\n # predict\n predictions = self.predict(x)\n\n # calibrate\n return self._index.calibrate(predictions=predictions,\n target_labels=y,\n thresholds_targets=thresholds_targets,\n k=k,\n calibration_metric=calibration_metric,\n matcher=matcher,\n extra_metrics=extra_metrics,\n rounding=rounding,\n verbose=verbose)\n\n def match(self,\n x: FloatTensor,\n cutpoint='optimal',\n no_match_label=-1,\n k=1,\n matcher: Union[str, ClassificationMatch] = 'match_nearest',\n verbose=0):\n \"\"\"Match a set of examples against the calibrated index\n\n For the match function to work, the index must be calibrated using\n calibrate().\n\n Args:\n x: Batch of examples to be matched against the index.\n\n cutpoint: Which calibration threshold to use.\n Defaults to 'optimal' which is the optimal F1 threshold computed\n using calibrate().\n\n no_match_label: Which label value to assign when there is no\n match. Defaults to -1.\n\n k: How many neighboors to use during the calibration.\n Defaults to 1.\n\n matcher: {'match_nearest', 'match_majority_vote'} or\n ClassificationMatch object. Defines the classification matching,\n e.g., match_nearest will count a True Positive if the query_label\n is equal to the label of the nearest neighbor and the distance is\n less than or equal to the distance threshold.\n\n verbose. Be verbose. 
Defaults to 0.\n\n Returns:\n List of class ids that matches for each supplied example\n\n Notes:\n This function matches all the cutpoints at once internally as there\n is little performance downside to do so and allows to do the\n evaluation in a single go.\n\n \"\"\"\n # basic checks\n if not self._index.is_calibrated:\n raise ValueError('Uncalibrated model: run model.calibration()')\n\n # get predictions\n predictions = self.predict(x)\n\n # matching\n matches = self._index.match(predictions,\n no_match_label=no_match_label,\n k=k,\n matcher=matcher,\n verbose=verbose)\n\n # select which matches to return\n if cutpoint == 'all': # returns all the cutpoints for eval purpose.\n return matches\n else: # normal match behavior - returns a specific cut point\n return matches[cutpoint]\n\n def evaluate_retrieval(\n self,\n x: Tensor,\n y: IntTensor,\n retrieval_metrics: Sequence[RetrievalMetric], # noqa\n verbose: int = 1) -> Dict[str, np.ndarray]:\n \"\"\"Evaluate the quality of the index against a test dataset.\n\n Args:\n x: Examples to be matched against the index.\n\n y: Label associated with the examples supplied.\n\n retrieval_metrics: List of\n [RetrievalMetric()](retrieval_metrics/overview.md) to compute.\n\n verbose (int, optional): Display results if set to 1 otherwise\n results are returned silently. 
Defaults to 1.\n\n Returns:\n Dictionary of metric results where keys are the metric names and\n values are the metrics values.\n \"\"\"\n # get embeddings\n if verbose:\n print(\"|-Computing embeddings\")\n predictions = self.predict(x)\n\n if verbose:\n print(\"|-Computing retrieval metrics\")\n\n results = self._index.evaluate_retrieval(\n predictions=predictions,\n target_labels=y,\n retrieval_metrics=retrieval_metrics,\n verbose=verbose,\n )\n\n if verbose:\n table = zip(results.keys(), results.values())\n headers = ['metric', 'Value']\n print('\\n [Summary]\\n')\n print(tabulate(table, headers=headers))\n\n return results\n\n def evaluate_classification(\n self,\n x: Tensor,\n y: IntTensor,\n k: int = 1,\n extra_metrics: MutableSequence[Union[str, ClassificationMetric]] = [\n 'precision', 'recall'\n ], # noqa\n matcher: Union[str, ClassificationMatch] = 'match_nearest',\n verbose: int = 1\n ) -> DefaultDict[str, Dict[str, Union[str, np.ndarray]]]:\n \"\"\"Evaluate model classification matching on a given evaluation dataset.\n\n Args:\n x: Examples to be matched against the index.\n\n y: Label associated with the examples supplied.\n\n k: How many neighbors to use to perform the evaluation.\n Defaults to 1.\n\n extra_metrics: List of additional\n `tf.similarity.classification_metrics.ClassificationMetric()` to\n compute and report. Defaults to ['precision', 'recall'].\n\n matcher: {'match_nearest', 'match_majority_vote'} or\n ClassificationMatch object. Defines the classification matching,\n e.g., match_nearest will count a True Positive if the query_label\n is equal to the label of the nearest neighbor and the distance is\n less than or equal to the distance threshold.\n\n verbose (int, optional): Display results if set to 1 otherwise\n results are returned silently. 
Defaults to 1.\n\n Returns:\n Dictionary of (distance_metrics.md)[evaluation metrics]\n \"\"\"\n # There is some code duplication in this function but that is the best\n # solution to keep the end-user API clean and doing inferences once.\n\n if not self._index.is_calibrated:\n raise ValueError('Uncalibrated model: run model.calibration()')\n cal_metric = self._index.get_calibration_metric()\n\n # get embeddings\n if verbose:\n print(\"|-Computing embeddings\")\n predictions = self.predict(x)\n\n results: DefaultDict[str, Dict[str,\n Union[str,\n np.ndarray]]] = (defaultdict(dict))\n\n if verbose:\n pb = tqdm(total=len(self._index.cutpoints),\n desc='Evaluating cutpoints')\n\n for cp_name, cp_data in self._index.cutpoints.items():\n # create a metric that match at the requested k and threshold\n distance_threshold = float(cp_data['distance'])\n metric = make_classification_metric(cal_metric.name)\n metrics = copy(extra_metrics)\n metrics.append(metric)\n\n res: Dict[str, Union[str, np.ndarray]] = {}\n res.update(\n self._index.evaluate_classification(predictions,\n y, [distance_threshold],\n metrics=metrics,\n matcher=matcher,\n k=k))\n res['distance'] = tf.constant([distance_threshold])\n res['name'] = cp_name\n results[cp_name] = res\n if verbose:\n pb.update()\n\n if verbose:\n pb.close()\n\n if verbose:\n headers = ['name', cal_metric.name]\n for i in results['optimal'].keys():\n if i not in headers:\n headers.append(str(i))\n rows = []\n for data in results.values():\n rows.append([data[v] for v in headers])\n print('\\n [Summary]\\n')\n print(tabulate(rows, headers=headers))\n\n return results\n\n def reset_index(self):\n \"Reinitialize the index\"\n self._index.reset()\n\n def index_size(self) -> int:\n \"Return the index size\"\n return self._index.size()\n\n def load_index(self, filepath: str):\n \"\"\"Load Index data from a checkpoint and initialize underlying\n structure with the reloaded data.\n\n Args:\n path: Directory where the checkpoint is 
located.\n verbose: Be verbose. Defaults to 1.\n \"\"\"\n\n index_path = Path(filepath) / \"index\"\n self._index = Indexer.load(index_path)\n\n def save_index(self, filepath, compression=True):\n \"\"\"Save the index to disk\n\n Args:\n path: directory where to save the index\n compression: Store index data compressed. Defaults to True.\n \"\"\"\n index_path = Path(filepath) / \"index\"\n self._index.save(index_path, compression=compression)\n\n def save(self,\n filepath: str,\n save_index: bool = True,\n compression: bool = True,\n overwrite: bool = True,\n include_optimizer: bool = True,\n signatures=None,\n options=None,\n save_traces: bool = True):\n \"\"\"Save the model and the index.\n\n Args:\n filepath: where to save the model.\n\n save_index: Save the index content. Defaults to True.\n\n compression: Compress index data. Defaults to True.\n\n overwrite: Overwrite previous model. Defaults to True.\n\n include_optimizer: Save optimizer state. Defaults to True.\n\n signatures: Signatures to save with the model. Defaults to None.\n\n options: A `tf.saved_model.SaveOptions` to save with the model.\n Defaults to None.\n\n save_traces (optional): When enabled, the SavedModel will\n store the function traces for each layer. This can be disabled,\n so that only the configs of each layer are stored.\n Defaults to True. 
Disabling this will decrease serialization\n time and reduce file size, but it requires that all\n custom layers/models implement a get_config() method.\n \"\"\"\n\n # save trace doesn't exist prior to 2.4 -- asking for it but not\n # using it\n\n # call underlying keras method to save the mode graph and its weights\n tf.keras.models.save_model(self,\n filepath,\n overwrite=overwrite,\n include_optimizer=include_optimizer,\n signatures=signatures,\n options=options,\n save_traces=save_traces)\n if hasattr(self, '_index') and self._index and save_index:\n self.save_index(filepath, compression=compression)\n else:\n print('Index not saved as save_index=False')\n\n def to_data_frame(self, num_items: int = 0) -> PandasDataFrame:\n \"\"\"Export data as pandas dataframe\n\n Args:\n num_items (int, optional): Num items to export to the dataframe.\n Defaults to 0 (unlimited).\n\n Returns:\n pd.DataFrame: a pandas dataframe.\n \"\"\"\n return self._index.to_data_frame(num_items=num_items)\n\n # We don't need from_config as the index is reloaded separatly.\n # this is kept as a reminder that it was looked into and decided to split\n # the index reloading instead of overloading this method.\n # @classmethod\n # def from_config(cls, config):\n # return super().from_config(**config)\n" ]
[ [ "tensorflow.constant", "tensorflow.keras.models.save_model", "tensorflow.expand_dims", "tensorflow.keras.utils.register_keras_serializable" ] ]
zheng-ningxin/Prune-Ratio-Sensitivity
[ "b7c336dfd2cc1cec8b9cff7dce9bdc308bdeb2e5" ]
[ "src/filter_pruner.py" ]
[ "#!/bin/env python\nimport torch\nimport torch.nn as nn\n\n\nclass filter_pruner:\n def __init__(self, layer):\n self.layer = layer\n\n def cal_mask_l1(self, ratio):\n filters = self.layer.weight.shape[0]\n w_abs = self.layer.ori_weight.abs()\n w_sum = w_abs.view(filters, -1).sum(1)\n count = filters - int(filters * ratio)\n threshold = torch.topk(w_sum.view(-1), count, largest=False)[0].max()\n mask_weight = torch.gt(w_sum, threshold)[:, None, None, None].expand_as(layer.weight).type_as(layer.w_mask).detach()\n mask_bias = torch.gt(w_sum, threshold).type_as(layer.bias).detach() if hasattr(layer, 'bias') else None\n return mask_weight, mask_bias\n\n def cal_mask_l2(self, percentage):\n pass" ]
[ [ "torch.gt" ] ]
behnam-samadi/Fast_FPS
[ "6e522bfc5c3d53a1c200569f28b5df0065872abe" ]
[ "models/pointnet_util_current.py" ]
[ "\"\"\"\nAuthor: Benny\nDate: Nov 2019\n\"\"\"\nfrom data_utils.ModelNetDataLoader import ModelNetDataLoader\nimport argparse\nimport numpy as np\nimport os\nimport torch\nimport logging\nfrom tqdm import tqdm\nimport sys\nimport importlib\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = BASE_DIR\nsys.path.append(os.path.join(ROOT_DIR, 'models'))\n\n\ndef parse_args():\n '''PARAMETERS'''\n parser = argparse.ArgumentParser('PointNet')\n parser.add_argument('--batch_size', type=int, default=24, help='batch size in training')\n parser.add_argument('--gpu', type=str, default='0', help='specify gpu device')\n parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')\n parser.add_argument('--log_dir', type=str, default='pointnet2_ssg_normal', help='Experiment root')\n parser.add_argument('--normal', action='store_true', default=True, help='Whether to use normal information [default: False]')\n parser.add_argument('--num_votes', type=int, default=3, help='Aggregate classification scores with voting [default: 3]')\n return parser.parse_args()\n\ndef test(model, loader, num_class=40, vote_num=1):\n mean_correct = []\n class_acc = np.zeros((num_class,3))\n for j, data in tqdm(enumerate(loader), total=len(loader)):\n points, target = data\n target = target[:, 0]\n points = points.transpose(2, 1)\n points, target = points.cuda(), target.cuda()\n classifier = model.eval()\n vote_pool = torch.zeros(target.size()[0],num_class).cuda()\n for _ in range(vote_num):\n pred, _ = classifier(points)\n vote_pool += pred\n pred = vote_pool/vote_num\n pred_choice = pred.data.max(1)[1]\n for cat in np.unique(target.cpu()):\n classacc = pred_choice[target==cat].eq(target[target==cat].long().data).cpu().sum()\n class_acc[cat,0]+= classacc.item()/float(points[target==cat].size()[0])\n class_acc[cat,1]+=1\n correct = pred_choice.eq(target.long().data).cpu().sum()\n mean_correct.append(correct.item()/float(points.size()[0]))\n 
class_acc[:,2] = class_acc[:,0]/ class_acc[:,1]\n class_acc = np.mean(class_acc[:,2])\n instance_acc = np.mean(mean_correct)\n return instance_acc, class_acc\n\n\ndef main(args):\n def log_string(str):\n logger.info(str)\n print(str)\n\n '''HYPER PARAMETER'''\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n\n '''CREATE DIR'''\n experiment_dir = 'log/classification/' + args.log_dir\n\n '''LOG'''\n args = parse_args()\n logger = logging.getLogger(\"Model\")\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n file_handler = logging.FileHandler('%s/eval.txt' % experiment_dir)\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n log_string('PARAMETER ...')\n log_string(args)\n\n '''DATA LOADING'''\n log_string('Load dataset ...')\n DATA_PATH = 'data/modelnet40_normal_resampled/'\n TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test', normal_channel=args.normal)\n testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4)\n\n '''MODEL LOADING'''\n num_class = 40\n model_name = os.listdir(experiment_dir+'/logs')[0].split('.')[0]\n MODEL = importlib.import_module(model_name)\n\n classifier = MODEL.get_model(num_class,normal_channel=args.normal).cuda()\n\n checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')\n classifier.load_state_dict(checkpoint['model_state_dict'])\n\n with torch.no_grad():\n instance_acc, class_acc = test(classifier.eval(), testDataLoader, vote_num=args.num_votes)\n log_string('Test Instance Accuracy: %f, Class Accuracy: %f' % (instance_acc, class_acc))\n\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom time import time\nimport numpy as np\nimport time as Time\ndef timeit(tag, t):\n print(\"{}: {}s\".format(tag, 
time() - t))\n return time()\n\ndef pc_normalize(pc):\n l = pc.shape[0]\n centroid = np.mean(pc, axis=0)\n pc = pc - centroid\n m = np.darthest_pointmax(np.sqrt(np.sum(pc**2, axis=1)))\n pc = pc / m\n return pc\n\ndef square_distance(src, dst):\n \"\"\"\n Calculate Euclid distance between each two points.\n\n src^T * dst = xn * xm + yn * ym + zn * zm;\n sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;\n sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;\n dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2\n = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst\n\n Input:\n src: source points, [B, N, C]\n dst: target points, [B, M, C]\n Output:\n dist: per-point square distance, [B, N, M]\n \"\"\"\n B, N, _ = src.shape\n _, M, _ = dst.shape\n dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))\n dist += torch.sum(src ** 2, -1).view(B, N, 1)\n dist += torch.sum(dst ** 2, -1).view(B, 1, M)\n return dist\n\n\ndef index_points(points, idx):\n \"\"\"\n\n Input:\n points: input points data, [B, N, C]\n idx: sample index data, [B, S]\n Return:\n new_points:, indexed points data, [B, S, C]\n \"\"\"\n device = points.device\n B = points.shape[0]\n view_shape = list(idx.shape)\n view_shape[1:] = [1] * (len(view_shape) - 1)\n repeat_shape = list(idx.shape)\n repeat_shape[0] = 1\n batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)\n new_points = points[batch_indices, idx, :]\n return new_points\n\n \n\ndef binary_search(sorted_points, origin, margin):\n left = min(origin, margin)\n right = max(origin, margin)\n expected = (float(sorted_points[left]) + float(sorted_points[right]))/2\n center = int((left+right)/2)\n while(left < right):\n center = int((left+right)/2)\n if sorted_points[center] == expected:\n break\n if sorted_points[center] < expected:\n left = center+1\n else:\n right = center-1\n return center\n\n\n\n\n\ndef farthest_point_sample(xyz, npoint):\n \"\"\"\n Input:\n xyz: pointcloud data, [B, N, 3]\n npoint: number of samples\n 
Return:\n centroids: sampled pointcloud index, [B, npoint]\n \"\"\"\n \n device = xyz.device\n B,N,C = xyz.shape\n xyz = torch.squeeze(xyz)\n \n xyz = (xyz + 1)*100\n xyz = xyz.int()\n Start_FPS = Time.time()\n sum_dims = torch.sum(xyz, -1)\n sorted_points , indices = torch.sort(sum_dims, -1)\n sorted_points = sorted_points.numpy()\n indices = indices.numpy()\n initial_point = np.random.randint(1, N-2)\n centroids = []\n scores = np.zeros(N)\n centroids.append(initial_point)\n candidates = set()\n candidates.add(0)\n candidates.add(N-1)\n scores[0] = np.abs(sorted_points[initial_point]- sorted_points[0])\n scores[-1] = np.abs(sorted_points[initial_point]- sorted_points[-1])\n for i in range(npoint - 1):\n selected_centroid = np.argmax(scores)\n scores[selected_centroid] = 0\n centroids.append(selected_centroid)\n centroids = sorted(centroids)\n candidates.remove(selected_centroid)\n index = np.where(centroids==selected_centroid)[0][0]\n if not index == (len(centroids)-1):\n candidate_right = indices[binary_search(sorted_points,selected_centroid, centroids[index+1])]\n if candidate_right in centroids:\n R = candidate_right\n while R in centroids and (R<N-1):\n R+=1\n L = candidate_right\n while L in centroids and (L>0):\n L-=1\n L_score = abs(sorted_points[centroids[index+1]] - sorted_points[L])\n R_score = abs(sorted_points[selected_centroid] - sorted_points[R])\n if L_score > R_score or R==N-1:\n candidate_right = L\n if R_score > L_score or L==0:\n candidate_right = R\n candidates.add(candidate_right)\n middle_score = sorted_points[candidate_right]\n next_score = sorted_points[centroids[index+1]]\n past_score = sorted_points[selected_centroid]\n this_score = max(abs(next_score - middle_score), abs(middle_score-past_score))\n scores[candidate_right] = this_score\n if not index == 0:\n candidate_left = indices[binary_search(sorted_points, selected_centroid, centroids[index-1])]\n if candidate_left in centroids:\n R = candidate_left\n while (R in centroids) and (R < 
N-1):\n R+=1\n L = candidate_left\n while (L in centroids) and (L>0):\n L-=1\n L_score = abs(sorted_points[selected_centroid] - sorted_points[L])\n R_score = abs(sorted_points[centroids[index-1]] - sorted_points[R])\n if L_score > R_score or R==N-1:\n candidate_left = L\n if R_score > L_score or L==0:\n candidate_left = R_score\n \n middle_score = sorted_points[candidate_left]\n next_score = sorted_points[selected_centroid]\n past_score = sorted_points[centroids[index-1]]\n this_score = max(abs(next_score - middle_score), abs(middle_score-past_score))\n candidates.add(candidate_left)\n scores[candidate_left] = this_score\n final_centroids = []\n for item in centroids:\n final_centroids.append(indices[item])\n final_centroids = np.array(final_centroids)\n centroids = np.expand_dims(centroids, axis=0)\n FPS_Time = Time.time() - Start_FPS\n with open(\"FPS_Whole.txt\", 'a') as f:\n f.write(str(FPS_Time)+\"\\n\")\n return torch.tensor(centroids)\n\n\n\ndef query_ball_point(radius, nsample, xyz, new_xyz):\n start = Time.time()\n \"\"\"\n Input:\n radius: local region radius\n nsample: max sample number in local region\n xyz: all points, [B, N, 3]\n new_xyz: query points, [B, S, 3]\n Return:\n group_idx: grouped points index, [B, S, nsample]\n \"\"\"\n device = xyz.device\n B, N, C = xyz.shape\n _, S, _ = new_xyz.shape\n group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])\n #print(group_idx)\n #print(group_idx.shape)\n #print(\"group_idx\")\n #raise(False)\n sqrdists = square_distance(new_xyz, xyz)\n group_idx[sqrdists > radius ** 2] = N\n group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]\n group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])\n mask = group_idx == N\n group_idx[mask] = group_first[mask]\n with open(\"query_ball_point.txt\", 'a') as f:\n f.write(str(Time.time() - start)+\"\\n\")\n #print(\"query ball point\", Time.time() - start, \"\\n\")\n return group_idx\n\n\ndef sample_and_group(npoint, 
radius, nsample, xyz, points, returnfps=False):\n \"\"\"\n Input:\n npoint:\n radius:\n nsample:\n xyz: input points position data, [B, N, 3]\n points: input points data, [B, N, D]\n Return:\n new_xyz: sampled points position data, [B, npoint, nsample, 3]\n new_points: sampled points data, [B, npoint, nsample, 3+D]\n \"\"\"\n B, N, C = xyz.shape\n S = npoint\n fps_idx = farthest_point_sample(xyz, npoint) # [B, npoint, C]\n torch.cuda.empty_cache()\n new_xyz = index_points(xyz, fps_idx)\n torch.cuda.empty_cache()\n idx = query_ball_point(radius, nsample, xyz, new_xyz)\n torch.cuda.empty_cache()\n grouped_xyz = index_points(xyz, idx) # [B, npoint, nsample, C]\n torch.cuda.empty_cache()\n grouped_xyz_norm = grouped_xyz - new_xyz.view(B, S, 1, C)\n torch.cuda.empty_cache()\n\n if points is not None:\n grouped_points = index_points(points, idx)\n new_points = torch.cat([grouped_xyz_norm, grouped_points], dim=-1) # [B, npoint, nsample, C+D]\n else:\n new_points = grouped_xyz_norm\n if returnfps:\n return new_xyz, new_points, grouped_xyz, fps_idx\n else:\n return new_xyz, new_points\n\n\ndef sample_and_group_all(xyz, points):\n \"\"\"\n Input:\n xyz: input points position data, [B, N, 3]\n points: input points data, [B, N, D]\n Return:\n new_xyz: sampled points position data, [B, 1, 3]\n new_points: sampled points data, [B, 1, N, 3+D]\n \"\"\"\n device = xyz.device\n B, N, C = xyz.shape\n new_xyz = torch.zeros(B, 1, C).to(device)\n grouped_xyz = xyz.view(B, 1, N, C)\n if points is not None:\n new_points = torch.cat([grouped_xyz, points.view(B, 1, N, -1)], dim=-1)\n else:\n new_points = grouped_xyz\n return new_xyz, new_points\n\n\nclass PointNetSetAbstraction(nn.Module):\n def __init__(self, npoint, radius, nsample, in_channel, mlp, group_all):\n super(PointNetSetAbstraction, self).__init__()\n self.npoint = npoint\n self.radius = radius\n self.nsample = nsample\n self.mlp_convs = nn.ModuleList()\n self.mlp_bns = nn.ModuleList()\n last_channel = in_channel\n for 
out_channel in mlp:\n self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1))\n self.mlp_bns.append(nn.BatchNorm2d(out_channel))\n last_channel = out_channel\n self.group_all = group_all\n\n def forward(self, xyz, points):\n \"\"\"\n Input:\n xyz: input points position data, [B, C, N]\n points: input points data, [B, D, N]\n Return:\n new_xyz: sampled points position data, [B, C, S]\n new_points_concat: sample points feature data, [B, D', S]\n \"\"\"\n xyz = xyz.permute(0, 2, 1)\n if points is not None:\n points = points.permute(0, 2, 1)\n\n if self.group_all:\n new_xyz, new_points = sample_and_group_all(xyz, points)\n else:\n new_xyz, new_points = sample_and_group(self.npoint, self.radius, self.nsample, xyz, points)\n # new_xyz: sampled points position data, [B, npoint, C]\n # new_points: sampled points data, [B, npoint, nsample, C+D]\n new_points = new_points.permute(0, 3, 2, 1) # [B, C+D, nsample,npoint]\n for i, conv in enumerate(self.mlp_convs):\n bn = self.mlp_bns[i]\n new_points = F.relu(bn(conv(new_points)))\n\n new_points = torch.max(new_points, 2)[0]\n new_xyz = new_xyz.permute(0, 2, 1)\n return new_xyz, new_points\n\n\nclass PointNetSetAbstractionMsg(nn.Module):\n def __init__(self, npoint, radius_list, nsample_list, in_channel, mlp_list):\n super(PointNetSetAbstractionMsg, self).__init__()\n self.npoint = npoint\n self.radius_list = radius_list\n self.nsample_list = nsample_list\n self.conv_blocks = nn.ModuleList()\n self.bn_blocks = nn.ModuleList()\n for i in range(len(mlp_list)):\n convs = nn.ModuleList()\n bns = nn.ModuleList()\n last_channel = in_channel + 3\n for out_channel in mlp_list[i]:\n convs.append(nn.Conv2d(last_channel, out_channel, 1))\n bns.append(nn.BatchNorm2d(out_channel))\n last_channel = out_channel\n self.conv_blocks.append(convs)\n self.bn_blocks.append(bns)\n\n def forward(self, xyz, points):\n \"\"\"\n Input:\n xyz: input points position data, [B, C, N]\n points: input points data, [B, D, N]\n Return:\n new_xyz: sampled 
points position data, [B, C, S]\n new_points_concat: sample points feature data, [B, D', S]\n \"\"\"\n xyz = xyz.permute(0, 2, 1)\n if points is not None:\n points = points.permute(0, 2, 1)\n\n B, N, C = xyz.shape\n S = self.npoint\n new_xyz = index_points(xyz, farthest_point_sample(xyz, S))\n new_points_list = []\n for i, radius in enumerate(self.radius_list):\n K = self.nsample_list[i]\n group_idx = query_ball_point(radius, K, xyz, new_xyz)\n grouped_xyz = index_points(xyz, group_idx)\n grouped_xyz -= new_xyz.view(B, S, 1, C)\n if points is not None:\n grouped_points = index_points(points, group_idx)\n grouped_points = torch.cat([grouped_points, grouped_xyz], dim=-1)\n else:\n grouped_points = grouped_xyz\n\n grouped_points = grouped_points.permute(0, 3, 2, 1) # [B, D, K, S]\n for j in range(len(self.conv_blocks[i])):\n conv = self.conv_blocks[i][j]\n bn = self.bn_blocks[i][j]\n grouped_points = F.relu(bn(conv(grouped_points)))\n new_points = torch.max(grouped_points, 2)[0] # [B, D', S]\n new_points_list.append(new_points)\n\n new_xyz = new_xyz.permute(0, 2, 1)\n new_points_concat = torch.cat(new_points_list, dim=1)\n return new_xyz, new_points_concat\n\n\nclass PointNetFeaturePropagation(nn.Module):\n def __init__(self, in_channel, mlp):\n super(PointNetFeaturePropagation, self).__init__()\n self.mlp_convs = nn.ModuleList()\n self.mlp_bns = nn.ModuleList()\n last_channel = in_channel\n for out_channel in mlp:\n self.mlp_convs.append(nn.Conv1d(last_channel, out_channel, 1))\n self.mlp_bns.append(nn.BatchNorm1d(out_channel))\n last_channel = out_channel\n\n def forward(self, xyz1, xyz2, points1, points2):\n \"\"\"\n Input:\n xyz1: input points position data, [B, C, N]\n xyz2: sampled input points position data, [B, C, S]\n points1: input points data, [B, D, N]\n points2: input points data, [B, D, S]\n Return:\n new_points: upsampled points data, [B, D', N]\n \"\"\"\n xyz1 = xyz1.permute(0, 2, 1)\n xyz2 = xyz2.permute(0, 2, 1)\n\n points2 = points2.permute(0, 2, 
1)\n B, N, C = xyz1.shape\n _, S, _ = xyz2.shape\n\n if S == 1:\n interpolated_points = points2.repeat(1, N, 1)\n else:\n dists = square_distance(xyz1, xyz2)\n dists, idx = dists.sort(dim=-1)\n dists, idx = dists[:, :, :3], idx[:, :, :3] # [B, N, 3]\n\n dist_recip = 1.0 / (dists + 1e-8)\n norm = torch.sum(dist_recip, dim=2, keepdim=True)\n weight = dist_recip / norm\n interpolated_points = torch.sum(index_points(points2, idx) * weight.view(B, N, 3, 1), dim=2)\n\n if points1 is not None:\n points1 = points1.permute(0, 2, 1)\n new_points = torch.cat([points1, interpolated_points], dim=-1)\n else:\n new_points = interpolated_points\n\n new_points = new_points.permute(0, 2, 1)\n for i, conv in enumerate(self.mlp_convs):\n bn = self.mlp_bns[i]\n new_points = F.relu(bn(conv(new_points)))\n return new_points\n\n" ]
[ [ "torch.cat", "torch.nn.ModuleList", "torch.nn.BatchNorm2d", "numpy.mean", "numpy.where", "torch.squeeze", "torch.sum", "torch.nn.Conv1d", "numpy.random.randint", "torch.utils.data.DataLoader", "torch.tensor", "numpy.argmax", "numpy.expand_dims", "torch.zeros", "numpy.array", "numpy.zeros", "torch.max", "torch.cuda.empty_cache", "torch.nn.Conv2d", "torch.sort", "torch.arange", "numpy.sum", "torch.no_grad", "torch.nn.BatchNorm1d", "numpy.abs" ] ]
fathomer/pandas
[ "5baccd46441e94fca4b40f25da70a563f642fae3" ]
[ "pandas/plotting/_matplotlib/tools.py" ]
[ "# being a bit too dynamic\nfrom __future__ import annotations\n\nfrom math import ceil\nfrom typing import (\n TYPE_CHECKING,\n Iterable,\n List,\n Sequence,\n Tuple,\n Union,\n)\nimport warnings\n\nimport matplotlib.table\nimport matplotlib.ticker as ticker\nimport numpy as np\n\nfrom pandas._typing import FrameOrSeriesUnion\n\nfrom pandas.core.dtypes.common import is_list_like\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCIndex,\n ABCSeries,\n)\n\nfrom pandas.plotting._matplotlib import compat\n\nif TYPE_CHECKING:\n from matplotlib.axes import Axes\n from matplotlib.axis import Axis\n from matplotlib.figure import Figure\n from matplotlib.lines import Line2D\n from matplotlib.table import Table\n\n\ndef do_adjust_figure(fig: Figure):\n \"\"\"Whether fig has constrained_layout enabled.\"\"\"\n if not hasattr(fig, \"get_constrained_layout\"):\n return False\n return not fig.get_constrained_layout()\n\n\ndef maybe_adjust_figure(fig: Figure, *args, **kwargs):\n \"\"\"Call fig.subplots_adjust unless fig has constrained_layout enabled.\"\"\"\n if do_adjust_figure(fig):\n fig.subplots_adjust(*args, **kwargs)\n\n\ndef format_date_labels(ax: Axes, rot):\n # mini version of autofmt_xdate\n for label in ax.get_xticklabels():\n label.set_ha(\"right\")\n label.set_rotation(rot)\n fig = ax.get_figure()\n maybe_adjust_figure(fig, bottom=0.2)\n\n\ndef table(\n ax, data: FrameOrSeriesUnion, rowLabels=None, colLabels=None, **kwargs\n) -> Table:\n if isinstance(data, ABCSeries):\n data = data.to_frame()\n elif isinstance(data, ABCDataFrame):\n pass\n else:\n raise ValueError(\"Input data must be DataFrame or Series\")\n\n if rowLabels is None:\n rowLabels = data.index\n\n if colLabels is None:\n colLabels = data.columns\n\n cellText = data.values\n\n table = matplotlib.table.table(\n ax, cellText=cellText, rowLabels=rowLabels, colLabels=colLabels, **kwargs\n )\n return table\n\n\ndef _get_layout(nplots: int, layout=None, layout_type: str = \"box\") -> Tuple[int, 
int]:\n if layout is not None:\n if not isinstance(layout, (tuple, list)) or len(layout) != 2:\n raise ValueError(\"Layout must be a tuple of (rows, columns)\")\n\n nrows, ncols = layout\n\n if nrows == -1 and ncols > 0:\n layout = nrows, ncols = (ceil(nplots / ncols), ncols)\n elif ncols == -1 and nrows > 0:\n layout = nrows, ncols = (nrows, ceil(nplots / nrows))\n elif ncols <= 0 and nrows <= 0:\n msg = \"At least one dimension of layout must be positive\"\n raise ValueError(msg)\n\n if nrows * ncols < nplots:\n raise ValueError(\n f\"Layout of {nrows}x{ncols} must be larger than required size {nplots}\"\n )\n\n return layout\n\n if layout_type == \"single\":\n return (1, 1)\n elif layout_type == \"horizontal\":\n return (1, nplots)\n elif layout_type == \"vertical\":\n return (nplots, 1)\n\n layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)}\n try:\n return layouts[nplots]\n except KeyError:\n k = 1\n while k ** 2 < nplots:\n k += 1\n\n if (k - 1) * k >= nplots:\n return k, (k - 1)\n else:\n return k, k\n\n\n# copied from matplotlib/pyplot.py and modified for pandas.plotting\n\n\ndef create_subplots(\n naxes: int,\n sharex: bool = False,\n sharey: bool = False,\n squeeze: bool = True,\n subplot_kw=None,\n ax=None,\n layout=None,\n layout_type: str = \"box\",\n **fig_kw,\n):\n \"\"\"\n Create a figure with a set of subplots already made.\n\n This utility wrapper makes it convenient to create common layouts of\n subplots, including the enclosing figure object, in a single call.\n\n Parameters\n ----------\n naxes : int\n Number of required axes. Exceeded axes are set invisible. 
Default is\n nrows * ncols.\n\n sharex : bool\n If True, the X axis will be shared amongst all subplots.\n\n sharey : bool\n If True, the Y axis will be shared amongst all subplots.\n\n squeeze : bool\n\n If True, extra dimensions are squeezed out from the returned axis object:\n - if only one subplot is constructed (nrows=ncols=1), the resulting\n single Axis object is returned as a scalar.\n - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object\n array of Axis objects are returned as numpy 1-d arrays.\n - for NxM subplots with N>1 and M>1 are returned as a 2d array.\n\n If False, no squeezing is done: the returned axis object is always\n a 2-d array containing Axis instances, even if it ends up being 1x1.\n\n subplot_kw : dict\n Dict with keywords passed to the add_subplot() call used to create each\n subplots.\n\n ax : Matplotlib axis object, optional\n\n layout : tuple\n Number of rows and columns of the subplot grid.\n If not specified, calculated from naxes and layout_type\n\n layout_type : {'box', 'horizontal', 'vertical'}, default 'box'\n Specify how to layout the subplot grid.\n\n fig_kw : Other keyword arguments to be passed to the figure() call.\n Note that all keywords not recognized above will be\n automatically included here.\n\n Returns\n -------\n fig, ax : tuple\n - fig is the Matplotlib Figure object\n - ax can be either a single axis object or an array of axis objects if\n more than one subplot was created. 
The dimensions of the resulting array\n can be controlled with the squeeze keyword, see above.\n\n Examples\n --------\n x = np.linspace(0, 2*np.pi, 400)\n y = np.sin(x**2)\n\n # Just a figure and one subplot\n f, ax = plt.subplots()\n ax.plot(x, y)\n ax.set_title('Simple plot')\n\n # Two subplots, unpack the output array immediately\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n ax1.plot(x, y)\n ax1.set_title('Sharing Y axis')\n ax2.scatter(x, y)\n\n # Four polar axes\n plt.subplots(2, 2, subplot_kw=dict(polar=True))\n \"\"\"\n import matplotlib.pyplot as plt\n\n if subplot_kw is None:\n subplot_kw = {}\n\n if ax is None:\n fig = plt.figure(**fig_kw)\n else:\n if is_list_like(ax):\n if squeeze:\n ax = flatten_axes(ax)\n if layout is not None:\n warnings.warn(\n \"When passing multiple axes, layout keyword is ignored\", UserWarning\n )\n if sharex or sharey:\n warnings.warn(\n \"When passing multiple axes, sharex and sharey \"\n \"are ignored. These settings must be specified when creating axes\",\n UserWarning,\n stacklevel=4,\n )\n if ax.size == naxes:\n fig = ax.flat[0].get_figure()\n return fig, ax\n else:\n raise ValueError(\n f\"The number of passed axes must be {naxes}, the \"\n \"same as the output plot\"\n )\n\n fig = ax.get_figure()\n # if ax is passed and a number of subplots is 1, return ax as it is\n if naxes == 1:\n if squeeze:\n return fig, ax\n else:\n return fig, flatten_axes(ax)\n else:\n warnings.warn(\n \"To output multiple subplots, the figure containing \"\n \"the passed axes is being cleared\",\n UserWarning,\n stacklevel=4,\n )\n fig.clear()\n\n nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type)\n nplots = nrows * ncols\n\n # Create empty object array to hold all axes. 
It's easiest to make it 1-d\n # so we can just append subplots upon creation, and then\n axarr = np.empty(nplots, dtype=object)\n\n # Create first subplot separately, so we can share it if requested\n ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)\n\n if sharex:\n subplot_kw[\"sharex\"] = ax0\n if sharey:\n subplot_kw[\"sharey\"] = ax0\n axarr[0] = ax0\n\n # Note off-by-one counting because add_subplot uses the MATLAB 1-based\n # convention.\n for i in range(1, nplots):\n kwds = subplot_kw.copy()\n # Set sharex and sharey to None for blank/dummy axes, these can\n # interfere with proper axis limits on the visible axes if\n # they share axes e.g. issue #7528\n if i >= naxes:\n kwds[\"sharex\"] = None\n kwds[\"sharey\"] = None\n ax = fig.add_subplot(nrows, ncols, i + 1, **kwds)\n axarr[i] = ax\n\n if naxes != nplots:\n for ax in axarr[naxes:]:\n ax.set_visible(False)\n\n handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey)\n\n if squeeze:\n # Reshape the array to have the final desired dimension (nrow,ncol),\n # though discarding unneeded dimensions that equal 1. 
If we only have\n # one subplot, just return it instead of a 1-element array.\n if nplots == 1:\n axes = axarr[0]\n else:\n axes = axarr.reshape(nrows, ncols).squeeze()\n else:\n # returned axis array will be always 2-d, even if nrows=ncols=1\n axes = axarr.reshape(nrows, ncols)\n\n return fig, axes\n\n\ndef _remove_labels_from_axis(axis: Axis):\n for t in axis.get_majorticklabels():\n t.set_visible(False)\n\n # set_visible will not be effective if\n # minor axis has NullLocator and NullFormatter (default)\n if isinstance(axis.get_minor_locator(), ticker.NullLocator):\n axis.set_minor_locator(ticker.AutoLocator())\n if isinstance(axis.get_minor_formatter(), ticker.NullFormatter):\n axis.set_minor_formatter(ticker.FormatStrFormatter(\"\"))\n for t in axis.get_minorticklabels():\n t.set_visible(False)\n\n axis.get_label().set_visible(False)\n\n\ndef _has_externally_shared_axis(ax1: matplotlib.axes, compare_axis: str) -> bool:\n \"\"\"\n Return whether an axis is externally shared.\n\n Parameters\n ----------\n ax1 : matplotlib.axes\n Axis to query.\n compare_axis : str\n `\"x\"` or `\"y\"` according to whether the X-axis or Y-axis is being\n compared.\n\n Returns\n -------\n bool\n `True` if the axis is externally shared. 
Otherwise `False`.\n\n Notes\n -----\n If two axes with different positions are sharing an axis, they can be\n referred to as *externally* sharing the common axis.\n\n If two axes sharing an axis also have the same position, they can be\n referred to as *internally* sharing the common axis (a.k.a twinning).\n\n _handle_shared_axes() is only interested in axes externally sharing an\n axis, regardless of whether either of the axes is also internally sharing\n with a third axis.\n \"\"\"\n if compare_axis == \"x\":\n axes = ax1.get_shared_x_axes()\n elif compare_axis == \"y\":\n axes = ax1.get_shared_y_axes()\n else:\n raise ValueError(\n \"_has_externally_shared_axis() needs 'x' or 'y' as a second parameter\"\n )\n\n axes = axes.get_siblings(ax1)\n\n # Retain ax1 and any of its siblings which aren't in the same position as it\n ax1_points = ax1.get_position().get_points()\n\n for ax2 in axes:\n if not np.array_equal(ax1_points, ax2.get_position().get_points()):\n return True\n\n return False\n\n\ndef handle_shared_axes(\n axarr: Iterable[Axes],\n nplots: int,\n naxes: int,\n nrows: int,\n ncols: int,\n sharex: bool,\n sharey: bool,\n):\n if nplots > 1:\n if compat.mpl_ge_3_2_0():\n row_num = lambda x: x.get_subplotspec().rowspan.start\n col_num = lambda x: x.get_subplotspec().colspan.start\n else:\n row_num = lambda x: x.rowNum\n col_num = lambda x: x.colNum\n\n if compat.mpl_ge_3_4_0():\n is_first_col = lambda x: x.get_subplotspec().is_first_col()\n else:\n is_first_col = lambda x: x.is_first_col()\n\n if nrows > 1:\n try:\n # first find out the ax layout,\n # so that we can correctly handle 'gaps\"\n layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool_)\n for ax in axarr:\n layout[row_num(ax), col_num(ax)] = ax.get_visible()\n\n for ax in axarr:\n # only the last row of subplots should get x labels -> all\n # other off layout handles the case that the subplot is\n # the last in the column, because below is no subplot/gap.\n if not layout[row_num(ax) + 1, 
col_num(ax)]:\n continue\n if sharex or _has_externally_shared_axis(ax, \"x\"):\n _remove_labels_from_axis(ax.xaxis)\n\n except IndexError:\n # if gridspec is used, ax.rowNum and ax.colNum may different\n # from layout shape. in this case, use last_row logic\n for ax in axarr:\n if ax.is_last_row():\n continue\n if sharex or _has_externally_shared_axis(ax, \"x\"):\n _remove_labels_from_axis(ax.xaxis)\n\n if ncols > 1:\n for ax in axarr:\n # only the first column should get y labels -> set all other to\n # off as we only have labels in the first column and we always\n # have a subplot there, we can skip the layout test\n if is_first_col(ax):\n continue\n if sharey or _has_externally_shared_axis(ax, \"y\"):\n _remove_labels_from_axis(ax.yaxis)\n\n\ndef flatten_axes(axes: Union[Axes, Sequence[Axes]]) -> np.ndarray:\n if not is_list_like(axes):\n return np.array([axes])\n elif isinstance(axes, (np.ndarray, ABCIndex)):\n return np.asarray(axes).ravel()\n return np.array(axes)\n\n\ndef set_ticks_props(\n axes: Union[Axes, Sequence[Axes]],\n xlabelsize=None,\n xrot=None,\n ylabelsize=None,\n yrot=None,\n):\n import matplotlib.pyplot as plt\n\n for ax in flatten_axes(axes):\n if xlabelsize is not None:\n plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)\n if xrot is not None:\n plt.setp(ax.get_xticklabels(), rotation=xrot)\n if ylabelsize is not None:\n plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)\n if yrot is not None:\n plt.setp(ax.get_yticklabels(), rotation=yrot)\n return axes\n\n\ndef get_all_lines(ax: Axes) -> List[Line2D]:\n lines = ax.get_lines()\n\n if hasattr(ax, \"right_ax\"):\n lines += ax.right_ax.get_lines()\n\n if hasattr(ax, \"left_ax\"):\n lines += ax.left_ax.get_lines()\n\n return lines\n\n\ndef get_xlim(lines: Iterable[Line2D]) -> Tuple[float, float]:\n left, right = np.inf, -np.inf\n for line in lines:\n x = line.get_xdata(orig=False)\n left = min(np.nanmin(x), left)\n right = max(np.nanmax(x), right)\n return left, right\n" ]
[ [ "numpy.array", "numpy.empty", "numpy.asarray", "numpy.zeros", "matplotlib.ticker.AutoLocator", "pandas.plotting._matplotlib.compat.mpl_ge_3_2_0", "matplotlib.pyplot.figure", "pandas.plotting._matplotlib.compat.mpl_ge_3_4_0", "matplotlib.ticker.FormatStrFormatter", "numpy.nanmin", "pandas.core.dtypes.common.is_list_like", "numpy.nanmax" ] ]
ivclab/AdaLabelHash
[ "3fdca6749ae6e3e5a3f0610443f9bb0b55cfe3b4" ]
[ "utils/models.py" ]
[ "import sys\nimport numpy as np\nimport keras.backend as K\nfrom keras.layers import Dense, Embedding, Input, Activation, Lambda\nfrom keras.engine import Model\nfrom keras.applications import imagenet_utils\nfrom keras.preprocessing.image import img_to_array\nfrom PIL import Image as pil_image\nfrom .VGG_CNN_F_keras import VGG_CNN_F, LRN\nfrom sklearn.cluster import KMeans\n\n\ndef innerprod_similarity(codes):\n res = K.dot(codes[0], K.transpose(codes[1][0, ...]))\n return res\n\n\ndef l2norm_innerprod_similarity():\n x0_norm = tf.nn.l2_normalize(x[0], 1)\n x1_norm = tf.nn.l2_normalize(x[1][0, ...], 1)\n res = K.dot(x0_norm, K.transpose(x1_norm))\n return res\n\n\ndef pair_similarity_shape(input_shape):\n shape = list(input_shape)\n shape[-1] = shape[0]\n return tuple(shape)\n\n\ndef construct_image_coder(code_len, in_shape):\n base_model = VGG_CNN_F(arch_mode='rmlast', weights='imagenet', input_shape=in_shape+(3,))\n image_codes = Dense(code_len, name='hash_layer')(base_model.output)\n image_codes = Activation('tanh', name='image_codes')(image_codes)\n return Model(base_model.input, image_codes)\n\n\ndef construct_class_coder(code_len, num_classes):\n embed_inds = Input(shape=(num_classes,), dtype='int32', name='embed_inds')\n class_codes = Embedding(num_classes, code_len, input_length=num_classes,\n name='class_embedding')(embed_inds)\n class_codes = Activation('tanh', name='class_codes')(class_codes)\n return Model(embed_inds, class_codes)\n\n\ndef predict_image_codes(model, sample_list, input_shape=(224, 224)):\n image_coder = Model(inputs=[model.get_layer('input_1').input],\n outputs=[model.get_layer('image_codes').output])\n batch_size = 128\n num_samples = len(sample_list)\n preprocess = imagenet_utils.preprocess_input\n count = 0\n image_paths, labels = zip(*sample_list)\n image_codes = []\n\n def load_and_resize_image(image_path, output_shape):\n image = pil_image.open(image_path)\n image = image.resize(output_shape, pil_image.BILINEAR)\n if image.mode 
!= 'RGB':\n image = image.convert('RGB')\n image = img_to_array(image)\n return [image]\n\n while count < num_samples:\n sys.stdout.write('prog: {}/{} ... \\r'.format(count, num_samples))\n sys.stdout.flush()\n cur_batch = np.min([batch_size, num_samples-count])\n cur_x = map(lambda x: load_and_resize_image(x, input_shape), image_paths[count:count+cur_batch])\n cur_x = preprocess(np.vstack(cur_x))\n cur_codes = image_coder.predict([cur_x])\n image_codes.append(cur_codes)\n count += cur_batch\n print('\\nProcess %d samples' % num_samples)\n return np.vstack(image_codes)\n\n\ndef predict_class_codes(model, num_classes):\n class_coder = Model(inputs=[model.get_layer('embed_inds').input],\n outputs=[model.get_layer('class_codes').output])\n embed_inds = np.tile(np.arange(num_classes), (1, 1))\n class_codes = class_coder.predict([embed_inds])\n class_codes = np.squeeze(class_codes)\n print('Shape of class_codes: {}'.format(class_codes.shape))\n return class_codes\n\n\ndef construct_adalabelhash(code_len, in_shape, num_classes, sim_name='innerprod'):\n \"\"\"Construct the structure of ResHash Network\n\n # Arguments:\n in_shape: Input shape of images\n num_classes: Number of classes for supervisions\n code_len: Length of hash codes\n sim_name: Method for measuring the code similarities, ('innerprod', 'l2norm_innerprod')\n \"\"\"\n sim_methods = {'innerprod': innerprod_similarity,\n 'l2norm_innerprod': l2norm_innerprod_similarity}\n image_codes = construct_image_coder(code_len, in_shape)\n class_codes = construct_class_coder(code_len, num_classes)\n distance = Lambda(sim_methods[sim_name],\n output_shape=pair_similarity_shape)([image_codes.output, class_codes.output])\n model = Model(inputs=[image_codes.input, class_codes.input], outputs=distance)\n return model\n" ]
[ [ "numpy.arange", "numpy.squeeze", "numpy.min", "numpy.vstack" ] ]
mikelytaev/wave-propagation
[ "eff0eb1fc843e4d206b05731e40047e1d810d76f" ]
[ "transforms/frft.py" ]
[ "\"\"\"\nImplementation of the Fourier transform method from\nBailey D. H., Swarztrauber P. N. A fast method for the numerical evaluation of continuous Fourier and Laplace\ntransforms //SIAM Journal on Scientific Computing. – 1994. – Vol. 15. – N. 5. – С. 1105-1110.\n\"\"\"\nimport numpy as np\nimport cmath as cm\n\n__author__ = 'Lytaev Mikhail ([email protected])'\n\n\ndef get_fcft_grid(m, b):\n return np.arange(0, m) * b / m - b / 2\n\n\ndef frft(x, alpha):\n m = x.shape[-1]\n if len(x.shape) == 1:\n x = x.reshape(1, m)\n y = np.zeros((x.shape[0], 2*x.shape[1]), dtype=complex)\n y[:, 0:m] = x * np.exp(-cm.pi * 1j * np.arange(0, m) ** 2 * alpha)\n z = np.zeros((x.shape[0], 2*x.shape[1]), dtype=complex)\n z[:, 0:m] = np.exp(cm.pi * 1j * np.arange(0, m) ** 2 * alpha)\n z[:, m:2 * m] = np.exp(cm.pi * 1j * (np.arange(m, 2 * m) - 2 * m) ** 2 * alpha)\n w = np.fft.ifft((np.fft.fft(y) * np.fft.fft(z)))\n return np.exp(-cm.pi * 1j * np.arange(0, m) ** 2 * alpha) * w[:, 0:m]\n\n\ndef _fcft(f_x, a, b):\n \"\"\"\n computes discrete Fourier Transform for input points f_x\n 1/\\sqrt{2 \\pi} \\int\\limits_{-a/2}^{a/2} f(t)\\exp (-itx_{k})dt\n :param f_x: input function values in points get_fcft_grid(m, b)\n \"\"\"\n m = f_x.shape[-1]\n delta = a * b / (2 * cm.pi * m ** 2)\n beta = a / m\n w = frft(np.exp(cm.pi * 1j * np.arange(0, m) * m * delta) * f_x, delta)\n return 1 / cm.sqrt(2 * cm.pi) * beta * np.exp(cm.pi * 1j * (np.arange(0, m) - m / 2) * m * delta) * w\n\n\ndef fcft(f_x, a1, a2, b):\n m = f_x.shape[-1]\n grid = np.tile(get_fcft_grid(m, b), (f_x.shape[0], 1))\n return _fcft(f_x, a2 - a1, b) * np.exp(-1j * (a1 + a2) / 2 * grid)\n\n\ndef _ifcft(f_x, b, a):\n \"\"\"\n computes inverse discrete Fourier Transform for input points f_x\n \"\"\"\n return _fcft(f_x, b, -a)\n\n\ndef ifcft(f_x, b1, b2, a):\n m = f_x.shape[-1]\n grid = np.tile(get_fcft_grid(m, a), (f_x.shape[0], 1))\n return _ifcft(f_x, b2 - b1, a) * np.exp(1j * (b1 + b2) / 2 * grid)\n" ]
[ [ "numpy.fft.fft", "numpy.arange", "numpy.exp", "numpy.zeros" ] ]
dumpmemory/Research
[ "30fd70ff331b3d9aeede0b71e7a691ed6c2b87b3" ]
[ "CV/Effective Transformer-based Solution for RSNA Intracranial Hemorrhage Detection/easymia/utils/progbar.py" ]
[ "# -*-coding utf-8 -*-\n##########################################################################\n#\n# Copyright (c) 2022 Baidu.com, Inc. All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n##########################################################################\n\"\"\"\n进度条\n\"\"\"\n\nimport os\nimport sys\nimport time\n\nimport numpy as np\n\n\nclass Progbar(object):\n \"\"\"\n Displays a progress bar.\n It refers to https://github.com/keras-team/keras/blob/keras-2/keras/utils/generic_utils.py\n\n Args:\n target (int): Total number of steps expected, None if unknown.\n width (int): Progress bar width on screen.\n verbose (int): Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)\n stateful_metrics (list|tuple): Iterable of string names of metrics that should *not* be\n averaged over time. Metrics in this list will be displayed as-is. 
All\n others will be averaged by the progbar before display.\n interval (float): Minimum visual progress update interval (in seconds).\n unit_name (str): Display name for step counts (usually \"step\" or \"sample\").\n \"\"\"\n\n def __init__(self,\n target,\n width=30,\n verbose=1,\n interval=0.05,\n stateful_metrics=None,\n unit_name='step'):\n self.target = target\n self.width = width\n self.verbose = verbose\n self.interval = interval\n self.unit_name = unit_name\n if stateful_metrics:\n self.stateful_metrics = set(stateful_metrics)\n else:\n self.stateful_metrics = set()\n\n self._dynamic_display = ((hasattr(sys.stderr, 'isatty')\n and sys.stderr.isatty())\n or 'ipykernel' in sys.modules\n or 'posix' in sys.modules\n or 'PYCHARM_HOSTED' in os.environ)\n self._total_width = 0\n self._seen_so_far = 0\n # We use a dict + list to avoid garbage collection\n # issues found in OrderedDict\n self._values = {}\n self._values_order = []\n self._start = time.time()\n self._last_update = 0\n\n def update(self, current, values=None, finalize=None):\n \"\"\"\n Updates the progress bar.\n\n Args:\n current (int): Index of current step.\n values (list): List of tuples: `(name, value_for_last_step)`. If `name` is in\n `stateful_metrics`, `value_for_last_step` will be displayed as-is.\n Else, an average of the metric over time will be displayed.\n finalize (bool): Whether this is the last update for the progress bar. If\n `None`, defaults to `current >= self.target`.\n \"\"\"\n\n if finalize is None:\n if self.target is None:\n finalize = False\n else:\n finalize = current >= self.target\n\n values = values or []\n for k, v in values:\n if k not in self._values_order:\n self._values_order.append(k)\n if k not in self.stateful_metrics:\n # In the case that progress bar doesn't have a target value in the first\n # epoch, both on_batch_end and on_epoch_end will be called, which will\n # cause 'current' and 'self._seen_so_far' to have the same value. 
Force\n # the minimal value to 1 here, otherwise stateful_metric will be 0s.\n value_base = max(current - self._seen_so_far, 1)\n if k not in self._values:\n self._values[k] = [v * value_base, value_base]\n else:\n self._values[k][0] += v * value_base\n self._values[k][1] += value_base\n else:\n # Stateful metrics output a numeric value. This representation\n # means \"take an average from a single value\" but keeps the\n # numeric formatting.\n self._values[k] = [v, 1]\n self._seen_so_far = current\n\n now = time.time()\n info = ' - %.0fs' % (now - self._start)\n if self.verbose == 1:\n if now - self._last_update < self.interval and not finalize:\n return\n\n prev_total_width = self._total_width\n if self._dynamic_display:\n sys.stderr.write('\\b' * prev_total_width)\n sys.stderr.write('\\r')\n else:\n sys.stderr.write('\\n')\n\n if self.target is not None:\n numdigits = int(np.log10(self.target)) + 1\n bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target)\n prog = float(current) / self.target\n prog_width = int(self.width * prog)\n if prog_width > 0:\n bar += ('=' * (prog_width - 1))\n if current < self.target:\n bar += '>'\n else:\n bar += '='\n bar += ('.' 
* (self.width - prog_width))\n bar += ']'\n else:\n bar = '%7d/Unknown' % current\n\n self._total_width = len(bar)\n sys.stderr.write(bar)\n\n if current:\n time_per_unit = (now - self._start) / current\n else:\n time_per_unit = 0\n\n if self.target is None or finalize:\n if time_per_unit >= 1 or time_per_unit == 0:\n info += ' %.0fs/%s' % (time_per_unit, self.unit_name)\n elif time_per_unit >= 1e-3:\n info += ' %.0fms/%s' % (time_per_unit * 1e3, self.unit_name)\n else:\n info += ' %.0fus/%s' % (time_per_unit * 1e6, self.unit_name)\n else:\n eta = time_per_unit * (self.target - current)\n if eta > 3600:\n eta_format = '%d:%02d:%02d' % (eta // 3600,\n (eta % 3600) // 60, eta % 60)\n elif eta > 60:\n eta_format = '%d:%02d' % (eta // 60, eta % 60)\n else:\n eta_format = '%ds' % eta\n\n info = ' - ETA: %s' % eta_format\n\n for k in self._values_order:\n info += ' - %s:' % k\n if isinstance(self._values[k], list):\n avg = np.mean(\n self._values[k][0] / max(1, self._values[k][1]))\n if abs(avg) > 1e-3:\n info += ' %.4f' % avg\n else:\n info += ' %.4e' % avg\n else:\n info += ' %s' % self._values[k]\n\n self._total_width += len(info)\n if prev_total_width > self._total_width:\n info += (' ' * (prev_total_width - self._total_width))\n\n if finalize:\n info += '\\n'\n\n sys.stderr.write(info)\n sys.stderr.flush()\n\n elif self.verbose == 2:\n if finalize:\n numdigits = int(np.log10(self.target)) + 1\n count = ('%' + str(numdigits) + 'd/%d') % (current, self.target)\n info = count + info\n for k in self._values_order:\n info += ' - %s:' % k\n avg = np.mean(\n self._values[k][0] / max(1, self._values[k][1]))\n if avg > 1e-3:\n info += ' %.4f' % avg\n else:\n info += ' %.4e' % avg\n info += '\\n'\n\n sys.stderr.write(info)\n sys.stderr.flush()\n\n self._last_update = now\n\n def add(self, n, values=None):\n \"\"\"\n add\n \"\"\"\n self.update(self._seen_so_far + n, values)" ]
[ [ "numpy.log10" ] ]
cyk19/Geospatial_hackathon
[ "1e9a2d3c19338838afbc2cab2615743e91bee4fa" ]
[ "Differences/differencing.py" ]
[ "from skimage.metrics import structural_similarity\nimport cv2\nimport numpy as np\n\nbefore = cv2.imread('differences/img0.jpg')\nafter = cv2.imread('differences/img1.jpg')\n\n\"\"\" \nimage_size will store image.shape \nwhich is a 3obj tuple (dimension_y, dimension_x, RBG)\n\"\"\"\n#print(before)\n#print(after)\nbefore_size = before.shape\nafter_size = after.shape\nprint(\"Before_size = \" + str(before_size)) #To see te dimension of before_size\nprint(\"After_size = \" + str(after_size)) #To see te dimension of after_size\n\n# create after with grids\nafter_with_grid = after.copy()\nheight, width, channels = after_with_grid.shape\n\nfor i in range(0, width, 30):\n cv2.line(after_with_grid, (i, 0), (i, height), (0, 0, 0), 1)\nfor i in range(0, height, 30):\n cv2.line(after_with_grid, (0, i), (width, i), (0, 0, 0), 1)\n\n# Convert images to grayscale\nbefore_gray = cv2.cvtColor(before, cv2.COLOR_BGR2GRAY)\nafter_gray = cv2.cvtColor(after, cv2.COLOR_BGR2GRAY)\n\n# Compute SSIM between two images\n(score, diff) = structural_similarity(before_gray, after_gray, full=True)\nprint(\"Image similarity = \", score)\n\n# The diff image contains the actual image differences between the two images\n# and is represented as a floating point data type in the range [0,1] \n# so we must convert the array to 8-bit unsigned integers in the range\n# [0,255] before we can use it with OpenCV\ndiff = (diff * 255).astype(\"uint8\")\n\n# Threshold the difference image, followed by finding contours to\n# obtain the regions of the two input images that differ\nthresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\ncontours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\ncontours = contours[0] if len(contours) == 2 else contours[1]\n\nmask = np.zeros(before.shape, dtype='uint8')\nfilled_after = after.copy()\n\nbouding_boxes = []\nindex = 1\nfor c in contours:\n area = cv2.contourArea(c)\n if area > 40:\n \n x,y,w,h = 
cv2.boundingRect(c)\n bouding_boxes.append((x,y,w,h))\n cv2.rectangle(before, (x, y), (x + w, y + h), (36,255,12), 1)\n cv2.putText(before, 'point'+ str(index), (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (36,255,12), 1) #labels\n cv2.rectangle(after, (x, y), (x + w, y + h), (36,255,12), 1)\n cv2.putText(after, 'point'+ str(index), (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (36,255,12), 1) #labels\n cv2.drawContours(mask, [c], 0, (0,255,0), -1)\n cv2.drawContours(filled_after, [c], 0, (0,255,0), -1)\n index += 1\n\n\nprint(\"box_count = \" + str(len(bouding_boxes)) + \" \\n >> \" + str(bouding_boxes))\ncv2.imshow('before', before)\n# cv2.imshow('after', after)\n#cv2.imshow('diff',diff)\n#cv2.imshow('mask',mask)\n#cv2.imshow('filled after',filled_after)\n# cv2.imshow('after with grid', after_with_grid)\ncv2.waitKey(0)\n\n\"\"\"\n Impact assessment part \nlist_1 = [(x,y,w,h), (obj2.1)]\nlist_2 = [(name, shape_size, bla), (obj_2.2)]\n\n>>list_3 = [((x,y,w,h), (name, shape_size, bla)), (obj2.12)]\n\nfor i in range(len(list_1)):\n list_3.append((list_1[i], list2[i]))\n\nprint(list_3)\n\nlist_1 = bouding_boxes\n\nlist_2 = []\n\"\"\"\n\n" ]
[ [ "numpy.zeros" ] ]
ictnlp/MoE-Waitk
[ "6f8ca9834c2ab77785ebd93fd569f73c3819340b" ]
[ "fairseq/models/bart/hub_interface.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nimport logging\nfrom typing import Dict, List\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom fairseq import utils\nfrom fairseq.data import encoders\nfrom fairseq.hub_utils import GeneratorHubInterface\nfrom omegaconf import open_dict\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass BARTHubInterface(GeneratorHubInterface):\n \"\"\"A simple PyTorch Hub interface to BART.\n\n Usage: https://github.com/pytorch/fairseq/tree/master/examples/bart\n \"\"\"\n\n def __init__(self, cfg, task, model):\n super().__init__(cfg, task, [model])\n self.model = self.models[0]\n\n def encode(\n self, sentence: str, *addl_sentences, no_separator=True\n ) -> torch.LongTensor:\n \"\"\"\n BPE-encode a sentence (or multiple sentences).\n\n Every sequence begins with a beginning-of-sentence (`<s>`) symbol.\n Every sentence ends with an end-of-sentence (`</s>`).\n\n Example (single sentence): `<s> a b c </s>`\n Example (sentence pair): `<s> d e f </s> 1 2 3 </s>`\n\n The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE\n requires leading spaces. 
For example::\n\n >>> bart.encode('Hello world').tolist()\n [0, 31414, 232, 2]\n >>> bart.encode(' world').tolist()\n [0, 232, 2]\n >>> bart.encode('world').tolist()\n [0, 8331, 2]\n \"\"\"\n tokens = self.bpe.encode(sentence)\n if len(tokens.split(\" \")) > min(self.max_positions) - 2:\n tokens = \" \".join(tokens.split(\" \")[: min(self.max_positions) - 2])\n bpe_sentence = \"<s> \" + tokens + \" </s>\"\n for s in addl_sentences:\n bpe_sentence += \" </s>\" if not no_separator else \"\"\n bpe_sentence += \" \" + self.bpe.encode(s) + \" </s>\"\n tokens = self.task.source_dictionary.encode_line(bpe_sentence, append_eos=False)\n return tokens.long()\n\n def decode(self, tokens: torch.LongTensor):\n assert tokens.dim() == 1\n tokens = tokens.cpu().numpy()\n if tokens[0] == self.task.source_dictionary.bos():\n tokens = tokens[1:] # remove <s>\n eos_mask = tokens == self.task.source_dictionary.eos()\n doc_mask = eos_mask[1:] & eos_mask[:-1]\n sentences = np.split(tokens, doc_mask.nonzero()[0] + 1)\n sentences = [\n self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences\n ]\n if len(sentences) == 1:\n return sentences[0]\n return sentences\n\n def _build_sample(self, src_tokens: List[torch.LongTensor]):\n # assert torch.is_tensor(src_tokens)\n dataset = self.task.build_dataset_for_inference(\n src_tokens,\n [x.numel() for x in src_tokens],\n )\n sample = dataset.collater(dataset)\n sample = utils.apply_to_sample(lambda tensor: tensor.to(self.device), sample)\n return sample\n\n def generate(\n self,\n tokenized_sentences: List[torch.LongTensor],\n *args,\n inference_step_args=None,\n **kwargs\n ) -> List[List[Dict[str, torch.Tensor]]]:\n inference_step_args = inference_step_args or {}\n if \"prefix_tokens\" in inference_step_args:\n raise NotImplementedError(\"prefix generation not implemented for BART\")\n else:\n bsz = len(tokenized_sentences)\n inference_step_args[\"prefix_tokens\"] = (\n tokenized_sentences[0]\n .new_full((bsz, 1), 
fill_value=self.task.source_dictionary.bos())\n .to(device=self.device)\n )\n return super().generate(\n tokenized_sentences,\n *args,\n inference_step_args=inference_step_args,\n **kwargs\n )\n\n def extract_features(\n self, tokens: torch.LongTensor, return_all_hiddens: bool = False\n ) -> torch.Tensor:\n if tokens.dim() == 1:\n tokens = tokens.unsqueeze(0)\n if tokens.size(-1) > min(self.model.max_positions()):\n raise ValueError(\n \"tokens exceeds maximum length: {} > {}\".format(\n tokens.size(-1), self.model.max_positions()\n )\n )\n tokens.to(device=self.device),\n prev_output_tokens = tokens.clone()\n\n prev_output_tokens[:, 0] = tokens.gather(\n 1,\n (tokens.ne(self.task.source_dictionary.pad()).sum(dim=1) - 1).unsqueeze(-1),\n ).squeeze()\n\n prev_output_tokens[:, 1:] = tokens[:, :-1]\n features, extra = self.model(\n src_tokens=tokens,\n src_lengths=None,\n prev_output_tokens=prev_output_tokens,\n features_only=True,\n return_all_hiddens=return_all_hiddens,\n )\n if return_all_hiddens:\n # convert from T x B x C -> B x T x C\n inner_states = extra[\"inner_states\"]\n return [inner_state.transpose(0, 1) for inner_state in inner_states]\n else:\n return features # just the last layer's features\n\n def register_classification_head(\n self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs\n ):\n self.model.register_classification_head(\n name, num_classes=num_classes, embedding_size=embedding_size, **kwargs\n )\n\n def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False):\n if tokens.dim() == 1:\n tokens = tokens.unsqueeze(0)\n features = self.extract_features(tokens.to(device=self.device))\n sentence_representation = features[\n tokens.eq(self.task.source_dictionary.eos()), :\n ].view(features.size(0), -1, features.size(-1))[:, -1, :]\n\n logits = self.model.classification_heads[head](sentence_representation)\n if return_logits:\n return logits\n return F.log_softmax(logits, dim=-1)\n\n def fill_mask(\n 
self,\n masked_input: str,\n topk: int = 5,\n match_source_len: bool = True,\n **generate_kwargs\n ):\n masked_token = \"<mask>\"\n assert (\n masked_token in masked_input\n ), \"please add one {} token for the input\".format(masked_token)\n\n text_spans = masked_input.split(masked_token)\n text_spans_bpe = (\n (\" {0} \".format(masked_token))\n .join([self.bpe.encode(text_span.rstrip()) for text_span in text_spans])\n .strip()\n )\n tokens = self.task.source_dictionary.encode_line(\n \"<s> \" + text_spans_bpe + \" </s>\",\n append_eos=False,\n add_if_not_exist=False,\n ).long()\n\n if tokens.dim() == 1:\n tokens = tokens.unsqueeze(0)\n\n # ensure beam size is at least as big as topk\n generate_kwargs[\"beam\"] = max(\n topk,\n generate_kwargs.get(\"beam\", -1),\n )\n generate_kwargs[\"match_source_len\"] = match_source_len\n hypos = self.generate(tokens, **generate_kwargs)[0]\n\n return [(self.decode(hypo[\"tokens\"]), hypo[\"score\"]) for hypo in hypos[:topk]]\n" ]
[ [ "torch.nn.functional.log_softmax" ] ]
deepair-io/flai
[ "edbf74285e24f8605a625edf3a1c73ebae79a6de" ]
[ "flai/core.py" ]
[ "from abc import ABC, abstractmethod\nimport json\nimport math\nimport numpy as np\nfrom flai.utils import np_random\n\n\nclass Env(ABC):\n \"\"\"The main Environment class. It encapsulates an environment with\n arbitrary behind-the-scenes dynamics. An environment can be\n partially or fully observed.\n The main API methods that users of this class need to know are:\n step\n reset\n render\n close\n seed\n And set the following attributes:\n action_space: The Space object corresponding to valid actions\n observation_space: The Space object corresponding to valid observations\n reward_range: A tuple corresponding to the min and max possible rewards\n Note: a default reward range set to [-inf,+inf] already exists. Set it if\n you want a narrower range.\n The methods are accessed publicly as \"step\", \"reset\", etc...\n \"\"\"\n # Set this in SOME subclasses\n reward_range = (-float('inf'), float('inf'))\n\n # Set these in ALL subclasses\n action_space = None\n observation_space = None\n\n @abstractmethod\n def step(self, action):\n \"\"\"Run one timestep of the environment's dynamics. 
When end of\n episode is reached, you are responsible for calling `reset()`\n to reset this environment's state.\n Accepts an action and returns a tuple (observation, reward, done, info)\n Args:\n action (object): an action provided by the agent\n Returns:\n observation (object): agent's observation of the current\n environment\n reward (float) : amount of reward returned after previous action\n done (bool): whether the episode has ended, in which case further\n step() calls will return undefined results\n info (dict): contains auxiliary diagnostic information (helpful\n for debugging, and sometimes learning)\n \"\"\"\n\n @abstractmethod\n def reset(self):\n \"\"\"Resets the state of the environment & returns an initial observation\n Returns:\n observation (object): the initial observation.\n \"\"\"\n\n @abstractmethod\n def render(self, mode='human'):\n \"\"\"Renders the environment.\n The set of supported modes varies per environment. (And some\n environments do not support rendering at all.) By convention,\n if mode is:\n - human: render to the current display or terminal and\n return nothing. Usually for human consumption.\n - rgb_array: Return an numpy.ndarray with shape (x, y, 3),\n representing RGB values for an x-by-y pixel image, suitable\n for turning into a video.\n - ansi: Return a string (str) or StringIO.StringIO containing a\n terminal-style text representation. The text can include newlines\n and ANSI escape sequences (e.g. for colors).\n Note:\n Make sure that your class's metadata 'render.modes' key includes\n the list of supported modes. It's recommended to call super()\n in implementations to use the functionality of this method.\n Args:\n mode (str): the mode to render with\n Example:\n class MyEnv(Env):\n metadata = {'render.modes': ['human', 'rgb_array']}\n def render(self, mode='human'):\n if mode == 'rgb_array':\n return np.array(...) 
# return RGB frame suitable for video\n elif mode == 'human':\n # pop up a window and render\n else:\n # just raise an exception\n super(MyEnv, self).render(mode=mode)\n \"\"\"\n\n def close(self):\n \"\"\"Override close in your subclass to perform any necessary cleanup.\n Environments will automatically close() themselves when\n garbage collected or when the program exits.\n \"\"\"\n pass\n\n def seed(self, seed=None):\n \"\"\"Sets the seed for this env's random number generator(s).\n Note:\n Some environments use multiple pseudorandom number generators.\n We want to capture all such seeds used in order to ensure that\n there aren't accidental correlations between multiple generators.\n Returns:\n list<bigint>: Returns the list of seeds used in this env's random\n number generators. The first value in the list should be the\n \"main\" seed, or the value which a reproducer should pass to\n 'seed'. Often, the main seed equals the provided 'seed', but\n this won't be true if seed=None, for example.\n \"\"\"\n return\n\n\nclass ObservationSpace(object):\n \"\"\"The main Observation Space class. It encapsulates an observation space\n with arbitrary behind-the-scenes dynamics.\n The main API methods that users of this class need to know are:\n assign\n\n Example Usage:\n [1]observation_space = ObservationSpace()\n [2]obsercation_space.assign(x=1, name='Myname')\n [3]print(observation_space)\n >>> {\"x\": \"int\", \"name\": \"str\"}\n [4]x in observation_space\n >>> True\n [5]y in observation_space\n >>> False\n [6]observation_space.name\n >>> 'Myname'\n \"\"\"\n\n def assign(self, **kwargs):\n \"\"\"Assigns a keyword argument to the instance variable\n\n Example:\n observation_space = ObservationSpace()\n observation_space.assign(x=1)\n observation_space.x\n >>> 1\n \"\"\"\n for key, value in kwargs.items():\n self.__dict__[key] = value\n\n def __contains__(self, x):\n \"\"\"To check if the observation is present in the\n observation space. 
\n \"\"\"\n return x in set(self.__dict__.keys())\n\n def __repr__(self):\n \"\"\"To represent observation variables as json\n with key as instance variables and value as dtype\n \"\"\"\n info = dict()\n for key, value in self.__dict__.items():\n info[key] = type(value).__name__\n return json.dumps(info)\n\n\nclass ActionSpace(object):\n \"\"\"The main Action Space class. It encapsulates an action space\n with arbitrary behind-the-scenes dynamics. This action space is\n creates a box space (very similar to OpenAI's Box Space which\n inculed lower bound and upper bound along all the axis) with an\n additional constraint, that is, x[i+1] >= x[i] for all i in x in\n space. For example [1, 2, 3] is valid but [1, 3, 2] is not valid\n sample. \n The main API methods that users of this class need to know are:\n sample\n valid\n\n Example Usage:\n action_space = ActionSpace(upper=[-1, -1, -1], [1, 1, 1])\n \"\"\"\n\n def __init__(self, upper=None, lower=None):\n\n self._upper = np.array(upper, dtype=np.int32)\n self._lower = np.array(lower, dtype=np.int32)\n\n assert (self._upper.shape ==\n self._lower.shape), 'upper and lower limit shape mismatch'\n assert (self._upper - self._lower >=\n 0).all(), 'lower limit is greater than upper limit'\n\n assert (np.all(np.diff(self._upper) >= 0)\n ), 'upper limit must be in increasing order'\n assert (np.all(np.diff(self._lower) >= 0)\n ), 'lower limit must be in increasing order'\n\n @property\n def upper(self):\n return self._upper\n\n @property\n def lower(self):\n return self._lower\n\n def sample(self, seed=None):\n \"\"\"Sample an action from the action space.\n\n Args:\n seed (int) : seed to control randomness\n\n Note: Seed functionality is not implemented right now\n \"\"\"\n result = []\n\n # Creates a lower limit as -oo\n current_lower_bound = -math.inf\n\n for i in range(self.upper.shape[0]):\n\n # create lower and higher limit for one sampling\n low = self.lower[i] if current_lower_bound < self.lower[i] else 
current_lower_bound\n high = self.upper[i]\n\n # random sample\n price = np_random.rng.randint(low=low, high=high)\n result.append(price)\n\n # update the lower limit for next round of sampling\n current_lower_bound = max(low, price)\n\n return np.array(result, dtype=np.int32)\n\n def valid(self, x):\n \"\"\"Brings an out of bounds invalid sample into the\n closest valid bounds.\n\n Args:\n x (list/np.array) : action with same dimentionality as\n of the box.\n\n Returns:\n A valid sample with the closest match to x. (np.array)\n \"\"\"\n if isinstance(x, list):\n x = np.array(x, dtype=np.int32)\n\n # assert the shape of the input\n assert (x.shape == self.lower.shape), 'x shape is {} and required is {}'.format(\n x.shape, self.lower.shape)\n\n x[x > self.upper] = self.upper[x > self.upper]\n x[x < self.lower] = self.lower[x < self.lower]\n return x\n\n def __contains__(self, x):\n \"\"\"To check if the action is present in the\n action space. \n \"\"\"\n if isinstance(x, list):\n x = np.array(x, dtype=np.int32)\n return (x.shape == self.upper.shape) and (self.upper >= x).all()\\\n and (self.lower <= x).all() and (np.diff(x) >= 0).all()\n\n def __repr__(self):\n \"\"\"To represent action variables as json\n with key as instance variables and value as dtype\n \"\"\"\n info = dict()\n for key, value in self.__dict__.items():\n info[key] = type(value).__name__\n return json.dumps(info)\n" ]
[ [ "numpy.array", "numpy.diff" ] ]
meghbali/ANNElastoplasticity
[ "697a3edecc06999fc4492a16e67ac28650fec212" ]
[ "epnn_module_utility.py" ]
[ "\"\"\" Elasto-Plastic Neural Network (EPNN)\r\n\r\nDEVELOPED AT:\r\n COMPUTATIONAL GEOMECHANICS LABORATORY\r\n DEPARTMENT OF CIVIL ENGINEERING\r\n UNIVERSITY OF CALGARY, AB, CANADA\r\n DIRECTOR: Prof. Richard Wan\r\n\r\nDEVELOPED BY:\r\n MAHDAD EGHBALIAN\r\n\r\nMIT License\r\n\r\nCopyright (c) 2022 Mahdad Eghbalian\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE.\r\n\"\"\"\r\n\r\nimport torch\r\nimport pickle\r\nimport numpy as np\r\n\r\n\r\ndef data_loader_pt(file_name):\r\n return torch.load(file_name)\r\n\r\n\r\ndef data_loader_dat(file_name):\r\n get_data = pickle.load(open(file_name, \"rb\"))\r\n return get_data\r\n\r\n\r\ndef data_dumper_dat(file_name, outputset):\r\n pickle.dump(outputset, open(file_name, \"wb\"))\r\n return\r\n\r\n\r\ndef pred_error(n11, n12, n2, data1, data2, criterion, min1, min2, range1, range2, min3, range3, device, param):\r\n n11.eval()\r\n n12.eval()\r\n n2.eval()\r\n\r\n # if normalized in [-1, 1]\r\n coeff1 = 2.0\r\n coeff2 = 1.0\r\n\r\n # if normalized in [0, 1]\r\n # coeff1 = 1.0\r\n # coeff2 = 0.0\r\n\r\n input11 = n11.forward(data1.x)\r\n input12 = n12.forward(data1.x)\r\n target11 = data1.y[:, 0:1]\r\n target12 = data1.y[:, 1:4]\r\n out11 = 100.0 * (torch.norm(input11 - target11) / torch.norm(target11))\r\n out12 = 100.0 * (torch.norm(input12 - target12) / torch.norm(target12))\r\n\r\n input1m = torch.cat((input11, data1.x[:, [0, 1, 2, 3, 7, 8, 9, 10, 11, 12]]), 1)\r\n\r\n input21 = n2.forward(input1m)\r\n\r\n # constant tensors\r\n # oneten1 = torch.ones(3, 1, dtype=torch.float32)\r\n oneten1 = torch.ones(3, 1).double()\r\n oneten1 = oneten1.to(device)\r\n # oneten2 = torch.ones(data2.y.shape[0], data2.y.shape[1], dtype=torch.float32)\r\n oneten2 = torch.ones(data2.y.shape[0], data2.y.shape[1]).double()\r\n oneten2 = oneten2.to(device)\r\n\r\n dstrain = data1.x[:, 10:]\r\n dstrain_real = torch.mul(dstrain + coeff2, range3) / coeff1 + min3\r\n # dstrainpl = input12 # predicted plastic strain increment\r\n dstrainpl = target12 # actual plastic strain increment\r\n dstrainpl_real = torch.mul(dstrainpl + coeff2, 
range1[1:4]) / coeff1 + min1[1:4]\r\n dstrainel = dstrain_real - dstrainpl_real\r\n dstrainelv = torch.matmul(dstrainel, oneten1)\r\n dstrainelvten = torch.mul(dstrainelv, oneten2)\r\n\r\n mu = torch.mul(param, input21[:, 0:1])\r\n\r\n input22 = 2.0 * torch.mul(mu, dstrainel)\r\n input23 = torch.mul((input21[:, 0:1] - (2.0 / 3.0) * mu), dstrainelvten)\r\n input24 = input22 + input23\r\n input2 = coeff1 * torch.div((input24 - min2), range2) - coeff2\r\n\r\n target2 = data2.y\r\n out2 = 100.0 * (torch.norm(input2 - target2) / torch.norm(target2))\r\n\r\n n11.train()\r\n n12.train()\r\n n2.train()\r\n return out11.item(), out12.item(), out2.item()\r\n\r\n\r\ndef cost_function(n11, n12, n2, data1, data2, criterion, min1, min2, range1, range2, min3, range3, device, param):\r\n # if normalized in [-1, 1]\r\n coeff1 = 2.0\r\n coeff2 = 1.0\r\n\r\n # if normalized in [0, 1]\r\n # coeff1 = 1.0\r\n # coeff2 = 0.0\r\n\r\n input11 = n11.forward(data1.x)\r\n input12 = n12.forward(data1.x)\r\n target11 = data1.y[:, 0:1]\r\n target12 = data1.y[:, 1:4]\r\n cost11 = criterion(input11, target11)\r\n cost12 = criterion(input12, target12)\r\n\r\n input1m = torch.cat((input11, data1.x[:, [0, 1, 2, 3, 7, 8, 9, 10, 11, 12]]), 1)\r\n\r\n input21 = n2.forward(input1m)\r\n\r\n # constant tensors\r\n # oneten1 = torch.ones(3, 1, dtype=torch.float32)\r\n oneten1 = torch.ones(3, 1).double()\r\n oneten1 = oneten1.to(device)\r\n # oneten2 = torch.ones(data2.y.shape[0], data2.y.shape[1], dtype=torch.float32)\r\n oneten2 = torch.ones(data2.y.shape[0], data2.y.shape[1]).double()\r\n oneten2 = oneten2.to(device)\r\n\r\n dstrain = data1.x[:, 10:]\r\n dstrain_real = torch.mul(dstrain + coeff2, range3) / coeff1 + min3\r\n # dstrainpl = input12 # predicted plastic strain increment\r\n dstrainpl = target12 # actual plastic strain increment\r\n dstrainpl_real = torch.mul(dstrainpl + coeff2, range1[1:4]) / coeff1 + min1[1:4]\r\n dstrainel = dstrain_real - dstrainpl_real\r\n dstrainelv = 
torch.matmul(dstrainel, oneten1)\r\n dstrainelvten = torch.mul(dstrainelv, oneten2)\r\n\r\n mu = torch.mul(param, input21[:, 0:1])\r\n\r\n input22 = 2.0 * torch.mul(mu, dstrainel)\r\n input23 = torch.mul((input21[:, 0:1] - (2.0 / 3.0) * mu), dstrainelvten)\r\n input24 = input22 + input23\r\n input2 = coeff1 * torch.div((input24 - min2), range2) - coeff2\r\n\r\n target2 = data2.y\r\n cost2 = criterion(input2, target2)\r\n\r\n cost = cost11 + cost12 + cost2\r\n return cost, cost11, cost12, cost2\r\n\r\n\r\ndef mean(inp):\r\n output = (inp[0][0] + inp[1][0] + inp[2][0]) / 3.0\r\n return output\r\n\r\n\r\ndef deviator(inp):\r\n p = mean(inp)\r\n s = np.array([[inp[0][0] - p], [inp[1][0] - p], [inp[2][0] - p], [inp[3][0]], [inp[4][0]], [inp[5][0]]])\r\n return s\r\n\r\n\r\ndef j2(inp):\r\n s = deviator(inp)\r\n out = 0.0\r\n for i in range(6):\r\n out += s[i][0] ** 2.0 / 2.0\r\n return out\r\n\r\n\r\ndef eq(inp):\r\n j2_val = j2(inp)\r\n out = np.sqrt(3.0 * j2_val)\r\n return out\r\n" ]
[ [ "numpy.array", "torch.cat", "torch.mul", "torch.norm", "torch.ones", "torch.load", "numpy.sqrt", "torch.div", "torch.matmul" ] ]
duongnv0499/Explain-Deformable-DETR
[ "3f222f514a0bba0d0125063300b85aafc5a6030b" ]
[ "main.py" ]
[ "# ------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\n\n\nimport argparse\nimport datetime\nimport json\nimport random\nimport time\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\nimport datasets\nimport util.misc as utils\nimport datasets.samplers as samplers\nfrom datasets import build_dataset, get_coco_api_from_dataset\nfrom engine import evaluate, train_one_epoch\nfrom models import build_model\n\n\ndef get_args_parser():\n parser = argparse.ArgumentParser('Deformable DETR Detector', add_help=False)\n parser.add_argument('--lr', default=2e-4, type=float)\n parser.add_argument('--lr_backbone_names', default=[\"backbone.0\"], type=str, nargs='+')\n parser.add_argument('--lr_backbone', default=2e-5, type=float)\n parser.add_argument('--lr_linear_proj_names', default=['reference_points', 'sampling_offsets'], type=str, nargs='+')\n parser.add_argument('--lr_linear_proj_mult', default=0.1, type=float)\n parser.add_argument('--batch_size', default=2, type=int)\n parser.add_argument('--weight_decay', default=1e-4, type=float)\n parser.add_argument('--epochs', default=50, type=int)\n parser.add_argument('--lr_drop', default=40, type=int)\n parser.add_argument('--lr_drop_epochs', default=None, type=int, nargs='+')\n parser.add_argument('--clip_max_norm', default=0.1, type=float,\n help='gradient clipping max norm')\n\n\n parser.add_argument('--sgd', action='store_true')\n\n # Variants of Deformable DETR\n parser.add_argument('--with_box_refine', default=False, 
action='store_true')\n parser.add_argument('--two_stage', default=False, action='store_true')\n\n # Model parameters\n parser.add_argument('--frozen_weights', type=str, default=None,\n help=\"Path to the pretrained model. If set, only the mask head will be trained\")\n\n # * Backbone\n parser.add_argument('--backbone', default='resnet50', type=str,\n help=\"Name of the convolutional backbone to use\")\n parser.add_argument('--dilation', action='store_true',\n help=\"If true, we replace stride with dilation in the last convolutional block (DC5)\")\n parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),\n help=\"Type of positional embedding to use on top of the image features\")\n parser.add_argument('--position_embedding_scale', default=2 * np.pi, type=float,\n help=\"position / size * scale\")\n parser.add_argument('--num_feature_levels', default=4, type=int, help='number of feature levels')\n\n # * Transformer\n parser.add_argument('--enc_layers', default=6, type=int,\n help=\"Number of encoding layers in the transformer\")\n parser.add_argument('--dec_layers', default=6, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--dim_feedforward', default=1024, type=int,\n help=\"Intermediate size of the feedforward layers in the transformer blocks\")\n parser.add_argument('--hidden_dim', default=256, type=int,\n help=\"Size of the embeddings (dimension of the transformer)\")\n parser.add_argument('--dropout', default=0.1, type=float,\n help=\"Dropout applied in the transformer\")\n parser.add_argument('--nheads', default=8, type=int,\n help=\"Number of attention heads inside the transformer's attentions\")\n parser.add_argument('--num_queries', default=300, type=int,\n help=\"Number of query slots\")\n parser.add_argument('--dec_n_points', default=4, type=int)\n parser.add_argument('--enc_n_points', default=4, type=int)\n\n # * Segmentation\n parser.add_argument('--masks', 
action='store_true',\n help=\"Train segmentation head if the flag is provided\")\n\n # Loss\n parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',\n help=\"Disables auxiliary decoding losses (loss at each layer)\")\n\n # * Matcher\n parser.add_argument('--set_cost_class', default=2, type=float,\n help=\"Class coefficient in the matching cost\")\n parser.add_argument('--set_cost_bbox', default=5, type=float,\n help=\"L1 box coefficient in the matching cost\")\n parser.add_argument('--set_cost_giou', default=2, type=float,\n help=\"giou box coefficient in the matching cost\")\n\n # * Loss coefficients\n parser.add_argument('--mask_loss_coef', default=1, type=float)\n parser.add_argument('--dice_loss_coef', default=1, type=float)\n parser.add_argument('--cls_loss_coef', default=2, type=float)\n parser.add_argument('--bbox_loss_coef', default=5, type=float)\n parser.add_argument('--giou_loss_coef', default=2, type=float)\n parser.add_argument('--focal_alpha', default=0.25, type=float)\n\n # dataset parameters\n parser.add_argument('--dataset_file', default='coco')\n parser.add_argument('--coco_path', default='./data/coco', type=str)\n parser.add_argument('--coco_panoptic_path', type=str)\n parser.add_argument('--remove_difficult', action='store_true')\n\n parser.add_argument('--output_dir', default='',\n help='path where to save, empty for no saving')\n parser.add_argument('--device', default='cuda',\n help='device to use for training / testing')\n parser.add_argument('--seed', default=42, type=int)\n parser.add_argument('--resume', default='', help='resume from checkpoint')\n parser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='start epoch')\n parser.add_argument('--eval', action='store_true')\n parser.add_argument('--test', action='store_true')\n parser.add_argument('--num_workers', default=2, type=int)\n parser.add_argument('--cache_mode', default=False, action='store_true', help='whether to cache images on 
memory')\n\n return parser\n\n\ndef main(args):\n utils.init_distributed_mode(args)\n print(\"git:\\n {}\\n\".format(utils.get_sha()))\n\n if args.frozen_weights is not None:\n assert args.masks, \"Frozen training is meant for segmentation only\"\n print(args)\n\n device = torch.device(args.device)\n\n # fix the seed for reproducibility\n seed = args.seed + utils.get_rank()\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n model, criterion, postprocessors = build_model(args)\n model.to(device)\n\n model_without_ddp = model\n n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print('number of params:', n_parameters)\n\n dataset_train = build_dataset(image_set='train', args=args)\n dataset_val = build_dataset(image_set='val', args=args)\n dataset_test = build_dataset(image_set = 'test', args=args)\n\n if args.distributed:\n if args.cache_mode:\n sampler_train = samplers.NodeDistributedSampler(dataset_train)\n sampler_val = samplers.NodeDistributedSampler(dataset_val, shuffle=False)\n else:\n sampler_train = samplers.DistributedSampler(dataset_train)\n sampler_val = samplers.DistributedSampler(dataset_val, shuffle=False)\n sampler_test = samplers.DistributedSampler(dataset_test, shuffle=False)\n else:\n sampler_train = torch.utils.data.RandomSampler(dataset_train)\n sampler_val = torch.utils.data.SequentialSampler(dataset_val)\n sampler_test = torch.utils.data.SequentialSampler(dataset_test)\n\n batch_sampler_train = torch.utils.data.BatchSampler(\n sampler_train, args.batch_size, drop_last=True)\n\n data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,\n collate_fn=utils.collate_fn, num_workers=args.num_workers,\n pin_memory=True)\n data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,\n drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers,\n pin_memory=True)\n data_loader_test = DataLoader(dataset_test, args.batch_size, sampler=sampler_val,\n 
drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers,\n pin_memory=True)\n\n # lr_backbone_names = [\"backbone.0\", \"backbone.neck\", \"input_proj\", \"transformer.encoder\"]\n def match_name_keywords(n, name_keywords):\n out = False\n for b in name_keywords:\n if b in n:\n out = True\n break\n return out\n\n for n, p in model_without_ddp.named_parameters():\n print(n)\n\n param_dicts = [\n {\n \"params\":\n [p for n, p in model_without_ddp.named_parameters()\n if not match_name_keywords(n, args.lr_backbone_names) and not match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad],\n \"lr\": args.lr,\n },\n {\n \"params\": [p for n, p in model_without_ddp.named_parameters() if match_name_keywords(n, args.lr_backbone_names) and p.requires_grad],\n \"lr\": args.lr_backbone,\n },\n {\n \"params\": [p for n, p in model_without_ddp.named_parameters() if match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad],\n \"lr\": args.lr * args.lr_linear_proj_mult,\n }\n ]\n if args.sgd:\n optimizer = torch.optim.SGD(param_dicts, lr=args.lr, momentum=0.9,\n weight_decay=args.weight_decay)\n else:\n optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,\n weight_decay=args.weight_decay)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)\n\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n model_without_ddp = model.module\n\n if args.dataset_file == \"coco_panoptic\":\n # We also evaluate AP during panoptic training, on original coco DS\n coco_val = datasets.coco.build(\"val\", args)\n base_ds = get_coco_api_from_dataset(coco_val)\n else:\n base_ds = get_coco_api_from_dataset(dataset_val)\n\n if args.frozen_weights is not None:\n checkpoint = torch.load(args.frozen_weights, map_location='cpu')\n model_without_ddp.detr.load_state_dict(checkpoint['model'])\n\n output_dir = Path(args.output_dir)\n if args.resume:\n if args.resume.startswith('https'):\n 
checkpoint = torch.hub.load_state_dict_from_url(\n args.resume, map_location='cpu', check_hash=True)\n else:\n checkpoint = torch.load(args.resume, map_location='cpu')\n del checkpoint[\"model\"][\"transformer.decoder.class_embed.0.weight\"]\n del checkpoint[\"model\"][\"transformer.decoder.class_embed.0.bias\"]\n del checkpoint[\"model\"][\"transformer.decoder.class_embed.1.weight\"]\n del checkpoint[\"model\"][\"transformer.decoder.class_embed.1.bias\"]\n del checkpoint[\"model\"][\"transformer.decoder.class_embed.2.weight\"]\n del checkpoint[\"model\"][\"transformer.decoder.class_embed.2.bias\"]\n del checkpoint[\"model\"][\"transformer.decoder.class_embed.3.weight\"]\n del checkpoint[\"model\"][\"transformer.decoder.class_embed.3.bias\"]\n del checkpoint[\"model\"][\"transformer.decoder.class_embed.4.weight\"]\n del checkpoint[\"model\"][\"transformer.decoder.class_embed.4.bias\"]\n del checkpoint[\"model\"][\"transformer.decoder.class_embed.5.weight\"]\n del checkpoint[\"model\"][\"transformer.decoder.class_embed.5.bias\"]\n del checkpoint[\"model\"][\"transformer.decoder.class_embed.6.weight\"]\n del checkpoint[\"model\"][\"transformer.decoder.class_embed.6.bias\"]\n del checkpoint[\"model\"][\"class_embed.0.weight\"]\n del checkpoint[\"model\"][\"class_embed.0.bias\"]\n del checkpoint[\"model\"][\"class_embed.1.weight\"]\n del checkpoint[\"model\"][\"class_embed.1.bias\"]\n del checkpoint[\"model\"][\"class_embed.2.weight\"]\n del checkpoint[\"model\"][\"class_embed.2.bias\"]\n del checkpoint[\"model\"][\"class_embed.3.weight\"]\n del checkpoint[\"model\"][\"class_embed.3.bias\"]\n del checkpoint[\"model\"][\"class_embed.4.weight\"]\n del checkpoint[\"model\"][\"class_embed.4.bias\"]\n del checkpoint[\"model\"][\"class_embed.5.weight\"]\n del checkpoint[\"model\"][\"class_embed.5.bias\"]\n del checkpoint[\"model\"][\"class_embed.6.weight\"]\n del checkpoint[\"model\"][\"class_embed.6.bias\"]\n missing_keys, unexpected_keys = 
model_without_ddp.load_state_dict(checkpoint['model'], strict=False)\n unexpected_keys = [k for k in unexpected_keys if not (k.endswith('total_params') or k.endswith('total_ops'))]\n # if len(missing_keys) > 0:\n # print('Missing Keys: {}'.format(missing_keys))\n # if len(unexpected_keys) > 0:\n # print('Unexpected Keys: {}'.format(unexpected_keys))\n # if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:\n # import copy\n # p_groups = copy.deepcopy(optimizer.param_groups)\n # optimizer.load_state_dict(checkpoint['optimizer'])\n # for pg, pg_old in zip(optimizer.param_groups, p_groups):\n # pg['lr'] = pg_old['lr']\n # pg['initial_lr'] = pg_old['initial_lr']\n # #print(optimizer.param_groups)\n # lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n # # todo: this is a hack for doing experiment that resume from checkpoint and also modify lr scheduler (e.g., decrease lr in advance).\n # args.override_resumed_lr_drop = True\n # if args.override_resumed_lr_drop:\n # print('Warning: (hack) args.override_resumed_lr_drop is set to True, so args.lr_drop would override lr_drop in resumed lr_scheduler.')\n # lr_scheduler.step_size = args.lr_drop\n # lr_scheduler.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))\n # lr_scheduler.step(lr_scheduler.last_epoch)\n # args.start_epoch = checkpoint['epoch'] + 1\n # # check the resumed model\n if not args.eval:\n test_stats, coco_evaluator = evaluate(\n model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir\n )\n \n if args.eval:\n test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,\n data_loader_val, base_ds, device, args.output_dir)\n if args.output_dir:\n utils.save_on_master(coco_evaluator.coco_eval[\"bbox\"].eval, output_dir / \"eval.pth\")\n return\n if args.test:\n test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,\n data_loader_test, base_ds, device, args.output_dir)\n 
if args.output_dir:\n utils.save_on_master(coco_evaluator.coco_eval[\"bbox\"].eval, output_dir / \"eval.pth\")\n return\n\n\n print(\"Start training\")\n start_time = time.time()\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n sampler_train.set_epoch(epoch)\n train_stats = train_one_epoch(\n model, criterion, data_loader_train, optimizer, device, epoch, args.clip_max_norm)\n lr_scheduler.step()\n if args.output_dir:\n checkpoint_paths = [output_dir / 'checkpoint.pth']\n # extra checkpoint before LR drop and every 5 epochs\n if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 5 == 0:\n checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')\n for checkpoint_path in checkpoint_paths:\n utils.save_on_master({\n 'model': model_without_ddp.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'epoch': epoch,\n 'args': args,\n }, checkpoint_path)\n\n test_stats, coco_evaluator = evaluate(\n model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir\n )\n\n log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},\n **{f'test_{k}': v for k, v in test_stats.items()},\n 'epoch': epoch,\n 'n_parameters': n_parameters}\n\n if args.output_dir and utils.is_main_process():\n with (output_dir / \"log.txt\").open(\"a\") as f:\n f.write(json.dumps(log_stats) + \"\\n\")\n\n # for evaluation logs\n if coco_evaluator is not None:\n (output_dir / 'eval').mkdir(exist_ok=True)\n if \"bbox\" in coco_evaluator.coco_eval:\n filenames = ['latest.pth']\n if epoch % 50 == 0:\n filenames.append(f'{epoch:03}.pth')\n for name in filenames:\n torch.save(coco_evaluator.coco_eval[\"bbox\"].eval,\n output_dir / \"eval\" / name)\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('Training time {}'.format(total_time_str))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('Deformable DETR training and 
evaluation script', parents=[get_args_parser()])\n args = parser.parse_args()\n if args.output_dir:\n Path(args.output_dir).mkdir(parents=True, exist_ok=True)\n main(args)\n" ]
[ [ "torch.device", "torch.optim.AdamW", "torch.optim.lr_scheduler.StepLR", "torch.utils.data.RandomSampler", "numpy.random.seed", "torch.save", "torch.utils.data.SequentialSampler", "torch.optim.SGD", "torch.nn.parallel.DistributedDataParallel", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.utils.data.BatchSampler", "torch.load", "torch.hub.load_state_dict_from_url" ] ]
PHBS/ASP
[ "d18a9c25f13cf5c02353b83636d3c6ce13de5242" ]
[ "py/HW2/option_models/basket.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 19 22:56:58 2017\n\n@author: jaehyuk\n\"\"\"\nimport numpy as np\nimport scipy.stats as ss\nfrom .bsm import bsm_formula\nfrom .normal import normal_formula\n\ndef basket_check_args(spot, vol, corr_m, weights):\n '''\n This function simply checks that the size of the vector (matrix) are consistent\n '''\n n = spot.size\n assert( n == vol.size )\n assert( corr_m.shape == (n, n) )\n return None\n \ndef basket_price_mc_cv(\n strike, spot, vol, weights, texp, cor_m, \n intr=0.0, divr=0.0, cp=1, n_samples=10000\n):\n # price1 = MC based on BSM\n rand_st = np.random.get_state() # Store random state first\n price1 = basket_price_mc(\n strike, spot, vol, weights, texp, cor_m,\n intr, divr, cp, True, n_samples)\n \n ''' \n compute price2: mc price based on normal model\n make sure you use the same seed\n\n # Restore the state in order to generate the same state\n np.random.set_state(rand_st) \n price2 = basket_price_mc(\n strike, spot, spot*vol, weights, texp, cor_m,\n intr, divr, cp, False, n_samples)\n '''\n price2 = 0\n\n ''' \n compute price3: analytic price based on normal model\n \n price3 = basket_price_norm_analytic(\n strike, spot, vol, weights, texp, cor_m, intr, divr, cp)\n '''\n price3 = 0\n \n # return two prices: without and with CV\n return np.array([price1, price1 - (price2 - price3)])\n\n\ndef basket_price_mc(\n strike, spot, vol, weights, texp, cor_m,\n intr=0.0, divr=0.0, cp=1, bsm=True, n_samples = 10000\n):\n basket_check_args(spot, vol, cor_m, weights)\n \n div_fac = np.exp(-texp*divr)\n disc_fac = np.exp(-texp*intr)\n forward = spot / disc_fac * div_fac\n\n cov_m = vol * cor_m * vol[:,None]\n chol_m = np.linalg.cholesky(cov_m)\n\n n_assets = spot.size\n znorm_m = np.random.normal(size=(n_assets, n_samples))\n \n if( bsm ) :\n '''\n PUT the simulation of the geometric brownian motion below\n '''\n prices = np.zeros_like(znorm_m)\n else:\n # bsm = False: normal model\n prices = forward[:,None] + 
np.sqrt(texp) * chol_m @ znorm_m\n \n price_weighted = weights @ prices\n \n price = np.mean( np.fmax(cp*(price_weighted - strike), 0) )\n return disc_fac * price\n\n\ndef basket_price_norm_analytic(\n strike, spot, vol, weights, \n texp, cor_m, intr=0.0, divr=0.0, cp=1\n):\n \n '''\n The analytic (exact) option price under the normal model\n \n 1. compute the forward of the basket\n 2. compute the normal volatility of basket\n 3. plug in the forward and volatility to the normal price formula\n normal_formula(strike, spot, vol, texp, intr=0.0, divr=0.0, cp=1)\n it is already imorted\n \n PUT YOUR CODE BELOW\n '''\n \n return 0.0\n\ndef spread_price_kirk(strike, spot, vol, texp, corr, intr=0, divr=0, cp=1):\n div_fac = np.exp(-texp*divr)\n disc_fac = np.exp(-texp*intr)\n forward = spot / disc_fac * div_fac\n vol2 = vol[1]*forward[1]/(forward[1]+strike)\n vol_r = np.sqrt(vol[0]**2 + vol2*(vol2 - 2*corr*vol[0]))\n price = disc_fac * bsm_formula(forward[1]+strike, forward[0], vol_r, texp, cp=cp)\n\n return price\n" ]
[ [ "numpy.random.normal", "numpy.array", "numpy.zeros_like", "numpy.fmax", "numpy.exp", "numpy.sqrt", "numpy.linalg.cholesky", "numpy.random.get_state" ] ]
brookzhcn/covid-xprize-prawn
[ "c289fcdb8c9d1bc8d8053df9aa2a2d58d5945b9e" ]
[ "prawn/prescribe.py" ]
[ "# Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.\n\nimport argparse\nimport pandas as pd\nfrom joblib import Parallel, delayed\nimport time\nfrom prawn_prescribe import run_geo, PrawnPrescribe, get_country_region\nfrom standard_predictor.xprize_predictor import NPI_COLUMNS, XPrizePredictor\n\n\ndef prescribe(start_date: str,\n end_date: str,\n path_to_prior_ips_file: str,\n path_to_cost_file: str,\n output_file_path) -> None:\n \"\"\"\n Generates and saves a file with daily intervention plan prescriptions for the given countries, regions and prior\n intervention plans, between start_date and end_date, included.\n :param start_date: day from which to start making prescriptions, as a string, format YYYY-MM-DDD\n :param end_date: day on which to stop making prescriptions, as a string, format YYYY-MM-DDD\n :param path_to_prior_ips_file: path to a csv file containing the intervention plans between inception date\n (Jan 1 2020) and end_date, for the countries and regions for which a prescription is needed\n :param path_to_cost_file: path to a csv file containing the cost of each individual intervention, per country\n See covid_xprize/validation/data/uniform_random_costs.csv for an example\n :param output_file_path: path to file to save the prescriptions to\n :return: Nothing. Saves the generated prescriptions to an output_file_path csv file\n See 2020-08-01_2020-08-04_prescriptions_example.csv for an example\n \"\"\"\n # !!! 
YOUR CODE HERE !!!\n\n s = time.time()\n x_predictor = XPrizePredictor()\n prescribe1 = PrawnPrescribe(start_date_str=start_date, end_date_str=end_date,\n path_to_prior_ips_file=path_to_prior_ips_file,\n path_to_cost_file=path_to_cost_file, predictor=x_predictor,\n interval=14\n )\n zero_geos, others = prescribe1.filter_geos()\n zero_outputs = []\n date_range = prescribe1.date_range\n num_of_days = prescribe1.num_of_days\n for zero_geo in zero_geos:\n c, r = get_country_region(zero_geo)\n zero_df = pd.DataFrame({\n 'PrescriptionIndex': [0] * num_of_days,\n 'CountryName': [c] * num_of_days,\n 'RegionName': [r] * num_of_days,\n 'Date': date_range\n })\n zero_df.loc[:, NPI_COLUMNS] = 0\n zero_outputs.append(zero_df)\n\n print(zero_geos)\n print(others)\n\n ratio = 50\n outputs = Parallel(backend='loky', n_jobs=2)(delayed(run_geo)(geo, start_date, end_date,\n path_to_cost_file, path_to_prior_ips_file, ratio)\n for geo in others)\n outputs += zero_outputs\n df = pd.concat(outputs)\n df.to_csv(output_file_path, index=False)\n e = time.time()\n print(f'Total seconds {e - s}')\n\n\n# !!! PLEASE DO NOT EDIT. 
THIS IS THE OFFICIAL COMPETITION API !!!\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--start_date\",\n dest=\"start_date\",\n type=str,\n required=True,\n help=\"Start date from which to prescribe, included, as YYYY-MM-DD.\"\n \"For example 2020-08-01\")\n parser.add_argument(\"-e\", \"--end_date\",\n dest=\"end_date\",\n type=str,\n required=True,\n help=\"End date for the last prescription, included, as YYYY-MM-DD.\"\n \"For example 2020-08-31\")\n parser.add_argument(\"-ip\", \"--interventions_past\",\n dest=\"prior_ips_file\",\n type=str,\n required=True,\n help=\"The path to a .csv file of previous intervention plans\")\n parser.add_argument(\"-c\", \"--intervention_costs\",\n dest=\"cost_file\",\n type=str,\n required=True,\n help=\"Path to a .csv file containing the cost of each IP for each geo\")\n parser.add_argument(\"-o\", \"--output_file\",\n dest=\"output_file\",\n type=str,\n required=True,\n help=\"The path to an intervention plan .csv file\")\n args = parser.parse_args()\n print(f\"Generating prescriptions from {args.start_date} to {args.end_date}...\")\n prescribe(args.start_date, args.end_date, args.prior_ips_file, args.cost_file, args.output_file)\n print(\"Done!\")\n\n" ]
[ [ "pandas.DataFrame", "pandas.concat" ] ]
ndmaxar/DEODR
[ "a18e49240825e3209f28f6eedafd68fb05a90b3e" ]
[ "setup.py" ]
[ "\"\"\"Setup script for the DEODR project.\"\"\"\n\nfrom setuptools import setup, find_packages\n\nfrom Cython.Build import cythonize\n\nimport numpy as np\n\n\n# compilation mode for debuging\n# extensions = [\n# Extension(\"differentiable_renderer_cython\",\n# [\"DEODR/differentiable_renderer_cython.pyx\"]\n# ,extra_compile_args=[\"-Zi\", \"/Od\"]\n# ,extra_link_args=[\"-debug\"],\n# undef_macros = [ \"NDEBUG\" ]\n# )\n# ]\n\nextensions = \"deodr/differentiable_renderer_cython.pyx\"\n\nmy_modules = cythonize(extensions, annotate=True, language=\"c++\")\n\nlibname = \"deodr\"\n\nsetup(\n name=libname,\n version=\"0.1.14\",\n author=\"Martin de La Gorce\",\n author_email=\"[email protected]\",\n description=\"A differentiable renderer with Pytorch,Tensorflow and Matlab interfaces.\",\n url=\"https://github.com/martinResearch/DEODR\",\n license=\"BSD\",\n packages=find_packages(),\n package_data={\"deodr\": [\"*.pyx\", \"*.pxd\", \"data/*.*\", \"data/**/*.*\"]},\n ext_modules=my_modules, # additional source file(s)),\n include_dirs=[np.get_include()],\n setup_requires=[\"numpy\", \"scipy\", \"cython\"],\n install_requires=[\"numpy\", \"scipy\"],\n)\n" ]
[ [ "numpy.get_include" ] ]
rowanz/verb-attributes
[ "a04931e3b2ef5be859bdb4c0f123148b194c9d42" ]
[ "models/baselines/eszsl/run_exp.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom data.imsitu_loader import ImSitu\nfrom data.attribute_loader import Attributes, COLUMNS\n\ntrain_data, val_data, test_data = ImSitu.splits(zeroshot=True)\n\n# X \\in \\R^{d x m} where D is dimensionality and m is # examples\ntrain_feats = np.load('train_feats.npy').T\nval_feats = np.load('val_feats.npy').T\ntest_feats = np.load('test_feats.npy').T\ntrain_labels = np.load('train_labels.npy')\nval_labels = np.load('val_labels.npy')\ntest_labels = np.load('test_labels.npy')\n\n# Predicted atts\nold_index = Attributes(use_defns=True, use_test=True).atts_df.index\ninds = np.array([np.where(old_index==i)[0][0] for i in test_data.attributes.atts_df.index])\npred_atts = pd.DataFrame(\n np.load('/home/rowan/code/verb-attributes/data/att_preds_ensemble.npy')[inds],\n columns=COLUMNS,\n index=test_data.attributes.atts_df.index,\n)\n\ndef dummies(atts_df, col, dom_size):\n if dom_size > 2:\n d = pd.get_dummies(pd.concat((atts_df['time'], pd.Series(np.arange(dom_size), np.arange(dom_size))), 0), prefix=col)[:-dom_size]\n else:\n d = atts_df[col]\n return d\n\n\n# Binarize\noffsets = [0, train_labels.max() + 1, train_labels.max() + val_labels.max() + 2]\nfor data, labels, offset in zip(\n (train_data, val_data, test_data),\n (train_labels, val_labels, test_labels),\n offsets\n ):\n full_dummies = [dummies(data.attributes.atts_df, col, dom_size) for col, dom_size in data.attributes.domains]\n\n #number of attributes by labels [a x z]\n data.S = pd.concat(full_dummies, axis=1).as_matrix().astype(np.float32).T\n\n #number of examples * labels [z x m]\n data.Y = -np.ones((labels.shape[0], labels.max() + 1), dtype=np.float32)\n data.Y[np.arange(labels.shape[0]), labels] = 1\n\n data.Y_full = -np.ones((labels.shape[0], 504), dtype=np.float32)\n data.Y_full[np.arange(labels.shape[0]), labels+offset] = 1\n\nfull_dummies = [dummies(pred_atts, col, dom_size) for col, dom_size in data.attributes.domains]\nS_pred = pd.concat(full_dummies, 
axis=1).as_matrix().astype(np.float32).T\nassert np.allclose(S_pred.shape, test_data.S.shape)\nS_full = np.concatenate((train_data.S, val_data.S, test_data.S), 1)\nS_full_pred = np.concatenate((train_data.S, val_data.S, S_pred), 1)\n\n\ndef soln(gamma=1, l=1):\n first_term = train_feats.dot(train_feats.T)\n first_term += gamma * np.eye(first_term.shape[0])\n\n #first_term = np.linalg.inv(first_term)\n\n middle_term = train_feats.dot(train_data.Y).dot(train_data.S.T)\n\n final_term = train_data.S.dot(train_data.S.T)\n final_term += np.eye(final_term.shape[0])*l\n # final_term = np.linalg.inv(final_term)\n\n # We want to compute BC^{-1}\n # C^TX = B^T -> X = (C^{-1})^TB^T -> X = (BC^{-1})^T\n BCinv = np.linalg.solve(final_term.T, middle_term.T).T\n # BCinv = middle_term.dot(np.linalg.inv(final_term))\n\n # Ax = BC^-1 is equiv to A^-1BC^-1\n V = np.linalg.solve(first_term, BCinv)\n return V\n\ndef test(V, X, S):\n return X.T.dot(V).dot(S)\n\n\ndef test_deploy(V, pred=False):\n if pred:\n preds_part = test(V, test_feats, S_pred)\n preds_full = test(V, test_feats, S_full_pred)\n else:\n preds_part = test(V, test_feats, test_data.S)\n preds_full = test(V, test_feats, S_full)\n\n ranking_part = (-preds_part).argsort(1).argsort(1)\n ranking_full = (-preds_full).argsort(1).argsort(1)\n\n rank_part = ranking_part[np.arange(ranking_part.shape[0]), test_labels]\n rank_full = ranking_full[np.arange(ranking_part.shape[0]), (test_labels + offsets[2])]\n\n top1_acc = (rank_part == 0).mean()*100\n top5_acc = (rank_part < 5).mean()*100\n top1_acc_full = (rank_full == 0).mean()*100\n top5_acc_full = (rank_full < 5).mean()*100\n\n return pd.Series(data=[top1_acc, top5_acc, top1_acc_full, top5_acc_full],\n index=['top1_acc', 'top5_acc', 'top1_acc_full', 'top5_acc_full'])\n\ndef val_deploy(V):\n preds_part = test(V, val_feats, val_data.S).argmax(1)\n part_acc = (preds_part == val_labels).mean()*100\n return part_acc\n\ndef log_sample(minv, maxv):\n miv = np.log(minv)\n mav = 
np.log(maxv)\n sa = np.random.random() * (mav-miv) + miv\n return np.exp(sa)\n\nparams = []\nax = []\nfor i in range(20):\n g = log_sample(2, 1000)\n l = log_sample(0.001, 0.1)\n params.append((g,l))\n V = soln(g,l)\n acc = val_deploy(V)\n ax.append(acc)\n print(\"Accuracy for g={:.3f}, l={:.3f} is {:.2f}\".format(g,l,acc))\n\nbest_params = params[np.argmax(ax)]\nV = soln(*best_params)\nres = test_deploy(V)\nres_pred = test_deploy(V, pred=True)\nres = pd.DataFrame([res, res_pred], index=['eszsl', 'eszsl-pred'])\nres.to_csv('eszsl.csv', float_format='%.2f')\nprint(res)\n" ]
[ [ "numpy.concatenate", "numpy.log", "pandas.concat", "pandas.DataFrame", "numpy.ones", "numpy.load", "numpy.exp", "numpy.eye", "numpy.allclose", "numpy.where", "numpy.argmax", "numpy.arange", "numpy.linalg.solve", "pandas.Series", "numpy.random.random" ] ]
DiMoser/PyPinT
[ "3cba394d0fd87055ab412d35fe6dbf4a3b0dbe73" ]
[ "examples/mlsdc_stability_regions.py" ]
[ "# coding=utf-8\nimport warnings as warnings\n# emmit all warnings\n\nwarnings.simplefilter('always')\n# Deactivate Annoyances\n# DeprecationWarnings are emitted by various numpy functions\nwarnings.simplefilter('ignore', category=DeprecationWarning)\n# RuntimeWarnings are emitted by numpy.abs on most calls when encountering over- or underflows.\nwarnings.simplefilter('ignore', category=RuntimeWarning)\n\nimport argparse\nimport concurrent.futures\nimport pickle as pickle\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\nfrom pypint.integrators.sdc_integrator import SdcIntegrator\nfrom pypint.multi_level_providers.multi_time_level_provider import MultiTimeLevelProvider\nfrom pypint.multi_level_providers.level_transition_providers.time_transition_provider import TimeTransitionProvider\nfrom pypint.communicators import ForwardSendingMessaging\nfrom pypint.solvers.ml_sdc import MlSdc\nfrom pypint.solvers.cores.semi_implicit_mlsdc_core import SemiImplicitMlSdcCore\nfrom pypint.utilities.threshold_check import ThresholdCheck\nfrom pypint.solvers.diagnosis.norms import two_norm\nfrom examples.problems.lambda_u import LambdaU\n\n\ndef run_problem(real, imag, max_iter, num_steps, num_nodes, criteria, task, num_tasks, n_procs, starttime):\n _width = len(str(num_tasks))\n _percent = float(task) / float(num_tasks) * 100\n _diff_time = time.time() - starttime\n _time_epsilon = 0.1\n if task > n_procs \\\n and ((_diff_time > 8.0 and\n ((_diff_time % 10.0) < _time_epsilon) or ((10.0 - (_diff_time % 10.0)) < _time_epsilon))\n or (num_tasks % 2 == 0 and _percent % 4 == 0)\n or (num_tasks % 2 != 0 and _percent % 4 == 0)):\n print(\"[ {:6.2f}%] Starting task {:{width}d} of {:{width}d}: \\\\lambda = {: .3f}{:+.3f}i\"\n .format(_percent, task, num_tasks, real, imag, width=_width))\n\n base_integrator = SdcIntegrator()\n base_integrator.init(num_nodes=num_nodes)\n\n intermediate_integrator = SdcIntegrator()\n 
intermediate_integrator.init(num_nodes=(2 * num_nodes - 1))\n\n # fine_integrator = SdcIntegrator()\n # fine_integrator.init(num_nodes=(num_nodes + 4))\n\n transitioner1 = TimeTransitionProvider(fine_nodes=intermediate_integrator.nodes, coarse_nodes=base_integrator.nodes)\n # transitioner2 = TimeTransitionProvider(fine_nodes=fine_integrator.nodes, coarse_nodes=intermediate_integrator.nodes)\n\n ml_provider = MultiTimeLevelProvider()\n # ml_provider.add_coarse_level(fine_integrator)\n ml_provider.add_coarse_level(intermediate_integrator)\n ml_provider.add_coarse_level(base_integrator)\n ml_provider.add_level_transition(transitioner1, 0, 1)\n # ml_provider.add_level_transition(transitioner2, 1, 2)\n\n problem = LambdaU(lmbda=complex(real, imag))\n check = ThresholdCheck(min_threshold=1e-12, max_threshold=max_iter,\n conditions=('residual', 'iterations'))\n\n comm = ForwardSendingMessaging()\n solver = MlSdc(communicator=comm)\n comm.link_solvers(previous=comm, next=comm)\n comm.write_buffer(tag=(ml_provider.num_levels - 1), value=problem.initial_value, time_point=problem.time_start)\n\n solver.init(problem=problem, ml_provider=ml_provider, threshold=check)\n try:\n solution = solver.run(SemiImplicitMlSdcCore, dt=(problem.time_end - problem.time_start))\n return int(solution[-1].used_iterations)\n # print(\"####======> %s -> %s\" % (solution[-1].error(-1)[-1].value, linalg.norm(solution[-1].error(-1)[-1].value)))\n # return two_norm(solution[-1].error(-1)[-1].value)\n except RuntimeError:\n return max_iter + 1\n # return np.inf\n\n\ndef sdc_stability_region(num_points, max_iter, num_steps, num_nodes, num_procs, real, imag, criteria):\n _start_time = time.time()\n _test_region = {\n 'real': real,\n 'imag': imag\n }\n _dist = [\n np.abs(_test_region['real'][1] - _test_region['real'][0]),\n np.abs(_test_region['imag'][1] - _test_region['imag'][0]),\n ]\n _num_points_per_axis = {\n 'real': num_points,\n 'imag': num_points\n }\n if _dist[0] > _dist[1]:\n 
_num_points_per_axis['imag'] = int(_dist[1] / _dist[0] * num_points)\n else:\n _num_points_per_axis['real'] = int(_dist[0] / _dist[1] * num_points)\n\n _points = {\n 'real': np.linspace(_test_region['real'][0], _test_region['real'][1], _num_points_per_axis['real']),\n 'imag': np.linspace(_test_region['imag'][0], _test_region['imag'][1], _num_points_per_axis['imag'])\n }\n _results = np.zeros((_num_points_per_axis['imag'], _num_points_per_axis['real']), dtype=np.float64)\n _futures = np.zeros((_num_points_per_axis['imag'], _num_points_per_axis['real']), dtype=object)\n\n _name = \"mlsdc_stability_{:.2f}-{:.2f}_{:.2f}-{:.2f}_p{:d}_maxI{:d}_T{:d}_n0{:d}_n1{:d}\"\\\n .format(_test_region[\"real\"][0], _test_region['real'][1], _test_region['imag'][0], _test_region['imag'][1],\n num_points, max_iter, num_steps, num_nodes, (2*num_nodes - 1))\n\n with concurrent.futures.ProcessPoolExecutor(max_workers=num_procs) as pool:\n for a in range(0, _points['real'].size):\n for j in range(0, _points['imag'].size):\n _futures[j][a] = \\\n pool.submit(run_problem, _points['real'][a], _points['imag'][j], max_iter, num_steps, num_nodes,\n criteria, a * _points['imag'].size + j + 1, _points['real'].size * _points['imag'].size,\n num_procs, _start_time)\n\n for a in range(0, _points['real'].size):\n for j in range(0, _points['imag'].size):\n if _futures[j][a].exception(timeout=None) is None:\n _results[j][a] = _futures[j][a].result(timeout=None)\n else:\n _results[j][a] = max_iter\n print(\"[FAILED ] \\\\lambda = {: .3f}{:+.3f}i.\\n[ reason] {:s}\"\n .format(_points['real'][a], _points['imag'][j], _futures[j][a].exception()))\n\n with open(\"{:s}.pickle\".format(_name), 'wb') as f:\n pickle.dump(_results, f)\n print(\"Iteration Data:\\n%s\" % _results)\n\n plt.rc('text', usetex=True)\n plt.hold(True)\n # plt.title(\"MLSDC with {:d} time steps and {:d} nodes each\".format(num_steps, num_nodes))\n C = plt.contour(_points['real'], _points['imag'], _results, vmin=0, vmax=1e-7, levels=[1e-7, 
1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 0])\n plt.clabel(C, inline=1, fontsize=10, fmt='%.2e')\n CF = plt.pcolor(_points['real'], _points['imag'], _results, vmin=0, vmax=1e-2, cmap=cm.jet, rasterized=True)\n plt.xlabel(r'$\\Re(\\lambda)$')\n plt.ylabel(r'$\\Im(\\lambda)$')\n plt.grid('off')\n plt.tight_layout()\n fig = plt.gcf()\n fig.savefig(\"{:s}.png\".format(_name))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Run MLSDC Stability Anaylsis\")\n parser.add_argument('-p', '--num-pnts', nargs='?', default=10, type=int, help=\"Number of points on longest axis.\")\n parser.add_argument('-i', '--max-iter', nargs='?', default=769, type=int, help=\"Maximum number of iterations.\")\n parser.add_argument('-t', '--num-stps', nargs='?', default=1, type=int, help=\"Number of time steps.\")\n parser.add_argument('-n', '--num-ndes', nargs='?', default=5, type=int, help=\"Number of integration nodes per time step.\")\n parser.add_argument('-w', '--num-proc', nargs='?', default=8, type=int, help=\"Number of concurrent worker processes.\")\n parser.add_argument('--real', nargs=2, default=[-6.0, 3.0], type=float, help=\"Start and end of real axis.\")\n parser.add_argument('--imag', nargs=2, default=[0.0, 8.0], type=float, help=\"Start and end of imaginary axis.\")\n parser.add_argument('-c', '--criteria', nargs='?', default='error', type=str, help=\"Termination criteria.\")\n args = parser.parse_args()\n\n print(\"[ ] Calculating MLSDC Stability Regions\")\n for key in vars(args):\n print(\"[{:{fill}{align}8s}] {}\".format(key[0:8], vars(args)[key], fill=' ', align='<'))\n\n sdc_stability_region(args.num_pnts, args.max_iter, args.num_stps, args.num_ndes, args.num_proc, args.real,\n args.imag, args.criteria)\n" ]
[ [ "numpy.zeros", "matplotlib.pyplot.pcolor", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "numpy.linspace", "matplotlib.pyplot.rc", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.gcf", "matplotlib.pyplot.clabel", "matplotlib.pyplot.hold", "numpy.abs", "matplotlib.pyplot.contour" ] ]
aanavisinha/def_vit
[ "fd411eeb62fc13c0cf9ff52952748a05f1294bc7" ]
[ "models/backbone.py" ]
[ "# ------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\n\n\"\"\"\nBackbone modules.\n\"\"\"\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn.functional as F\nimport torchvision\nfrom torch import nn\nfrom torchvision.models._utils import IntermediateLayerGetter\nfrom typing import Dict, List\n\nfrom util.misc import NestedTensor, is_main_process\n\nfrom .position_encoding import build_position_encoding\n\nimport timm\n\nclass FrozenBatchNorm2d(torch.nn.Module):\n \"\"\"\n BatchNorm2d where the batch statistics and the affine parameters are fixed.\n\n Copy-paste from torchvision.misc.ops with added eps before rqsrt,\n without which any other models than torchvision.models.resnet[18,34,50,101]\n produce nans.\n \"\"\"\n\n def __init__(self, n, eps=1e-5):\n super(FrozenBatchNorm2d, self).__init__()\n self.register_buffer(\"weight\", torch.ones(n))\n self.register_buffer(\"bias\", torch.zeros(n))\n self.register_buffer(\"running_mean\", torch.zeros(n))\n self.register_buffer(\"running_var\", torch.ones(n))\n self.eps = eps\n\n def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n num_batches_tracked_key = prefix + 'num_batches_tracked'\n if num_batches_tracked_key in state_dict:\n del state_dict[num_batches_tracked_key]\n\n super(FrozenBatchNorm2d, self)._load_from_state_dict(\n state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs)\n\n def forward(self, x):\n # move reshapes to the beginning\n 
# to make it fuser-friendly\n w = self.weight.reshape(1, -1, 1, 1)\n b = self.bias.reshape(1, -1, 1, 1)\n rv = self.running_var.reshape(1, -1, 1, 1)\n rm = self.running_mean.reshape(1, -1, 1, 1)\n eps = self.eps\n scale = w * (rv + eps).rsqrt()\n bias = b - rm * scale\n return x * scale + bias\n\n\nclass BackboneBase(nn.Module):\n\n def __init__(self, backbone: nn.Module, train_backbone: bool, return_interm_layers: bool):\n super().__init__()\n for name, parameter in backbone.named_parameters():\n if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:\n parameter.requires_grad_(False)\n if return_interm_layers:\n # return_layers = {\"layer1\": \"0\", \"layer2\": \"1\", \"layer3\": \"2\", \"layer4\": \"3\"}\n return_layers = {\"layer2\": \"0\", \"layer3\": \"1\", \"layer4\": \"2\"}\n self.strides = [8, 16, 32]\n self.num_channels = [512, 1024, 2048]\n else:\n return_layers = {'layer4': \"0\"}\n self.strides = [32]\n self.num_channels = [2048]\n self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)\n\n def forward(self, tensor_list: NestedTensor):\n xs = self.body(tensor_list.tensors)\n out: Dict[str, NestedTensor] = {}\n for name, x in xs.items():\n m = tensor_list.mask\n assert m is not None\n mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]\n out[name] = NestedTensor(x, mask)\n return out\n\n\nclass Backbone(BackboneBase):\n \"\"\"ResNet backbone with frozen BatchNorm.\"\"\"\n def __init__(self, name: str,\n train_backbone: bool,\n return_interm_layers: bool,\n dilation: bool):\n norm_layer = FrozenBatchNorm2d\n backbone = getattr(torchvision.models, name)(\n replace_stride_with_dilation=[False, False, dilation],\n pretrained=is_main_process(), norm_layer=norm_layer)\n assert name not in ('resnet18', 'resnet34'), \"number of channels are hard coded\"\n super().__init__(backbone, train_backbone, return_interm_layers)\n if dilation:\n self.strides[-1] = self.strides[-1] // 2\n 
\n\n# VIT BACKBONE\n\nclass ViTBackbone():\n def __init__(self): \n self.body = timm.create_model('vit_base_patch16_384', pretrained=True)\n self.body = self.body.cuda()\n self.body.num_classes = 0\n self.strides = [32]\n self.num_channels = [2048]\n \n def forward(self, tensor_list: NestedTensor):\n x = tensor_list.tensors\n x = self.body.patch_embed(x)\n x = self.body.pos_drop(x)\n for module in self.body.blocks:\n x = module(x)\n x = self.body.norm(x)\n x = torch.reshape(x, (-1, 2048, 18, 12))\n x = self.body.pre_logits(x)\n\n m = tensor_list.mask\n mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]\n\n out: Dict[str, NestedTensor] = {}\n out['0'] = NestedTensor(x, mask)\n\n return out\n \n def __call__(self, tensor_list: NestedTensor):\n return self.forward(tensor_list)\n \n \n# VIT INT GETTER\nclass ViTBackboneInt(nn.Module):\n def __init__(self, train_backbone: bool, channel_768: bool):\n super().__init__()\n if tiny:\n self.body = IntermediateLayerGetter(timm.create_model('vit_small_patch16_224', pretrained=True), return_layers={'blocks': '0'})\n self.tiny = True\n else:\n self.body = IntermediateLayerGetter(timm.create_model('vit_base_patch16_384', pretrained=True), return_layers={'blocks': '0'})\n self.tiny = False\n\n if train_backbone:\n for name, parameter in self.body.named_parameters():\n parameter.requires_grad_(True)\n else:\n for name, parameter in self.body.named_parameters():\n parameter.requires_grad_(False)\n\n if channel_768:\n self.num_channels = [768]\n else:\n self.num_channels = [2048]\n \n self.strides = [32]\n \n \nclass Joiner(nn.Sequential):\n def __init__(self, backbone, position_embedding):\n super().__init__(backbone, position_embedding)\n self.strides = backbone.strides\n self.num_channels = backbone.num_channels\n\n def forward(self, tensor_list: NestedTensor):\n xs = self[0](tensor_list)\n out: List[NestedTensor] = []\n pos = []\n for name, x in sorted(xs.items()):\n out.append(x)\n\n # position encoding\n 
for x in out:\n pos.append(self[1](x).to(x.tensors.dtype))\n\n return out, pos\n \n# VIT JOINER\n\nclass ViTJoiner(ViTBackbone):\n def __init__(self, backbone, position_embedding):\n self.backbone = backbone\n self.position_embedding = position_embedding\n self.strides = backbone.strides\n self.num_channels = backbone.num_channels\n \n def forward(self, tensor_list: NestedTensor):\n xs = self.backbone(tensor_list)\n out: List[NestedTensor] = []\n pos = []\n for name, x in sorted(xs.items()):\n out.append(x)\n # position encoding\n pos.append(self.position_embedding(x).to(x.tensors.dtype))\n\n return out, pos\n \n def __call__(self, tensor_list: NestedTensor):\n return self.forward(tensor_list)\n\n\ndef build_backbone(args):\n position_embedding = build_position_encoding(args)\n if args.backbone == 'vit':\n tiny = args.epochs == 152\n train_backbone = args.lr_backbone > 0\n channel_768 = args.epochs == 151\n backbone = ViTBackboneInt(train_backbone = train_backbone, channel_768 = channel_768)\n model = Joiner(backbone, position_embedding)\n model.num_channels = backbone.num_channels\n return model\n else:\n train_backbone = args.lr_backbone > 0\n return_interm_layers = args.masks or (args.num_feature_levels > 1)\n backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)\n model = Joiner(backbone, position_embedding)\n return model\n \n" ]
[ [ "torch.ones", "torch.zeros", "torch.reshape" ] ]
PandoraLS/Seq2Seq
[ "96851fd7d0fe2a7cbcaafb3b2d41d58541ad652b" ]
[ "experiment/trainer.py" ]
[ "# -*- coding: utf-8 -*-\n# @Time : 2021/4/22 下午12:37\n\nimport torch\nimport random\nfrom experiment.base_trainer import BaseTrainer\n\nclass Trainer(BaseTrainer):\n def __init__(self,\n config: dict,\n resume: bool,\n encoder,\n decoder,\n optim_enc,\n optim_dec,\n loss_fucntion,\n visual,\n dataset,\n word2indexs,\n sentence_max_length):\n super().__init__(config, resume, encoder, decoder, optim_enc, optim_dec, loss_fucntion, visual)\n self.SOS_token = 0\n self.EOS_token = 1\n self.dataset = dataset\n self.word2indexs = word2indexs\n self.sentence_max_length = sentence_max_length # 输入sentence的最大长度,与data_prep.py中的max_length保持一致\n self.print_loss_every = 1000 # 每1000个iter打印一次loss值\n self.print_loss_total = 0\n self.n_iter = 0\n\n def _train_epoch(self, epoch):\n # TODO 目前这种载入数据的方法非常难用,需要自定义dataloader方法,参考下面链接\n # https://github.com/PandoraLS/Chinese-Text-Classification-Pytorch/blob/master/utils.py\n for i in range(self.dataset.length):\n self.n_iter += 1\n (src_sentence, tar_sentence) = self.dataset.__getitem__(i)\n input_tensor, output_tensor = self._pair_to_tensor(src_sentence, tar_sentence) # 把数据转换为tensor\n loss_iter = self._train_iter(input_tensor, output_tensor, self.sentence_max_length)\n self.print_loss_total += loss_iter\n\n if self.n_iter % self.print_loss_every == 0:\n print_loss_avg = self.print_loss_total / self.print_loss_every\n self.print_loss_total = 0\n print(\"iter / current 1000 iter mean loss: {} / {:.4f}\".format(self.n_iter, print_loss_avg))\n\n # 验证当前翻译效果\n print(\"input: \", src_sentence)\n output_words = self._eval_iter(input_tensor, self.sentence_max_length)\n print(\"predict: \", ' '.join(output_words))\n print(\"groundtruth: \", tar_sentence)\n print()\n\n with torch.no_grad():\n if self.visual:\n self.writer.add_scalars(f\"模型/损失值_n_iter\", {\n \"loss_iter\": loss_iter\n }, self.n_iter)\n \n def _train_iter(self, input_tensor, output_tensor, max_length):\n \"\"\"\n 对单个输入的input_tensor, output_tensor进行训练\n Args:\n input_tensor: 
tensor格式的输入sentence\n output_tensor: tensor格式的输出sentence\n max_length: 筛选得到的的sentence的最大长度, 这里为10\n Returns:\n 当前iter的loss值\n \"\"\"\n encoder_hidden = self.encoder.init_hidden().to(self.device)\n self.optimizer_enc.zero_grad()\n self.optimizer_dec.zero_grad()\n input_length, output_length = input_tensor.size(0), output_tensor.size(0)\n # 这里的10是因为数据集筛选max_length=10的sentence\n encoder_outputs = torch.zeros(max_length, self.encoder.hidden_size, device=self.device)\n \n loss = 0\n for ei in range(input_length):\n # encoder 每次读取一个词, 重复input_length次\n encoder_output, encoder_hidden = self.encoder(input_tensor[ei], encoder_hidden)\n # encoder_output: [1, 1, hidden_size]\n # encoder_output[ei]: [hidden_size]\n encoder_outputs[ei] = encoder_output[0, 0]\n \n decoder_input = torch.tensor([[self.SOS_token]], device=self.device)\n decoder_hidden = encoder_hidden\n \n use_teacher_forcing = True if random.random() < self.teacher_forcing_ratio else False\n if use_teacher_forcing:\n # Teacher forcing: Feed the target as the next input\n for di in range(output_length):\n decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden) # , encoder_outputs)\n # decoder_output: [1, V] 值为每个单词的概率\n loss += self.loss_function(decoder_output, output_tensor[di])\n decoder_input = output_tensor[di]\n else:\n # without teacher forcing: use its own predictions as the next input\n for di in range(output_length):\n decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden) # , encoder_outputs)\n topv, topi = decoder_output.topk(1)\n decoder_input = topi.squeeze().detach() # detach from history as input\n\n loss += self.loss_function(decoder_output, output_tensor[di])\n if decoder_input.item() == self.EOS_token:\n break\n \n loss.backward()\n self.optimizer_enc.step()\n self.optimizer_dec.step()\n \n return loss.item() / output_length\n\n def _eval_iter(self, input_tensor, max_length):\n with torch.no_grad():\n input_length = input_tensor.size(0)\n encoder_hidden = 
self.encoder.init_hidden().to(self.device)\n encoder_outputs = torch.zeros(max_length, self.encoder.hidden_size, device=self.device)\n\n for ei in range(input_length):\n # encoder 每次读取一个词, 重复input_length次\n encoder_output, encoder_hidden = self.encoder(input_tensor[ei], encoder_hidden)\n # encoder_output: [1, 1, hidden_size]\n # encoder_output[ei]: [hidden_size]\n encoder_outputs[ei] += encoder_output[0, 0]\n\n decoder_input = torch.tensor([[self.SOS_token]], device=self.device)\n decoder_hidden = encoder_hidden\n\n decoded_words = []\n for di in range(max_length):\n decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden) # , encoder_outputs)\n topv, topi = decoder_output.topk(1)\n\n if topi.item() == self.EOS_token:\n decoded_words.append('<EOS>')\n break\n else:\n decoded_words.append(self.word2indexs.output_lang.index2word[topi.item()])\n decoder_input = topi.squeeze().detach() # detach from history as input\n return decoded_words[:-1]\n \n def _pair_to_tensor(self, input_sentence, output_sentence):\n \"\"\"\n 将dataloader得到的[法文语句, 英文语句]映射为index后,转换为tensor\n Args:\n input_sentence (str): 法文语句\n output_sentence (str): 英文语句 \n Returns (tuple):\n input_tensor, output_tensor\n \"\"\"\n input_indexes = [self.word2indexs.input_lang.word2index[word] for word in input_sentence.split(' ')]\n input_indexes.append(self.EOS_token)\n input_tensor = torch.tensor(input_indexes, dtype=torch.long, device=self.device).view(-1, 1)\n\n output_indexes = [self.word2indexs.output_lang.word2index[word] for word in output_sentence.split(' ')]\n output_indexes.append(self.EOS_token)\n output_tensor = torch.tensor(output_indexes, dtype=torch.long, device=self.device).view(-1, 1)\n\n return input_tensor, output_tensor\n\n def _visualize_weights_and_grads(self, model, epoch):\n # 绘制模型训练曲线\n if self.visual:\n for name, param in model.named_parameters():\n self.writer.add_histogram(\"WEIGHT_\" + name, param.clone().cpu().data.numpy(), epoch)\n 
self.writer.add_histogram(\"GRAD_\" + name, param.grad.cpu().numpy(), epoch)\n\n def _validation_epoch(self, epoch):\n raise NotImplementedError\n" ]
[ [ "torch.zeros", "torch.no_grad", "torch.tensor" ] ]
d-v-b/CNNectome
[ "2b1f4786282306edf94b231c9fcf64419d8d1e2a" ]
[ "training/anisotropic/train_dist_cleftprepost_deluxe.py" ]
[ "from __future__ import print_function\nfrom gunpowder import *\nfrom gunpowder.tensorflow import *\nfrom gunpowder.contrib import (\n ZeroOutConstSections,\n AddBoundaryDistance,\n AddDistance,\n AddPrePostCleftDistance,\n)\nimport gpn\nimport tensorflow as tf\nimport os\nimport math\nimport json\nimport csv\nimport logging\n\n\ndef make_cleft_to_prepostsyn_neuron_id_dict(csv_files):\n cleft_to_pre = dict()\n cleft_to_post = dict()\n for csv_f in csv_files:\n f = open(csv_f, \"r\")\n reader = csv.reader(f)\n reader.next()\n for row in reader:\n if int(row[10]) >= 0:\n try:\n cleft_to_pre[int(row[10])].add(int(row[0]))\n except KeyError:\n cleft_to_pre[int(row[10])] = {int(row[0])}\n try:\n cleft_to_post[int(row[10])].add(int(row[5]))\n except KeyError:\n cleft_to_post[int(row[10])] = {int(row[5])}\n return cleft_to_pre, cleft_to_post\n\n\ndef train_until(\n max_iteration,\n data_sources,\n input_shape,\n output_shape,\n dt_scaling_factor,\n loss_name,\n cremi_version,\n aligned,\n):\n ArrayKey(\"RAW\")\n ArrayKey(\"ALPHA_MASK\")\n ArrayKey(\"GT_LABELS\")\n ArrayKey(\"GT_CLEFTS\")\n ArrayKey(\"GT_MASK\")\n ArrayKey(\"TRAINING_MASK\")\n ArrayKey(\"CLEFT_SCALE\")\n ArrayKey(\"PRE_SCALE\")\n ArrayKey(\"POST_SCALE\")\n ArrayKey(\"LOSS_GRADIENT\")\n ArrayKey(\"GT_CLEFT_DIST\")\n ArrayKey(\"PRED_CLEFT_DIST\")\n ArrayKey(\"GT_PRE_DIST\")\n ArrayKey(\"PRED_PRE_DIST\")\n ArrayKey(\"GT_POST_DIST\")\n ArrayKey(\"PRED_POST_DIST\")\n ArrayKey(\"GT_POST_DIST\")\n data_providers = []\n if cremi_version == \"2016\":\n cremi_dir = \"/groups/saalfeld/saalfeldlab/larissa/data/cremi-2016/\"\n filename = \"sample_{0:}_padded_20160501.\"\n elif cremi_version == \"2017\":\n cremi_dir = \"/groups/saalfeld/saalfeldlab/larissa/data/cremi-2017/\"\n filename = \"sample_{0:}_padded_20170424.\"\n if aligned:\n filename += \"aligned.\"\n filename += \"0bg.hdf\"\n if tf.train.latest_checkpoint(\".\"):\n trained_until = int(tf.train.latest_checkpoint(\".\").split(\"_\")[-1])\n print(\"Resuming 
training from\", trained_until)\n else:\n trained_until = 0\n print(\"Starting fresh training\")\n for sample in data_sources:\n print(sample)\n h5_source = Hdf5Source(\n os.path.join(cremi_dir, filename.format(sample)),\n datasets={\n ArrayKeys.RAW: \"volumes/raw\",\n ArrayKeys.GT_CLEFTS: \"volumes/labels/clefts\",\n ArrayKeys.GT_MASK: \"volumes/masks/groundtruth\",\n ArrayKeys.TRAINING_MASK: \"volumes/masks/validation\",\n ArrayKeys.GT_LABELS: \"volumes/labels/neuron_ids\",\n },\n array_specs={\n ArrayKeys.GT_MASK: ArraySpec(interpolatable=False),\n ArrayKeys.GT_CLEFTS: ArraySpec(interpolatable=False),\n ArrayKeys.TRAINING_MASK: ArraySpec(interpolatable=False),\n },\n )\n data_providers.append(h5_source)\n\n if cremi_version == \"2017\":\n csv_files = [\n os.path.join(cremi_dir, \"cleft-partners_\" + sample + \"_2017.csv\")\n for sample in data_sources\n ]\n elif cremi_version == \"2016\":\n csv_files = [\n os.path.join(\n cremi_dir,\n \"cleft-partners-\" + sample + \"-20160501.aligned.corrected.csv\",\n )\n for sample in data_sources\n ]\n cleft_to_pre, cleft_to_post = make_cleft_to_prepostsyn_neuron_id_dict(csv_files)\n print(cleft_to_pre, cleft_to_post)\n with open(\"net_io_names.json\", \"r\") as f:\n net_io_names = json.load(f)\n\n voxel_size = Coordinate((40, 4, 4))\n input_size = Coordinate(input_shape) * voxel_size\n output_size = Coordinate(output_shape) * voxel_size\n context = input_size - output_size\n # specifiy which Arrays should be requested for each batch\n request = BatchRequest()\n request.add(ArrayKeys.RAW, input_size)\n request.add(ArrayKeys.GT_LABELS, output_size)\n request.add(ArrayKeys.GT_CLEFTS, output_size)\n request.add(ArrayKeys.GT_MASK, output_size)\n request.add(ArrayKeys.TRAINING_MASK, output_size)\n request.add(ArrayKeys.CLEFT_SCALE, output_size)\n request.add(ArrayKeys.GT_CLEFT_DIST, output_size)\n request.add(ArrayKeys.GT_PRE_DIST, output_size)\n request.add(ArrayKeys.GT_POST_DIST, output_size)\n\n # create a tuple of data 
sources, one for each HDF file\n data_sources = tuple(\n provider\n + Normalize(ArrayKeys.RAW)\n + IntensityScaleShift( # ensures RAW is in float in [0, 1]\n ArrayKeys.TRAINING_MASK, -1, 1\n )\n +\n # zero-pad provided RAW and GT_MASK to be able to draw batches close to\n # the boundary of the available data\n # size more or less irrelevant as followed by Reject Node\n Pad(ArrayKeys.RAW, None)\n + Pad(ArrayKeys.GT_MASK, None)\n + Pad(ArrayKeys.TRAINING_MASK, context)\n + RandomLocation(min_masked=0.99, mask=ArrayKeys.TRAINING_MASK)\n + Reject( # chose a random location inside the provided arrays\n ArrayKeys.GT_MASK\n )\n + Reject( # reject batches which do contain less than 50% labelled data\n ArrayKeys.GT_CLEFTS, min_masked=0.0, reject_probability=0.95\n )\n for provider in data_providers\n )\n\n snapshot_request = BatchRequest(\n {\n ArrayKeys.LOSS_GRADIENT: request[ArrayKeys.GT_CLEFTS],\n ArrayKeys.PRED_CLEFT_DIST: request[ArrayKeys.GT_CLEFT_DIST],\n ArrayKeys.PRED_PRE_DIST: request[ArrayKeys.GT_PRE_DIST],\n ArrayKeys.PRED_POST_DIST: request[ArrayKeys.GT_POST_DIST],\n }\n )\n\n artifact_source = (\n Hdf5Source(\n os.path.join(cremi_dir, \"sample_ABC_padded_20160501.defects.hdf\"),\n datasets={\n ArrayKeys.RAW: \"defect_sections/raw\",\n ArrayKeys.ALPHA_MASK: \"defect_sections/mask\",\n },\n array_specs={\n ArrayKeys.RAW: ArraySpec(voxel_size=(40, 4, 4)),\n ArrayKeys.ALPHA_MASK: ArraySpec(voxel_size=(40, 4, 4)),\n },\n )\n + RandomLocation(min_masked=0.05, mask=ArrayKeys.ALPHA_MASK)\n + Normalize(ArrayKeys.RAW)\n + IntensityAugment(ArrayKeys.RAW, 0.9, 1.1, -0.1, 0.1, z_section_wise=True)\n + ElasticAugment((4, 40, 40), (0, 2, 2), (0, math.pi / 2.0), subsample=8)\n + SimpleAugment(transpose_only=[1, 2], mirror_only=[1, 2])\n )\n\n train_pipeline = (\n data_sources\n + RandomProvider()\n + SimpleAugment(transpose_only=[1, 2], mirror_only=[1, 2])\n + gpn.ElasticAugment(\n (40, 4, 4),\n (4, 40, 40),\n (0.0, 2.0, 2.0),\n (0, math.pi / 2.0),\n spatial_dims=3,\n 
subsample=8,\n )\n + gpn.Misalign(\n 40,\n prob_slip=0.05,\n prob_shift=0.05,\n max_misalign=10,\n ignore_keys_for_slip=(\n ArrayKeys.GT_CLEFTS,\n ArrayKeys.GT_MASK,\n ArrayKeys.TRAINING_MASK,\n ArrayKeys.GT_LABELS,\n ),\n )\n + IntensityAugment(ArrayKeys.RAW, 0.9, 1.1, -0.1, 0.1, z_section_wise=True)\n + DefectAugment(\n ArrayKeys.RAW,\n prob_missing=0.03,\n prob_low_contrast=0.01,\n prob_artifact=0.03,\n artifact_source=artifact_source,\n artifacts=ArrayKeys.RAW,\n artifacts_mask=ArrayKeys.ALPHA_MASK,\n contrast_scale=0.5,\n )\n + IntensityScaleShift(ArrayKeys.RAW, 2, -1)\n + ZeroOutConstSections(ArrayKeys.RAW)\n + AddDistance(\n label_array_key=ArrayKeys.GT_CLEFTS,\n distance_array_key=ArrayKeys.GT_CLEFT_DIST,\n normalize=\"tanh\",\n normalize_args=dt_scaling_factor,\n )\n + AddPrePostCleftDistance(\n ArrayKeys.GT_CLEFTS,\n ArrayKeys.GT_LABELS,\n ArrayKeys.GT_PRE_DIST,\n ArrayKeys.GT_POST_DIST,\n cleft_to_pre,\n cleft_to_post,\n normalize=\"tanh\",\n normalize_args=dt_scaling_factor,\n include_cleft=False,\n )\n + BalanceByThreshold(\n labels=ArrayKeys.GT_CLEFT_DIST,\n scales=ArrayKeys.CLEFT_SCALE,\n mask=ArrayKeys.GT_MASK,\n )\n + BalanceByThreshold(\n labels=ArrayKeys.GT_PRE_DIST,\n scales=ArrayKeys.PRE_SCALE,\n mask=ArrayKeys.GT_MASK,\n threshold=-0.5,\n )\n + BalanceByThreshold(\n labels=ArrayKeys.GT_POST_DIST,\n scales=ArrayKeys.POST_SCALE,\n mask=ArrayKeys.GT_MASK,\n threshold=-0.5,\n )\n + PreCache(cache_size=40, num_workers=10)\n + Train(\n \"unet\",\n optimizer=net_io_names[\"optimizer\"],\n loss=net_io_names[loss_name],\n inputs={\n net_io_names[\"raw\"]: ArrayKeys.RAW,\n net_io_names[\"gt_cleft_dist\"]: ArrayKeys.GT_CLEFT_DIST,\n net_io_names[\"gt_pre_dist\"]: ArrayKeys.GT_PRE_DIST,\n net_io_names[\"gt_post_dist\"]: ArrayKeys.GT_POST_DIST,\n net_io_names[\"loss_weights_cleft\"]: ArrayKeys.CLEFT_SCALE,\n net_io_names[\"loss_weights_pre\"]: ArrayKeys.CLEFT_SCALE,\n net_io_names[\"loss_weights_post\"]: ArrayKeys.CLEFT_SCALE,\n net_io_names[\"mask\"]: 
ArrayKeys.GT_MASK,\n },\n summary=net_io_names[\"summary\"],\n log_dir=\"log\",\n outputs={\n net_io_names[\"cleft_dist\"]: ArrayKeys.PRED_CLEFT_DIST,\n net_io_names[\"pre_dist\"]: ArrayKeys.PRED_PRE_DIST,\n net_io_names[\"post_dist\"]: ArrayKeys.PRED_POST_DIST,\n },\n gradients={net_io_names[\"cleft_dist\"]: ArrayKeys.LOSS_GRADIENT},\n )\n + Snapshot(\n {\n ArrayKeys.RAW: \"volumes/raw\",\n ArrayKeys.GT_CLEFTS: \"volumes/labels/gt_clefts\",\n ArrayKeys.GT_CLEFT_DIST: \"volumes/labels/gt_clefts_dist\",\n ArrayKeys.PRED_CLEFT_DIST: \"volumes/labels/pred_clefts_dist\",\n ArrayKeys.LOSS_GRADIENT: \"volumes/loss_gradient\",\n ArrayKeys.PRED_PRE_DIST: \"volumes/labels/pred_pre_dist\",\n ArrayKeys.PRED_POST_DIST: \"volumes/labels/pred_post_dist\",\n ArrayKeys.GT_PRE_DIST: \"volumes/labels/gt_pre_dist\",\n ArrayKeys.GT_POST_DIST: \"volumes/labels/gt_post_dist\",\n },\n every=500,\n output_filename=\"batch_{iteration}.hdf\",\n output_dir=\"snapshots/\",\n additional_request=snapshot_request,\n )\n + PrintProfilingStats(every=50)\n )\n\n print(\"Starting training...\")\n with build(train_pipeline) as b:\n for i in range(max_iteration):\n b.request_batch(request)\n\n print(\"Training finished\")\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n data_sources = [\"A\", \"B\", \"C\"] # , 'B', 'C']\n input_shape = (43, 430, 430)\n output_shape = (23, 218, 218)\n dt_scaling_factor = 50\n max_iteration = 400000\n loss_name = \"loss_total\"\n cremi_version = \"2017\"\n aligned = True\n train_until(\n max_iteration,\n data_sources,\n input_shape,\n output_shape,\n dt_scaling_factor,\n loss_name,\n cremi_version,\n aligned,\n )\n" ]
[ [ "tensorflow.train.latest_checkpoint" ] ]
YKSIAT/InceptionV4
[ "95559af854c63b673e8fc21b679eae9765b33411" ]
[ "test2.py" ]
[ "# -*- coding: utf-8 -*-\r\nimport tensorflow as tf\r\nimport Input_Data\r\nLEARNING_RATE = 0.0001\r\nSTEPS = 5000\r\nBATCH = 50\r\nN_CLASSES = 2\r\nIMAGE_WEIGHT = 299\r\nIMAGE_HEIGHT = 299\r\nBATCH_SIZE = 10\r\nCAPACITY = 100\r\nEPOCH = 10\r\n\r\n# INPUT_DATA = \"F:\\\\Program\\\\Data_test\\\\dogvscat\\\\train\"\r\nINPUT_DATA = \"F:\\\\Data\\\\Test\\\\test1025\"\r\n\r\nimages = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMAGE_WEIGHT, IMAGE_HEIGHT, 3], name=\"input_images\")\r\nlabels = tf.placeholder(tf.int16, shape=[BATCH_SIZE, N_CLASSES], name=\"labels\") # one_hot\r\n\r\ntrain_image_list, train_label_list = Input_Data.get_image_label_list(INPUT_DATA)\r\ntrain_image_bath, train_label_batch = \\\r\n Input_Data.get_image_label_batch(train_image_list,\r\n train_label_list,\r\n IMAGE_WEIGHT,\r\n IMAGE_HEIGHT,\r\n BATCH_SIZE,\r\n CAPACITY)\r\n\r\nwith tf.Session() as sess:\r\n coord = tf.train.Coordinator()\r\n threads = tf.train.start_queue_runners(sess, coord)\r\n\r\n try:\r\n for i in range(50):\r\n images_batch, labels_batch = sess.run([train_image_bath, train_label_batch])\r\n print(labels_batch)\r\n # print(len(train_image_bath))\r\n except tf.errors.OutOfRangeError:\r\n print(\"done\")\r\n finally:\r\n coord.request_stop()\r\n coord.join(threads)\r\n\r\n\r\n\r\n\r\n\r\n" ]
[ [ "tensorflow.train.start_queue_runners", "tensorflow.Session", "tensorflow.placeholder", "tensorflow.train.Coordinator" ] ]
87003697/FewX-mmdet
[ "3b8f634aca1f8c41a8d5e5d081d335b947bf57fc" ]
[ "mmdet/models/detectors/fsod_rcnn.py" ]
[ "import torch\n\nfrom ..builder import DETECTORS, build_backbone, build_head, build_neck\nfrom .base import BaseDetector\nimport pandas as pd\nimport pdb\[email protected]_module()\nclass FsodRCNN(BaseDetector):\n \"\"\"Base class for two-stage detectors of FSOD\n\n Two-stage detectors typically consisting of a region proposal network and a\n task-specific regression head.\n \"\"\"\n\n def __init__(self,\n backbone,\n neck=None,\n rpn_head=None,\n roi_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n super(FsodRCNN, self).__init__(init_cfg)\n backbone.pretrained = pretrained\n self.backbone = build_backbone(backbone)\n\n if neck is not None:\n self.neck = build_neck(neck)\n\n if rpn_head is not None:\n rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None\n rpn_head_ = rpn_head.copy()\n rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)\n self.rpn_head = build_head(rpn_head_)\n\n if roi_head is not None:\n # update train and test cfg here for now\n # TODO: refactor assigner & sampler\n rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None\n roi_head.update(train_cfg=rcnn_train_cfg)\n roi_head.update(test_cfg=test_cfg.rcnn)\n roi_head.pretrained = pretrained\n self.roi_head = build_head(roi_head)\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n @property\n def with_rpn(self):\n \"\"\"bool: whether the detector has RPN\"\"\"\n return hasattr(self, 'rpn_head') and self.rpn_head is not None\n\n @property\n def with_roi_head(self):\n \"\"\"bool: whether the detector has a RoI head\"\"\"\n return hasattr(self, 'roi_head') and self.roi_head is not None\n\n def extract_feat(self, img):\n \"\"\"Directly extract features from the backbone+neck.\"\"\"\n x = self.backbone(img)\n if self.with_neck:\n x = self.neck(x)\n return x\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network flops.\n\n See `mmdetection/tools/analysis_tools/get_flops.py`\n \"\"\"\n outs = ()\n # 
backbone\n x = self.extract_feat(img)\n # rpn\n if self.with_rpn:\n rpn_outs = self.rpn_head(x)\n outs = outs + (rpn_outs, )\n proposals = torch.randn(1000, 4).to(img.device)\n # roi_head\n roi_outs = self.roi_head.forward_dummy(x, proposals)\n outs = outs + (roi_outs, )\n return outs\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None,\n proposals=None,\n support_imgs = None,\n support_bboxes = None,\n support_labels = None,\n **kwargs):\n \"\"\"\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n\n gt_labels (list[Tensor]): class indices corresponding to each box\n\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n proposals : override rpn proposals with custom proposals. 
Use when\n `with_rpn` is False.\n\n support_imgs (None | List): support img\n\n support_bboxes (None | List): support bboxes\n\n support_labels (None | List): supprt labels\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n x = self.extract_feat(img)\n \n # squeeze unused dimensions in bboxs and labels\n # originally these dimensions are for bypass some configs in mmdet\n support_bboxes = torch.squeeze(support_bboxes)\n support_labels = torch.squeeze(support_labels)\n\n # extract support features\n B, N, C, H, W = support_imgs.shape\n support_imgs = support_imgs.reshape(B*N, C, H, W)\n support_features = self.extract_feat(support_imgs)\n\n\n assert self.with_rpn # otherwise it's not fsod :)\n losses_rpn_cls, losses_rpn_bbox, losses_cls, losses_bbox, acces = [], [], [], [], []\n \n # RPN forward and loss\n for i in range(B):\n\n losses_perbatch = dict()\n x_i = tuple(x_[i].unsqueeze(0) for x_ in x)\n\n assert self.with_rpn, 'we need rpn with feature aggregation'\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n # cls_dim = torch.zeros_like(support_bboxes[i])\n # _support_bboxes = torch.cat([cls_dim, support_bboxes[i]], axis = -1)[:,3:].float().contiguous()\n # self.roi_head.bbox_roi_extractor(support_features,_support_bboxes)\n \n # # extract roi features\n batch_size = support_bboxes.shape[1]\n support_bbox_features = []\n for support_features_ in support_features:\n for support_feature, support_bbox in zip(support_features_[i * batch_size: (i + 1) * batch_size],support_bboxes[i]):\n # extract roi features in res5\n support_bbox = torch.cat([torch.zeros_like(support_bbox[:1]), support_bbox]).float().contiguous()\n support_bbox_features.append(self.roi_head.bbox_roi_extractor([support_feature.unsqueeze(0)],support_bbox.unsqueeze(0)))\n\n rpn_losses, proposal_list = self.rpn_head.forward_train(\n x_i,\n [img_metas[i]], \n [gt_bboxes[i]],\n gt_labels=None,\n gt_bboxes_ignore=gt_bboxes_ignore,\n 
proposal_cfg=proposal_cfg)\n #proposal_list[0].shape = torch.Size([2000, 5])\n losses_perbatch.update(rpn_losses)\n\n roi_losses = self.roi_head.forward_train(x_i, [img_metas[i]], proposal_list,\n [gt_bboxes[i]], [gt_labels[i]],\n support_bbox_features, #support_features,\n gt_bboxes_ignore, gt_masks, \n **kwargs)\n losses_perbatch.update(roi_losses)\n # losses_perbatch.keys() = dict_keys(['loss_rpn_cls', 'loss_rpn_bbox', 'loss_cls', 'acc', 'loss_bbox'])\n\n # update losses\n losses_rpn_cls.append(torch.stack(losses_perbatch['loss_rpn_cls']).sum())\n losses_rpn_bbox.append(torch.stack(losses_perbatch['loss_rpn_bbox']).sum())\n losses_cls.append(losses_perbatch['loss_cls'])\n acces.append(losses_perbatch['acc'])\n losses_bbox.append(losses_perbatch['loss_bbox'])\n\n # sum up losses\n losses = dict()\n losses['loss_rpn_cls'] = torch.stack(losses_rpn_cls).mean()\n losses['loss_rpn_bbox'] = torch.stack(losses_rpn_bbox).mean()\n losses['loss_cls'] = torch.stack(losses_cls).mean()\n losses['acc'] = torch.stack(acces).mean()\n losses['loss_bbox'] = torch.stack(losses_bbox).mean()\n\n return losses\n\n def simple_test(self, img, img_metas, proposals=None, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n # print('start simple testing')\n assert self.with_bbox, 'Bbox head must be implemented.'\n \n x = self.extract_feat(img)\n # get origin input shape to onnx dynamic input shape\n if torch.onnx.is_in_onnx_export():\n img_shape = torch._shape_as_tensor(img)[2:]\n img_metas[0]['img_shape_for_onnx'] = img_shape\n\n if proposals is None:\n proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)\n else:\n proposal_list = proposals\n\n return self.roi_head.simple_test(\n x, proposal_list, img_metas, support_bbox_features = [], rescale=rescale)\n\n async def async_simple_test(self,\n img,\n img_meta,\n proposals=None,\n rescale=False):\n \"\"\"Async test without augmentation.\"\"\"\n # raise NotImplementedError\n # print('start async simple testing')\n assert 
self.with_bbox, 'Bbox head must be implemented.'\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = await self.rpn_head.async_simple_test_rpn(\n x, img_meta)\n else:\n proposal_list = proposals\n\n return await self.roi_head.async_simple_test(\n x, proposal_list, img_meta, rescale=rescale)\n\n def aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n # raise NotImplementedError\n # print('start aug testing')\n x = self.extract_feats(imgs)\n proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)\n return self.roi_head.aug_test(\n x, proposal_list, img_metas, rescale=rescale)\n" ]
[ [ "torch.stack", "torch.onnx.is_in_onnx_export", "torch._shape_as_tensor", "torch.squeeze", "torch.zeros_like", "torch.randn" ] ]
svortega/steelpy
[ "bef35eb8ab8728fc29f57b7070b5f3bac0b0e840" ]
[ "steelpy/process/math/runkut5.py" ]
[ "## module run_kut5\n\nimport math\nimport numpy as np\n\ndef integrate(F,x,y,xStop,h,tol=1.0e-6):\n ''' X,Y = integrate(F,x,y,xStop,h,tol=1.0e-6).\n Adaptive Runge-Kutta method with Dormand-Price\n coefficients for solving the\n initial value problem {y}' = {F(x,{y})}, where\n {y} = {y[0],y[1],...y[n-1]}.\n \n x,y = initial conditions\n xStop = terminal value of x\n h = initial increment of x used in integration\n tol = per-step error tolerance\n F = user-supplied function that returns the\n array F(x,y) = {y'[0],y'[1],...,y'[n-1]}.\n ''' \n \n a1 = 0.2; a2 = 0.3; a3 = 0.8; a4 = 8/9; a5 = 1.0\n a6 = 1.0\n \n c0 = 35/384; c2 = 500/1113; c3 = 125/192 \n c4 = -2187/6784; c5 = 11/84\n \n d0 = 5179/57600; d2 = 7571/16695; d3 = 393/640\n d4 = -92097/339200; d5 = 187/2100; d6 = 1/40\n \n b10 = 0.2\n b20 = 0.075; b21 = 0.225\n b30 = 44/45; b31 = -56/15; b32 = 32/9\n b40 = 19372/6561; b41 = -25360/2187; b42 = 64448/6561\n b43 = -212/729\n b50 = 9017/3168; b51 =-355/33; b52 = 46732/5247\n b53 = 49/176; b54 = -5103/18656\n b60 = 35/384; b62 = 500/1113; b63 = 125/192;\n b64 = -2187/6784; b65 = 11/84\n\n X = []\n Y = []\n X.append(x)\n Y.append(y)\n stopper = 0 # Integration stopper(0 = off, 1 = on)\n k0 = h*F(x,y)\n\n for i in range(10000):\n k1 = h*F(x + a1*h, y + b10*k0)\n k2 = h*F(x + a2*h, y + b20*k0 + b21*k1)\n k3 = h*F(x + a3*h, y + b30*k0 + b31*k1 + b32*k2)\n k4 = h*F(x + a4*h, y + b40*k0 + b41*k1 + b42*k2 + b43*k3)\n k5 = h*F(x + a5*h, y + b50*k0 + b51*k1 + b52*k2 + b53*k3 \\\n + b54*k4)\n k6 = h*F(x + a6*h, y + b60*k0 + b62*k2 + b63*k3 + b64*k4 \\\n + b65*k5) \n \n dy = c0*k0 + c2*k2 + c3*k3 + c4*k4 + c5*k5\n E = (c0 - d0)*k0 + (c2 - d2)*k2 + (c3 - d3)*k3 \\\n + (c4 - d4)*k4 + (c5 - d5)*k5 - d6*k6 \n e = math.sqrt(np.sum(E**2)/len(y))\n hNext = 0.9*h*(tol/e)**0.2\n \n # Accept integration step if error e is within tolerance\n if e <= tol:\n y = y + dy\n x = x + h\n X.append(x)\n Y.append(y) \n if stopper == 1: break # Reached end of x-range\n if abs(hNext) > 
10.0*abs(h): hNext = 10.0*h\n \n # Check if next step is the last one; if so, adjust h\n if (h > 0.0) == ((x + hNext) >= xStop):\n hNext = xStop - x\n stopper = 1\n k0 = k6*hNext/h\n else:\n if abs(hNext) < 0.1*abs(h): hNext = 0.1*h\n k0 = k0*hNext/h\n \n h = hNext\n return np.array(X),np.array(Y)\n\n" ]
[ [ "numpy.sum", "numpy.array" ] ]
hdfkndkndknknknvklsbljsbsmb/ibis
[ "dbacd7a52ca062529fb7bf6dec51b98d7199d1dc" ]
[ "ibis/backends/pandas/client.py" ]
[ "\"\"\"The pandas client implementation.\"\"\"\nfrom functools import partial\n\nimport dateutil.parser\nimport numpy as np\nimport pandas as pd\nimport pytz\nimport toolz\nfrom pandas.api.types import CategoricalDtype, DatetimeTZDtype\n\nimport ibis.common.exceptions as com\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.schema as sch\nimport ibis.expr.types as ir\nfrom ibis.backends.base import Client, Database\n\nfrom .core import execute_and_reset\n\ninfer_pandas_dtype = pd.api.types.infer_dtype\n\n\n_ibis_dtypes = toolz.valmap(\n np.dtype,\n {\n dt.Boolean: np.bool_,\n dt.Null: np.object_,\n dt.Array: np.object_,\n dt.String: np.object_,\n dt.Binary: np.object_,\n dt.Date: 'datetime64[ns]',\n dt.Time: 'timedelta64[ns]',\n dt.Timestamp: 'datetime64[ns]',\n dt.Int8: np.int8,\n dt.Int16: np.int16,\n dt.Int32: np.int32,\n dt.Int64: np.int64,\n dt.UInt8: np.uint8,\n dt.UInt16: np.uint16,\n dt.UInt32: np.uint32,\n dt.UInt64: np.uint64,\n dt.Float32: np.float32,\n dt.Float64: np.float64,\n dt.Decimal: np.object_,\n dt.Struct: np.object_,\n },\n)\n\n\n_numpy_dtypes = toolz.keymap(\n np.dtype,\n {\n 'bool': dt.boolean,\n 'int8': dt.int8,\n 'int16': dt.int16,\n 'int32': dt.int32,\n 'int64': dt.int64,\n 'uint8': dt.uint8,\n 'uint16': dt.uint16,\n 'uint32': dt.uint32,\n 'uint64': dt.uint64,\n 'float16': dt.float16,\n 'float32': dt.float32,\n 'float64': dt.float64,\n 'double': dt.double,\n 'unicode': dt.string,\n 'str': dt.string,\n 'datetime64': dt.timestamp,\n 'datetime64[ns]': dt.timestamp,\n 'timedelta64': dt.interval,\n 'timedelta64[ns]': dt.Interval('ns'),\n },\n)\n\n\n_inferable_pandas_dtypes = {\n 'string': dt.string,\n 'bytes': dt.string,\n 'floating': dt.float64,\n 'integer': dt.int64,\n 'mixed-integer': dt.binary,\n 'mixed-integer-float': dt.float64,\n 'decimal': dt.float64,\n 'complex': dt.binary,\n 'categorical': dt.category,\n 'boolean': dt.boolean,\n 'datetime64': dt.timestamp,\n 'datetime': dt.timestamp,\n 'date': 
dt.date,\n 'timedelta64': dt.interval,\n 'timedelta': dt.interval,\n 'time': dt.time,\n 'period': dt.binary,\n 'mixed': dt.binary,\n 'empty': dt.binary,\n 'unicode': dt.string,\n}\n\n\[email protected](np.dtype)\ndef from_numpy_dtype(value):\n try:\n return _numpy_dtypes[value]\n except KeyError:\n raise TypeError(\n 'numpy dtype {!r} is not supported in the pandas backend'.format(\n value\n )\n )\n\n\[email protected](DatetimeTZDtype)\ndef from_pandas_tzdtype(value):\n return dt.Timestamp(timezone=str(value.tz))\n\n\[email protected](CategoricalDtype)\ndef from_pandas_categorical(value):\n return dt.Category()\n\n\[email protected](np.generic)\ndef infer_numpy_scalar(value):\n return dt.dtype(value.dtype)\n\n\ndef _infer_pandas_series_contents(s: pd.Series) -> dt.DataType:\n \"\"\"Infer the type of the **contents** of a pd.Series.\n\n No dispatch for this because there is no class representing \"the contents\n of a Series\". Instead, this is meant to be used internally, mainly by\n `infer_pandas_series`.\n\n Parameters\n ----------\n s : pd.Series\n The Series whose contents we want to know the type of\n\n Returns\n -------\n dtype : dt.DataType\n The dtype of the contents of the Series\n \"\"\"\n if s.dtype == np.object_:\n inferred_dtype = infer_pandas_dtype(s, skipna=True)\n if inferred_dtype == 'mixed':\n # We need to inspect an element to determine the Ibis dtype\n value = s.iloc[0]\n if isinstance(value, (np.ndarray, list, pd.Series)):\n # Defer to individual `infer` functions for these\n return dt.infer(value)\n else:\n return dt.dtype('binary')\n else:\n return _inferable_pandas_dtypes[inferred_dtype]\n else:\n return dt.dtype(s.dtype)\n\n\[email protected](pd.Series)\ndef infer_pandas_series(s):\n \"\"\"Infer the type of a pd.Series.\n\n Note that the returned datatype will be an array type, which corresponds\n to the fact that a Series is a collection of elements. 
Please use\n `_infer_pandas_series_contents` if you are interested in the datatype\n of the **contents** of the Series.\n \"\"\"\n return dt.Array(_infer_pandas_series_contents(s))\n\n\[email protected](pd.Timestamp)\ndef infer_pandas_timestamp(value):\n if value.tz is not None:\n return dt.Timestamp(timezone=str(value.tz))\n else:\n return dt.timestamp\n\n\[email protected](np.ndarray)\ndef infer_array(value):\n # In this function, by default we'll directly map the dtype of the\n # np.array to a corresponding Ibis dtype (see bottom)\n np_dtype = value.dtype\n\n # However, there are some special cases where we can't use the np.array's\n # dtype:\n if np_dtype.type == np.object_:\n # np.array dtype is `dtype('O')`, which is ambiguous.\n inferred_dtype = infer_pandas_dtype(value, skipna=True)\n return dt.Array(_inferable_pandas_dtypes[inferred_dtype])\n elif np_dtype.type == np.str_:\n # np.array dtype is `dtype('<U1')` (for np.arrays containing strings),\n # which is ambiguous.\n return dt.Array(dt.string)\n\n # The dtype of the np.array is not ambiguous, and can be used directly.\n return dt.Array(dt.dtype(np_dtype))\n\n\[email protected](pd.Series)\ndef schema_from_series(s):\n return sch.schema(tuple(s.iteritems()))\n\n\[email protected](pd.DataFrame)\ndef infer_pandas_schema(df, schema=None):\n schema = schema if schema is not None else {}\n\n pairs = []\n for column_name, pandas_dtype in df.dtypes.iteritems():\n if not isinstance(column_name, str):\n raise TypeError(\n 'Column names must be strings to use the pandas backend'\n )\n\n if column_name in schema:\n ibis_dtype = dt.dtype(schema[column_name])\n else:\n ibis_dtype = _infer_pandas_series_contents(df[column_name])\n\n pairs.append((column_name, ibis_dtype))\n\n return sch.schema(pairs)\n\n\ndef ibis_dtype_to_pandas(ibis_dtype):\n \"\"\"Convert ibis dtype to the pandas / numpy alternative\"\"\"\n assert isinstance(ibis_dtype, dt.DataType)\n\n if isinstance(ibis_dtype, dt.Timestamp) and 
ibis_dtype.timezone:\n return DatetimeTZDtype('ns', ibis_dtype.timezone)\n elif isinstance(ibis_dtype, dt.Interval):\n return np.dtype(f'timedelta64[{ibis_dtype.unit}]')\n elif isinstance(ibis_dtype, dt.Category):\n return CategoricalDtype()\n elif type(ibis_dtype) in _ibis_dtypes:\n return _ibis_dtypes[type(ibis_dtype)]\n else:\n return np.dtype(np.object_)\n\n\ndef ibis_schema_to_pandas(schema):\n return list(zip(schema.names, map(ibis_dtype_to_pandas, schema.types)))\n\n\[email protected](DatetimeTZDtype, dt.Timestamp, pd.Series)\ndef convert_datetimetz_to_timestamp(in_dtype, out_dtype, column):\n output_timezone = out_dtype.timezone\n if output_timezone is not None:\n return column.dt.tz_convert(output_timezone)\n return column.astype(out_dtype.to_pandas(), errors='ignore')\n\n\ndef convert_timezone(obj, timezone):\n \"\"\"Convert `obj` to the timezone `timezone`.\n\n Parameters\n ----------\n obj : datetime.date or datetime.datetime\n\n Returns\n -------\n type(obj)\n \"\"\"\n if timezone is None:\n return obj.replace(tzinfo=None)\n return pytz.timezone(timezone).localize(obj)\n\n\nPANDAS_STRING_TYPES = {'string', 'unicode', 'bytes'}\nPANDAS_DATE_TYPES = {'datetime', 'datetime64', 'date'}\n\n\[email protected](np.dtype, dt.Timestamp, pd.Series)\ndef convert_datetime64_to_timestamp(in_dtype, out_dtype, column):\n if in_dtype.type == np.datetime64:\n return column.astype(out_dtype.to_pandas(), errors='ignore')\n try:\n series = pd.to_datetime(column, utc=True)\n except pd.errors.OutOfBoundsDatetime:\n inferred_dtype = infer_pandas_dtype(column, skipna=True)\n if inferred_dtype in PANDAS_DATE_TYPES:\n # not great, but not really any other option\n return column.map(\n partial(convert_timezone, timezone=out_dtype.timezone)\n )\n if inferred_dtype not in PANDAS_STRING_TYPES:\n raise TypeError(\n (\n 'Conversion to timestamp not supported for Series of type '\n '{!r}'\n ).format(inferred_dtype)\n )\n return column.map(dateutil.parser.parse)\n else:\n utc_dtype = 
DatetimeTZDtype('ns', 'UTC')\n return series.astype(utc_dtype).dt.tz_convert(out_dtype.timezone)\n\n\[email protected](np.dtype, dt.Interval, pd.Series)\ndef convert_any_to_interval(_, out_dtype, column):\n return column.values.astype(out_dtype.to_pandas())\n\n\[email protected](np.dtype, dt.String, pd.Series)\ndef convert_any_to_string(_, out_dtype, column):\n result = column.astype(out_dtype.to_pandas(), errors='ignore')\n return result\n\n\[email protected](np.dtype, dt.Boolean, pd.Series)\ndef convert_boolean_to_series(in_dtype, out_dtype, column):\n # XXX: this is a workaround until #1595 can be addressed\n in_dtype_type = in_dtype.type\n out_dtype_type = out_dtype.to_pandas().type\n if in_dtype_type != np.object_ and in_dtype_type != out_dtype_type:\n return column.astype(out_dtype_type)\n return column\n\n\[email protected](object, dt.DataType, pd.Series)\ndef convert_any_to_any(_, out_dtype, column):\n return column.astype(out_dtype.to_pandas(), errors='ignore')\n\n\ndt.DataType.to_pandas = ibis_dtype_to_pandas # type: ignore\nsch.Schema.to_pandas = ibis_schema_to_pandas # type: ignore\n\n\nclass PandasTable(ops.DatabaseTable):\n pass\n\n\nclass PandasClient(Client):\n def __init__(self, backend, dictionary):\n self.backend = backend\n self.database_class = backend.database_class\n self.table_class = backend.table_class\n self.dictionary = dictionary\n\n def table(self, name, schema=None):\n df = self.dictionary[name]\n schema = sch.infer(df, schema=schema)\n return self.table_class(name, schema, self).to_expr()\n\n def execute(self, query, params=None, limit='default', **kwargs):\n if limit != 'default':\n raise ValueError(\n 'limit parameter to execute is not yet implemented in the '\n 'pandas backend'\n )\n\n if not isinstance(query, ir.Expr):\n raise TypeError(\n \"`query` has type {!r}, expected ibis.expr.types.Expr\".format(\n type(query).__name__\n )\n )\n return execute_and_reset(query, params=params, **kwargs)\n\n def compile(self, expr, *args, 
**kwargs):\n \"\"\"Compile `expr`.\n\n Notes\n -----\n For the pandas backend this is a no-op.\n\n \"\"\"\n return expr\n\n def database(self, name=None):\n \"\"\"Construct a database called `name`.\"\"\"\n return self.database_class(name, self)\n\n def load_data(self, table_name, obj, **kwargs):\n \"\"\"Load data from `obj` into `table_name`.\n\n Parameters\n ----------\n table_name : str\n obj : pandas.DataFrame\n\n \"\"\"\n # kwargs is a catch all for any options required by other backends.\n self.dictionary[table_name] = pd.DataFrame(obj)\n\n def create_table(self, table_name, obj=None, schema=None):\n \"\"\"Create a table.\"\"\"\n if obj is None and schema is None:\n raise com.IbisError('Must pass expr or schema')\n\n if obj is not None:\n df = pd.DataFrame(obj)\n else:\n dtypes = ibis_schema_to_pandas(schema)\n df = schema.apply_to(\n pd.DataFrame(columns=list(map(toolz.first, dtypes)))\n )\n\n self.dictionary[table_name] = df\n\n def get_schema(self, table_name, database=None):\n \"\"\"Return a Schema object for the indicated table and database.\n\n Parameters\n ----------\n table_name : str\n May be fully qualified\n database : str\n\n Returns\n -------\n ibis.expr.schema.Schema\n\n \"\"\"\n return sch.infer(self.dictionary[table_name])\n\n def exists_table(self, name):\n \"\"\"Determine if the indicated table or view exists.\n\n Parameters\n ----------\n name : str\n database : str\n\n Returns\n -------\n bool\n\n \"\"\"\n return bool(self.list_tables(like=name))\n\n\nclass PandasDatabase(Database):\n pass\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame", "pandas.api.types.CategoricalDtype", "pandas.api.types.DatetimeTZDtype", "numpy.dtype" ] ]
luis-armando-perez-rey/learning-group-structure
[ "e238308de73a29506d9281e1b55cdd2de2795ebb" ]
[ "added_modules/architectures/vgg.py" ]
[ "import torch.nn as nn\nimport torch\nimport numpy as np\nimport torch.nn.functional as F\n\n\ndef calculate_pad_same(image_size, kernel_size, stride):\n \"\"\"\n Calculates the padding to get the \"same\" size as in Tensorflow\n Only works for images were filter covers the complete image in the convolution\n \"\"\"\n print((image_size[0] - (kernel_size[0] - 1) - 1) % stride[0] == 0)\n print(\"Image size\", image_size)\n print(\"Kernel size\", kernel_size)\n print(\"Stride size\", stride)\n assert (image_size[0] - (kernel_size[0] - 1) - 1) % stride[\n 0] == 0, \"Image can't be convoluted on the height exactly\"\n assert (image_size[1] - (kernel_size[1] - 1) - 1) % stride[1] == 0, \"Image can't be convoluted on the width exactly\"\n\n pad = tuple(\n [(image_size[num] * (stride[num] - 1) - stride[num] + kernel_size[num]) // 2 for num in range(len(image_size))])\n return pad\n\n\nclass Encoder(nn.Module):\n\n def __init__(self,\n n_out=4,\n n_channels=3,\n image_size=(64, 64),\n conv_hid=64,\n conv_kernel=(3, 3),\n conv_stride=(1, 1),\n maxpool_kernel=(2, 2)):\n super().__init__()\n\n conv_pad = calculate_pad_same(image_size, conv_kernel, conv_stride)\n maxpool_pad = calculate_pad_same(image_size, maxpool_kernel, maxpool_kernel)\n self.maxpool_pad = [maxpool_pad[1], maxpool_pad[1], maxpool_pad[0], maxpool_pad[0]]\n self.conv1 = nn.Conv2d(n_channels, conv_hid, conv_kernel, stride=conv_stride, padding=conv_pad)\n self.maxpool1 = nn.MaxPool2d(maxpool_kernel, None)\n self.conv2 = nn.Conv2d(conv_hid, conv_hid, conv_kernel, stride=conv_stride, padding=conv_pad)\n self.maxpool2 = nn.MaxPool2d(maxpool_kernel, None)\n self.conv3 = nn.Conv2d(conv_hid, conv_hid, conv_kernel, stride=conv_stride, padding=conv_pad)\n self.maxpool3 = nn.MaxPool2d(maxpool_kernel, None)\n\n final_size = np.product((conv_hid, image_size[0] // (2 ** 3), image_size[1] // (2 ** 3)))\n self.fc1 = nn.Linear(final_size, conv_hid)\n self.fc2 = nn.Linear(conv_hid, n_out)\n\n def forward(self, x):\n x = 
x.unsqueeze(0)\n x = F.relu(self.conv1(x))\n\n x = self.maxpool1(x)\n x = F.relu(self.conv2(x))\n x = self.maxpool2(x)\n x = F.relu(self.conv3(x))\n x = self.maxpool3(x)\n x = torch.flatten(x, 1)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.normalize(x).squeeze()\n\n\nclass Decoder(nn.Module):\n\n def __init__(self, image_size=(64, 64),\n n_in=4,\n conv_hid=64,\n conv_kernel=(3, 3),\n conv_stride=(1, 1),\n n_channels=3\n ):\n super().__init__()\n\n self.convdim = (conv_hid, image_size[0] // (2 ** 3), image_size[1] // (2 ** 3))\n self.fc1 = nn.Linear(n_in, conv_hid)\n self.fc2 = nn.Linear(conv_hid, np.product(self.convdim))\n\n conv_pad = calculate_pad_same(image_size, conv_kernel, conv_stride)\n self.up1 = nn.Upsample(scale_factor=2)\n\n self.conv1 = nn.Conv2d(conv_hid, conv_hid, conv_kernel, stride=conv_stride, padding=conv_pad)\n self.conv2 = nn.Conv2d(conv_hid, conv_hid, conv_kernel, stride=conv_stride, padding=conv_pad)\n self.conv3 = nn.Conv2d(conv_hid, n_channels, conv_kernel, stride=conv_stride, padding=conv_pad)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = torch.reshape(x, (-1, self.convdim[0], self.convdim[1], self.convdim[2]))\n x = self.up1(x)\n x = F.relu(self.conv1(x))\n x = self.up1(x)\n x = F.relu(self.conv2(x))\n x = self.up1(x)\n x = self.conv3(x)\n\n return torch.sigmoid(x).squeeze(dim=0)\n" ]
[ [ "torch.nn.Linear", "numpy.product", "torch.nn.functional.normalize", "torch.sigmoid", "torch.nn.MaxPool2d", "torch.nn.Upsample", "torch.nn.Conv2d", "torch.flatten", "torch.reshape" ] ]
gmavros1/ndlib
[ "f5817eec0c8e7e86fb03aafe7be208fd4a0e0f6e" ]
[ "ndlib/models/epidemics/SEIRModel.py" ]
[ "from ..DiffusionModel import DiffusionModel\nimport numpy as np\nimport future\n\n__author__ = [\"Vincenzo Caproni\", \"Beatrice Caputo\", \"Ettore Puccetti\", \"Elisa Salatti\"]\n__license__ = \"BSD-2-Clause\"\n\n\nclass SEIRModel(DiffusionModel):\n\n def __init__(self, graph, seed=None):\n\n super(self.__class__, self).__init__(graph, seed)\n\n self.name = \"SEIR\"\n\n self.available_statuses = {\n \"Susceptible\": 0,\n \"Exposed\": 2,\n \"Infected\": 1,\n \"Removed\": 3\n }\n self.parameters = {\n \"model\": {\n \"alpha\": {\n \"descr\": \"Latent period (1/duration)\",\n \"range\": [0, 1],\n \"optional\": False},\n \"beta\": {\n \"descr\": \"Infection rate\",\n \"range\": [0, 1],\n \"optional\": False},\n \"gamma\": {\n \"descr\": \"Recovery rate\",\n \"range\": [0, 1],\n \"optional\": False\n },\n \"tp_rate\": {\n \"descr\": \"Whether if the infection rate depends on the number of infected neighbors\",\n \"range\": [0, 1],\n \"optional\": True,\n \"default\": 1\n }\n },\n \"nodes\": {},\n \"edges\": {},\n }\n\n def iteration(self, node_status=True):\n self.clean_initial_status(self.available_statuses.values())\n\n actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}\n\n if self.actual_iteration == 0:\n self.actual_iteration += 1\n delta, node_count, status_delta = self.status_delta(actual_status)\n if node_status:\n return {\"iteration\": 0, \"status\": actual_status.copy(),\n \"node_count\": node_count.copy(), \"status_delta\": status_delta.copy()}\n else:\n return {\"iteration\": 0, \"status\": {},\n \"node_count\": node_count.copy(), \"status_delta\": status_delta.copy()}\n\n for u in self.graph.nodes:\n\n u_status = self.status[u]\n eventp = np.random.random_sample()\n neighbors = self.graph.neighbors(u)\n if self.graph.directed:\n neighbors = self.graph.predecessors(u)\n\n if u_status == 0: # Susceptible\n\n infected_neighbors = [v for v in neighbors if self.status[v] == 1]\n triggered = 1 if len(infected_neighbors) > 
0 else 0\n\n if self.params['model']['tp_rate'] == 1:\n if eventp < 1 - (1 - self.params['model']['beta']) ** len(infected_neighbors):\n actual_status[u] = 2 # Exposed\n else:\n if eventp < self.params['model']['beta'] * triggered:\n actual_status[u] = 2 # Exposed\n\n elif u_status == 2:\n\n # apply prob. of infection, after (t - t_i) \n if eventp < self.params['model']['alpha']:\n actual_status[u] = 1 # Infected\n\n elif u_status == 1:\n if eventp < self.params['model']['gamma']:\n actual_status[u] = 3 # Removed\n\n delta, node_count, status_delta = self.status_delta(actual_status)\n self.status = actual_status\n self.actual_iteration += 1\n\n if node_status:\n return {\"iteration\": self.actual_iteration - 1, \"status\": delta.copy(),\n \"node_count\": node_count.copy(), \"status_delta\": status_delta.copy()}\n else:\n return {\"iteration\": self.actual_iteration - 1, \"status\": {},\n \"node_count\": node_count.copy(), \"status_delta\": status_delta.copy()}\n" ]
[ [ "numpy.random.random_sample" ] ]
tlienart/autogluon
[ "d02e37f41cd947dd1281bb1296cd12a8187ec441" ]
[ "autogluon/utils/tabular/ml/models/lgb/lgb_model.py" ]
[ "import gc\nimport logging\nimport os\nimport random\nimport re\nimport time\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame, Series\n\nfrom . import lgb_utils\nfrom .callbacks import early_stopping_custom\nfrom .hyperparameters.lgb_trial import lgb_trial\nfrom .hyperparameters.parameters import get_param_baseline\nfrom .hyperparameters.searchspaces import get_default_searchspace\nfrom .lgb_utils import construct_dataset\nfrom ..abstract.abstract_model import AbstractModel, fixedvals_from_searchspaces\nfrom ...constants import BINARY, MULTICLASS, REGRESSION\nfrom ....utils.savers import save_pkl\nfrom .....try_import import try_import_lightgbm\nfrom ......core import Int, Space\n\n\nwarnings.filterwarnings(\"ignore\", category=UserWarning, message=\"Starting from version\") # lightGBM brew libomp warning\nlogger = logging.getLogger(__name__)\n\n\n# TODO: Save dataset to binary and reload for HPO. This will avoid the memory spike overhead when training each model and instead it will only occur once upon saving the dataset.\nclass LGBModel(AbstractModel):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self._internal_feature_map = None\n\n def _set_default_params(self):\n default_params = get_param_baseline(problem_type=self.problem_type, num_classes=self.num_classes)\n for param, val in default_params.items():\n self._set_default_param_value(param, val)\n\n def _get_default_searchspace(self):\n return get_default_searchspace(problem_type=self.problem_type, num_classes=self.num_classes)\n\n # Use specialized LightGBM metric if available (fast), otherwise use custom func generator\n def get_eval_metric(self):\n eval_metric = lgb_utils.convert_ag_metric_to_lgbm(ag_metric_name=self.stopping_metric.name, problem_type=self.problem_type)\n if eval_metric is None:\n eval_metric = lgb_utils.func_generator(metric=self.stopping_metric, is_higher_better=True, needs_pred_proba=not self.stopping_metric_needs_y_pred, 
problem_type=self.problem_type)\n eval_metric_name = self.stopping_metric.name\n else:\n eval_metric_name = eval_metric\n return eval_metric, eval_metric_name\n\n def _fit(self, X_train=None, Y_train=None, X_test=None, Y_test=None, dataset_train=None, dataset_val=None, time_limit=None, **kwargs):\n start_time = time.time()\n params = self.params.copy()\n\n # TODO: kwargs can have num_cpu, num_gpu. Currently these are ignored.\n verbosity = kwargs.get('verbosity', 2)\n params = fixedvals_from_searchspaces(params)\n\n if verbosity <= 1:\n verbose_eval = False\n elif verbosity == 2:\n verbose_eval = 1000\n elif verbosity == 3:\n verbose_eval = 50\n else:\n verbose_eval = 1\n\n eval_metric, eval_metric_name = self.get_eval_metric()\n dataset_train, dataset_val = self.generate_datasets(X_train=X_train, Y_train=Y_train, params=params, X_test=X_test, Y_test=Y_test, dataset_train=dataset_train, dataset_val=dataset_val)\n gc.collect()\n\n num_boost_round = params.pop('num_boost_round', 1000)\n logger.log(15, f'Training Gradient Boosting Model for {num_boost_round} rounds...')\n logger.log(15, \"with the following hyperparameter settings:\")\n logger.log(15, params)\n\n num_rows_train = len(dataset_train.data)\n if 'min_data_in_leaf' in params:\n if params['min_data_in_leaf'] > num_rows_train: # TODO: may not be necessary\n params['min_data_in_leaf'] = max(1, int(num_rows_train / 5.0))\n\n # TODO: Better solution: Track trend to early stop when score is far worse than best score, or score is trending worse over time\n if (dataset_val is not None) and (dataset_train is not None):\n modifier = 1 if num_rows_train <= 10000 else 10000 / num_rows_train\n early_stopping_rounds = max(round(modifier * 150), 10)\n else:\n early_stopping_rounds = 150\n\n callbacks = []\n valid_names = ['train_set']\n valid_sets = [dataset_train]\n if dataset_val is not None:\n reporter = kwargs.get('reporter', None)\n train_loss_name = self._get_train_loss_name() if reporter is not None else None\n if 
train_loss_name is not None:\n if 'metric' not in params or params['metric'] == '':\n params['metric'] = train_loss_name\n elif train_loss_name not in params['metric']:\n params['metric'] = f'{params[\"metric\"]},{train_loss_name}'\n callbacks += [\n # Note: Don't use self.params_aux['max_memory_usage_ratio'] here as LightGBM handles memory per iteration optimally. # TODO: Consider using when ratio < 1.\n early_stopping_custom(early_stopping_rounds, metrics_to_use=[('valid_set', eval_metric_name)], max_diff=None, start_time=start_time, time_limit=time_limit,\n ignore_dart_warning=True, verbose=False, manual_stop_file=False, reporter=reporter, train_loss_name=train_loss_name),\n ]\n valid_names = ['valid_set'] + valid_names\n valid_sets = [dataset_val] + valid_sets\n\n seed_val = params.pop('seed_value', 0)\n train_params = {\n 'params': params,\n 'train_set': dataset_train,\n 'num_boost_round': num_boost_round,\n 'valid_sets': valid_sets,\n 'valid_names': valid_names,\n 'callbacks': callbacks,\n 'verbose_eval': verbose_eval,\n }\n if not isinstance(eval_metric, str):\n train_params['feval'] = eval_metric\n else:\n if 'metric' not in train_params['params'] or train_params['params']['metric'] == '':\n train_params['params']['metric'] = eval_metric\n elif eval_metric not in train_params['params']['metric']:\n train_params['params']['metric'] = f'{train_params[\"params\"][\"metric\"]},{eval_metric}'\n if seed_val is not None:\n train_params['params']['seed'] = seed_val\n random.seed(seed_val)\n np.random.seed(seed_val)\n\n # Train LightGBM model:\n try_import_lightgbm()\n import lightgbm as lgb\n self.model = lgb.train(**train_params)\n self.params_trained['num_boost_round'] = self.model.best_iteration\n\n def _predict_proba(self, X, preprocess=True):\n if preprocess:\n X = self.preprocess(X)\n if self.problem_type == REGRESSION:\n return self.model.predict(X)\n\n y_pred_proba = self.model.predict(X)\n if self.problem_type == BINARY:\n if len(y_pred_proba.shape) == 
1:\n return y_pred_proba\n elif y_pred_proba.shape[1] > 1:\n return y_pred_proba[:, 1]\n else:\n return y_pred_proba\n elif self.problem_type == MULTICLASS:\n return y_pred_proba\n else:\n if len(y_pred_proba.shape) == 1:\n return y_pred_proba\n elif y_pred_proba.shape[1] > 2: # Should this ever happen?\n return y_pred_proba\n else: # Should this ever happen?\n return y_pred_proba[:, 1]\n\n def preprocess(self, X, is_train=False):\n X = super().preprocess(X=X)\n\n if is_train:\n for column in X.columns:\n new_column = re.sub(r'[\",:{}[\\]]', '', column)\n if new_column != column:\n self._internal_feature_map = {feature: i for i, feature in enumerate(list(X.columns))}\n break\n\n if self._internal_feature_map:\n new_columns = [self._internal_feature_map[column] for column in list(X.columns)]\n X_new = X.copy(deep=False)\n X_new.columns = new_columns\n return X_new\n else:\n return X\n\n def generate_datasets(self, X_train: DataFrame, Y_train: Series, params, X_test=None, Y_test=None, dataset_train=None, dataset_val=None, save=False):\n lgb_dataset_params_keys = ['objective', 'two_round', 'num_threads', 'num_classes', 'verbose'] # Keys that are specific to lightGBM Dataset object construction.\n data_params = {key: params[key] for key in lgb_dataset_params_keys if key in params}.copy()\n\n W_train = None # TODO: Add weight support\n W_test = None # TODO: Add weight support\n if X_train is not None:\n X_train = self.preprocess(X_train, is_train=True)\n if X_test is not None:\n X_test = self.preprocess(X_test)\n # TODO: Try creating multiple Datasets for subsets of features, then combining with Dataset.add_features_from(), this might avoid memory spike\n if not dataset_train:\n # X_train, W_train = self.convert_to_weight(X=X_train)\n dataset_train = construct_dataset(x=X_train, y=Y_train, location=f'{self.path}datasets{os.path.sep}train', params=data_params, save=save, weight=W_train)\n # dataset_train = construct_dataset_lowest_memory(X=X_train, y=Y_train, 
location=self.path + 'datasets/train', params=data_params)\n if (not dataset_val) and (X_test is not None) and (Y_test is not None):\n # X_test, W_test = self.convert_to_weight(X=X_test)\n dataset_val = construct_dataset(x=X_test, y=Y_test, location=f'{self.path}datasets{os.path.sep}val', reference=dataset_train, params=data_params, save=save, weight=W_test)\n # dataset_val = construct_dataset_lowest_memory(X=X_test, y=Y_test, location=self.path + 'datasets/val', reference=dataset_train, params=data_params)\n return dataset_train, dataset_val\n\n def debug_features_to_use(self, X_test_in):\n feature_splits = self.model.feature_importance()\n total_splits = feature_splits.sum()\n feature_names = list(X_test_in.columns.values)\n feature_count = len(feature_names)\n feature_importances = pd.DataFrame(data=feature_names, columns=['feature'])\n feature_importances['splits'] = feature_splits\n feature_importances_unused = feature_importances[feature_importances['splits'] == 0]\n feature_importances_used = feature_importances[feature_importances['splits'] >= (total_splits / feature_count)]\n logger.debug(feature_importances_unused)\n logger.debug(feature_importances_used)\n logger.debug(f'feature_importances_unused: {len(feature_importances_unused)}')\n logger.debug(f'feature_importances_used: {len(feature_importances_used)}')\n features_to_use = list(feature_importances_used['feature'].values)\n logger.debug(str(features_to_use))\n return features_to_use\n\n # FIXME: Requires major refactor + refactor lgb_trial.py\n # model names are not aligned with what is communicated to trainer!\n # FIXME: Likely tabular_nn_trial.py and abstract trial also need to be refactored heavily + hyperparameter functions\n def hyperparameter_tune(self, X_train, X_test, Y_train, Y_test, scheduler_options, **kwargs):\n time_start = time.time()\n logger.log(15, \"Beginning hyperparameter tuning for Gradient Boosting Model...\")\n self._set_default_searchspace()\n params_copy = 
self.params.copy()\n if isinstance(params_copy['min_data_in_leaf'], Int):\n upper_minleaf = params_copy['min_data_in_leaf'].upper\n if upper_minleaf > X_train.shape[0]: # TODO: this min_data_in_leaf adjustment based on sample size may not be necessary\n upper_minleaf = max(1, int(X_train.shape[0] / 5.0))\n lower_minleaf = params_copy['min_data_in_leaf'].lower\n if lower_minleaf > upper_minleaf:\n lower_minleaf = max(1, int(upper_minleaf / 3.0))\n params_copy['min_data_in_leaf'] = Int(lower=lower_minleaf, upper=upper_minleaf)\n\n directory = self.path # also create model directory if it doesn't exist\n # TODO: This will break on S3! Use tabular/utils/savers for datasets, add new function\n os.makedirs(directory, exist_ok=True)\n scheduler_func, scheduler_options = scheduler_options # Unpack tuple\n if scheduler_func is None or scheduler_options is None:\n raise ValueError(\"scheduler_func and scheduler_options cannot be None for hyperparameter tuning\")\n num_threads = scheduler_options['resource'].get('num_cpus', -1)\n params_copy['num_threads'] = num_threads\n # num_gpus = scheduler_options['resource']['num_gpus'] # TODO: unused\n\n dataset_train, dataset_val = self.generate_datasets(X_train=X_train, Y_train=Y_train, params=params_copy, X_test=X_test, Y_test=Y_test)\n dataset_train_filename = \"dataset_train.bin\"\n train_file = self.path + dataset_train_filename\n if os.path.exists(train_file): # clean up old files first\n os.remove(train_file)\n dataset_train.save_binary(train_file)\n dataset_val_filename = \"dataset_val.bin\" # names without directory info\n val_file = self.path + dataset_val_filename\n if os.path.exists(val_file): # clean up old files first\n os.remove(val_file)\n dataset_val.save_binary(val_file)\n dataset_val_pkl_filename = 'dataset_val.pkl'\n val_pkl_path = directory + dataset_val_pkl_filename\n save_pkl.save(path=val_pkl_path, object=(X_test, Y_test))\n\n if not np.any([isinstance(params_copy[hyperparam], Space) for hyperparam in 
params_copy]):\n logger.warning(\"Attempting to do hyperparameter optimization without any search space (all hyperparameters are already fixed values)\")\n else:\n logger.log(15, \"Hyperparameter search space for Gradient Boosting Model: \")\n for hyperparam in params_copy:\n if isinstance(params_copy[hyperparam], Space):\n logger.log(15, f'{hyperparam}: {params_copy[hyperparam]}')\n\n util_args = dict(\n dataset_train_filename=dataset_train_filename,\n dataset_val_filename=dataset_val_filename,\n dataset_val_pkl_filename=dataset_val_pkl_filename,\n directory=directory,\n model=self,\n time_start=time_start,\n time_limit=scheduler_options['time_out']\n )\n lgb_trial.register_args(util_args=util_args, **params_copy)\n scheduler = scheduler_func(lgb_trial, **scheduler_options)\n if ('dist_ip_addrs' in scheduler_options) and (len(scheduler_options['dist_ip_addrs']) > 0):\n # This is multi-machine setting, so need to copy dataset to workers:\n logger.log(15, \"Uploading data to remote workers...\")\n scheduler.upload_files([train_file, val_file, val_pkl_path]) # TODO: currently does not work.\n directory = self.path # TODO: need to change to path to working directory used on every remote machine\n lgb_trial.update(directory=directory)\n logger.log(15, \"uploaded\")\n\n scheduler.run()\n scheduler.join_jobs()\n\n return self._get_hpo_results(scheduler=scheduler, scheduler_options=scheduler_options, time_start=time_start)\n\n # TODO: Consider adding _internal_feature_map functionality to abstract_model\n def compute_feature_importance(self, **kwargs):\n permutation_importance = super().compute_feature_importance(**kwargs)\n if self._internal_feature_map is not None:\n inverse_internal_feature_map = {i: feature for feature, i in self._internal_feature_map.items()}\n permutation_importance = {inverse_internal_feature_map[i]: importance for i, importance in permutation_importance.items()}\n return permutation_importance\n\n def _get_train_loss_name(self):\n if 
self.problem_type == BINARY:\n train_loss_name = 'binary_logloss'\n elif self.problem_type == MULTICLASS:\n train_loss_name = 'multi_logloss'\n elif self.problem_type == REGRESSION:\n train_loss_name = 'l2'\n else:\n raise ValueError(f\"unknown problem_type for LGBModel: {self.problem_type}\")\n return train_loss_name\n\n def get_model_feature_importance(self, use_original_feature_names=False):\n feature_names = self.model.feature_name()\n importances = self.model.feature_importance()\n importance_dict = {feature_name: importance for (feature_name, importance) in zip(feature_names, importances)}\n if use_original_feature_names and (self._internal_feature_map is not None):\n inverse_internal_feature_map = {i: feature for feature, i in self._internal_feature_map.items()}\n importance_dict = {inverse_internal_feature_map[i]: importance for i, importance in importance_dict.items()}\n return importance_dict\n" ]
[ [ "numpy.random.seed", "pandas.DataFrame" ] ]
gioele8/AI-soccer-highlights
[ "756b6b6f332cedbfbc5a3540d0c6d7aa50219e51" ]
[ "utilities/preprocessing/video_resize_preprocessor.py" ]
[ "# import the necessary packages\nimport cv2\nfrom pathlib import Path\nimport os\nimport numpy as np\nfrom skimage.transform import resize\n\nclass VideoResizePreprocessor:\n def __init__(self, width, height, inter=cv2.INTER_AREA, verbose=500):\n # store the target image width, height, and interpolation\n # method used when resizing\n self.width = width\n self.height = height\n self.inter = inter\n self.verbose = verbose\n\n def preprocess_and_save(self, vid_path, *_):\n # resize the video to a fixed size, ignoring the aspect\n # ration\n video = cv2.VideoCapture(vid_path)\n video_name = Path(vid_path.split(os.path.sep)[-1])\n vid_len = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n parent = Path(vid_path).parent.parent\n out_dir = parent / (vid_path.split(os.path.sep)[-2] + '_' + str(self.width) + 'x' + str(self.height))\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n\n fps = video.get(cv2.CAP_PROP_FPS)\n out_vid_path = out_dir / video_name\n\n # verify if the resized video alredy exists, if it does, then proceed to the next video\n if os.path.isfile(out_vid_path):\n out_vid = cv2.VideoCapture(str(out_vid_path))\n if int(out_vid.get(cv2.CAP_PROP_FRAME_COUNT)) == vid_len:\n return None, None, out_vid_path\n # return np.load(out_vid_path, mmap_mode='r'), None, out_vid_path\n else:\n out_vid.release()\n\n fourcc = cv2.VideoWriter_fourcc(*'MP4V')\n out_vid = cv2.VideoWriter(str(out_vid_path), fourcc, fps, (self.width, self.height))\n\n i = 0\n\n while video.isOpened():\n # show an update every 'verbose' images\n if self.verbose > 0 and i > 0 and (i + 1) % self.verbose == 0:\n print(\"[INFO] frame resizing processed {}/{}\".format(i + 1, vid_len))\n\n i += 1\n ret, frame = video.read()\n if not ret:\n break\n\n frame = cv2.resize(frame, (self.width, self.height),\n interpolation=self.inter)\n out_vid.write(frame)\n\n out_vid.release()\n\n return None, None, out_vid_path\n\n def preprocess_and_save_npy(self, vid_path, *_):\n video_name = 
Path(vid_path.split(os.path.sep)[-1])\n parent = Path(vid_path).parent.parent\n out_dir = parent / (vid_path.split(os.path.sep)[-2] + '_' + str(self.width) + 'x' + str(self.height))\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n out_vid_path = out_dir / (str(video_name).split('.')[0] + '.npy')\n\n video = cv2.VideoCapture(str(vid_path))\n vid_len = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n\n # verify if the resized video alredy exists, if it does, then proceed to the next video\n if os.path.isfile(out_vid_path):\n out_vid = np.load(out_vid_path, mmap_mode='r')\n if out_vid.shape[0] == vid_len:\n return None, None, out_vid_path\n # return np.load(out_vid_path, mmap_mode='r'), None, out_vid_path\n frames = []\n for i in range(100):\n ret, frame = video.read()\n frames.append(cv2.resize(frame, (self.width, self.height), interpolation=self.inter))\n\n video.release()\n np.save(out_vid_path, np.array(frames))\n return None, None, out_vid_path\n\n def preprocess(self, vid_path, video, *_):\n frames = []\n frames = [cv2.resize(frame, (self.width, self.height), interpolation=self.inter) for frame in video]\n frames = np.array(frames)\n return frames, None, vid_path\n" ]
[ [ "numpy.array", "numpy.load" ] ]
damo-cv/MotionRGBD
[ "d9c4308e4308192e186cab6fde6d8d4ba3d655ba" ]
[ "lib/datasets/Jester.py" ]
[ "'''\nCopyright (C) 2010-2021 Alibaba Group Holding Limited.\n'''\n\nimport torch\nfrom .base import Datasets\nfrom torchvision import transforms, set_image_backend\nimport random, os\nfrom PIL import Image\nimport numpy as np\nimport logging\nimport accimage\nset_image_backend('accimage')\nnp.random.seed(123)\n\nclass JesterData(Datasets):\n def __init__(self, args, ground_truth, modality, phase='train'):\n super(JesterData, self).__init__(args, ground_truth, modality, phase)\n\n def LoadKeypoints(self):\n if self.phase == 'train':\n kpt_file = os.path.join(self.dataset_root, self.args.splits, 'train_kp.data')\n else:\n kpt_file = os.path.join(self.dataset_root, self.args.splits, 'valid_kp.data')\n with open(kpt_file, 'r') as f:\n kpt_data = [(lambda arr: (os.path.join(self.dataset_root, self.typ, self.phase, arr[0]), list(map(lambda x: int(float(x)), arr[1:]))))(l[:-1].split()) for l in f.readlines()]\n kpt_data = dict(kpt_data)\n\n for k, v in kpt_data.items():\n pose = v[:18*2]\n r_hand = v[18*2: 18*2+21*2]\n l_hand = v[18*2+21*2: 18*2+21*2+21*2]\n kpt_data[k] = {'people': [{'pose_keypoints_2d': pose, 'hand_right_keypoints_2d': r_hand, 'hand_left_keypoints_2d': l_hand}]}\n\n logging.info('Load Keypoints files Done, Total: {}'.format(len(kpt_data)))\n return kpt_data\n def get_path(self, imgs_path, a):\n return os.path.join(imgs_path, \"%05d.jpg\" % int(a + 1))\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: (image, target) where target is class_index of the target class.\n \"\"\"\n sl = self.get_sl(self.inputs[index][1])\n self.data_path = os.path.join(self.dataset_root, self.inputs[index][0])\n # self.clip = self.image_propose(self.data_path, sl)\n self.clip, skgmaparr = self.image_propose(self.data_path, sl)\n\n return self.clip.permute(0, 3, 1, 2), skgmaparr, self.inputs[index][2], self.data_path\n\n def __len__(self):\n return len(self.inputs)\n" ]
[ [ "numpy.random.seed" ] ]
vfdev-5/gin-config
[ "01875b83cb678ede74ceead7e455a62ca089006e" ]
[ "tests/torch/external_configurables_test.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Gin-Config Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding=utf-8\n# Copyright 2019 The Gin-Config Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom absl.testing import absltest\n\nfrom gin import config\nfrom gin.torch import external_configurables # pylint: disable=unused-import\n\nimport torch\n\n\[email protected]\ndef fake_train_model(optimizer, scheduler=None):\n opt = optimizer([torch.nn.Parameter(torch.rand(10))])\n sch = None\n if scheduler:\n sch = scheduler(opt)\n return opt, sch\n\n\[email protected]\ndef configurable(**kwargs):\n return kwargs\n\n\nclass PyTorchConfigTest(absltest.TestCase):\n\n def assertAlmostEqualList(self, xs, ys):\n for i, (x, y) in enumerate(zip(xs, ys)):\n print(i)\n self.assertAlmostEqual(x, y)\n\n def tearDown(self):\n config.clear_config()\n super(PyTorchConfigTest, self).tearDown()\n\n def testConfigureOptimizerAndLearningRate(self):\n config_str = 
\"\"\"\n fake_train_model.optimizer = @Adam\n torch.optim.Adam.lr = 0.001\n torch.optim.Adam.betas = (0.8, 0.888)\n fake_train_model.scheduler = @StepLR\n StepLR.step_size = 10\n \"\"\"\n config.parse_config(config_str)\n\n opt, sch = fake_train_model() # pylint: disable=no-value-for-parameter\n\n self.assertIsInstance(opt, torch.optim.Adam)\n self.assertAlmostEqual(opt.param_groups[0]['betas'][0], 0.8)\n self.assertAlmostEqual(opt.param_groups[0]['betas'][1], 0.888)\n self.assertAlmostEqual(opt.defaults['betas'][0], 0.8)\n self.assertAlmostEqual(opt.defaults['betas'][1], 0.888)\n self.assertAlmostEqual(sch.step_size, 10)\n\n lrs = []\n for _ in range(15):\n lrs.append(opt.param_groups[0]['lr'])\n opt.step()\n sch.step()\n\n # Divide lr in tenth epoch by 10\n target_lrs = [0.001] * 10 + [0.0001] * 5\n\n self.assertAlmostEqualList(lrs, target_lrs)\n\n def testOptimizersWithDefaults(self):\n optimizers = [\n torch.optim.Adadelta,\n torch.optim.Adagrad,\n torch.optim.Adam,\n torch.optim.SparseAdam,\n torch.optim.Adamax,\n torch.optim.ASGD,\n torch.optim.LBFGS,\n torch.optim.RMSprop,\n torch.optim.Rprop,\n torch.optim.SGD,\n ]\n for optimizer in optimizers:\n config.clear_config()\n config_str = \"\"\"\n fake_train_model.optimizer = @{optimizer}\n {optimizer}.lr = 0.001\n \"\"\"\n config.parse_config(config_str.format(optimizer=optimizer.__name__))\n configed_optimizer, _ = fake_train_model(config.REQUIRED)\n self.assertIsInstance(configed_optimizer, optimizer)\n\n def testDtypes(self):\n # Spot check a few.\n config_str = \"\"\"\n # Test without torch prefix, but using the\n # prefix is strongly recommended!\n configurable.float32 = %float32\n # Test with torch prefix.\n configurable.int8 = %torch.int8\n configurable.float16 = %torch.float16\n \"\"\"\n config.parse_config(config_str)\n\n vals = configurable()\n self.assertIs(vals['float32'], torch.float32)\n self.assertIs(vals['int8'], torch.int8)\n self.assertIs(vals['float16'], torch.float16)\n\n\nif __name__ == 
'__main__':\n absltest.main()\n" ]
[ [ "torch.rand" ] ]
reveriel/cuda_scheduling_examiner_mirror
[ "16d2404c0dc8d72f7a13e4a167d3db4c86128a26" ]
[ "scripts/view_times_cdf.py" ]
[ "import argparse\nimport glob\nimport itertools\nimport json\nimport matplotlib.pyplot as plot\nimport numpy\nimport re\nimport sys\n\ndef convert_values_to_cdf(values):\n \"\"\"Takes a 1-D list of values and converts it to a CDF representation. The\n CDF consists of a vector of times and a vector of percentages of 100.\"\"\"\n if len(values) == 0:\n return [[], []]\n values.sort()\n total_size = float(len(values))\n current_min = values[0]\n count = 0.0\n data_list = [values[0]]\n ratio_list = [0.0]\n for v in values:\n count += 1.0\n if v > current_min:\n data_list.append(v)\n ratio_list.append((count / total_size) * 100.0)\n current_min = v\n data_list.append(values[-1])\n ratio_list.append(100)\n # Convert seconds to milliseconds\n for i in range(len(data_list)):\n data_list[i] *= 1000.0\n return [data_list, ratio_list]\n\ndef get_benchmark_cdf(benchmark, times_key):\n \"\"\"Takes a parsed benchmark result JSON file and returns a CDF (in seconds\n and percentages) of the CPU (total) times for the benchmark. 
The times_key\n argument can be used to specify which range of times (in the times array)\n should be used to calculate the durations to include in the CDF.\"\"\"\n raw_values = []\n for t in benchmark[\"times\"]:\n if not times_key in t:\n continue\n times = t[times_key]\n for i in range(len(times) / 2):\n start_index = i * 2\n end_index = i * 2 + 1\n raw_values.append(times[end_index] - times[start_index])\n return convert_values_to_cdf(raw_values)\n\ndef nice_sort_key(label):\n \"\"\"If a label contains numbers, this will prevent sorting them\n lexicographically.\"\"\"\n def tryint(s):\n try:\n return int(s)\n except:\n return s\n return [tryint(c) for c in re.split(r'([0-9]+)', label)]\n\ndef benchmark_sort_key(benchmark):\n \"\"\"Returns the key that may be used to sort benchmarks by label.\"\"\"\n if not \"label\" in benchmark:\n return \"\"\n return nice_sort_key(benchmark[\"label\"])\n\nall_styles = None\ndef get_line_styles():\n \"\"\"Returns a list of line style possibilities, that includes more options\n than matplotlib's default set that includes only a few solid colors.\"\"\"\n global all_styles\n if all_styles is not None:\n return all_styles\n color_options = [\n \"blue\",\n \"green\",\n \"red\",\n \"cyan\",\n \"magenta\",\n \"y\",\n \"black\"\n ]\n style_options = [\n \"-\",\n \"--\",\n \"-.\",\n \":\"\n ]\n marker_options = [\n None,\n \"o\",\n \"v\",\n \"s\",\n \"*\",\n \"+\",\n \"D\"\n ]\n # Build a combined list containing every style combination.\n all_styles = []\n for m in marker_options:\n for s in style_options:\n for c in color_options:\n to_add = {}\n if m is not None:\n to_add[\"marker\"] = m\n to_add[\"markevery\"] = 0.1\n to_add[\"ls\"] = s\n to_add[\"c\"] = c\n all_styles.append(to_add)\n return all_styles\n\ndef add_plot_padding(axes):\n \"\"\"Takes matplotlib axes, and adds some padding so that lines close to\n edges aren't obscured by tickmarks or the plot border.\"\"\"\n y_limits = axes.get_ybound()\n y_range = y_limits[1] - 
y_limits[0]\n y_pad = y_range * 0.05\n x_limits = axes.get_xbound()\n x_range = x_limits[1] - x_limits[0]\n x_pad = x_range * 0.05\n axes.set_ylim(y_limits[0] - y_pad, y_limits[1] + y_pad)\n axes.set_xlim(x_limits[0] - x_pad, x_limits[1] + x_pad)\n axes.xaxis.set_ticks(numpy.arange(x_limits[0], x_limits[1] + x_pad,\n x_range / 5.0))\n axes.yaxis.set_ticks(numpy.arange(y_limits[0], y_limits[1] + y_pad,\n y_range / 5.0))\n\ndef plot_scenario(benchmarks, name, times_key):\n \"\"\"Takes a list of parsed benchmark results and a scenario name and\n generates a CDF plot of CPU times for the scenario. See get_benchmark_cdf\n for an explanation of the times_key argument.\"\"\"\n benchmarks = sorted(benchmarks, key = benchmark_sort_key)\n style_cycler = itertools.cycle(get_line_styles())\n cdfs = []\n labels = []\n c = 0\n for b in benchmarks:\n c += 1\n label = \"%d: %s\" % (c, b[\"benchmark_name\"])\n if \"label\" in b:\n label = b[\"label\"]\n labels.append(label)\n cdf_data = get_benchmark_cdf(b, times_key)\n cdfs.append(cdf_data)\n figure = plot.figure()\n figure.suptitle(name)\n axes = figure.add_subplot(1, 1, 1)\n # Make the axes track data exactly, we'll manually add padding later.\n axes.autoscale(enable=True, axis='both', tight=True)\n for i in range(len(cdfs)):\n axes.plot(cdfs[i][0], cdfs[i][1], label=labels[i], lw=3,\n **(style_cycler.next()))\n add_plot_padding(axes)\n axes.set_xlabel(\"Time (milliseconds)\")\n axes.set_ylabel(\"% <= X\")\n legend = plot.legend()\n legend.draggable()\n return figure\n\ndef show_plots(filenames, times_key=\"block_times\"):\n \"\"\"Takes a list of filenames, and generates one plot per scenario found in\n the files. 
See get_benchmark_cdf for an explanation of the times_key\n argument.\"\"\"\n parsed_files = []\n for name in filenames:\n with open(name) as f:\n parsed_files.append(json.loads(f.read()))\n # Group the files by scenario\n scenarios = {}\n for benchmark in parsed_files:\n scenario = benchmark[\"scenario_name\"]\n if not scenario in scenarios:\n scenarios[scenario] = []\n scenarios[scenario].append(benchmark)\n figures = []\n for scenario in scenarios:\n figures.append(plot_scenario(scenarios[scenario], scenario, times_key))\n plot.show()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--directory\",\n help=\"Directory containing result JSON files.\", default='./results')\n parser.add_argument(\"-k\", \"--times_key\",\n help=\"JSON key name for the time property to be plot.\", default=\"block_times\")\n args = parser.parse_args()\n filenames = glob.glob(args.directory + \"/*.json\")\n show_plots(filenames, args.times_key)\n" ]
[ [ "matplotlib.pyplot.show", "numpy.arange", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure" ] ]
cswangjiawei/ChineseNER
[ "183f2a2dd2c552f2c66e6d7be78c3fbc479b9317" ]
[ "ChineseNER/model.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nimport numpy as np\nfrom .crf import CRF\nimport argparse\nfrom .utils import WordVocabulary, LabelVocabulary, get_mask, write_dict\nimport os\n\n\nclass NamedEntityRecog(nn.Module):\n def __init__(self, word_vocab, label_vocab, word_embed_dim, word_hidden_dim, feature_extractor, tag_num, dropout,\n pretrain_embed=None, use_crf=False, use_gpu=False):\n super(NamedEntityRecog, self).__init__()\n self.use_crf = use_crf\n self.drop = nn.Dropout(dropout)\n self.input_dim = word_embed_dim\n self.feature_extractor = feature_extractor\n self.word_vocab = word_vocab\n self.label_vocab = label_vocab\n\n self.embeds = nn.Embedding(word_vocab.size(), word_embed_dim, padding_idx=0)\n if pretrain_embed is not None:\n self.embeds.weight.data.copy_(torch.from_numpy(pretrain_embed))\n else:\n self.embeds.weight.data.copy_(torch.from_numpy(self.random_embedding(word_vocab.size(), word_embed_dim)))\n\n if feature_extractor == 'lstm':\n self.lstm = nn.LSTM(self.input_dim, word_hidden_dim, batch_first=True, bidirectional=True)\n else:\n self.word2cnn = nn.Linear(self.input_dim, word_hidden_dim * 2)\n self.cnn_list = list()\n for _ in range(4):\n self.cnn_list.append(nn.Conv1d(word_hidden_dim * 2, word_hidden_dim * 2, kernel_size=3, padding=1))\n self.cnn_list.append(nn.ReLU())\n self.cnn_list.append(nn.Dropout(dropout))\n self.cnn_list.append(nn.BatchNorm1d(word_hidden_dim * 2))\n self.cnn = nn.Sequential(*self.cnn_list)\n\n if self.use_crf:\n self.hidden2tag = nn.Linear(word_hidden_dim * 2, tag_num + 2)\n self.crf = CRF(tag_num, use_gpu)\n else:\n self.hidden2tag = nn.Linear(word_hidden_dim * 2, tag_num)\n\n def random_embedding(self, vocab_size, embedding_dim):\n pretrain_emb = np.empty([vocab_size, embedding_dim])\n scale = np.sqrt(3.0 / embedding_dim)\n for index in range(1, vocab_size):\n pretrain_emb[index, :] = np.random.uniform(-scale, scale, [1, embedding_dim])\n 
pretrain_emb[0, :] = np.zeros((1, embedding_dim))\n return pretrain_emb\n\n def neg_log_likelihood_loss(self, word_inputs, word_seq_lengths, batch_label, mask):\n batch_size = word_inputs.size(0)\n seq_len = word_inputs.size(1)\n word_embeding = self.embeds(word_inputs)\n word_list = [word_embeding]\n word_embeding = torch.cat(word_list, 2)\n word_represents = self.drop(word_embeding)\n if self.feature_extractor == 'lstm':\n packed_words = pack_padded_sequence(word_represents, word_seq_lengths, True)\n hidden = None\n lstm_out, hidden = self.lstm(packed_words, hidden)\n lstm_out, _ = pad_packed_sequence(lstm_out)\n lstm_out = lstm_out.transpose(0, 1)\n feature_out = self.drop(lstm_out)\n else:\n batch_size = word_inputs.size(0)\n word_in = torch.tanh(self.word2cnn(word_represents)).transpose(2, 1).contiguous()\n feature_out = self.cnn(word_in).transpose(1, 2).contiguous()\n\n feature_out = self.hidden2tag(feature_out)\n\n if self.use_crf:\n total_loss = self.crf.neg_log_likelihood_loss(feature_out, mask, batch_label)\n else:\n loss_function = nn.CrossEntropyLoss(ignore_index=0, reduction='sum')\n feature_out = feature_out.contiguous().view(batch_size * seq_len, -1)\n total_loss = loss_function(feature_out, batch_label.contiguous().view(batch_size * seq_len))\n return total_loss\n\n def forward(self, word_inputs, word_seq_lengths, mask):\n batch_size = word_inputs.size(0)\n seq_len = word_inputs.size(1)\n word_embeding = self.embeds(word_inputs)\n word_list = [word_embeding]\n word_embeding = torch.cat(word_list, 2)\n word_represents = self.drop(word_embeding)\n if self.feature_extractor == 'lstm':\n packed_words = pack_padded_sequence(word_represents, word_seq_lengths, True)\n hidden = None\n lstm_out, hidden = self.lstm(packed_words, hidden)\n lstm_out, _ = pad_packed_sequence(lstm_out)\n lstm_out = lstm_out.transpose(0, 1)\n feature_out = self.drop(lstm_out)\n else:\n batch_size = word_inputs.size(0)\n word_in = 
torch.tanh(self.word2cnn(word_represents)).transpose(2, 1).contiguous()\n feature_out = self.cnn(word_in).transpose(1, 2).contiguous()\n\n feature_out = self.hidden2tag(feature_out)\n\n if self.use_crf:\n scores, tag_seq = self.crf._viterbi_decode(feature_out, mask)\n else:\n feature_out = feature_out.contiguous().view(batch_size * seq_len, -1)\n _, tag_seq = torch.max(feature_out, 1)\n tag_seq = tag_seq.view(batch_size, seq_len)\n tag_seq = mask.long() * tag_seq\n return tag_seq\n\n def get_entity_from_sent(self, text):\n self.eval()\n text = list(text)\n text_id = list(map(self.word_vocab.word_to_id, text))\n text_tensor = torch.tensor(text_id).long()\n text_tensor = text_tensor.unsqueeze(0)\n mask = get_mask(text_tensor)\n length = [len(text_id)]\n tag_seq = self.forward(text_tensor, length, mask)\n tag_seq = tag_seq.squeeze(0)\n location = list()\n orgnization = list()\n person = list()\n dict1 = {}\n\n for word, label in zip(text, tag_seq):\n tag = self.label_vocab.id_to_label(label)\n if tag == 'O':\n continue\n\n if tag.endswith('LOC'):\n if tag.startswith('B') or tag.startswith('S'):\n location.append(word)\n else:\n location[-1] += word\n\n if tag.endswith('ORG'):\n if tag.startswith('B') or tag.startswith('S'):\n orgnization.append(word)\n else:\n orgnization[-1] += word\n\n if tag.endswith('PER'):\n if tag.startswith('B') or tag.startswith('S'):\n person.append(word)\n else:\n person[-1] += word\n if location:\n dict1['location'] = location\n if orgnization:\n dict1['orgnization'] = orgnization\n if person:\n dict1['person'] = person\n\n return dict1\n\n def get_entity_from_file(self, input_file, out_file):\n if not input_file.endswith('.txt') or not out_file.endswith('.txt'):\n print('输入文件类型错误')\n return\n with open(input_file, 'r', encoding='utf-8') as f1:\n with open(out_file, 'w', encoding='utf-8') as f2:\n line = f1.readline()\n while line:\n dict1 = self.get_entity_from_sent(line.strip())\n if dict1:\n write_dict(dict1, f2)\n f2.write('\\n')\n line 
= f1.readline()\n\n\ndef load():\n parser = argparse.ArgumentParser(description='Named Entity Recognition Model')\n parser.add_argument('--word_embed_dim', type=int, default=100)\n parser.add_argument('--word_hidden_dim', type=int, default=100)\n parser.add_argument('--dropout', type=float, default=0.5)\n parser.add_argument('--batch_size', type=int, default=10)\n parser.add_argument('--epochs', type=int, default=100)\n parser.add_argument('--optimizer', default='sgd')\n parser.add_argument('--lr', type=float, default=0.015)\n parser.add_argument('--feature_extractor', choices=['lstm', 'cnn'], default='lstm')\n parser.add_argument('--train_path', default='data/msra_train.txt')\n parser.add_argument('--test_path', default='data/msra_test.txt')\n parser.add_argument('--patience', type=int, default=10)\n parser.add_argument('--number_normalized', type=bool, default=True)\n parser.add_argument('--use_crf', type=bool, default=True)\n\n args = parser.parse_args()\n use_gpu = torch.cuda.is_available()\n\n word_vocab = WordVocabulary(args.train_path, args.number_normalized)\n label_vocab = LabelVocabulary(args.train_path)\n\n model = NamedEntityRecog(word_vocab, label_vocab, args.word_embed_dim, args.word_hidden_dim, args.feature_extractor,\n label_vocab.size(), args.dropout, pretrain_embed=None, use_crf=args.use_crf,\n use_gpu=use_gpu)\n model.load_state_dict(torch.load(os.path.join(os.path.dirname(__file__), 'model/lstmTrue')))\n\n return model\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.nn.LSTM", "torch.cuda.is_available", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.CrossEntropyLoss", "numpy.empty", "torch.nn.Conv1d", "torch.tensor", "numpy.sqrt", "numpy.zeros", "torch.max", "torch.nn.Sequential", "torch.nn.ReLU", "torch.nn.Dropout", "torch.from_numpy", "numpy.random.uniform", "torch.nn.utils.rnn.pad_packed_sequence", "torch.nn.BatchNorm1d" ] ]
hassaku/audio-plot-lib
[ "fb3c6e7129dba39fade15147130913946b2172a2" ]
[ "audio_plot_lib/interactive.py" ]
[ "import copy\nimport numpy as np\nfrom bokeh import events\nfrom bokeh.models import CustomJS, HoverTool, Slider, Div\nfrom bokeh.plotting import figure, output_notebook, show\nfrom bokeh.layouts import column, row\nfrom bokeh.models import LinearAxis, Range1d\nfrom IPython.display import HTML, display\n\ndef __set_context():\n display(HTML('''\n <script>\n if (typeof osc === 'undefined') {\n audioContext = new (window.AudioContext || window.webkitAudioContext)();\n audioGain = audioContext.createGain();\n panNode = audioContext.createStereoPanner();\n osc = audioContext.createOscillator();\n osc.connect(panNode);\n panNode.connect(audioGain);\n audioGain.connect(audioContext.destination);\n osc.start(audioContext.currentTime);\n audioGain.gain.setValueAtTime(0, audioContext.currentTime);\n }\n oscTarget = 0;\n </script>\n '''))\n\n\ndef __speak_js(utterance):\n return \"\"\"\n window.speechSynthesis.cancel();\n let msg = new SpeechSynthesisUtterance({});\n msg.lang = \"en-US\";\n window.speechSynthesis.speak(msg);\n \"\"\".format(utterance)\n\n\ndef __speak_inout(title=\"image\", enter=True, read_label=False):\n if read_label and enter:\n label_message = \". Label ${oscTarget} is selected. 
Double click to change.\"\n else:\n label_message = \"\"\n\n\n if enter:\n inout_message = \"Enter {}\".format(title)\n\n else:\n inout_message = \"Leave {}\".format(title)\n\n return CustomJS(code=__speak_js(\"`{}`\".format(inout_message + label_message)))\n\n\n__FIND_NEAREST_JS = \"\"\"\nvar minX, maxX, minY, maxY;\nif(multiAxes) {\n var labeledX = [];\n var labeledY = [];\n x.forEach(function(val, idx){\n if(label[idx] != oscTarget) { return; }\n labeledX.push(x[idx]);\n labeledY.push(y[idx]);\n });\n minX = Math.min(...labeledX);\n maxX = Math.max(...labeledX);\n minY = Math.min(...labeledY);\n maxY = Math.max(...labeledY);\n} else {\n minX = Math.min(...x);\n maxX = Math.max(...x);\n minY = Math.min(...y);\n maxY = Math.max(...y);\n}\n\nif((position == Infinity) || (position < minX) || (position > maxX)) {\n return;\n}\n\nvar diff = [];\nvar nearestIdx = 0;\nx.forEach(function(val, idx){\n if(label[idx] != oscTarget) {\n return;\n }\n diff[idx] = Math.abs(position - val);\n nearestIdx = (diff[nearestIdx] < diff[idx]) ? 
nearestIdx : idx;\n});\n\nlet nearestX = x[nearestIdx];\nlet nearestY = y[nearestIdx];\n\"\"\"\n\n\n__COLORS = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',\n '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',\n '#bcbd22', '#17becf']\n\n\ndef plot(y: list, x: list=None, label: list=None, width: int=400, height: int=400, gain: float=0.4,\n margin_x: int=1, title: str=\"graph\", script_name: str=\"\", slider_partitions: int=None,\n multiple_axes=False):\n \"\"\"Plots that represent data with sound and can be checked interactively\n\n You can interactively check the data in graph form by moving the mouse cursor.\n When you enter or leave the graph image, you will be notified by voice.\n Also, when you move the mouse left or right on the graph image,\n the y-axis value corresponding to that location will be expressed with a high or low tone.\n A single click will read out the value corresponding to that location.\n Also, double-clicking switches the group according to the label specified as an option.\n\n Parameters\n ----------\n y : list\n A list of values to be graphed.\n x : list\n A list of x-axis values corresponding to y-axis values.\n If not specified, it is substituted by the value of the equal interval. Optional.\n label : list\n A list of grouping numbers for each value, which must start with zero.\n You can compare the graph data by sound, switching between each number. Optional.\n width : int\n Width of the graph image (in pixels). Optional.\n height : int\n Height of the graph image (in pixels). Optional.\n title: str\n Graph name to be read out. Optional.\n multiple_axes: bool\n Set to True if you want each label to have a separate y-axis. 
Optional.\n\n Examples\n --------\n >>> plot([0, 1, 2])\n <IPython.core.display.HTML object>\n >>> plot(x=[0, 1, 2], y=[4, 5, 6], label=[0, 0, 1])\n <IPython.core.display.HTML object>\n \"\"\"\n\n if label:\n assert max(label) < len(__COLORS), \"max label must be lower {}\".format(len(__COLORS))\n assert max(label) + 1 == len(set(label)), \"label should be in {} because max label is {}.\".format(\n list(range(max(label) + 1)), max(label))\n\n if type(y) == np.ndarray:\n y = y.tolist()\n\n if type(x) == np.ndarray:\n x = x.tolist()\n elif x == None:\n x = np.arange(len(y)).tolist()\n\n if type(label) == np.ndarray:\n label = label.astype(int).tolist()\n elif label == None:\n label = np.zeros_like(y).astype(int).tolist()\n\n if script_name == \"\":\n __set_context()\n output_notebook()\n\n plot = figure(plot_width=width, plot_height=height, tools=\"\", toolbar_location=None)\n colors = [__COLORS[c] for c in label]\n\n if multiple_axes:\n assert max(label) == 1, \"The number of labels must be two kinds\"\n\n multi_axes_str = \"true\"\n y_ranges = {}\n for l in range(max(label)+1):\n __x = np.array(x)[np.array(label) == l].tolist()\n __y = np.array(y)[np.array(label) == l].tolist()\n __c = np.array(colors)[np.array(label) == l].tolist()\n plot.scatter(__x, __y, line_color=__c, fill_color=__c, y_range_name=str(l))\n if l == 1:\n plot.add_layout(LinearAxis(y_range_name=str(l)), 'right')\n y_ranges[str(l)] = Range1d(start=min(__y) - 1, end=max(__y) + 1)\n\n plot.extra_y_ranges = y_ranges\n\n else:\n multi_axes_str = \"false\"\n plot.scatter(x, y, line_color=colors, fill_color=colors)\n\n sound_js = \"\"\"\n const multiAxes = %s;\n %s\n if(diff[nearestIdx] > marginX) {\n return;\n }\n\n const gain = %s; // max: 1.0\n osc.type = 'triangle'; // sine, square, sawtooth, triangle\n osc.frequency.value = 261.626 + (nearestY - minY) / (maxY - minY) * 261.626 // Hz\n audioGain.gain.linearRampToValueAtTime(gain, audioContext.currentTime + 0.2); // atack\n 
audioGain.gain.setTargetAtTime(0, audioContext.currentTime + 0.2, 0.5); // decay, sustain\n\n let pan = (nearestX - minX) / (maxX - minX) * 2 - 1;\n panNode.pan.value = pan; // left:-1 ~ right:1\n \"\"\" % (multi_axes_str, __FIND_NEAREST_JS, gain)\n\n # Mouse hover on plot\n hover_code = \"\"\"\n let marginX = %s;\n let position = cb_data.geometry.x;\n %s\n \"\"\" % (margin_x, sound_js)\n\n callback = CustomJS(args={\"x\": x, \"y\": y, \"label\": label}, code=hover_code)\n plot.add_tools(HoverTool(tooltips=None, callback=callback))\n\n # Single tap on plot\n tap_code = \"\"\"\n let position = cb_obj.x;\n const multiAxes = %s;\n %s\n %s\n \"\"\" % (multi_axes_str, __FIND_NEAREST_JS, __speak_js(\"`X is ${nearestX}. Y is ${nearestY}`\"))\n\n plot.js_on_event(events.Tap, CustomJS(args={\"x\": x, \"y\": y, \"label\": label},\n code=tap_code))\n\n if len(set(label)) > 1:\n # Double tap on plot\n double_tap_code = \"\"\"\n oscTarget = (oscTarget + 1) %% (maxLabel + 1);\n %s\n \"\"\" % (__speak_js(\"`label ${oscTarget} is selected`\"))\n plot.js_on_event(events.DoubleTap, CustomJS(args={\"maxLabel\": max(label)},\n code=double_tap_code))\n\n # Enter or leave on plot\n read_label = (max(label) > 0)\n plot.js_on_event(events.MouseEnter, __speak_inout(title, True, read_label))\n plot.js_on_event(events.MouseLeave, __speak_inout(title, False, read_label))\n\n # slider for keyboard interaction\n sliders = []\n for l in range(max(label)+1):\n __x = np.array(x)[np.array(label) == l].tolist()\n\n if slider_partitions is None:\n slider_partitions = np.min([len(__x)-1, 30])\n if slider_partitions == 30:\n print(\"The number of slider partitions has been reduced to 30 as the default limit. 
Please set slider_partitions as an argument if necessary.\")\n\n slider_start = np.min(__x)\n slider_end = np.max(__x)\n if slider_start == slider_end:\n slider_end += 1\n slider_step = (slider_end - slider_start) / slider_partitions\n\n slider_code = \"\"\"\n oscTarget = target;\n let marginX = %s;\n let position = slider.value;\n %s\n setTimeout(function(){%s}, 3000);\n \"\"\" % (slider_step, sound_js, __speak_js(\"`X is ${nearestX}. Y is ${nearestY}`\"))\n\n slider = Slider(start=slider_start, end=slider_end, value=slider_start, step=slider_step, title=\"label {}\".format(l))\n slider.js_on_change('value', CustomJS(args={\"x\": x, \"y\": y, \"label\": label, \"slider\": slider, \"target\": l}, code=slider_code))\n sliders.append(slider)\n\n # layout\n message1 = Div(text=\"\"\"<h2>output of audio plot lib</h2>\"\"\")\n message2 = Div(text=\"\"\"<p>There is a graph and a series of sliders to check the values. If you have a mouse, you can check the values by hovering over the graph. If you are using only a keyboard, you can move the slider to move the horizontal axis of the graph to check the value of the graph as a pitch according to the location.</p>\"\"\")\n show(column(message1, message2, row(plot, column(sliders))))\n\n if script_name != \"\":\n from bs4 import BeautifulSoup\n\n HTML = \"\"\"\n <button id=\"unmuteButton\">Push here to unmute graph</button>\n <script>\n document.getElementById('unmuteButton').addEventListener('click', function() {\n audioContext = new (window.AudioContext || window.webkitAudioContext)();\n audioGain = audioContext.createGain();\n panNode = audioContext.createStereoPanner();\n osc = audioContext.createOscillator();\n osc.connect(panNode);\n panNode.connect(audioGain);\n audioGain.connect(audioContext.destination);\n osc.start(audioContext.currentTime);\n audioGain.gain.setValueAtTime(0, audioContext.currentTime);\n oscTarget = 0;\n })\n </script>\n \"\"\"\n\n html_filename = script_name.replace(\".py\", \".html\")\n soup = 
BeautifulSoup(open(html_filename), 'html.parser')\n soup.body.insert(0, BeautifulSoup(HTML, \"html.parser\")) # after body\n\n with open(html_filename, \"w\") as file:\n file.write(str(soup))\n\n" ]
[ [ "numpy.max", "numpy.zeros_like", "numpy.array", "numpy.min" ] ]
whoanuragverma/central-perk
[ "9140f94fad42a5cefb192a50a380951e95eea892" ]
[ "python/scraper.py" ]
[ "from bs4 import BeautifulSoup\nimport pandas as pd\nimport os\n\nmain = []\ntitles = []\n\nep_no = 1\nfor folders in os.listdir('raw'):\n for files in os.listdir(os.path.join('raw', folders)):\n soup = BeautifulSoup(\n open(os.path.join('raw', folders, files), encoding='cp1252'), \"html.parser\")\n titles.append(soup.title.text)\n ctr = 0\n for tag in soup.find_all('p'):\n ctr += 1\n print(\"For episode: {} Number: {} Scanned Lines: {} Total Lines: {}\".format(\n soup.title.text, ep_no, ctr, len(soup.find_all('p'))))\n if tag.find('b') is not None and tag.find('b').find('font') is None:\n main.append(tag.text.replace(\n \"\\xa0\", \" \", 10).replace(\"\\n\", \" \", 10).split(\": \", 1))\n ep_no += 1\ndf1 = pd.DataFrame(titles, columns=['Title'])\ndf1.to_excel(\"titles.xlsx\", index=False)\ndf = pd.DataFrame(main, columns=['Charcater', 'Dialouge'])\ndf.to_excel(\"dialouges.xlsx\", index=False)\n" ]
[ [ "pandas.DataFrame" ] ]
GuoBo98/ShipDet
[ "2979c39c5a56be3b99ba77833cfe556a8a0fc97e" ]
[ "mmdet/models/roi_heads/rbbox_heads/convfc_rbbox_head.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.runner import force_fp32\nfrom mmdet.models.roi_heads.bbox_heads import ConvFCBBoxHead\n\nfrom mmdet.core import bbox_target_rbbox, rbbox_target_rbbox,choose_best_Rroi_batch, hbb2obb_v2, multiclass_nms_rbbox\nfrom mmdet.models.losses import accuracy\nfrom mmdet.models.builder import HEADS\n\n\[email protected]_module()\nclass ConvFCBBoxHeadRbbox(ConvFCBBoxHead):\n r\"\"\"More general bbox head, with shared conv and fc layers and two optional\n separated branches.\n\n .. code-block:: none\n\n /-> cls convs -> cls fcs -> cls\n shared convs -> shared fcs\n \\-> reg convs -> reg fcs -> reg\n \"\"\" # noqa: W605\n\n def __init__(self,\n reg_class_agnostic,\n with_module=True,\n hbb_trans='hbb2obb_v2',\n *args,\n **kwargs):\n super(ConvFCBBoxHeadRbbox, self).__init__(*args, **kwargs)\n\n self.with_module = with_module\n self.hbb_trans = hbb_trans\n self.reg_class_agnostic = reg_class_agnostic\n if self.with_reg:\n out_dim_reg = (5 if self.reg_class_agnostic else 5 * self.num_classes)\n self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)\n\n def get_targets(self, sampling_results, gt_masks, gt_labels,\n rcnn_train_cfg):\n \"\"\"\n obb target hbb\n :param sampling_results:\n :param gt_masks:\n :param gt_labels:\n :param rcnn_train_cfg:\n :param mod: 'normal' or 'best_match', 'best_match' is used for RoI Transformer\n :return:\n \"\"\"\n pos_proposals = [res.pos_bboxes for res in sampling_results]\n neg_proposals = [res.neg_bboxes for res in sampling_results]\n # pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]\n # TODO: first get indexs of pos_gt_bboxes, then index from gt_bboxes\n # TODO: refactor it, direct use the gt_rbboxes instead of gt_masks\n pos_assigned_gt_inds = [\n res.pos_assigned_gt_inds for res in sampling_results\n ]\n pos_gt_labels = [res.pos_gt_labels for res in sampling_results]\n #reg_classes = 1 if self.reg_class_agnostic else self.num_classes\n 
cls_reg_targets = bbox_target_rbbox(\n pos_proposals,\n neg_proposals,\n pos_assigned_gt_inds,\n gt_masks,\n pos_gt_labels,\n rcnn_train_cfg,\n self.num_classes,\n coder=self.bbox_coder,\n with_module=self.with_module,\n hbb_trans=self.hbb_trans)\n return cls_reg_targets\n\n def get_target_rbbox(self, sampling_results, gt_bboxes, gt_labels,\n rcnn_train_cfg):\n \"\"\"\n obb target obb\n :param sampling_results:\n :param gt_bboxes:\n :param gt_labels:\n :param rcnn_train_cfg:\n :return:\n \"\"\"\n pos_proposals = [res.pos_bboxes for res in sampling_results] # [-pi/4,-3pi/4]\n # pos_proposals = choose_best_Rroi_batch(pos_proposals)\n neg_proposals = [res.neg_bboxes for res in sampling_results]\n pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results] # [0.pi]\n pos_gt_labels = [res.pos_gt_labels for res in sampling_results]\n #reg_classes = 1 if self.reg_class_agnostic else self.num_classes\n cls_reg_targets = rbbox_target_rbbox(\n pos_proposals,\n neg_proposals,\n pos_gt_bboxes,\n pos_gt_labels,\n rcnn_train_cfg,\n self.num_classes,\n coder=self.bbox_coder)\n return cls_reg_targets\n\n @force_fp32(apply_to=('cls_score', 'bbox_pred'))\n def loss(self,\n cls_score,\n bbox_pred,\n labels,\n label_weights,\n bbox_targets,\n bbox_weights,\n reduction_override=None):\n losses = dict()\n if cls_score is not None:\n avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)\n if cls_score.numel() > 0:\n losses['rbbox_loss_cls'] = self.loss_cls(\n cls_score,\n labels,\n label_weights,\n avg_factor=avg_factor,\n reduction_override=reduction_override)\n losses['rbbox_acc'] = accuracy(cls_score, labels)\n if bbox_pred is not None:\n bg_class_ind = self.num_classes\n # 0~self.num_classes-1 are FG, self.num_classes is BG\n pos_inds = (labels >= 0) & (labels < bg_class_ind)\n # do not perform bounding box regression for BG anymore.\n if pos_inds.any():\n if self.reg_class_agnostic:\n pos_bbox_pred = bbox_pred.view(\n bbox_pred.size(0), 
5)[pos_inds.type(torch.bool)]\n else:\n pos_bbox_pred = bbox_pred.view(\n bbox_pred.size(0), -1,\n 5)[pos_inds.type(torch.bool),\n labels[pos_inds.type(torch.bool)]]\n losses['rbbox_loss_bbox'] = self.loss_bbox(\n pos_bbox_pred,\n bbox_targets[pos_inds.type(torch.bool)],\n bbox_weights[pos_inds.type(torch.bool)],\n avg_factor=bbox_targets.size(0),\n reduction_override=reduction_override)\n else:\n losses['rbbox_loss_bbox'] = bbox_pred.sum() * 0\n return losses\n\n\n def get_det_rbboxes(self,\n rrois,\n cls_score,\n rbbox_pred,\n img_shape,\n scale_factor,\n rescale=False,\n cfg=None):\n if isinstance(cls_score, list):\n cls_score = sum(cls_score) / float(len(cls_score))\n scores = F.softmax(cls_score, dim=1) if cls_score is not None else None\n\n if rbbox_pred is not None:\n\n # dbboxes = delta2dbbox_v2(rrois[:, 1:], rbbox_pred, self.target_means,\n # self.target_stds, img_shape)\n\n dbboxes = self.bbox_coder.decode(rrois[:, 1:], rbbox_pred, 'delta2dbbox_v2', img_shape)\n else:\n # bboxes = rois[:, 1:]\n dbboxes = rrois[:, 1:].clone()\n # TODO: add clip here\n if img_shape is not None:\n dbboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1])\n dbboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0])\n if rescale:\n # bboxes /= scale_factor\n # dbboxes[:, :4] /= scale_factor\n if isinstance(scale_factor, float):\n dbboxes[:, 0::5] /= scale_factor\n dbboxes[:, 1::5] /= scale_factor\n dbboxes[:, 2::5] /= scale_factor\n dbboxes[:, 3::5] /= scale_factor\n else:\n scale_factor = dbboxes.new_tensor(scale_factor)\n dbboxes = dbboxes.view(dbboxes.size(0), -1, 5)\n # TODO: point base scale\n # TODO: check this\n # import pdb\n # pdb.set_trace()\n # print('dbboxes shape', dbboxes.size())\n # print('scale_factor size', scale_factor.size())\n dbboxes[:, :, :4] /= scale_factor\n dbboxes = dbboxes.view(dbboxes.size()[0], -1)\n if cfg is None:\n return dbboxes, scores\n else:\n # check multiscale\n det_bboxes, det_labels = multiclass_nms_rbbox(dbboxes, scores,\n cfg.score_thr, cfg.nms,\n 
cfg.max_per_img)\n # det_bboxes = torch.from_numpy(det_bboxes).to(c_device)\n # det_labels = torch.from_numpy(det_labels).to(c_device)\n return det_bboxes, det_labels\n\n @force_fp32(apply_to=('bbox_preds',))\n def refine_rbboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):\n \"\"\"Refine bboxes during training.\n\n Args:\n rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,\n and bs is the sampled RoIs per image.\n labels (Tensor): Shape (n*bs, ).\n bbox_preds (Tensor): Shape (n*bs, 5) or (n*bs, 5*#class).\n pos_is_gts (list[Tensor]): Flags indicating if each positive bbox\n is a gt bbox.\n img_metas (list[dict]): Meta info of each image.\n\n Returns:\n list[Tensor]: Refined bboxes of each image in a mini-batch.\n \"\"\"\n img_ids = rois[:, 0].long().unique(sorted=True)\n assert img_ids.numel() == len(img_metas)\n\n bboxes_list = []\n for i in range(len(img_metas)):\n # TODO check this\n inds = torch.nonzero(rois[:, 0] == i, as_tuple=False).squeeze(dim=1)\n num_rois = inds.numel()\n\n bboxes_ = rois[inds, 1:]\n label_ = labels[inds]\n bbox_pred_ = bbox_preds[inds]\n img_meta_ = img_metas[i]\n pos_is_gts_ = pos_is_gts[i]\n\n bboxes = self.regress_by_class_rbbox(bboxes_, label_, bbox_pred_,\n img_meta_)\n # filter gt bboxes\n pos_keep = 1 - pos_is_gts_\n keep_inds = pos_is_gts_.new_ones(num_rois)\n keep_inds[:len(pos_is_gts_)] = pos_keep\n\n bboxes_list.append(bboxes[keep_inds.type(torch.bool)])\n\n return bboxes_list\n\n def regress_by_class_rbbox(self, rois, label, bbox_pred, img_meta):\n \"\"\"Regress the bbox for the predicted class. 
Used in Cascade R-CNN.\n\n Args:\n rois (Tensor): shape (n, 5) or (n, 6)\n label (Tensor): shape (n, )\n bbox_pred (Tensor): shape (n, 5*(#class+1)) or (n, 5)\n img_meta (dict): Image meta info.\n\n Returns:\n Tensor: Regressed bboxes, the same shape as input rois.\n \"\"\"\n # import pdb\n # pdb.set_trace()\n assert rois.size(1) == 5 or rois.size(1) == 6\n\n if not self.reg_class_agnostic:\n # import pdb\n # pdb.set_trace()\n label = label * 5\n inds = torch.stack((label, label + 1, label + 2, label + 3, label + 4), 1)\n bbox_pred = torch.gather(bbox_pred, 1, inds)\n assert bbox_pred.size(1) == 5\n\n if rois.size(1) == 5:\n if self.with_module:\n # new_rois = delta2dbbox(rois, bbox_pred, self.target_means,\n # self.target_stds, img_meta['img_shape'])\n new_rois = self.bbox_coder.decode(rois, bbox_pred, 'delta2dbbox', img_meta['img_shape'])\n else:\n # new_rois = delta2dbbox_v3(rois, bbox_pred, self.target_means,\n # self.target_stds, img_meta['img_shape'])\n new_rois = self.bbox_coder.decode(rois, bbox_pred, 'delta2dbbox_v3', img_meta['img_shape'])\n # choose best Rroi\n new_rois = choose_best_Rroi_batch(new_rois)\n else:\n if self.with_module:\n # bboxes = delta2dbbox(rois[:, 1:], bbox_pred, self.target_means,\n # self.target_stds, img_meta['img_shape'])\n bboxes = self.bbox_coder.decode(rois[:, 1:], bbox_pred, 'delta2dbbox', img_meta['img_shape'])\n else:\n # bboxes = delta2dbbox_v3(rois[:, 1:], bbox_pred, self.target_means,\n # self.target_stds, img_meta['img_shape'])\n bboxes = self.bbox_coder.decode(rois[:, 1:], bbox_pred, 'delta2dbbox_v3', img_meta['img_shape'])\n bboxes = choose_best_Rroi_batch(bboxes)\n new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)\n\n return new_rois\n\n\[email protected]_module()\nclass SharedFCBBoxHeadRbbox(ConvFCBBoxHeadRbbox):\n\n def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs):\n assert num_fcs >= 1\n super(SharedFCBBoxHeadRbbox, self).__init__(\n num_shared_convs=0,\n num_shared_fcs=num_fcs,\n 
num_cls_convs=0,\n num_cls_fcs=0,\n num_reg_convs=0,\n num_reg_fcs=0,\n fc_out_channels=fc_out_channels,\n *args,\n **kwargs)\n\[email protected]_module()\nclass ConvFCBBoxHeadRbbox_NotShareCls(ConvFCBBoxHeadRbbox):\n def __init__(self,\n reg_class_agnostic,\n with_module=True,\n hbb_trans='hbb2obb_v2',\n *args,\n **kwargs):\n super(ConvFCBBoxHeadRbbox_NotShareCls, self).__init__(reg_class_agnostic, with_module, hbb_trans, *args, **kwargs)\n\n if self.with_cls:\n del self.fc_cls\n self.fc_cls_share = nn.Linear(self.cls_last_dim, self.num_classes + 1)\n\n def init_weights(self):\n if self.with_cls:\n nn.init.normal_(self.fc_cls_share.weight, 0, 0.01)\n nn.init.constant_(self.fc_cls_share.bias, 0)\n if self.with_reg:\n nn.init.normal_(self.fc_reg.weight, 0, 0.001)\n nn.init.constant_(self.fc_reg.bias, 0)\n\n # conv layers are already initialized by ConvModule\n for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:\n for m in module_list.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n # shared part\n if self.num_shared_convs > 0:\n for conv in self.shared_convs:\n x = conv(x)\n\n if self.num_shared_fcs > 0:\n if self.with_avg_pool:\n x = self.avg_pool(x)\n\n x = x.flatten(1)\n\n for fc in self.shared_fcs:\n x = self.relu(fc(x))\n # separate branches\n x_cls = x\n x_reg = x\n\n for conv in self.cls_convs:\n x_cls = conv(x_cls)\n if x_cls.dim() > 2:\n if self.with_avg_pool:\n x_cls = self.avg_pool(x_cls)\n x_cls = x_cls.flatten(1)\n for fc in self.cls_fcs:\n x_cls = self.relu(fc(x_cls))\n\n for conv in self.reg_convs:\n x_reg = conv(x_reg)\n if x_reg.dim() > 2:\n if self.with_avg_pool:\n x_reg = self.avg_pool(x_reg)\n x_reg = x_reg.flatten(1)\n for fc in self.reg_fcs:\n x_reg = self.relu(fc(x_reg))\n\n cls_score = self.fc_cls_share(x_cls) if self.with_cls else None\n bbox_pred = self.fc_reg(x_reg) if self.with_reg else None\n return cls_score, bbox_pred\n\[email 
protected]_module()\nclass SharedFCBBoxHeadRbbox_NotShareCls(ConvFCBBoxHeadRbbox_NotShareCls):\n\n def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs):\n assert num_fcs >= 1\n super(SharedFCBBoxHeadRbbox_NotShareCls, self).__init__(\n num_shared_convs=0,\n num_shared_fcs=num_fcs,\n num_cls_convs=0,\n num_cls_fcs=0,\n num_reg_convs=0,\n num_reg_fcs=0,\n fc_out_channels=fc_out_channels,\n *args,\n **kwargs)" ]
[ [ "torch.nn.Linear", "torch.nonzero", "torch.cat", "torch.stack", "torch.gather", "torch.nn.init.constant_", "torch.nn.init.xavier_uniform_", "torch.nn.init.normal_", "torch.nn.functional.softmax", "torch.sum" ] ]
WuDiDaBinGe/TAKG
[ "83e608e677a4ee74722d18cb5ef430f4f6c6ad31" ]
[ "retrievers/build_tfidf.py" ]
[ "#!/usr/bin/env python3\n# Copyright 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"A script to build the tf-idf document matrices for retrieval.\"\"\"\nimport numpy as np\nimport scipy.sparse as sp\nimport argparse\nimport os\nimport math\nimport logging\nfrom multiprocessing import Pool as ProcessPool\nfrom multiprocessing.util import Finalize\nfrom functools import partial\nfrom collections import Counter\n\nimport retrievers.tokenizer as tokenizers\nimport retrievers.ranker_utils as utils\nfrom retrievers.utils import read_tokenized_src_file\n\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nfmt = logging.Formatter('%(asctime)s: [ %(message)s ]', '%m/%d/%Y %I:%M:%S %p')\nconsole = logging.StreamHandler()\nconsole.setFormatter(fmt)\nlogger.addHandler(console)\n\n\n# ------------------------------------------------------------------------------\n# Multiprocessing functions\n# ------------------------------------------------------------------------------\n\nDOC2IDX = None\nPROCESS_TOK = None\nDOC2TEXT = None\n\n\ndef init(tokenizer_class):\n global PROCESS_TOK\n PROCESS_TOK = tokenizer_class()\n Finalize(PROCESS_TOK, PROCESS_TOK.shutdown, exitpriority=100)\n\n\n\ndef fetch_text(doc_id):\n global DOC2TEXT\n return DOC2TEXT[doc_id]\n\n\ndef tokenize(text):\n global PROCESS_TOK\n return PROCESS_TOK.tokenize(text)\n\n\n# ------------------------------------------------------------------------------\n# Build article --> word count sparse matrix.\n# ------------------------------------------------------------------------------\n\n\ndef count(ngram, hash_size, doc_id):\n \"\"\"Fetch the text of a document and compute hashed ngrams counts.\"\"\"\n global DOC2IDX\n row, col, data = [], [], []\n # Tokenize\n tokens = tokenize(utils.normalize(fetch_text(doc_id)))\n\n # Get ngrams from tokens, with stopword/punctuation 
filtering.\n ngrams = tokens.ngrams(\n n=ngram, uncased=True, filter_fn=utils.filter_ngram\n )\n\n # Hash ngrams and count occurences\n counts = Counter([utils.hash(gram, hash_size) for gram in ngrams])\n\n # Return in sparse matrix data format.\n row.extend(counts.keys())\n col.extend([DOC2IDX[doc_id]] * len(counts))\n data.extend(counts.values())\n return row, col, data\n\n\ndef get_count_matrix(args):\n \"\"\"Form a sparse word to document count matrix (inverted index).\n\n M[i, j] = # times word i appears in document j.\n \"\"\"\n # Map doc_ids to indexes\n global DOC2IDX, DOC2TEXT\n DOC2TEXT = read_tokenized_src_file(args.ref_doc_path)\n DOC2IDX = {i:i for i in range(len(DOC2TEXT))}\n doc_ids = list(range(len(DOC2TEXT)))\n\n # Setup worker pool\n tok_class = tokenizers.get_class(args.tokenizer)\n workers = ProcessPool(args.num_workers,\n initializer=init,\n initargs=(tok_class,)\n )\n\n # Compute the count matrix in steps (to keep in memory)\n logger.info('Mapping...')\n row, col, data = [], [], []\n step = max(int(len(doc_ids) / 10), 1)\n batches = [doc_ids[i:i + step] for i in range(0, len(doc_ids), step)]\n _count = partial(count, args.ngram, args.hash_size)\n for i, batch in enumerate(batches):\n logger.info('-' * 25 + 'Batch %d/%d' % (i + 1, len(batches)) + '-' * 25)\n for b_row, b_col, b_data in workers.imap_unordered(_count, batch):\n row.extend(b_row)\n col.extend(b_col)\n data.extend(b_data)\n workers.close()\n workers.join()\n\n logger.info('Creating sparse matrix...')\n count_matrix = sp.csr_matrix(\n (data, (row, col)), shape=(args.hash_size, len(doc_ids))\n )\n count_matrix.sum_duplicates()\n logger.info('occupied ngram %d, csr matrix shape: (%d, %d)' % (count_matrix.nnz, *count_matrix.shape))\n return count_matrix, (DOC2IDX, doc_ids)\n\n\n# ------------------------------------------------------------------------------\n# Transform count matrix to different forms.\n# 
------------------------------------------------------------------------------\n\n\ndef get_tfidf_matrix(cnts):\n \"\"\"Convert the word count matrix into tfidf one.\n\n tfidf = log(tf + 1) * log((N - Nt + 0.5) / (Nt + 0.5))\n * tf = term frequency in document\n * N = number of documents\n * Nt = number of occurences of term in all documents\n \"\"\"\n Ns = get_doc_freqs(cnts)\n idfs = np.log((cnts.shape[1] - Ns + 0.5) / (Ns + 0.5))\n idfs[idfs < 0] = 0\n idfs = sp.diags(idfs, 0)\n tfs = cnts.log1p()\n tfidfs = idfs.dot(tfs)\n return tfidfs\n\n\ndef get_doc_freqs(cnts):\n \"\"\"Return word --> # of docs it appears in.\"\"\"\n binary = (cnts > 0).astype(int)\n freqs = np.array(binary.sum(1)).squeeze()\n return freqs\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-ref_doc_path', type=str, default=None,\n help='Path to document texts')\n parser.add_argument('-out_dir', type=str, default=None,\n help='Directory for saving output files')\n parser.add_argument('-ngram', type=int, default=2,\n help=('Use up to N-size n-grams '\n '(e.g. 2 = unigrams + bigrams)'))\n parser.add_argument('-hash-size', type=int, default=int(math.pow(2, 24)),\n help='Number of buckets to use for hashing ngrams')\n parser.add_argument('-tokenizer', type=str, default='simple',\n help=(\"String option specifying tokenizer type to use \"\n \"(e.g. 'corenlp')\"))\n parser.add_argument('-num-workers', type=int, default=None,\n help='Number of CPU processes (for tokenizing, etc)')\n\n args = parser.parse_args()\n\n basename = os.path.splitext(os.path.basename(args.ref_doc_path))[0]\n basename += ('-tfidf-ngram=%d-hash=%d-tokenizer=%s' %\n (args.ngram, args.hash_size, args.tokenizer))\n\n filename = os.path.join(args.out_dir, basename)\n\n if os.path.exists(filename+\".npz\"):\n logging.info('Hash file exists in %s, exit!' 
% (filename+\".npz\"))\n exit()\n\n logging.info('Counting words...')\n count_matrix, doc_dict = get_count_matrix(args)\n\n logger.info('Making tfidf vectors...')\n tfidf = get_tfidf_matrix(count_matrix)\n\n logger.info('Getting word-doc frequencies...')\n freqs = get_doc_freqs(count_matrix)\n\n logger.info('Saving to %s.npz' % filename)\n metadata = {\n 'doc_freqs': freqs,\n 'tokenizer': args.tokenizer,\n 'hash_size': args.hash_size,\n 'ngram': args.ngram,\n 'doc_dict': doc_dict\n }\n utils.save_sparse_csr(filename, tfidf, metadata)\n" ]
[ [ "scipy.sparse.diags", "numpy.log" ] ]
stevenchen521/quant_ml
[ "f7d5efc49c934724f97fcafacc560f4a35b24551" ]
[ "rqalpha_backtest/Portfolio_RSI.py" ]
[ "import talib\nfrom rqalpha.api import *\nfrom rqalpha import run_func\nimport pandas as pd\nimport numpy as np\nfrom scipy.signal import argrelextrema\n\n\n# 在这个方法中编写任何的初始化逻辑。context对象将会在你的算法策略的任何方法之间做传递。\ndef init(context):\n\n # 选择我们感兴趣的股票\n context.s1 = \"002475.XSHE\"\n # context.s2 = \"601988.XSHG\"\n # context.s3 = \"000068.XSHE\"\n context.stocks = [context.s1]\n # context.stocks = pd.read_csv(\"ticker_list.csv\").ticker.tolist()\n context.TIME_PERIOD = 14\n context.HIGH_RSI = 70\n context.LOW_RSI = 30\n context.ORDER_PERCENT = 0.5\n\n\n# 你选择的证券的数据更新将会触发此段逻辑,例如日或分钟历史数据切片或者是实时数据切片更新\ndef handle_bar(context, bar_dict):\n # 开始编写你的主要的算法逻辑\n\n # bar_dict[order_book_id] 可以拿到某个证券的bar信息\n # context.portfolio 可以拿到现在的投资组合状态信息\n\n # 使用order_shares(id_or_ins, amount)方法进行落单\n\n # 对我们选中的股票集合进行loop,运算每一只股票的RSI数值\n for stock in context.stocks:\n # 读取历史数据\n print(stock)\n prices = history_bars(stock, 50, '1d', 'close')\n\n # 用Talib计算RSI值\n rsi_data = talib.RSI(prices, timeperiod=context.TIME_PERIOD)\n rsi_data = rsi_data[~np.isnan(rsi_data)]\n max_position = argrelextrema(rsi_data, np.greater)[-1][-1]\n min_position =argrelextrema(rsi_data, np.less)[-1][-1]\n\n if max_position > min_position:\n max_rsi = rsi_data[max_position]\n min_rsi = 0\n\n else:\n min_rsi = rsi_data[min_position]\n max_rsi = 0\n\n rsi_data_today = rsi_data[-1]\n # rsi_data_last_day = rsi_data[-2]\n plot('RSI', rsi_data_today)\n\n cur_position = context.portfolio.positions[stock].quantity\n # 用剩余现金的50%来购买新的股票\n target_available_cash = context.portfolio.cash * context.ORDER_PERCENT\n\n # 当RSI大于设置的上限阀值,清仓该股票\n if min_rsi > 0 and rsi_data_today - min_rsi > 15:\n order_target_value(stock, 0.5)\n plot('sell', 0)\n\n # 当RSI小于设置的下限阀值,用剩余cash的一定比例补仓该股\n if max_rsi > 0 and max_rsi - rsi_data_today > 15:\n logger.info(\"target available cash to order: \" + str(target_available_cash))\n # 如果剩余的现金不够一手 - 100shares,那么会被ricequant 的order management system reject掉\n order_value(stock, target_available_cash)\n 
plot('buy', 100)\n\n\nconfig = {\n \"base\": {\n \"data_bundle_path\": \"D:\\\\rqalpha\\data_bundle\\\\bundle\",\n \"start_date\": \"2017-06-01\",\n \"end_date\": \"2019-06-10\",\n \"benchmark\": \"000300.XSHG\",\n \"accounts\": {\n \"stock\": 10000000\n }\n },\n \"extra\": {\n \"log_level\": \"verbose\",\n },\n \"mod\": {\n \"sys_analyser\": {\n \"enabled\": True,\n \"plot\": True,\n \"output_file\": \"Portfolio_RSI.pkl\"\n }\n }\n}\n\n# 您可以指定您要传递的参数\n# run_func(init=init, before_trading=before_trading, handle_bar=handle_bar, config=config)\n\n# 如果你的函数命名是按照 API 规范来,则可以直接按照以下方式来运行\nrun_func(**globals())\nfrom rqalpha_backtest.Base import pickle_to_excel\npickle_to_excel(pickle_path='Portfolio_RSI.pkl')" ]
[ [ "numpy.isnan", "scipy.signal.argrelextrema" ] ]
matthaeusheer/uncertify
[ "dfc2df16fb07ee8d7d17906827e0f0c8b2747532" ]
[ "uncertify/evaluation/utils.py" ]
[ "import torch\nimport scipy.ndimage\n\nfrom uncertify.utils.custom_types import Tensor\n\n\ndef residual_l1_max(reconstruction: Tensor, original: Tensor) -> Tensor:\n \"\"\"Construct l1 difference between original and reconstruction.\n\n Note: Only positive values in the residual are considered, i.e. values below zero are clamped.\n That means only cases where bright pixels which are brighter in the input (likely lesions) are kept.\"\"\"\n residual = original - reconstruction\n return torch.where(residual > 0.0, residual, torch.zeros_like(residual))\n\n\ndef residual_l1(reconstruction: Tensor, original: Tensor) -> Tensor:\n \"\"\"Construct the absolute l1 difference between original and reconstruction images.\"\"\"\n return torch.abs_(original - reconstruction)\n\n\ndef mask_background_to_zero(input_tensor: Tensor, mask: Tensor) -> Tensor:\n return torch.where(mask, input_tensor, torch.zeros_like(input_tensor))\n\n\ndef mask_background_to_value(input_tensor: Tensor, mask: Tensor, value: float) -> Tensor:\n return torch.where(mask, input_tensor, value * torch.ones_like(input_tensor))\n\n\ndef threshold_batch_to_one_zero(tensor: Tensor, threshold: float) -> Tensor:\n \"\"\"Apply threshold, s.t. output values become zero if smaller then threshold and one if bigger than threshold.\"\"\"\n zeros = torch.zeros_like(tensor)\n ones = torch.ones_like(tensor)\n return torch.where(tensor > threshold, ones, zeros)\n\n\ndef convert_segmentation_to_one_zero(segmentation: Tensor) -> Tensor:\n \"\"\"The segmentation map might have multiple labels. 
Here we crush them to simply 1 (anomaly) or zero (healthy).\"\"\"\n return torch.where(segmentation > 0, torch.ones_like(segmentation), torch.zeros_like(segmentation))\n\n\ndef erode_mask(mask: Tensor) -> Tensor:\n \"\"\"Erode the boolean mask tensor inwards the get rid of edge effects on the residual mask.\"\"\"\n dev = mask.device()\n mask = mask.cpu()\n mask = scipy.ndimage.binary_erosion(np.squeeze(brainmask), structure=strel, iterations=12)\n mask = torch.tensor(mask.cuda())\n return mask\n" ]
[ [ "torch.zeros_like", "torch.where", "torch.ones_like", "torch.abs_" ] ]
aletuf93/analogistics
[ "c5f76910683bc3a1cc6f24799f0299232b4fc522" ]
[ "analogistics/learning/analogizers_grids.py" ]
[ "from sklearn import svm\n\nfrom analogistics.learning.grids import GridSearch\n\ntuned_param_svm = [{'kernel': ['rbf'],\n 'gamma': [1e-3, 1e-4],\n 'C': [1, 10, 100, 1000],\n }]\n\ntuned_param_svm_linear = [{'penalty': ['l1', 'l2'],\n 'C': [1, 10, 100, 1000],\n }]\n\ntuned_param_regr = [{'kernel': ['rbf'],\n 'gamma': [1e-3, 1e-4],\n 'C': [1, 10, 100, 1000],\n }]\n\n\nclass GridSearchAnalogizer(GridSearch):\n def __init__(self):\n\n self.models_classification = {'svm': {'estimator': svm.SVC(),\n 'param': tuned_param_svm,\n },\n\n 'svm_linear': {'estimator': svm.LinearSVC(),\n 'param': tuned_param_svm_linear,\n },\n }\n self.models_regression = {'svm': {'estimator': svm.SVR(),\n 'param': tuned_param_regr,\n },\n }\n" ]
[ [ "sklearn.svm.SVR", "sklearn.svm.LinearSVC", "sklearn.svm.SVC" ] ]
catskillsresearch/xview2-catskills
[ "5671cff323c8121c0ae251e360e454a1e8568f58" ]
[ "spacenet/src/models/polygon_loss_from_cuda_gt_mask.py" ]
[ "import chainer.functions as F\nfrom chainer import cuda\nimport numpy as np\nfrom imantics import Mask\nfrom predict_polygons import predict_polygons\nimport cupy\n\ndef polygon_loss_from_cuda_gt_mask(score_cuda, gt_mask_cuda):\n try:\n gt_mask=cuda.to_cpu(gt_mask_cuda)[0]\n except:\n gt_mask=cuda.to_cpu(gt_mask_cuda.data)[0]\n\n gt_mask=gt_mask.astype(bool)\n\n score1 = F.softmax(score_cuda)\n score1_cpu = cuda.to_cpu(score1.data)[0]\n\n building_mask_pred = (np.argmax(score1_cpu, axis=0) == 1)\n pred_polygons = Mask(building_mask_pred).polygons()\n n_pred_polygons = len(predict_polygons(pred_polygons))\n\n polygons_gt = Mask(gt_mask).polygons()\n n_gt_polygons = len(predict_polygons(polygons_gt))\n\n if n_gt_polygons == 0:\n if n_pred_polygons == 0:\n poly_loss = 0.0\n else:\n poly_loss = 1.0\n else: \n poly_loss = abs(n_pred_polygons-n_gt_polygons)/n_gt_polygons\n\n return poly_loss\n" ]
[ [ "numpy.argmax" ] ]
woaksths/set2regex-baseline
[ "be377593526ad664a727dd7152fcb186118adaa5" ]
[ "seq2seq/evaluator/evaluator.py" ]
[ "from __future__ import print_function, division\n\nimport torch\nimport torchtext\n\nimport seq2seq\nfrom seq2seq.loss import NLLLoss\n\nclass Evaluator(object):\n \"\"\" Class to evaluate models with given datasets.\n\n Args:\n loss (seq2seq.loss, optional): loss for evaluator (default: seq2seq.loss.NLLLoss)\n batch_size (int, optional): batch size for evaluator (default: 64)\n \"\"\"\n\n def __init__(self, loss=NLLLoss(), batch_size=64):\n self.loss = loss\n self.batch_size = batch_size\n\n def evaluate(self, model, data):\n \"\"\" Evaluate a model on given dataset and return performance.\n\n Args:\n model (seq2seq.models): model to evaluate\n data (seq2seq.dataset.dataset.Dataset): dataset to evaluate against\n\n Returns:\n loss (float): loss of the given model on the given dataset\n \"\"\"\n model.eval()\n\n loss = self.loss\n loss.reset()\n match = 0\n total = 0\n\n device = torch.device('cuda:0') if torch.cuda.is_available() else -1\n batch_iterator = torchtext.data.BucketIterator(\n dataset=data, batch_size=self.batch_size,\n sort=False, sort_key=lambda x: len(x.src),\n device=device, repeat=False, shuffle=True, train=False)\n tgt_vocab = data.fields[seq2seq.tgt_field_name].vocab\n pad = tgt_vocab.stoi[data.fields[seq2seq.tgt_field_name].pad_token]\n\n with torch.no_grad():\n for batch in batch_iterator:\n input_variables, input_lengths = getattr(batch, seq2seq.src_field_name)\n target_variables = getattr(batch, seq2seq.tgt_field_name)\n\n decoder_outputs, decoder_hidden, other = model(input_variables, input_lengths.tolist(), target_variables)\n\n # Evaluation\n seqlist = other['sequence']\n for step, step_output in enumerate(decoder_outputs):\n target = target_variables[:, step + 1]\n loss.eval_batch(step_output.view(target_variables.size(0), -1), target)\n\n non_padding = target.ne(pad)\n correct = seqlist[step].view(-1).eq(target).masked_select(non_padding).sum().item()\n match += correct\n total += non_padding.sum().item()\n\n if total == 0:\n 
accuracy = float('nan')\n else:\n accuracy = match / total\n\n return loss.get_loss(), accuracy\n" ]
[ [ "torch.device", "torch.no_grad", "torch.cuda.is_available" ] ]
xiong233/SOAP
[ "2376c39fe89ea1416eddfa6bf0ec70e60fc56a8b" ]
[ "lib/model/faster_rcnn/faster_rcnn.py" ]
[ "#encoding=utf-8\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torchvision.models as models\nfrom torch.autograd import Variable\nimport numpy as np\nfrom model.utils.config import cfg\nfrom model.rpn.rpn import _RPN\n\nfrom model.roi_layers import ROIAlign, ROIPool\n\n# from model.roi_pooling.modules.roi_pool import _RoIPooling\n# from model.roi_align.modules.roi_align import RoIAlignAvg\n\nfrom model.rpn.proposal_target_layer_cascade import _ProposalTargetLayer\nimport time\nimport pdb\nfrom model.utils.net_utils import _smooth_l1_loss, _crop_pool_layer, _affine_grid_gen, _affine_theta\n\nclass _fasterRCNN(nn.Module):\n \"\"\" faster RCNN \"\"\"\n def __init__(self, classes, class_agnostic):\n super(_fasterRCNN, self).__init__()\n self.classes = classes\n self.n_classes = len(classes)\n self.class_agnostic = class_agnostic\n # loss\n self.RCNN_loss_cls = 0\n self.RCNN_loss_bbox = 0\n\n # define rpn\n self.RCNN_rpn = _RPN(self.dout_base_model)\n self.RCNN_proposal_target = _ProposalTargetLayer(self.n_classes)\n\n\n self.RCNN_roi_pool = ROIPool((cfg.POOLING_SIZE, cfg.POOLING_SIZE), 1.0/16.0)\n self.RCNN_roi_align = ROIAlign((cfg.POOLING_SIZE, cfg.POOLING_SIZE), 1.0/16.0, 0)\n\n\n def forward(self, im_data, im_info, gt_boxes, num_boxes, rois_s):\n\n im_info = im_info.data\n gt_boxes = gt_boxes.data\n num_boxes = num_boxes.data\n # feed image data to base model to obtain base feature map\n base_feat = self.RCNN_base(im_data)\n # do roi pooling based on predicted rois\n self.RCNN_rpn.eval()\n rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)\n rois = Variable(rois)\n\n if rois_s.shape[1]!=1:\n rois = rois_s\n\n if cfg.POOLING_MODE == 'align':\n pooled_feat = self.RCNN_roi_align(base_feat, rois.view(-1, 5))\n elif cfg.POOLING_MODE == 'pool':\n pooled_feat = self.RCNN_roi_pool(base_feat, rois.view(-1, 5))\n # pdb.set_trace()\n # feed pooled 
features to top model\n pooled_feat = self._head_to_tail(pooled_feat)\n\n # compute bbox offset\n bbox_pred = self.RCNN_bbox_pred(pooled_feat)\n\n # compute object classification probability\n cls_score = self.RCNN_cls_score(pooled_feat)\n cls_prob = F.softmax(cls_score, 1)\n\n return rois, cls_prob, bbox_pred, base_feat, pooled_feat, cls_score\n\n def _init_weights(self):\n def normal_init(m, mean, stddev, truncated=False):\n \"\"\"\n weight initalizer: truncated normal and random normal.\n \"\"\"\n # x is a parameter\n if truncated:\n m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n else:\n m.weight.data.normal_(mean, stddev)\n m.bias.data.zero_()\n\n normal_init(self.RCNN_rpn.RPN_Conv, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_rpn.RPN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_rpn.RPN_bbox_pred, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_bbox_pred, 0, 0.001, cfg.TRAIN.TRUNCATED)\n\n def create_architecture(self):\n self._init_modules()\n self._init_weights()\n" ]
[ [ "torch.autograd.Variable", "torch.nn.functional.softmax" ] ]
o-netzer/VornamenBerlin
[ "9ca776ef8d89226044289e5e8112c2754ae0cfa3" ]
[ "vornamen.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 3 18:50:52 2020\n\n@author: netzer\n\"\"\"\n\n\n############## source path to csv files ##########################\n\nyear = '2012'\n\n############## source path to csv files ##########################\n\n# p = Path(sourcepath)\n# dirs = [x for x in p.iterdir() if x.is_dir()]\n\n# files = [] \n# for d in dirs:\n# for f in d.iterdir():\n# if f.name.endswith(('csv')):\n# files.append(f.name)\n# pprint(sorted(files))\n\nimport os\nimport glob\nimport pandas as pd\nimport gc\n\nsourcepath = \"D:\\Tableau\\VornamenBerlin\\data\\cleaned\"\nos.chdir(sourcepath)\ncombined_years = 'union_2012-2019'\nif not os.path.exists(os.path.join(sourcepath,combined_years)):\n os.mkdir(os.path.join(sourcepath,combined_years))\n\n\nsourceyear = os.path.join(sourcepath,year)\n\n\nos.chdir(sourceyear)\n\n\nextension = 'csv'\ncombined_csv = pd.DataFrame()\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\n#combine all files in the list\nfor file in all_filenames:\n df = pd.read_csv(file,\n header=0,\n names = ['Vorname', 'Anzahl', 'Geschlecht']) # + Position ab 2017\n df['Jahr']=year\n df['Bezirk']=file\n \n combined_csv = combined_csv.append(df)\n \nos.chdir(os.path.join(sourcepath,combined_years))\n\ncombined_csv.to_csv(year + \".csv\",\n index=False,\n encoding='utf-8-sig')\n\n\n\n\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
DandyWei/finlab_course_ml
[ "e52542f320bfb50e333046075ff554b1f745e105", "e52542f320bfb50e333046075ff554b1f745e105" ]
[ "finlab/finlab_old/backtest.py", "finlab/finlab_old/data.py" ]
[ "import datetime\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport warnings\nimport math\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\ndef backtest(start_date, end_date, hold_days, strategy, data, weight='average', benchmark=None, stop_loss=None, stop_profit=None):\n \n # portfolio check\n if weight != 'average' and weight != 'price':\n print('Backtest stop, weight should be \"average\" or \"price\", find', weight, 'instead')\n\n # get price data in order backtest\n data.date = end_date\n price = data.get('收盤價', (end_date - start_date).days)\n # start from 1 TWD at start_date, \n end = 1\n date = start_date\n \n # record some history\n equality = pd.Series()\n nstock = {}\n transections = pd.DataFrame()\n maxreturn = -10000\n minreturn = 10000\n \n def trading_day(date):\n if date not in price.index:\n temp = price.loc[date:]\n if temp.empty:\n return price.index[-1]\n else:\n return temp.index[0]\n else:\n return date\n \n def date_iter_periodicity(start_date, end_date, hold_days):\n date = start_date\n while date < end_date:\n yield (date), (date + datetime.timedelta(hold_days))\n date += datetime.timedelta(hold_days)\n \n def date_iter_specify_dates(start_date, end_date, hold_days):\n dlist = [start_date] + hold_days + [end_date]\n if dlist[0] == dlist[1]:\n dlist = dlist[1:]\n if dlist[-1] == dlist[-2]:\n dlist = dlist[:-1]\n for sdate, edate in zip(dlist, dlist[1:]):\n yield (sdate), (edate)\n \n if isinstance(hold_days, int):\n dates = date_iter_periodicity(start_date, end_date, hold_days)\n elif isinstance(hold_days, list):\n dates = date_iter_specify_dates(start_date, end_date, hold_days)\n else:\n print('the type of hold_dates should be list or int.')\n return None\n\n for sdate, edate in dates:\n \n # select stocks at date\n data.date = sdate\n stocks = strategy(data)\n \n # hold the stocks for hold_days day\n s = price[stocks.index & price.columns][sdate:edate].iloc[1:]\n \n \n if s.empty:\n s = 
pd.Series(1, index=pd.date_range(sdate + datetime.timedelta(days=1), edate))\n else:\n \n if stop_loss != None:\n below_stop = ((s / s.bfill().iloc[0]) - 1)*100 < -np.abs(stop_loss)\n below_stop = (below_stop.cumsum() > 0).shift(2).fillna(False)\n s[below_stop] = np.nan\n \n if stop_profit != None:\n above_stop = ((s / s.bfill().iloc[0]) - 1)*100 > np.abs(stop_profit)\n above_stop = (above_stop.cumsum() > 0).shift(2).fillna(False)\n s[above_stop] = np.nan\n \n s.dropna(axis=1, how='all', inplace=True)\n \n # record transections\n bprice = s.bfill().iloc[0]\n sprice = s.apply(lambda s:s.dropna().iloc[-1])\n transections = transections.append(pd.DataFrame({\n 'buy_price': bprice,\n 'sell_price': sprice,\n 'lowest_price': s.min(),\n 'highest_price': s.max(),\n 'buy_date': pd.Series(s.index[0], index=s.columns),\n 'sell_date': s.apply(lambda s:s.dropna().index[-1]),\n 'profit(%)': (sprice/bprice - 1) * 100\n }))\n \n s.ffill(inplace=True)\n \n # calculate equality\n # normalize and average the price of each stocks\n if weight == 'average':\n s = s/s.bfill().iloc[0]\n s = s.mean(axis=1)\n s = s / s.bfill()[0]\n \n # print some log\n print(sdate,'-', edate, \n '報酬率: %.2f'%( s.iloc[-1]/s.iloc[0] * 100 - 100), \n '%', 'nstock', len(stocks))\n maxreturn = max(maxreturn, s.iloc[-1]/s.iloc[0] * 100 - 100)\n minreturn = min(minreturn, s.iloc[-1]/s.iloc[0] * 100 - 100)\n \n # plot backtest result\n ((s*end-1)*100).plot()\n equality = equality.append(s*end)\n end = (s/s[0]*end).iloc[-1]\n \n if math.isnan(end):\n end = 1\n \n # add nstock history\n nstock[sdate] = len(stocks)\n \n print('每次換手最大報酬 : %.2f %' % maxreturn)\n print('每次換手最少報酬 : %.2f %' % minreturn)\n \n if benchmark is None:\n benchmark = price['0050'][start_date:end_date].iloc[1:]\n \n # bechmark (thanks to Markk1227)\n ((benchmark/benchmark[0]-1)*100).plot(color=(0.8,0.8,0.8))\n plt.ylabel('Return On Investment (%)')\n plt.grid(linestyle='-.')\n plt.show()\n ((benchmark/benchmark.cummax()-1)*100).plot(legend=True, 
color=(0.8,0.8,0.8))\n ((equality/equality.cummax()-1)*100).plot(legend=True)\n plt.ylabel('Dropdown (%)')\n plt.grid(linestyle='-.')\n plt.show()\n pd.Series(nstock).plot.bar()\n plt.ylabel('Number of stocks held')\n return equality, transections\n\ndef portfolio(stock_list, money, data, lowest_fee=20, discount=0.6, add_cost=10):\n price = data.get('收盤價', 1)\n stock_list = price.iloc[-1][stock_list].transpose()\n print('estimate price according to', price.index[-1])\n\n print('initial number of stock', len(stock_list))\n while (money / len(stock_list)) < (lowest_fee - add_cost) * 1000 / 1.425 / discount:\n stock_list = stock_list[stock_list != stock_list.max()]\n print('after considering fee', len(stock_list))\n \n while True:\n invest_amount = (money / len(stock_list))\n ret = np.floor(invest_amount / stock_list / 1000)\n \n if (ret == 0).any():\n stock_list = stock_list[stock_list != stock_list.max()]\n else:\n break\n \n print('after considering 1000 share', len(stock_list))\n \n return ret, (ret * stock_list * 1000).sum()", "import sqlite3\nimport pandas as pd\nimport os\nimport datetime\n\nclass Data():\n \n def __init__(self):\n\n # 開啟資料庫\n self.conn = sqlite3.connect(os.path.join('data', \"data.db\"))\n cursor = self.conn.execute('SELECT name FROM sqlite_master WHERE type = \"table\"')\n\n # 找到所有的table名稱\n table_names = [t[0] for t in list(cursor)]\n\n # 找到所有的column名稱,對應到的table名稱\n self.col2table = {}\n for tname in table_names:\n\n # 獲取所有column名稱\n c = self.conn.execute('PRAGMA table_info(' + tname + ');')\n for cname in [i[1] for i in list(c)]:\n\n # 將column名稱對應到的table名稱assign到self.col2table中\n self.col2table[cname] = tname\n \n # 初始self.date(使用data.get時,可以或的self.date以前的所有資料(以防拿到未來數據)\n self.date = datetime.datetime.now().date()\n \n # 假如self.cache是true的話,\n # 使用data.get的資料,會被儲存在self.data中,之後再呼叫data.get時,就不需要從資料庫裡面找,\n # 直接調用self.data中的資料即可\n self.cache = False\n self.data = {}\n \n # 先將每個table的所有日期都拿出來\n self.dates = {}\n\n # 對於每個table,都將所有資料的日期取出\n for 
tname in table_names:\n c = self.conn.execute('PRAGMA table_info(' + tname + ');')\n cnames = [i[1] for i in list(c)]\n if 'date' in cnames:\n if tname == 'price':\n # 假如table是股價的話,則觀察這三檔股票的日期即可(不用所有股票日期都觀察,節省速度)\n s1 = (\"\"\"SELECT DISTINCT date FROM %s where stock_id='0050'\"\"\"%('price'))\n s2 = (\"\"\"SELECT DISTINCT date FROM %s where stock_id='1101'\"\"\"%('price'))\n s3 = (\"\"\"SELECT DISTINCT date FROM %s where stock_id='2330'\"\"\"%('price'))\n\n # 將日期抓出來並排序整理,放到self.dates中\n df = (pd.read_sql(s1, self.conn)\n .append(pd.read_sql(s2, self.conn))\n .append(pd.read_sql(s3, self.conn))\n .drop_duplicates('date').sort_values('date'))\n df['date'] = pd.to_datetime(df['date'])\n df = df.set_index('date')\n self.dates[tname] = df\n else:\n # 將日期抓出來並排序整理,放到self.dates中\n s = (\"\"\"SELECT DISTINCT date FROM '%s'\"\"\"%(tname))\n self.dates[tname] = pd.read_sql(s, self.conn, parse_dates=['date'], index_col=['date']).sort_index()\n #print('Data: done')\n \n \n def get(self, name, n):\n \n # 確認名稱是否存在於資料庫\n if name not in self.col2table or n == 0:\n print('Data: **ERROR: cannot find', name, 'in database')\n return pd.DataFrame()\n \n # 找出欲爬取的時間段(startdate, enddate)\n df = self.dates[self.col2table[name]].loc[:self.date].iloc[-n:]\n try:\n startdate = df.index[-1]\n enddate = df.index[0]\n except:\n print('Data: **WARRN: data cannot be retrieve completely:', name)\n enddate = df.iloc[0]\n \n # 假如該時間段已經在self.data中,則直接從self.data中拿取並回傳即可\n if name in self.data and self.contain_date(name, enddate, startdate):\n return self.data[name][enddate:startdate]\n \n # 從資料庫中拿取所需的資料\n s = (\"\"\"SELECT stock_id, date, [%s] FROM %s WHERE date BETWEEN '%s' AND '%s'\"\"\"%(name, \n self.col2table[name], str(enddate.strftime('%Y-%m-%d')), \n str((self.date + datetime.timedelta(days=1)).strftime('%Y-%m-%d'))))\n ret = pd.read_sql(s, self.conn, parse_dates=['date']).pivot(index='date', columns='stock_id')[name]\n \n # 將這些資料存入cache,以便將來要使用時,不需要從資料庫額外調出來\n if self.cache:\n self.data[name] 
= ret\n\n return ret\n \n # 確認該資料區間段是否已經存在self.data\n def contain_date(self, name, startdate, enddate):\n if name not in self.data:\n return False\n if self.data[name].index[0] <= startdate <= enddate <= self.data[name].index[-1]:\n return True\n \n return False\n \n # 目前沒作用,不需使用\n def get3(self, name):\n s = (\"\"\"SELECT stock_id, %s FROM %s \"\"\"%(name, self.col2table[name]))\n return pd.read_sql(s, self.conn, index_col=['stock_id'])" ]
[ [ "matplotlib.pyplot.grid", "pandas.DataFrame", "matplotlib.pyplot.ylabel", "pandas.Series", "numpy.abs", "matplotlib.pyplot.show", "numpy.floor" ], [ "pandas.to_datetime", "pandas.DataFrame", "pandas.read_sql" ] ]
kevinmooreiii/autochem
[ "87f50adc09c3f1170459c629697aadd74154c769" ]
[ "automol/convert/zmat.py" ]
[ "\"\"\" z-matrix conversions\n\"\"\"\nimport itertools\nimport numpy\nfrom automol import create\nfrom automol import cart\nfrom automol.convert import _util\nimport automol.zmat\n\n\n# z-matrix => geometry\ndef geometry(zma, remove_dummy_atoms=None):\n \"\"\" z-matrix => geometry\n \"\"\"\n syms = automol.zmat.symbols(zma)\n\n natms = len(syms)\n key_mat = automol.zmat.key_matrix(zma)\n val_mat = automol.zmat.value_matrix(zma)\n\n xyzs = numpy.zeros((natms, 3))\n\n for key in range(1, natms):\n vals = val_mat[key][:min(key, 3)]\n keys = key_mat[key][:min(key, 3)]\n ref_xyzs = xyzs[list(keys)]\n xyz = cart.vec.from_internals(*itertools.chain(*zip(vals, ref_xyzs)))\n xyzs[key] = xyz\n\n geo = create.geom.from_data(syms, xyzs)\n if remove_dummy_atoms:\n geo = automol.geom.without_dummy_atoms(geo)\n\n return geo\n\n\n# z-matrix => graph\ndef graph(zma, remove_stereo=False):\n \"\"\" z-matrix => graph\n \"\"\"\n geo = geometry(zma)\n gra = automol.convert.geom.graph(geo, remove_stereo=remove_stereo)\n return gra\n\n\ndef connectivity_graph(zma,\n rqq_bond_max=3.5, rqh_bond_max=2.6, rhh_bond_max=1.9):\n \"\"\" z-matrix => connectivity graph\n \"\"\"\n geo = geometry(zma)\n gra = automol.convert.geom.connectivity_graph(\n geo, rqq_bond_max=rqq_bond_max, rqh_bond_max=rqh_bond_max,\n rhh_bond_max=rhh_bond_max)\n return gra\n\n\n# z-matrix => formula\ndef formula(zma):\n \"\"\" z-matrix => formula\n \"\"\"\n syms = automol.zmatrix.symbols(zma)\n fml = _util.formula(syms)\n return fml\n" ]
[ [ "numpy.zeros" ] ]
eliorodriguez/Proctoring-AI
[ "06c7c9e3bf704b7b07e7daf346479cbd28339313" ]
[ "validate_head_position.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 31 03:00:36 2020\n\n@author: hp\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport math\nfrom face_detector import get_face_detector, find_faces\nfrom face_landmarks import get_landmark_model, detect_marks\n\ndef get_2d_points(img, rotation_vector, translation_vector, camera_matrix, val):\n \"\"\"Return the 3D points present as 2D for making annotation box\"\"\"\n point_3d = []\n dist_coeffs = np.zeros((4,1))\n rear_size = val[0]\n rear_depth = val[1]\n point_3d.append((-rear_size, -rear_size, rear_depth))\n point_3d.append((-rear_size, rear_size, rear_depth))\n point_3d.append((rear_size, rear_size, rear_depth))\n point_3d.append((rear_size, -rear_size, rear_depth))\n point_3d.append((-rear_size, -rear_size, rear_depth))\n \n front_size = val[2]\n front_depth = val[3]\n point_3d.append((-front_size, -front_size, front_depth))\n point_3d.append((-front_size, front_size, front_depth))\n point_3d.append((front_size, front_size, front_depth))\n point_3d.append((front_size, -front_size, front_depth))\n point_3d.append((-front_size, -front_size, front_depth))\n point_3d = np.array(point_3d, dtype=np.float).reshape(-1, 3)\n \n # Map to 2d img points\n (point_2d, _) = cv2.projectPoints(point_3d,\n rotation_vector,\n translation_vector,\n camera_matrix,\n dist_coeffs)\n point_2d = np.int32(point_2d.reshape(-1, 2))\n return point_2d\n\ndef draw_annotation_box(img, rotation_vector, translation_vector, camera_matrix,\n rear_size=300, rear_depth=0, front_size=500, front_depth=400,\n color=(255, 255, 0), line_width=2):\n \"\"\"\n Draw a 3D anotation box on the face for head pose estimation\n\n Parameters\n ----------\n img : np.unit8\n Original Image.\n rotation_vector : Array of float64\n Rotation Vector obtained from cv2.solvePnP\n translation_vector : Array of float64\n Translation Vector obtained from cv2.solvePnP\n camera_matrix : Array of float64\n The camera matrix\n rear_size : int, optional\n Size of rear box. 
The default is 300.\n rear_depth : int, optional\n The default is 0.\n front_size : int, optional\n Size of front box. The default is 500.\n front_depth : int, optional\n Front depth. The default is 400.\n color : tuple, optional\n The color with which to draw annotation box. The default is (255, 255, 0).\n line_width : int, optional\n line width of lines drawn. The default is 2.\n\n Returns\n -------\n None.\n\n \"\"\"\n \n rear_size = 1\n rear_depth = 0\n front_size = img.shape[1]\n front_depth = front_size*2\n val = [rear_size, rear_depth, front_size, front_depth]\n point_2d = get_2d_points(img, rotation_vector, translation_vector, camera_matrix, val)\n # # Draw all the lines\n cv2.polylines(img, [point_2d], True, color, line_width, cv2.LINE_AA)\n cv2.line(img, tuple(point_2d[1]), tuple(\n point_2d[6]), color, line_width, cv2.LINE_AA)\n cv2.line(img, tuple(point_2d[2]), tuple(\n point_2d[7]), color, line_width, cv2.LINE_AA)\n cv2.line(img, tuple(point_2d[3]), tuple(\n point_2d[8]), color, line_width, cv2.LINE_AA)\n \n \ndef head_pose_points(img, rotation_vector, translation_vector, camera_matrix):\n \"\"\"\n Get the points to estimate head pose sideways \n\n Parameters\n ----------\n img : np.unit8\n Original Image.\n rotation_vector : Array of float64\n Rotation Vector obtained from cv2.solvePnP\n translation_vector : Array of float64\n Translation Vector obtained from cv2.solvePnP\n camera_matrix : Array of float64\n The camera matrix\n\n Returns\n -------\n (x, y) : tuple\n Coordinates of line to estimate head pose\n\n \"\"\"\n rear_size = 1\n rear_depth = 0\n front_size = img.shape[1]\n front_depth = front_size*2\n val = [rear_size, rear_depth, front_size, front_depth]\n point_2d = get_2d_points(img, rotation_vector, translation_vector, camera_matrix, val)\n y = (point_2d[5] + point_2d[8])//2\n x = point_2d[2]\n \n return (x, y)\n \n\ndef validate_head_position(filepath):\n face_model = get_face_detector()\n landmark_model = get_landmark_model()\n #cap = 
cv2.VideoCapture(0)\n #ret, img = cap.read()\n img = cv2.imread(filepath) \n size = img.shape\n font = cv2.FONT_HERSHEY_SIMPLEX \n # 3D model points.\n model_points = np.array([\n (0.0, 0.0, 0.0), # Nose tip\n (0.0, -330.0, -65.0), # Chin\n (-225.0, 170.0, -135.0), # Left eye left corner\n (225.0, 170.0, -135.0), # Right eye right corne\n (-150.0, -150.0, -125.0), # Left Mouth corner\n (150.0, -150.0, -125.0) # Right mouth corner\n ])\n\n # Camera internals\n focal_length = size[1]\n center = (size[1]/2, size[0]/2)\n camera_matrix = np.array(\n [[focal_length, 0, center[0]],\n [0, focal_length, center[1]],\n [0, 0, 1]], dtype = \"double\"\n )\n\n\n # cv2.imshow(\"OpenCV Image Reading\", img)\n\n faces = find_faces(img, face_model)\n for face in faces:\n marks = detect_marks(img, landmark_model, face)\n # mark_detector.draw_marks(img, marks, color=(0, 255, 0))\n image_points = np.array([\n marks[30], # Nose tip\n marks[8], # Chin\n marks[36], # Left eye left corner\n marks[45], # Right eye right corne\n marks[48], # Left Mouth corner\n marks[54] # Right mouth corner\n ], dtype=\"double\")\n dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion\n (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_UPNP)\n \n \n # Project a 3D point (0, 0, 1000.0) onto the image plane.\n # We use this to draw a line sticking out of the nose\n \n (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)\n \n for p in image_points:\n cv2.circle(img, (int(p[0]), int(p[1])), 3, (0,0,255), -1)\n \n \n p1 = ( int(image_points[0][0]), int(image_points[0][1]))\n p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))\n x1, x2 = head_pose_points(img, rotation_vector, translation_vector, camera_matrix)\n\n cv2.line(img, p1, p2, (0, 255, 255), 2)\n cv2.line(img, tuple(x1), tuple(x2), (255, 255, 0), 2)\n # 
for (x, y) in marks:\n # cv2.circle(img, (x, y), 4, (255, 255, 0), -1)\n # cv2.putText(img, str(p1), p1, font, 1, (0, 255, 255), 1)\n position = None\n try:\n m = (p2[1] - p1[1])/(p2[0] - p1[0])\n ang1 = int(math.degrees(math.atan(m)))\n except:\n ang1 = 90\n \n try:\n m = (x2[1] - x1[1])/(x2[0] - x1[0])\n ang2 = int(math.degrees(math.atan(-1/m)))\n except:\n ang2 = 90\n \n # print('div by zero error')\n if ang1 >= 48:\n #print('Head down')\n cv2.putText(img, 'Head down', (30, 30), font, 2, (255, 255, 128), 3)\n position = \"head_down\"\n elif ang1 <= -48:\n #print('Head up')\n cv2.putText(img, 'Head up', (30, 30), font, 2, (255, 255, 128), 3)\n position = \"head_up\"\n if ang2 >= 48:\n #print('Head right')\n cv2.putText(img, 'Head right', (90, 30), font, 2, (255, 255, 128), 3)\n position = \"head_right\"\n elif ang2 <= -48:\n #print('Head left')\n cv2.putText(img, 'Head left', (90, 30), font, 2, (255, 255, 128), 3)\n position = \"head_left\"\n \n cv2.putText(img, str(ang1), tuple(p1), font, 2, (128, 255, 255), 3)\n cv2.putText(img, str(ang2), tuple(x1), font, 2, (255, 255, 128), 3)\n\n\n # cv2.imshow('img', img)\n # cv2.waitKey(0) \n #if cv2.waitK \n return position\n\nfilepath = '/home/elio/Documentos/personal/onpe/Proctoring-AI/fotos/test2.jpg'\nposition = validate_head_position(filepath)\nprint(position)" ]
[ [ "numpy.array", "numpy.zeros" ] ]
jiangz17THU/ColossalAI
[ "354b7954d1fa6b21a5ba566f0d5ec099280ad315" ]
[ "tests/test_moe/test_moe_group.py" ]
[ "from functools import partial\nimport pytest\nimport torch.nn as nn\nimport torch.multiprocessing as mp\nimport torch.distributed as dist\nimport colossalai\nfrom colossalai.utils import free_port, get_current_device\nfrom colossalai.nn.layer.moe import Experts\nfrom colossalai.context.moe_context import MOE_CONTEXT\nfrom colossalai.utils.moe import sync_moe_model_param\nfrom colossalai.testing import assert_equal_in_group, rerun_on_exception\n\nD_MODEL = 4\nD_FF = 8\nCONFIG = dict()\n\n\ndef run_test(rank, port):\n world_size = 4\n colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')\n expert_module = nn.Linear\n expert_factor = dict(in_features=D_MODEL, out_features=D_FF, device=get_current_device())\n\n MOE_CONTEXT.setup(42) # MOE environment initialization\n exp0 = Experts(expert_module, 1, **expert_factor)\n exp1 = Experts(expert_module, 2, **expert_factor)\n exp2 = Experts(expert_module, 4, **expert_factor)\n exp3 = Experts(expert_module, 8, **expert_factor)\n\n assert exp0.num_local_experts == 1\n assert exp1.num_local_experts == 1\n assert exp2.num_local_experts == 1\n assert exp3.num_local_experts == 2\n # experts deployment passed\n\n parallel_info_dict = MOE_CONTEXT.parallel_info_dict\n rank = dist.get_rank()\n\n assert len(parallel_info_dict) == 3\n assert dist.get_rank(parallel_info_dict[4].ep_group) == rank\n assert dist.get_rank(parallel_info_dict[2].ep_group) == rank % 2\n assert dist.get_rank(parallel_info_dict[1].ep_group) == 0\n\n assert dist.get_rank(parallel_info_dict[4].dp_group) == 0\n assert dist.get_rank(parallel_info_dict[2].dp_group) == rank // 2\n assert dist.get_rank(parallel_info_dict[1].dp_group) == rank\n # group creation passed\n\n model = nn.ModuleList([exp0, exp1, exp2, exp3])\n model = model.to(get_current_device())\n sync_moe_model_param(model)\n\n assert_equal_in_group(exp0.experts[0].weight.data, parallel_info_dict[1].dp_group)\n 
assert_equal_in_group(exp0.experts[0].bias.data, parallel_info_dict[1].dp_group)\n # MOE experts layout success when ep_size = 1\n\n assert_equal_in_group(exp1.experts[0].weight.data, parallel_info_dict[2].dp_group)\n assert_equal_in_group(exp1.experts[0].bias.data, parallel_info_dict[2].dp_group)\n # MOE experts layout success when ep_size = 2\n\n\[email protected]\n@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=\".*Address already in use.*\")\ndef test_moe_initialization():\n world_size = 4\n run_func = partial(run_test, port=free_port())\n mp.spawn(run_func, nprocs=world_size)\n\n\nif __name__ == '__main__':\n test_moe_initialization()\n" ]
[ [ "torch.distributed.get_rank", "torch.multiprocessing.spawn", "torch.nn.ModuleList" ] ]
raoulcollenteur/flopy
[ "e6eafcb11fb8dd9831373f2e355d4cec96518b79" ]
[ "flopy/modflow/mfsfr2.py" ]
[ "__author__ = 'aleaf'\r\n\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nimport warnings\r\nimport copy\r\nfrom numpy.lib import recfunctions\r\nfrom ..pakbase import Package\r\nfrom ..utils import MfList\r\nfrom ..utils.flopy_io import line_parse\r\nfrom ..utils.recarray_utils import create_empty_recarray\r\nfrom ..utils.optionblock import OptionBlock\r\nfrom collections import OrderedDict\r\n\r\ntry:\r\n import pandas as pd\r\nexcept:\r\n pd = False\r\n\r\n\r\nclass ModflowSfr2(Package):\r\n \"\"\"\r\n Streamflow-Routing (SFR2) Package Class\r\n\r\n Parameters\r\n ----------\r\n model : model object\r\n The model object (of type :class:'flopy.modflow.mf.Modflow') to which\r\n this package will be added.\r\n nstrm : integer\r\n An integer value that can be specified to be positive or negative. The\r\n absolute value of NSTRM is equal to the number of stream reaches\r\n (finite-difference cells) that are active during the simulation and\r\n the number of lines of data to be included in Item 2, described below.\r\n When NSTRM is specified to be a negative integer, it is also used as a\r\n flag for changing the format of the data input, for simulating\r\n unsaturated flow beneath streams, and (or) for simulating transient\r\n streamflow routing (for MODFLOW-2005 simulations only), depending\r\n on the values specified for variables ISFROPT and IRTFLG, as described\r\n below. When NSTRM is negative, NSFRPAR must be set to zero, which means\r\n that parameters cannot be specified. By default, nstrm is set to\r\n negative.\r\n nss : integer\r\n An integer value equal to the number of stream segments (consisting of\r\n one or more reaches) that are used to define the complete stream\r\n network. 
The value of NSS represents the number of segments that must\r\n be defined through a combination of parameters and variables in Item 4\r\n or variables in Item 6.\r\n nparseg : integer\r\n An integer value equal to (or exceeding) the number of stream-segment\r\n definitions associated with all parameters. This number can be more\r\n than the total number of segments (NSS) in the stream network because\r\n the same segment can be defined in multiple parameters, and because\r\n parameters can be time-varying. NPARSEG must equal or exceed the sum\r\n of NLST x N for all parameters, where N is the greater of 1 and\r\n NUMINST; that is, NPARSEG must equal or exceed the total number of\r\n repetitions of item 4b. This variable must be zero when NSTRM is\r\n negative.\r\n const : float\r\n A real value (or conversion factor) used in calculating stream depth\r\n for stream reach. If stream depth is not calculated using Manning's\r\n equation for any stream segment (that is, ICALC does not equal 1 or 2),\r\n then a value of zero can be entered. If Manning's equation is used, a\r\n constant of 1.486 is used for flow units of cubic feet per second, and\r\n a constant of 1.0 is used for units of cubic meters per second. The\r\n constant must be multiplied by 86,400 when using time units of days in\r\n the simulation. An explanation of time units used in MODFLOW is given\r\n by Harbaugh and others (2000, p. 10).\r\n dleak : float\r\n A real value equal to the tolerance level of stream depth used in\r\n computing leakage between each stream reach and active model cell.\r\n Value is in units of length. Usually a value of 0.0001 is sufficient\r\n when units of feet or meters are used in model.\r\n ipakcb : integer\r\n An integer value used as a flag for writing stream-aquifer leakage\r\n values. 
If ipakcb > 0, unformatted leakage between each stream reach\r\n and corresponding model cell will be saved to the main cell-by-cell\r\n budget file whenever when a cell-by-cell budget has been specified in\r\n Output Control (see Harbaugh and others, 2000, pages 52-55). If\r\n ipakcb = 0, leakage values will not be printed or saved. Printing to\r\n the listing file (ipakcb < 0) is not supported.\r\n istcsb2 : integer\r\n An integer value used as a flag for writing to a separate formatted\r\n file all information on inflows and outflows from each reach; on\r\n stream depth, width, and streambed conductance; and on head difference\r\n and gradient across the streambed. If ISTCB2 > 0, then ISTCB2 also\r\n represents the unit number to which all information for each stream\r\n reach will be saved to a separate file when a cell-by-cell budget has\r\n been specified in Output Control. If ISTCB2 < 0, it is the unit number\r\n to which unformatted streamflow out of each reach will be saved to a\r\n file whenever the cell-by-cell budget has been specified in Output\r\n Control. Unformatted output will be saved to <model name>.sfq.\r\n isfropt : integer\r\n An integer value that defines the format of the input data and whether\r\n or not unsaturated flow is simulated beneath streams. Values of ISFROPT\r\n are defined as follows\r\n\r\n 0 No vertical unsaturated flow beneath streams. Streambed elevations,\r\n stream slope, streambed thickness, and streambed hydraulic\r\n conductivity are read for each stress period using variables\r\n defined in Items 6b and 6c; the optional variables in Item 2 are\r\n not used.\r\n 1 No vertical unsaturated flow beneath streams. 
Streambed elevation,\r\n stream slope, streambed thickness, and streambed hydraulic\r\n conductivity are read for each reach only once at the beginning of\r\n the simulation using optional variables defined in Item 2; Items 6b\r\n and 6c are used to define stream width and depth for ICALC = 0 and\r\n stream width for ICALC = 1.\r\n 2 Streambed and unsaturated-zone properties are read for each reach\r\n only once at the beginning of the simulation using optional\r\n variables defined in Item 2; Items 6b and 6c are used to define\r\n stream width and depth for ICALC = 0 and stream width for\r\n ICALC = 1. When using the LPF Package, saturated vertical\r\n hydraulic conductivity for the unsaturated zone is the same as\r\n the vertical hydraulic conductivity of the corresponding layer in\r\n LPF and input variable UHC is not read.\r\n 3 Same as 2 except saturated vertical hydraulic conductivity for the\r\n unsaturated zone (input variable UHC) is read for each reach.\r\n 4 Streambed and unsaturated-zone properties are read for the\r\n beginning and end of each stream segment using variables defined\r\n in Items 6b and 6c; the optional variables in Item 2 are not used.\r\n Streambed properties can vary each stress period. When using the\r\n LPF Package, saturated vertical hydraulic conductivity for the\r\n unsaturated zone is the same as the vertical hydraulic conductivity\r\n of the corresponding layer in LPF and input variable UHC1 is not\r\n read.\r\n 5 Same as 4 except saturated vertical hydraulic conductivity for the\r\n unsaturated zone (input variable UHC1) is read for each segment at\r\n the beginning of the first stress period only.\r\n\r\n nstrail : integer\r\n An integer value that is the number of trailing wave increments used to\r\n represent a trailing wave. Trailing waves are used to represent a\r\n decrease in the surface infiltration rate. The value can be increased\r\n to improve mass balance in the unsaturated zone. 
Values between 10 and\r\n 20 work well and result in unsaturated-zone mass balance errors beneath\r\n streams ranging between 0.001 and 0.01 percent. Please see Smith (1983)\r\n for further details. (default is 10; for MODFLOW-2005 simulations only\r\n when isfropt > 1)\r\n isuzn : integer\r\n An integer value that is the maximum number of vertical cells used to\r\n define the unsaturated zone beneath a stream reach. If ICALC is 1 for\r\n all segments then ISUZN should be set to 1. (default is 1; for\r\n MODFLOW-2005 simulations only when isfropt > 1)\r\n nsfrsets : integer\r\n An integer value that is the maximum number of different sets of\r\n trailing waves used to allocate arrays. Arrays are allocated by\r\n multiplying NSTRAIL by NSFRSETS. A value of 30 is sufficient for\r\n problems where the stream depth varies often. NSFRSETS does not affect\r\n model run time. (default is 30; for MODFLOW-2005 simulations only\r\n when isfropt > 1)\r\n irtflg : integer\r\n An integer value that indicates whether transient streamflow routing is\r\n active. IRTFLG must be specified if NSTRM < 0. If IRTFLG > 0,\r\n streamflow will be routed using the kinematic-wave equation (see USGS\r\n Techniques and Methods 6-D1, p. 68-69); otherwise, IRTFLG should be\r\n specified as 0. Transient streamflow routing is only available for\r\n MODFLOW-2005; IRTFLG can be left blank for MODFLOW-2000 simulations.\r\n (default is 1)\r\n numtim : integer\r\n An integer value equal to the number of sub time steps used to route\r\n streamflow. The time step that will be used to route streamflow will\r\n be equal to the MODFLOW time step divided by NUMTIM. (default is 2;\r\n for MODFLOW-2005 simulations only when irtflg > 0)\r\n weight : float\r\n A real number equal to the time weighting factor used to calculate the\r\n change in channel storage. WEIGHT has a value between 0.5 and 1. Please\r\n refer to equation 83 in USGS Techniques and Methods 6-D1 for further\r\n details. 
(default is 0.75; for MODFLOW-2005 simulations only when\r\n irtflg > 0)\r\n flwtol : float\r\n A real number equal to the streamflow tolerance for convergence of the\r\n kinematic wave equation used for transient streamflow routing. A value\r\n of 0.00003 cubic meters per second has been used successfully in test\r\n simulations (and would need to be converted to whatever units are being\r\n used in the particular simulation). (default is 0.0001; for\r\n MODFLOW-2005 simulations only when irtflg > 0)\r\n reach_data : recarray\r\n Numpy record array of length equal to nstrm, with columns for each\r\n variable entered in item 2 (see SFR package input instructions). In\r\n following flopy convention, layer, row, column and node number\r\n (for unstructured grids) are zero-based; segment and reach are\r\n one-based.\r\n segment_data : recarray\r\n Numpy record array of length equal to nss, with columns for each\r\n variable entered in items 6a, 6b and 6c (see SFR package input\r\n instructions). Segment numbers are one-based.\r\n dataset_5 : dict of lists\r\n Optional; will be built automatically from segment_data unless\r\n specified. Dict of lists, with key for each stress period. Each list\r\n contains the variables [itmp, irdflag, iptflag]. (see SFR documentation\r\n for more details):\r\n itmp : list of integers (len = NPER)\r\n For each stress period, an integer value for reusing or reading stream\r\n segment data that can change each stress period. If ITMP = 0 then all\r\n stream segment data are defined by Item 4 (NSFRPAR > 0; number of\r\n stream parameters is greater than 0). If ITMP > 0, then stream segment\r\n data are not defined in Item 4 and must be defined in Item 6 below for\r\n a number of segments equal to the value of ITMP. If ITMP < 0, then\r\n stream segment data not defined in Item 4 will be reused from the last\r\n stress period (Item 6 is not read for the current stress period). 
ITMP\r\n must be defined >= 0 for the first stress period of a simulation.\r\n irdflag : int or list of integers (len = NPER)\r\n For each stress period, an integer value for printing input data\r\n specified for this stress period. If IRDFLG = 0, input data for this\r\n stress period will be printed. If IRDFLG > 0, then input data for this\r\n stress period will not be printed.\r\n iptflag : int or list of integers (len = NPER)\r\n For each stress period, an integer value for printing streamflow-\r\n routing results during this stress period. If IPTFLG = 0, or whenever\r\n the variable ICBCFL or \"Save Budget\" is specified in Output Control,\r\n the results for specified time steps during this stress period will be\r\n printed. If IPTFLG > 0, then the results during this stress period will\r\n not be printed.\r\n extension : string\r\n Filename extension (default is 'sfr')\r\n unit_number : int\r\n File unit number (default is None).\r\n filenames : str or list of str\r\n Filenames to use for the package and the output files. If\r\n filenames=None the package name will be created using the model name\r\n and package extension and the cbc output and sfr output name will be\r\n created using the model name and .cbc the .sfr.bin/.sfr.out extensions\r\n (for example, modflowtest.cbc, and modflowtest.sfr.bin), if ipakcbc and\r\n istcb2 are numbers greater than zero. If a single string is passed the\r\n package name will be set to the string and other uzf output files will\r\n be set to the model name with the appropriate output file extensions.\r\n To define the names for all package files (input and output) the\r\n length of the list of strings should be 3. 
Default is None.\r\n\r\n Attributes\r\n ----------\r\n outlets : nested dictionary\r\n Contains the outlet for each SFR segment; format is\r\n {per: {segment: outlet}} This attribute is created by the\r\n get_outlets() method.\r\n outsegs : dictionary of arrays\r\n Each array is of shape nss rows x maximum of nss columns. The first\r\n column contains the SFR segments, the second column contains the\r\n outsegs of those segments; the third column the outsegs of the outsegs,\r\n and so on, until all outlets have been encountered, or nss is reached.\r\n The latter case indicates circular routing. This attribute is created\r\n by the get_outlets() method.\r\n\r\n Methods\r\n -------\r\n\r\n See Also\r\n --------\r\n\r\n Notes\r\n -----\r\n Parameters are not supported in FloPy.\r\n\r\n MODFLOW-OWHM is not supported.\r\n\r\n The Ground-Water Transport (GWT) process is not supported.\r\n\r\n Limitations on which features are supported...\r\n\r\n Examples\r\n --------\r\n\r\n >>> import flopy\r\n >>> ml = flopy.modflow.Modflow()\r\n >>> sfr2 = flopy.modflow.ModflowSfr2(ml, ...)\r\n\r\n \"\"\"\r\n _options = OrderedDict([(\"reachinput\",\r\n OptionBlock.simple_flag),\r\n (\"transroute\",\r\n OptionBlock.simple_flag),\r\n (\"tabfiles\",\r\n OptionBlock.simple_tabfile),\r\n (\"lossfactor\", {OptionBlock.dtype: np.bool_,\r\n OptionBlock.nested: True,\r\n OptionBlock.n_nested: 1,\r\n OptionBlock.vars:\r\n {\"factor\":\r\n OptionBlock.simple_float}}),\r\n (\"strhc1kh\", {OptionBlock.dtype: np.bool_,\r\n OptionBlock.nested: True,\r\n OptionBlock.n_nested: 1,\r\n OptionBlock.vars:\r\n {\"factorkh\":\r\n OptionBlock.simple_float}}),\r\n (\"strhc1kv\", {OptionBlock.dtype: np.bool_,\r\n OptionBlock.nested: True,\r\n OptionBlock.n_nested: 1,\r\n OptionBlock.vars:\r\n {\"factorkv\":\r\n OptionBlock.simple_float}})])\r\n\r\n nsfrpar = 0\r\n heading = '# Streamflow-Routing (SFR2) file for MODFLOW, generated by Flopy'\r\n default_value = 0.\r\n # LENUNI = {\"u\": 0, \"f\": 1, \"m\": 2, 
\"c\": 3}\r\n len_const = {1: 1.486, 2: 1.0, 3: 100.}\r\n # {\"u\": 0, \"s\": 1, \"m\": 2, \"h\": 3, \"d\": 4, \"y\": 5}\r\n time_const = {1: 1., 2: 60., 3: 3600., 4: 86400., 5: 31557600.}\r\n\r\n def __init__(self, model, nstrm=-2, nss=1, nsfrpar=0, nparseg=0,\r\n const=None, dleak=0.0001, ipakcb=None, istcb2=None,\r\n isfropt=0,\r\n nstrail=10, isuzn=1, nsfrsets=30, irtflg=0, numtim=2,\r\n weight=0.75, flwtol=0.0001,\r\n reach_data=None,\r\n segment_data=None,\r\n channel_geometry_data=None,\r\n channel_flow_data=None,\r\n dataset_5=None, irdflag=0, iptflag=0,\r\n reachinput=False, transroute=False,\r\n tabfiles=False, tabfiles_dict=None,\r\n extension='sfr', unit_number=None,\r\n filenames=None, options=None):\r\n\r\n \"\"\"\r\n Package constructor\r\n \"\"\"\r\n # set default unit number of one is not specified\r\n if unit_number is None:\r\n unit_number = ModflowSfr2.defaultunit()\r\n\r\n # set filenames\r\n if filenames is None:\r\n filenames = [None, None, None]\r\n elif isinstance(filenames, str):\r\n filenames = [filenames, None, None]\r\n elif isinstance(filenames, list):\r\n if len(filenames) < 3:\r\n for _ in range(len(filenames), 3):\r\n filenames.append(None)\r\n\r\n # update external file information with cbc output, if necessary\r\n if ipakcb is not None:\r\n fname = filenames[1]\r\n model.add_output_file(ipakcb, fname=fname,\r\n package=ModflowSfr2.ftype())\r\n else:\r\n ipakcb = 0\r\n\r\n # add sfr flow output file\r\n if istcb2 is not None:\r\n if abs(istcb2) > 0:\r\n binflag = False\r\n ext = 'out'\r\n if istcb2 < 0:\r\n binflag = True\r\n ext = 'bin'\r\n fname = filenames[2]\r\n if fname is None:\r\n fname = model.name + '.sfr.{}'.format(ext)\r\n model.add_output_file(abs(istcb2), fname=fname,\r\n binflag=binflag,\r\n package=ModflowSfr2.ftype())\r\n else:\r\n istcb2 = 0\r\n\r\n # Fill namefile items\r\n name = [ModflowSfr2.ftype()]\r\n units = [unit_number]\r\n extra = ['']\r\n\r\n # set package name\r\n fname = [filenames[0]]\r\n\r\n # Call 
ancestor's init to set self.parent, extension, name and unit number\r\n Package.__init__(self, model, extension=extension, name=name,\r\n unit_number=units, extra=extra, filenames=fname)\r\n\r\n self.url = 'sfr2.htm'\r\n\r\n # Dataset 0\r\n self.heading = '# {} package for '.format(self.name[0]) + \\\r\n ' {}, '.format(model.version_types[model.version]) + \\\r\n 'generated by Flopy.'\r\n\r\n # Dataset 1a and 1b\r\n self.reachinput = reachinput\r\n self.transroute = transroute\r\n self.tabfiles = tabfiles\r\n self.tabfiles_dict = tabfiles_dict\r\n self.numtab = 0 if not tabfiles else len(tabfiles_dict)\r\n self.maxval = np.max([tb['numval'] for tb in\r\n tabfiles_dict.values()]) if self.numtab > 0 else 0\r\n\r\n if options is None:\r\n if (reachinput, transroute, tabfiles) != (False, False, False):\r\n options = OptionBlock(\"\", ModflowSfr2, block=False)\r\n\r\n self.options = options\r\n\r\n # Dataset 1c.\r\n # number of reaches, negative value is flag for unsat.\r\n # flow beneath streams and/or transient routing\r\n self._nstrm = np.sign(nstrm) * len(\r\n reach_data) if reach_data is not None else nstrm\r\n if segment_data is not None:\r\n # segment_data is a zero-d array\r\n if not isinstance(segment_data, dict):\r\n if len(segment_data.shape) == 0:\r\n segment_data = np.atleast_1d(segment_data)\r\n nss = len(segment_data)\r\n segment_data = {0: segment_data}\r\n nss = len(set(reach_data[\"iseg\"]))\r\n else:\r\n pass\r\n # use atleast_1d for length since segment_data might be a 0D array\r\n # this seems to be OK, because self.segment_data is produced by the constructor (never 0D)\r\n self.nsfrpar = nsfrpar\r\n self.nparseg = nparseg\r\n # conversion factor used in calculating stream depth for stream reach (icalc = 1 or 2)\r\n self._const = const if const is not None else None\r\n self.dleak = dleak # tolerance level of stream depth used in computing leakage\r\n\r\n self.ipakcb = ipakcb\r\n # flag; unit number for writing table of SFR output to text file\r\n 
self.istcb2 = istcb2\r\n\r\n # if nstrm < 0\r\n # defines the format of the input data and whether or not unsaturated flow is simulated\r\n self.isfropt = isfropt\r\n\r\n # if isfropt > 1\r\n # number of trailing wave increments\r\n self.nstrail = nstrail\r\n # max number of vertical cells used to define unsat. zone\r\n self.isuzn = isuzn\r\n # max number trailing waves sets\r\n self.nsfrsets = nsfrsets\r\n\r\n # if nstrm < 0 (MF-2005 only)\r\n # switch for transient streamflow routing (> 0 = kinematic wave)\r\n self.irtflg = irtflg\r\n # if irtflg > 0\r\n # number of subtimesteps used for routing\r\n self.numtim = numtim\r\n # time weighting factor used to calculate the change in channel storage\r\n self.weight = weight\r\n # streamflow tolerance for convergence of the kinematic wave equation\r\n self.flwtol = flwtol\r\n\r\n # Dataset 2.\r\n self.reach_data = self.get_empty_reach_data(np.abs(self._nstrm))\r\n if reach_data is not None:\r\n for n in reach_data.dtype.names:\r\n self.reach_data[n] = reach_data[n]\r\n\r\n # assign node numbers if there are none (structured grid)\r\n if np.diff(\r\n self.reach_data.node).max() == 0 and self.parent.has_package(\r\n 'DIS'):\r\n # first make kij list\r\n lrc = np.array(self.reach_data)[['k', 'i', 'j']].tolist()\r\n self.reach_data['node'] = self.parent.dis.get_node(lrc)\r\n # assign unique ID and outreach columns to each reach\r\n self.reach_data.sort(order=['iseg', 'ireach'])\r\n new_cols = {'reachID': np.arange(1, len(self.reach_data) + 1),\r\n 'outreach': np.zeros(len(self.reach_data))}\r\n for k, v in new_cols.items():\r\n if k not in self.reach_data.dtype.names:\r\n recfunctions.append_fields(self.reach_data, names=k, data=v,\r\n asrecarray=True)\r\n # create a stress_period_data attribute to enable parent functions (e.g. 
plot)\r\n self.stress_period_data = MfList(self, self.reach_data,\r\n dtype=self.reach_data.dtype)\r\n\r\n # Datasets 4 and 6.\r\n\r\n # list of values that indicate segments outside of the model\r\n # (depending on how SFR package was constructed)\r\n self.not_a_segment_values = [999999]\r\n\r\n self._segments = None\r\n self.segment_data = {0: self.get_empty_segment_data(nss)}\r\n if segment_data is not None:\r\n for i in segment_data.keys():\r\n nseg = len(segment_data[i])\r\n self.segment_data[i] = self.get_empty_segment_data(nseg)\r\n for n in segment_data[i].dtype.names:\r\n # inds = (segment_data[i]['nseg'] -1).astype(int)\r\n self.segment_data[i][n] = segment_data[i][n]\r\n # compute outreaches if nseg and outseg columns have non-default values\r\n if np.diff(self.reach_data.iseg).max() != 0 and \\\r\n np.diff(self.all_segments.nseg).max() != 0 \\\r\n and np.diff(self.all_segments.outseg).max() != 0:\r\n if len(self.all_segments) == 1:\r\n self.segment_data[0]['nseg'] = 1\r\n self.reach_data['iseg'] = 1\r\n\r\n consistent_seg_numbers = len(set(self.reach_data.iseg).difference(\r\n set(self.all_segments.nseg))) == 0\r\n if not consistent_seg_numbers:\r\n warnings.warn(\r\n \"Inconsistent segment numbers of reach_data and segment_data\")\r\n\r\n # first convert any not_a_segment_values to 0\r\n for v in self.not_a_segment_values:\r\n self.segment_data[0].outseg[\r\n self.segment_data[0].outseg == v] = 0\r\n self.set_outreaches()\r\n self.channel_geometry_data = channel_geometry_data\r\n self.channel_flow_data = channel_flow_data\r\n\r\n # Dataset 5\r\n # set by property from segment_data unless specified manually\r\n self._dataset_5 = dataset_5\r\n self.irdflag = irdflag\r\n self.iptflag = iptflag\r\n\r\n # Attributes not included in SFR package input\r\n # dictionary of arrays; see Attributes section of documentation\r\n self.outsegs = {}\r\n # nested dictionary of format {per: {segment: outlet}}\r\n self.outlets = {}\r\n # input format checks:\r\n assert 
isfropt in [0, 1, 2, 3, 4, 5]\r\n\r\n # derived attributes\r\n self._paths = None\r\n\r\n self.parent.add_package(self)\r\n\r\n def __setattr__(self, key, value):\r\n if key == \"nstrm\":\r\n super(ModflowSfr2, self). \\\r\n __setattr__(\"_nstrm\", value)\r\n elif key == \"dataset_5\":\r\n super(ModflowSfr2, self). \\\r\n __setattr__(\"_dataset_5\", value)\r\n elif key == \"segment_data\":\r\n super(ModflowSfr2, self). \\\r\n __setattr__(\"segment_data\", value)\r\n self._dataset_5 = None\r\n elif key == \"const\":\r\n super(ModflowSfr2, self). \\\r\n __setattr__(\"_const\", value)\r\n else: # return to default behavior of pakbase\r\n super(ModflowSfr2, self).__setattr__(key, value)\r\n\r\n @property\r\n def const(self):\r\n if self._const is None:\r\n const = self.len_const[self.parent.dis.lenuni] * \\\r\n self.time_const[self.parent.dis.itmuni]\r\n else:\r\n const = self._const\r\n return const\r\n\r\n @property\r\n def nss(self):\r\n # number of stream segments\r\n return len(set(self.reach_data[\"iseg\"]))\r\n\r\n @property\r\n def nstrm(self):\r\n return np.sign(self._nstrm) * len(self.reach_data)\r\n\r\n @property\r\n def nper(self):\r\n nper = self.parent.nrow_ncol_nlay_nper[-1]\r\n nper = 1 if nper == 0 else nper # otherwise iterations from 0, nper won't run\r\n return nper\r\n\r\n @property\r\n def dataset_5(self):\r\n \"\"\"\r\n auto-update itmp so it is consistent with segment_data.\r\n \"\"\"\r\n ds5 = self._dataset_5\r\n nss = self.nss\r\n if ds5 is None:\r\n irdflag = self._get_flag('irdflag')\r\n iptflag = self._get_flag('iptflag')\r\n ds5 = {0: [nss, irdflag[0], iptflag[0]]}\r\n for per in range(1, self.nper):\r\n sd = self.segment_data.get(per, None)\r\n if sd is None:\r\n ds5[per] = [-nss, irdflag[per], iptflag[per]]\r\n else:\r\n ds5[per] = [len(sd), irdflag[per], iptflag[per]]\r\n return ds5\r\n\r\n @property\r\n def all_segments(self):\r\n \"\"\"\r\n Method to get a list of all segments in the simulation,\r\n since all segments do not have to 
be active in a given stress\r\n period\r\n\r\n returns:\r\n -------\r\n ra : (np.recarray)\r\n recarray contains a single entry of segment data for each stream segment\r\n \"\"\"\r\n\r\n ra = self.get_empty_segment_data(self.nss)\r\n i = 0\r\n for _, recarray in sorted(self.segment_data.items()):\r\n if i == self.nss:\r\n break\r\n else:\r\n for rec in recarray:\r\n if rec.nseg in ra.nseg:\r\n pass\r\n else:\r\n ra[i] = rec\r\n i += 1\r\n\r\n if i == self.nss:\r\n break\r\n return ra\r\n\r\n @property\r\n def graph(self):\r\n graph = dict(\r\n zip(self.all_segments.nseg, self.all_segments.outseg))\r\n outlets = set(graph.values()).difference(\r\n set(graph.keys())) # including lakes\r\n graph.update({o: 0 for o in outlets})\r\n return graph\r\n\r\n @property\r\n def paths(self):\r\n if self._paths is None:\r\n self._set_paths()\r\n return self._paths\r\n # check to see if routing in segment data was changed\r\n nseg = np.array(sorted(self._paths.keys()), dtype=int)\r\n nseg = nseg[nseg > 0].copy()\r\n outseg = np.array([self._paths[k][1] for k in nseg])\r\n sd = self.all_segments\r\n if not np.array_equal(nseg, sd.nseg) or not np.array_equal(outseg,\r\n sd.outseg):\r\n self._set_paths()\r\n return self._paths\r\n\r\n @property\r\n def df(self):\r\n if pd:\r\n return pd.DataFrame(self.reach_data)\r\n else:\r\n msg = 'ModflowSfr2.df: pandas not available'\r\n raise ImportError(msg)\r\n\r\n def _set_paths(self):\r\n graph = self.graph\r\n self._paths = {seg: find_path(graph, seg) for seg in graph.keys()}\r\n\r\n def _get_flag(self, flagname):\r\n \"\"\"\r\n populate values for each stress period\r\n \"\"\"\r\n flg = self.__dict__[flagname]\r\n flg = [flg] if np.isscalar(flg) else flg\r\n if len(flg) < self.nper:\r\n return flg + [flg[-1]] * (self.nper - len(flg))\r\n return flg\r\n\r\n @staticmethod\r\n def get_empty_reach_data(nreaches=0, aux_names=None, structured=True,\r\n default_value=0.):\r\n # get an empty recarray that corresponds to dtype\r\n dtype = 
ModflowSfr2.get_default_reach_dtype(structured=structured)\r\n if aux_names is not None:\r\n dtype = Package.add_to_dtype(dtype, aux_names, np.float32)\r\n d = create_empty_recarray(nreaches, dtype, default_value=default_value)\r\n d['reachID'] = np.arange(1, nreaches + 1)\r\n return d\r\n\r\n @staticmethod\r\n def get_empty_segment_data(nsegments=0, aux_names=None, default_value=0.):\r\n # get an empty recarray that corresponds to dtype\r\n dtype = ModflowSfr2.get_default_segment_dtype()\r\n if aux_names is not None:\r\n dtype = Package.add_to_dtype(dtype, aux_names, np.float32)\r\n d = create_empty_recarray(nsegments, dtype,\r\n default_value=default_value)\r\n return d\r\n\r\n @staticmethod\r\n def get_default_reach_dtype(structured=True):\r\n if structured:\r\n # include node column for structured grids (useful for indexing)\r\n return np.dtype([('node', np.int),\r\n ('k', np.int),\r\n ('i', np.int),\r\n ('j', np.int),\r\n ('iseg', np.int),\r\n ('ireach', np.int),\r\n ('rchlen', np.float32),\r\n ('strtop', np.float32),\r\n ('slope', np.float32),\r\n ('strthick', np.float32),\r\n ('strhc1', np.float32),\r\n ('thts', np.float32),\r\n ('thti', np.float32),\r\n ('eps', np.float32),\r\n ('uhc', np.float32),\r\n ('reachID', np.int),\r\n ('outreach', np.int)])\r\n else:\r\n return np.dtype([('node', np.int),\r\n ('iseg', np.int),\r\n ('ireach', np.int),\r\n ('rchlen', np.float32),\r\n ('strtop', np.float32),\r\n ('slope', np.float32),\r\n ('strthick', np.float32),\r\n ('strhc1', np.float32),\r\n ('thts', np.float32),\r\n ('thti', np.float32),\r\n ('eps', np.float32),\r\n ('uhc', np.float32),\r\n ('reachID', np.int),\r\n ('outreach', np.int)])\r\n\r\n @staticmethod\r\n def get_default_segment_dtype():\r\n return np.dtype([('nseg', np.int),\r\n ('icalc', np.int),\r\n ('outseg', np.int),\r\n ('iupseg', np.int),\r\n ('iprior', np.int),\r\n ('nstrpts', np.int),\r\n ('flow', np.float32),\r\n ('runoff', np.float32),\r\n ('etsw', np.float32),\r\n ('pptsw', np.float32),\r\n 
('roughch', np.float32),\r\n ('roughbk', np.float32),\r\n ('cdpth', np.float32),\r\n ('fdpth', np.float32),\r\n ('awdth', np.float32),\r\n ('bwdth', np.float32),\r\n ('hcond1', np.float32),\r\n ('thickm1', np.float32),\r\n ('elevup', np.float32),\r\n ('width1', np.float32),\r\n ('depth1', np.float32),\r\n ('thts1', np.float32),\r\n ('thti1', np.float32),\r\n ('eps1', np.float32),\r\n ('uhc1', np.float32),\r\n ('hcond2', np.float32),\r\n ('thickm2', np.float32),\r\n ('elevdn', np.float32),\r\n ('width2', np.float32),\r\n ('depth2', np.float32),\r\n ('thts2', np.float32),\r\n ('thti2', np.float32),\r\n ('eps2', np.float32),\r\n ('uhc2', np.float32)])\r\n\r\n @staticmethod\r\n def load(f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):\r\n\r\n if model.verbose:\r\n sys.stdout.write('loading sfr2 package file...\\n')\r\n\r\n tabfiles = False\r\n tabfiles_dict = {}\r\n transroute = False\r\n reachinput = False\r\n structured = model.structured\r\n if nper is None:\r\n nper = model.nper\r\n nper = 1 if nper == 0 else nper # otherwise iterations from 0, nper won't run\r\n\r\n if not hasattr(f, 'read'):\r\n filename = f\r\n f = open(filename, 'r')\r\n # Item 0 -- header\r\n while True:\r\n line = f.readline()\r\n if line[0] != '#':\r\n break\r\n\r\n options = None\r\n if model.version == \"mfnwt\" and \"options\" in line.lower():\r\n options = OptionBlock.load_options(f, ModflowSfr2)\r\n\r\n else:\r\n query = (\"reachinput\", \"transroute\", \"tabfiles\",\r\n \"lossfactor\", \"strhc1kh\", \"strhc1kv\")\r\n for i in query:\r\n if i in line.lower():\r\n options = OptionBlock(line.lower().strip(),\r\n ModflowSfr2, block=False)\r\n break\r\n\r\n if options is not None:\r\n line = f.readline()\r\n # check for 1b in modflow-2005\r\n if \"tabfile\" in line.lower():\r\n t = line.strip().split()\r\n options.tabfiles = True\r\n options.numtab = int(t[1])\r\n options.maxval = int(t[2])\r\n line = f.readline()\r\n\r\n # set varibles to be passed to class args\r\n transroute 
= options.transroute\r\n reachinput = options.reachinput\r\n tabfiles = isinstance(options.tabfiles, np.ndarray)\r\n numtab = options.numtab if tabfiles else 0\r\n\r\n # item 1c\r\n nstrm, nss, nsfrpar, nparseg, const, dleak, ipakcb, istcb2, \\\r\n isfropt, nstrail, isuzn, nsfrsets, \\\r\n irtflg, numtim, weight, flwtol, option = _parse_1c(line,\r\n reachinput=reachinput,\r\n transroute=transroute)\r\n\r\n # item 2\r\n # set column names, dtypes\r\n names = _get_item2_names(nstrm, reachinput, isfropt, structured)\r\n dtypes = [d for d in ModflowSfr2.get_default_reach_dtype().descr\r\n if d[0] in names]\r\n\r\n lines = []\r\n for i in range(abs(nstrm)):\r\n line = f.readline()\r\n line = line_parse(line)\r\n ireach = tuple(map(float, line[:len(dtypes)]))\r\n lines.append(ireach)\r\n\r\n tmp = np.array(lines, dtype=dtypes)\r\n # initialize full reach_data array with all possible columns\r\n reach_data = ModflowSfr2.get_empty_reach_data(len(lines))\r\n for n in names:\r\n reach_data[n] = tmp[\r\n n] # not sure if there's a way to assign multiple columns\r\n\r\n # zero-based convention\r\n inds = ['k', 'i', 'j'] if structured else ['node']\r\n _markitzero(reach_data, inds)\r\n\r\n # items 3 and 4 are skipped (parameters not supported)\r\n # item 5\r\n segment_data = {}\r\n channel_geometry_data = {}\r\n channel_flow_data = {}\r\n dataset_5 = {}\r\n aux_variables = {} # not sure where the auxiliary variables are supposed to go\r\n for i in range(0, nper):\r\n # Dataset 5\r\n dataset_5[i] = _get_dataset(f.readline(), [1, 0, 0, 0])\r\n itmp = dataset_5[i][0]\r\n if itmp > 0:\r\n # Item 6\r\n current = ModflowSfr2.get_empty_segment_data(nsegments=itmp,\r\n aux_names=option)\r\n # container to hold any auxiliary variables\r\n current_aux = {}\r\n # these could also be implemented as structured arrays with a column for segment number\r\n current_6d = {}\r\n current_6e = {}\r\n # print(i,icalc,nstrm,isfropt,reachinput)\r\n for j in range(itmp):\r\n dataset_6a = 
_parse_6a(f.readline(), option)\r\n current_aux[j] = dataset_6a[-1]\r\n dataset_6a = dataset_6a[:-1] # drop xyz\r\n icalc = dataset_6a[1]\r\n # link dataset 6d, 6e by nseg of dataset_6a\r\n temp_nseg = dataset_6a[0]\r\n dataset_6b = _parse_6bc(f.readline(), icalc, nstrm,\r\n isfropt,\r\n reachinput, per=i)\r\n dataset_6c = _parse_6bc(f.readline(), icalc, nstrm,\r\n isfropt,\r\n reachinput, per=i)\r\n\r\n current[j] = dataset_6a + dataset_6b + dataset_6c\r\n\r\n if icalc == 2:\r\n # ATL: not sure exactly how isfropt logic functions for this\r\n # dataset 6d description suggests that this line isn't read for isfropt > 1\r\n # but description of icalc suggest that icalc=2 (8-point channel) can be used with any isfropt\r\n if i == 0 or nstrm > 0 and not reachinput: # or isfropt <= 1:\r\n dataset_6d = []\r\n for _ in range(2):\r\n dataset_6d.append(\r\n _get_dataset(f.readline(), [0.0] * 8))\r\n # dataset_6d.append(list(map(float, f.readline().strip().split())))\r\n current_6d[temp_nseg] = dataset_6d\r\n if icalc == 4:\r\n nstrpts = dataset_6a[5]\r\n dataset_6e = []\r\n for _ in range(3):\r\n dataset_6e.append(\r\n _get_dataset(f.readline(), [0.0] * nstrpts))\r\n current_6e[temp_nseg] = dataset_6e\r\n\r\n segment_data[i] = current\r\n aux_variables[j + 1] = current_aux\r\n if len(current_6d) > 0:\r\n channel_geometry_data[i] = current_6d\r\n if len(current_6e) > 0:\r\n channel_flow_data[i] = current_6e\r\n\r\n if tabfiles and i == 0:\r\n for j in range(numtab):\r\n segnum, numval, iunit = map(int,\r\n f.readline().strip().split())\r\n tabfiles_dict[segnum] = {'numval': numval, 'inuit': iunit}\r\n\r\n else:\r\n continue\r\n\r\n # determine specified unit number\r\n unitnumber = None\r\n filenames = [None, None, None]\r\n if ext_unit_dict is not None:\r\n for key, value in ext_unit_dict.items():\r\n if value.filetype == ModflowSfr2.ftype():\r\n unitnumber = key\r\n filenames[0] = os.path.basename(value.filename)\r\n\r\n if ipakcb > 0:\r\n if key == ipakcb:\r\n 
filenames[1] = os.path.basename(value.filename)\r\n model.add_pop_key_list(key)\r\n\r\n if abs(istcb2) > 0:\r\n if key == abs(istcb2):\r\n filenames[2] = os.path.basename(value.filename)\r\n model.add_pop_key_list(key)\r\n\r\n return ModflowSfr2(model, nstrm=nstrm, nss=nss, nsfrpar=nsfrpar,\r\n nparseg=nparseg, const=const, dleak=dleak,\r\n ipakcb=ipakcb, istcb2=istcb2,\r\n isfropt=isfropt, nstrail=nstrail, isuzn=isuzn,\r\n nsfrsets=nsfrsets, irtflg=irtflg,\r\n numtim=numtim, weight=weight, flwtol=flwtol,\r\n reach_data=reach_data,\r\n segment_data=segment_data,\r\n dataset_5=dataset_5,\r\n channel_geometry_data=channel_geometry_data,\r\n channel_flow_data=channel_flow_data,\r\n reachinput=reachinput, transroute=transroute,\r\n tabfiles=tabfiles, tabfiles_dict=tabfiles_dict,\r\n unit_number=unitnumber, filenames=filenames,\r\n options=options)\r\n\r\n def check(self, f=None, verbose=True, level=1):\r\n \"\"\"\r\n Check sfr2 package data for common errors.\r\n\r\n Parameters\r\n ----------\r\n f : str or file handle\r\n String defining file name or file handle for summary file\r\n of check method output. If a string is passed a file handle\r\n is created. If f is None, check method does not write\r\n results to a summary file. (default is None)\r\n verbose : bool\r\n Boolean flag used to determine if check method results are\r\n written to the screen\r\n level : int\r\n Check method analysis level. If level=0, summary checks are\r\n performed. 
If level=1, full checks are performed.\r\n\r\n Returns\r\n -------\r\n None\r\n\r\n Examples\r\n --------\r\n\r\n >>> import flopy\r\n >>> m = flopy.modflow.Modflow.load('model.nam')\r\n >>> m.sfr2.check()\r\n \"\"\"\r\n chk = check(self, verbose=verbose, level=level)\r\n chk.for_nans()\r\n chk.numbering()\r\n chk.routing()\r\n chk.overlapping_conductance()\r\n chk.elevations()\r\n chk.slope()\r\n\r\n if f is not None:\r\n if isinstance(f, str):\r\n pth = os.path.join(self.parent.model_ws, f)\r\n f = open(pth, 'w')\r\n f.write('{}\\n'.format(chk.txt))\r\n # f.close()\r\n return chk\r\n\r\n def assign_layers(self, adjust_botms=False, pad=1.):\r\n \"\"\"\r\n Assigns the appropriate layer for each SFR reach,\r\n based on cell bottoms at location of reach.\r\n\r\n Parameters\r\n ----------\r\n adjust_botms : bool\r\n Streambed bottom elevations below the model bottom\r\n will cause an error in MODFLOW. If True, adjust\r\n bottom elevations in lowest layer of the model\r\n so they are at least pad distance below any co-located\r\n streambed elevations.\r\n pad : scalar\r\n Minimum distance below streambed bottom to set\r\n any conflicting model bottom elevations.\r\n\r\n Notes\r\n -----\r\n Streambed bottom = strtop - strthick\r\n This routine updates the elevations in the botm array\r\n of the flopy.model.ModflowDis instance. To produce a\r\n new DIS package file, model.write() or flopy.model.ModflowDis.write()\r\n must be run.\r\n\r\n \"\"\"\r\n streambotms = self.reach_data.strtop - self.reach_data.strthick\r\n i, j = self.reach_data.i, self.reach_data.j\r\n layers = self.parent.dis.get_layer(i, j, streambotms)\r\n\r\n # check against model bottom\r\n logfile = 'sfr_botm_conflicts.chk'\r\n mbotms = self.parent.dis.botm.array[-1, i, j]\r\n below = streambotms <= mbotms\r\n below_i = self.reach_data.i[below]\r\n below_j = self.reach_data.j[below]\r\n l = []\r\n header = ''\r\n if np.any(below):\r\n print('Warning: SFR streambed elevations below model bottom. 
'\r\n 'See sfr_botm_conflicts.chk')\r\n if not adjust_botms:\r\n l += [below_i,\r\n below_j,\r\n mbotms[below],\r\n streambotms[below]]\r\n header += 'i,j,model_botm,streambed_botm'\r\n else:\r\n print('Fixing elevation conflicts...')\r\n botm = self.parent.dis.botm.array.copy()\r\n for ib, jb in zip(below_i, below_j):\r\n inds = (self.reach_data.i == ib) & (\r\n self.reach_data.j == jb)\r\n botm[-1, ib, jb] = streambotms[inds].min() - pad\r\n # l.append(botm[-1, ib, jb])\r\n # botm[-1, below_i, below_j] = streambotms[below] - pad\r\n l.append(botm[-1, below_i, below_j])\r\n header += ',new_model_botm'\r\n self.parent.dis.botm = botm\r\n mbotms = self.parent.dis.botm.array[-1, i, j]\r\n assert not np.any(streambotms <= mbotms)\r\n print('New bottom array assigned to Flopy DIS package '\r\n 'instance.\\nRun flopy.model.write() or '\r\n 'flopy.model.ModflowDis.write() to write new DIS file.')\r\n header += '\\n'\r\n\r\n with open(logfile, 'w') as log:\r\n log.write(header)\r\n a = np.array(l).transpose()\r\n for line in a:\r\n log.write(','.join(map(str, line)) + '\\n')\r\n self.reach_data['k'] = layers\r\n\r\n def deactivate_ibound_above(self):\r\n \"\"\"\r\n Sets ibound to 0 for all cells above active SFR cells.\r\n\r\n Parameters\r\n ----------\r\n none\r\n\r\n Notes\r\n -----\r\n This routine updates the ibound array of the flopy.model.ModflowBas6\r\n instance. 
To produce a new BAS6 package file, model.write() or\r\n flopy.model.ModflowBas6.write() must be run.\r\n\r\n \"\"\"\r\n ib = self.parent.bas6.ibound.array\r\n deact_lays = [list(range(i)) for i in self.reach_data.k]\r\n for ks, i, j in zip(deact_lays, self.reach_data.i, self.reach_data.j):\r\n for k in ks:\r\n ib[k, i, j] = 0\r\n self.parent.bas6.ibound = ib\r\n\r\n def get_outlets(self, level=0, verbose=True):\r\n \"\"\"\r\n Traces all routing connections from each headwater to the outlet.\r\n \"\"\"\r\n txt = ''\r\n for per in range(self.nper):\r\n if per > 0 > self.dataset_5[per][\r\n 0]: # skip stress periods where seg data not defined\r\n continue\r\n # segments = self.segment_data[per].nseg\r\n # outsegs = self.segment_data[per].outseg\r\n #\r\n # all_outsegs = np.vstack([segments, outsegs])\r\n # max_outseg = all_outsegs[-1].max()\r\n # knt = 1\r\n # while max_outseg > 0:\r\n #\r\n # nextlevel = np.array([outsegs[s - 1] if s > 0 and s < 999999 else 0\r\n # for s in all_outsegs[-1]])\r\n #\r\n # all_outsegs = np.vstack([all_outsegs, nextlevel])\r\n # max_outseg = nextlevel.max()\r\n # if max_outseg == 0:\r\n # break\r\n # knt += 1\r\n # if knt > self.nss:\r\n # # subset outsegs map to only include rows with outseg number > 0 in last column\r\n # circular_segs = all_outsegs.T[all_outsegs[-1] > 0]\r\n #\r\n # # only retain one instance of each outseg number at iteration=nss\r\n # vals = [] # append outseg values to vals after they've appeared once\r\n # mask = [(True, vals.append(v))[0]\r\n # if v not in vals\r\n # else False for v in circular_segs[-1]]\r\n # circular_segs = circular_segs[:, np.array(mask)]\r\n #\r\n # # cull the circular segments array to remove duplicate instances of routing circles\r\n # circles = []\r\n # duplicates = []\r\n # for i in range(np.shape(circular_segs)[0]):\r\n # # find where values in the row equal the last value;\r\n # # record the index of the second to last instance of last value\r\n # repeat_start_ind = 
np.where(circular_segs[i] == circular_segs[i, -1])[0][-2:][0]\r\n # # use that index to slice out the repeated segment sequence\r\n # circular_seq = circular_segs[i, repeat_start_ind:].tolist()\r\n # # keep track of unique sequences of repeated segments\r\n # if set(circular_seq) not in circles:\r\n # circles.append(set(circular_seq))\r\n # duplicates.append(False)\r\n # else:\r\n # duplicates.append(True)\r\n # circular_segs = circular_segs[~np.array(duplicates), :]\r\n #\r\n # txt += '{0} instances where an outlet was not found after {1} consecutive segments!\\n' \\\r\n # .format(len(circular_segs), self.nss)\r\n # if level == 1:\r\n # txt += '\\n'.join([' '.join(map(str, row)) for row in circular_segs]) + '\\n'\r\n # else:\r\n # f = 'circular_routing.csv'\r\n # np.savetxt(f, circular_segs, fmt='%d', delimiter=',', header=txt)\r\n # txt += 'See {} for details.'.format(f)\r\n # if verbose:\r\n # print(txt)\r\n # break\r\n # # the array of segment sequence is useful for other other operations,\r\n # # such as plotting elevation profiles\r\n # self.outsegs[per] = all_outsegs\r\n #\r\n # use graph instead of above loop\r\n nrow = len(self.segment_data[per].nseg)\r\n ncol = np.max(\r\n [len(v) if v is not None else 0 for v in self.paths.values()])\r\n all_outsegs = np.zeros((nrow, ncol), dtype=int)\r\n for i, (k, v) in enumerate(self.paths.items()):\r\n if k > 0:\r\n all_outsegs[i, :len(v)] = v\r\n all_outsegs.sort(axis=0)\r\n self.outsegs[per] = all_outsegs\r\n # create a dictionary listing outlets associated with each segment\r\n # outlet is the last value in each row of outseg array that is != 0 or 999999\r\n # self.outlets[per] = {i + 1: r[(r != 0) & (r != 999999)][-1]\r\n # if len(r[(r != 0) & (r != 999999)]) > 0\r\n # else i + 1\r\n # for i, r in enumerate(all_outsegs.T)}\r\n self.outlets[per] = {k: self.paths[k][-1] if k in self.paths\r\n else k for k in self.segment_data[per].nseg}\r\n return txt\r\n\r\n def reset_reaches(self):\r\n 
        self.reach_data.sort(order=['iseg', 'ireach'])
        reach_data = self.reach_data
        # unique segment numbers present in reach_data
        segment_data = list(set(self.reach_data.iseg))  # self.segment_data[0]
        # count reaches per segment (bincount index 0 is unused because
        # segment numbers start at 1), then renumber ireach 1..n per segment
        reach_counts = np.bincount(reach_data.iseg)[1:]
        reach_counts = dict(zip(range(1, len(reach_counts) + 1),
                                reach_counts))
        ireach = [list(range(1, reach_counts[s] + 1))
                  for s in segment_data]
        ireach = np.concatenate(ireach)
        self.reach_data['ireach'] = ireach

    def set_outreaches(self):
        """
        Determine the outreach for each SFR reach (requires a reachID
        column in reach_data). Uses the segment routing specified for the
        first stress period to route reaches between segments.
        """
        self.reach_data.sort(order=['iseg', 'ireach'])
        # ensure that each segment starts with reach 1
        self.reset_reaches()
        # ensure that all outsegs are segments, outlets, or negative (lakes)
        self.repair_outsegs()
        rd = self.reach_data
        outseg = self.graph
        # reachID of reach 1 for each segment, keyed by segment number
        reach1IDs = dict(zip(rd[rd.ireach == 1].iseg,
                             rd[rd.ireach == 1].reachID))
        outreach = []
        for i in range(len(rd)):
            # if at the end of reach data or current segment
            if i + 1 == len(rd) or rd.ireach[i + 1] == 1:
                nextseg = outseg[rd.iseg[i]]  # get next segment
                if nextseg > 0:  # current reach is not an outlet
                    nextrchid = reach1IDs[
                        nextseg]  # get reach 1 of next segment
                else:
                    # outlet (or lake): no downstream reach
                    nextrchid = 0
            else:  # otherwise, it's the next reachID
                nextrchid = rd.reachID[i + 1]
            outreach.append(nextrchid)
        self.reach_data['outreach'] = outreach

    def get_slopes(self, default_slope=0.001, minimum_slope=0.0001,
                   maximum_slope=1.):
        """
        Compute slopes by reach using values in strtop (streambed top)
        and rchlen (reach length) columns of reach_data. 
The slope for a\r\n reach n is computed as strtop(n+1) - strtop(n) / rchlen(n).\r\n Slopes for outlet reaches are set equal to a default value\r\n (default_slope). Populates the slope column in reach_data.\r\n\r\n Parameters\r\n ----------\r\n default_slope : float\r\n Slope value applied to outlet reaches\r\n (where water leaves the model). Default value is 0.001\r\n minimum_slope : float\r\n Assigned to reaches with computed slopes less than this value.\r\n This ensures that the Manning's equation won't produce unreasonable\r\n values of stage (in other words, that stage is consistent with\r\n assumption that streamflow is primarily drive by the streambed\r\n gradient). Default value is 0.0001.\r\n maximum_slope : float\r\n Assigned to reaches with computed slopes more than this value.\r\n Default value is 1.\r\n\r\n \"\"\"\r\n # compute outreaches if they aren't there already\r\n if np.diff(self.reach_data.outreach).max() == 0:\r\n self.set_outreaches()\r\n rd = self.reach_data\r\n elev = dict(zip(rd.reachID, rd.strtop))\r\n dist = dict(zip(rd.reachID, rd.rchlen))\r\n dnelev = {rid: elev[rd.outreach[i]] if rd.outreach[i] != 0\r\n else -9999 for i, rid in enumerate(rd.reachID)}\r\n slopes = np.array(\r\n [(elev[i] - dnelev[i]) / dist[i] if dnelev[i] != -9999\r\n else default_slope for i in rd.reachID])\r\n slopes[slopes < minimum_slope] = minimum_slope\r\n slopes[slopes > maximum_slope] = maximum_slope\r\n self.reach_data['slope'] = slopes\r\n\r\n def get_upsegs(self):\r\n \"\"\"\r\n From segment_data, returns nested dict of all upstream segments by\r\n segment, by stress period.\r\n\r\n Returns\r\n -------\r\n all_upsegs : dict\r\n Nested dictionary of form\r\n {stress period: {segment: [list of upsegs]}}\r\n\r\n Notes\r\n -----\r\n This method will not work if there are instances of circular routing.\r\n\r\n \"\"\"\r\n all_upsegs = {}\r\n for per in range(self.nper):\r\n if per > 0 > self.dataset_5[per][\r\n 0]: # skip stress periods where seg data not 
defined
                continue
            segment_data = self.segment_data[per]

            # make a list of adjacent upsegments keyed to outseg list in Mat2
            upsegs = {o: segment_data.nseg[segment_data.outseg == o].tolist()
                      for o in np.unique(segment_data.outseg)}

            outsegs = [k for k in list(upsegs.keys()) if
                       k > 0]  # exclude 0, which is the outlet designator

            # for each outseg key, for each upseg, check for more upsegs,
            # append until headwaters has been reached
            for outseg in outsegs:

                up = True
                upsegslist = upsegs[outseg]
                while up:
                    added_upsegs = []
                    for us in upsegslist:
                        if us in outsegs:
                            added_upsegs += upsegs[us]
                    if len(added_upsegs) == 0:
                        up = False
                        break
                    else:
                        upsegslist = added_upsegs
                        upsegs[outseg] += added_upsegs

            # the above algorithm is recursive, so lower order streams
            # get duplicated many times; use a set to get unique upsegs
            all_upsegs[per] = {u: list(set(upsegs[u])) for u in outsegs}
        return all_upsegs

    def get_variable_by_stress_period(self, varname):
        """Build a recarray of a segment_data variable across stress periods.

        One column per stress period (named ``<varname><per>``), one row per
        segment whose values are not all zero, merged with the k, i, j
        location of the first reach (ireach == 1) of each such segment.
        """
        dtype = []
        # (nss, nper) matrix of the variable, indexed by segment - 1
        all_data = np.zeros((self.nss, self.nper), dtype=float)
        for per in range(self.nper):
            inds = self.segment_data[per].nseg - 1
            all_data[inds, per] = self.segment_data[per][varname]
            dtype.append(('{}{}'.format(varname, per), float))
        # keep only segments where the variable is nonzero in some period
        isvar = all_data.sum(axis=1) != 0
        ra = np.core.records.fromarrays(all_data[isvar].transpose().copy(),
                                        dtype=dtype)
        segs = self.segment_data[0].nseg[isvar]
        isseg = np.array(
            [True if s in segs else False for s in self.reach_data.iseg])
        # first reach of each selected segment supplies the cell location
        isinlet = isseg & (self.reach_data.ireach == 1)
        rd = np.array(self.reach_data[isinlet])[
            ['k', 'i', 'j', 'iseg', 'ireach']]
        ra = recfunctions.merge_arrays([rd, ra], flatten=True, usemask=False)
        return ra.view(np.recarray)

    def repair_outsegs(self):
        """Zero out outseg values that are neither valid segments nor lakes
        (negative numbers), so all routing targets are resolvable."""
        isasegment = np.in1d(self.segment_data[0].outseg,
                             self.segment_data[0].nseg)
        isasegment = isasegment | 
(self.segment_data[0].outseg < 0)\r\n self.segment_data[0]['outseg'][~isasegment] = 0.\r\n\r\n def renumber_segments(self):\r\n \"\"\"\r\n Renumber segments so that segment numbering is continuous and always\r\n increases in the downstream direction. This may speed convergence of\r\n the NWT solver in some situations.\r\n\r\n Returns\r\n -------\r\n r : dictionary mapping old segment numbers to new\r\n \"\"\"\r\n\r\n segments = self.all_segments\r\n segments.sort(order=\"nseg\")\r\n # get renumbering info from per=0\r\n nseg = segments.nseg\r\n outseg = segments.outseg\r\n\r\n # explicitly fix any gaps in the numbering\r\n # (i.e. from removing segments)\r\n nseg2 = np.arange(1, len(nseg) + 1)\r\n # intermediate mapping that\r\n r1 = dict(zip(nseg, nseg2))\r\n r1[0] = 0\r\n outseg2 = np.array([r1[s] for s in outseg])\r\n\r\n # function re-assigning upseg numbers consecutively at one level\r\n # relative to outlet(s). Counts down from the number of segments\r\n def reassign_upsegs(r, nexts, upsegs):\r\n nextupsegs = []\r\n for u in upsegs:\r\n r[u] = nexts if u > 0 else u # handle lakes\r\n nexts -= 1\r\n nextupsegs += list(nseg2[outseg2 == u])\r\n return r, nexts, nextupsegs\r\n\r\n ns = len(nseg)\r\n\r\n # start at outlets with nss;\r\n # renumber upsegs consecutively at each level\r\n # until all headwaters have been reached\r\n nexts = ns\r\n r2 = {0: 0}\r\n nextupsegs = nseg2[outseg2 == 0]\r\n for _ in range(ns):\r\n r2, nexts, nextupsegs = reassign_upsegs(r2, nexts, nextupsegs)\r\n if len(nextupsegs) == 0:\r\n break\r\n # map original segment numbers to new numbers\r\n r = {k: r2.get(v, v) for k, v in r1.items()}\r\n\r\n # renumber segments in all stress period data\r\n for per in self.segment_data.keys():\r\n self.segment_data[per]['nseg'] = [r.get(s, s) for s in\r\n self.segment_data[per].nseg]\r\n self.segment_data[per]['outseg'] = [r.get(s, s) for s in\r\n self.segment_data[per].outseg]\r\n self.segment_data[per].sort(order='nseg')\r\n nseg = 
self.segment_data[per].nseg\r\n outseg = self.segment_data[per].outseg\r\n inds = (outseg > 0) & (nseg > outseg)\r\n assert not np.any(inds)\r\n assert len(self.segment_data[per]['nseg']) == \\\r\n self.segment_data[per]['nseg'].max()\r\n\r\n # renumber segments in reach_data\r\n self.reach_data['iseg'] = [r.get(s, s) for s in self.reach_data.iseg]\r\n self.reach_data.sort(order=['iseg', 'ireach'])\r\n self.reach_data['reachID'] = np.arange(1, len(self.reach_data) + 1)\r\n self.set_outreaches() # reset the outreaches to ensure continuity\r\n\r\n # renumber segments in other datasets\r\n def renumber_channel_data(d):\r\n if d is not None:\r\n d2 = {}\r\n for k, v in d.items():\r\n d2[k] = {}\r\n for s, vv in v.items():\r\n d2[k][r[s]] = vv\r\n else:\r\n d2 = None\r\n return d2\r\n\r\n self.channel_geometry_data = renumber_channel_data(\r\n self.channel_geometry_data)\r\n self.channel_flow_data = renumber_channel_data(self.channel_flow_data)\r\n return r\r\n\r\n def plot_path(self, start_seg=None, end_seg=0, plot_segment_lines=True):\r\n \"\"\"\r\n Plot a profile of streambed elevation and model top\r\n along a path of segments.\r\n\r\n Parameters\r\n ----------\r\n start_seg : int\r\n Number of first segment in path.\r\n end_seg : int\r\n Number of last segment in path (defaults to 0/outlet).\r\n plot_segment_lines : bool\r\n Controls plotting of segment end locations along profile.\r\n (default True)\r\n\r\n Returns\r\n -------\r\n ax : matplotlib.axes._subplots.AxesSubplot object\r\n \"\"\"\r\n import matplotlib.pyplot as plt\r\n if not pd:\r\n msg = 'ModflowSfr2.plot_path: pandas not available'\r\n raise ImportError(msg)\r\n\r\n df = self.df\r\n m = self.parent\r\n mfunits = m.sr.model_length_units\r\n\r\n to_miles = {'feet': 1 / 5280., 'meters': 1 / (.3048 * 5280.)}\r\n\r\n # slice the path\r\n path = np.array(self.paths[start_seg])\r\n endidx = np.where(path == end_seg)[0]\r\n endidx = endidx if len(endidx) > 0 else None\r\n path = path[:np.squeeze(endidx)]\r\n 
path = [s for s in path if s > 0] # skip lakes for now\r\n\r\n # get the values\r\n groups = df.groupby('iseg')\r\n tmp = pd.concat([groups.get_group(s) for s in path])\r\n tops = m.dis.top.array[tmp.i, tmp.j]\r\n dist = np.cumsum(tmp.rchlen.values) * to_miles.get(mfunits, 1.)\r\n\r\n # segment starts\r\n starts = dist[np.where(tmp.ireach.values == 1)[0]]\r\n\r\n ax = plt.subplots(figsize=(11, 8.5))[-1]\r\n ax.plot(dist, tops, label='Model top')\r\n ax.plot(dist, tmp.strtop, label='Streambed top')\r\n ax.set_xlabel('Distance along path, in miles')\r\n ax.set_ylabel('Elevation, in {}'.format(mfunits))\r\n ymin, ymax = ax.get_ylim()\r\n plt.autoscale(False)\r\n\r\n if plot_segment_lines: # plot segment ends as vertical lines\r\n ax.vlines(x=starts, ymin=ymin, ymax=ymax, lw=.1, alpha=.1,\r\n label='Gray lines indicate\\nsegment ends.')\r\n ax.legend()\r\n\r\n # plot selected segment numbers along path\r\n stride = np.floor(len(dist) / 10)\r\n stride = 1 if stride < 1 else stride\r\n inds = np.arange(0, len(dist), stride, dtype=int)\r\n plot_segnumbers = tmp.iseg.values[inds]\r\n xlocs = dist[inds]\r\n pad = 0.04 * (ymax - ymin)\r\n for x, sn in zip(xlocs, plot_segnumbers):\r\n ax.text(x, ymin + pad, '{}'.format(sn), va='top')\r\n ax.text(xlocs[0], ymin + pad * 1.2, 'Segment numbers:', va='bottom',\r\n fontweight='bold')\r\n ax.text(dist[-1], ymin + pad, '{}'.format(end_seg), ha='center',\r\n va='top')\r\n return ax\r\n\r\n def _get_headwaters(self, per=0):\r\n \"\"\"\r\n List all segments that are not outsegs (that do not have any\r\n segments upstream).\r\n\r\n Parameters\r\n ----------\r\n per : int\r\n Stress period for which to list headwater segments (default 0)\r\n\r\n Returns\r\n -------\r\n headwaters : np.ndarray (1-D)\r\n One dimensional array listing all headwater segments.\r\n \"\"\"\r\n upsegs = [self.segment_data[per].nseg[\r\n self.segment_data[per].outseg == s].tolist()\r\n for s in self.segment_data[0].nseg]\r\n return self.segment_data[per].nseg[\r\n 
np.array([i for i, u in enumerate(upsegs) if len(u) == 0])]\r\n\r\n def _interpolate_to_reaches(self, segvar1, segvar2, per=0):\r\n \"\"\"\r\n Interpolate values in datasets 6b and 6c to each reach in\r\n stream segment\r\n\r\n Parameters\r\n ----------\r\n segvar1 : str\r\n Column/variable name in segment_data array for representing start\r\n of segment (e.g. hcond1 for hydraulic conductivity)\r\n For segments with icalc=2 (specified channel geometry); if width1\r\n is given, the eighth distance point (XCPT8) from dataset 6d will\r\n be used as the stream width.\r\n For icalc=3, an arbitrary width of 5 is assigned.\r\n For icalc=4, the mean value for width given in item 6e is used.\r\n segvar2 : str\r\n Column/variable name in segment_data array for representing start\r\n of segment (e.g. hcond2 for hydraulic conductivity)\r\n per : int\r\n Stress period with segment data to interpolate\r\n\r\n Returns\r\n -------\r\n reach_values : 1D array\r\n One dimensional array of interpolated values of same length as\r\n reach_data array. 
For example, hcond1 and hcond2 could be entered\r\n as inputs to get values for the strhc1 (hydraulic conductivity)\r\n column in reach_data.\r\n\r\n \"\"\"\r\n reach_data = self.reach_data\r\n segment_data = self.segment_data[per]\r\n segment_data.sort(order='nseg')\r\n reach_data.sort(order=['iseg', 'ireach'])\r\n reach_values = []\r\n for seg in segment_data.nseg:\r\n reaches = reach_data[reach_data.iseg == seg]\r\n dist = np.cumsum(reaches.rchlen) - 0.5 * reaches.rchlen\r\n icalc = segment_data.icalc[segment_data.nseg == seg]\r\n # get width from channel cross section length\r\n if 'width' in segvar1 and icalc == 2:\r\n channel_geometry_data = self.channel_geometry_data[per]\r\n reach_values += list(\r\n np.ones(len(reaches)) * channel_geometry_data[seg][0][-1])\r\n # assign arbitrary width since width is based on flow\r\n elif 'width' in segvar1 and icalc == 3:\r\n reach_values += list(np.ones(len(reaches)) * 5)\r\n # assume width to be mean from streamflow width/flow table\r\n elif 'width' in segvar1 and icalc == 4:\r\n channel_flow_data = self.channel_flow_data[per]\r\n reach_values += list(\r\n np.ones(len(reaches)) * np.mean(channel_flow_data[seg][2]))\r\n else:\r\n fp = [segment_data[segment_data['nseg'] == seg][segvar1][0],\r\n segment_data[segment_data['nseg'] == seg][segvar2][0]]\r\n xp = [dist[0], dist[-1]]\r\n reach_values += np.interp(dist, xp, fp).tolist()\r\n return np.array(reach_values)\r\n\r\n def _write_1c(self, f_sfr):\r\n\r\n # NSTRM NSS NSFRPAR NPARSEG CONST DLEAK ipakcb ISTCB2\r\n # [ISFROPT] [NSTRAIL] [ISUZN] [NSFRSETS] [IRTFLG] [NUMTIM] [WEIGHT] [FLWTOL]\r\n f_sfr.write('{:.0f} {:.0f} {:.0f} {:.0f} {:.8f} {:.8f} {:.0f} {:.0f} '\r\n .format(self.nstrm, self.nss, self.nsfrpar, self.nparseg,\r\n self.const, self.dleak, self.ipakcb, self.istcb2))\r\n if self.reachinput:\r\n self.nstrm = abs(\r\n self.nstrm) # see explanation for dataset 1c in online guide\r\n f_sfr.write('{:.0f} '.format(self.isfropt))\r\n if self.isfropt > 1:\r\n 
f_sfr.write('{:.0f} {:.0f} {:.0f} '.format(self.nstrail,
                                                           self.isuzn,
                                                           self.nsfrsets))
        if self.nstrm < 0:
            # unsaturated-zone options repeated for the negative-NSTRM form
            f_sfr.write('{:.0f} '.format(self.isfropt))
            if self.isfropt > 1:
                f_sfr.write('{:.0f} {:.0f} {:.0f} '.format(self.nstrail,
                                                           self.isuzn,
                                                           self.nsfrsets))
        if self.nstrm < 0 or self.transroute:
            # transient streamflow-routing options
            f_sfr.write('{:.0f} '.format(self.irtflg))
            if self.irtflg > 0:
                f_sfr.write('{:.0f} {:.8f} {:.8f} '.format(self.numtim,
                                                           self.weight,
                                                           self.flwtol))
        f_sfr.write('\n')

    def _write_reach_data(self, f_sfr):
        """Write reach_data (dataset 2) to the open file handle f_sfr,
        converting zero-based k, i, j / node indices back to one-based."""
        # Write the recarray (data) to the file (or file handle) f
        assert isinstance(self.reach_data,
                          np.recarray), "MfList.__tofile() data arg " + \
                                        "not a recarray"

        # decide which columns to write
        # columns = self._get_item2_names()
        columns = _get_item2_names(self.nstrm, self.reachinput, self.isfropt,
                                   structured=self.parent.structured)

        # Add one to the kij indices
        # --make copy of data for multiple calls
        d = np.array(self.reach_data)
        for idx in ['k', 'i', 'j', 'node']:
            if (idx in columns):
                d[idx] += 1
        d = d[columns]
        # one format string per record; replace trailing space with newline
        formats = _fmt_string(d)[:-1] + '\n'
        for rec in d:
            f_sfr.write(formats.format(*rec))

    def _write_segment_data(self, i, j, f_sfr):
        """Write dataset 6a for segment j of stress period i, then delegate
        datasets 6b/6c to _write_6bc."""
        cols = ['nseg', 'icalc', 'outseg', 'iupseg', 'iprior', 'nstrpts',
                'flow', 'runoff',
                'etsw', 'pptsw', 'roughch', 'roughbk', 'cdpth', 'fdpth',
                'awdth', 'bwdth']
        seg_dat = np.array(self.segment_data[i])[cols][j]
        fmts = _fmt_string_list(seg_dat)

        # placeholder default values are written as 0
        nseg, icalc, outseg, iupseg, iprior, nstrpts, flow, runoff, etsw, \
            pptsw, roughch, roughbk, cdpth, fdpth, awdth, bwdth = \
            [0 if v == self.default_value else v for v in seg_dat]

        f_sfr.write(
            ' '.join(fmts[0:4]).format(nseg, icalc, outseg, iupseg) + ' ')

        if iupseg > 0:
    def _write_6bc(self, i, j, f_sfr, cols=()):
        """Write one line of SFR data set 6b or 6c for segment j of stress
        period i.

        The cols names passed by _write_segment_data end in '1' (upstream
        end, data set 6b) or '2' (downstream end, data set 6c). Which
        values are written — and whether anything is written at all —
        depends on isfropt, icalc, and whether this is the first stress
        period (i == 0), following the SFR2 input instructions.
        """
        cols = list(cols)
        # icalc is column 1 of segment_data (stage-calculation option)
        icalc = self.segment_data[i][j][1]
        seg_dat = np.array(self.segment_data[i])[cols][j]
        fmts = _fmt_string_list(seg_dat)
        # replace the package's placeholder default with literal zero
        hcond, thickm, elevupdn, width, depth, thts, thti, eps, uhc = \
            [0 if v == self.default_value else v for v in seg_dat]

        if self.isfropt in [0, 4, 5] and icalc <= 0:
            # specified stage: hydraulic properties plus width and depth
            f_sfr.write(
                ' '.join(fmts[0:5]).format(hcond, thickm, elevupdn, width,
                                           depth) + ' ')

        elif self.isfropt in [0, 4, 5] and icalc == 1:
            f_sfr.write(fmts[0].format(hcond) + ' ')

            if i == 0:
                # first stress period: full geometry is required
                f_sfr.write(
                    ' '.join(fmts[1:4]).format(thickm, elevupdn, width) + ' ')
                if self.isfropt in [4, 5]:
                    # unsaturated-zone properties
                    f_sfr.write(
                        ' '.join(fmts[5:8]).format(thts, thti, eps) + ' ')

                    if self.isfropt == 5:
                        # vertical unsaturated hydraulic conductivity
                        f_sfr.write(fmts[8].format(uhc) + ' ')

            elif i > 0 and self.isfropt == 0:
                # later periods repeat geometry only for isfropt 0
                f_sfr.write(
                    ' '.join(fmts[1:4]).format(thickm, elevupdn, width) + ' ')

        elif self.isfropt in [0, 4, 5] and icalc >= 2:
            f_sfr.write(fmts[0].format(hcond) + ' ')

            if self.isfropt in [4, 5] and i > 0 and icalc == 2:
                # nothing more is written after the first stress period
                pass
            else:
                f_sfr.write(' '.join(fmts[1:3]).format(thickm, elevupdn) + ' ')

                if self.isfropt in [4, 5] and icalc == 2 and i == 0:
                    # NOTE(review): unsaturated terms come from fmts[3:6]
                    # here (not [5:8] as in the icalc == 1 branch) because
                    # width/depth are absent from this item — confirm
                    # against the SFR2 input instructions.
                    f_sfr.write(
                        ' '.join(fmts[3:6]).format(thts, thti, eps) + ' ')

                    if self.isfropt == 5:
                        f_sfr.write(fmts[8].format(uhc) + ' ')
                else:
                    pass
        elif self.isfropt == 1 and icalc <= 1:
            # only width (and depth when stage is specified) is written
            f_sfr.write(fmts[3].format(width) + ' ')
            if icalc <= 0:
                f_sfr.write(fmts[4].format(depth) + ' ')
        elif self.isfropt in [2, 3] and icalc <= 1:
            if i > 0:
                # geometry written in the first stress period only
                pass
            else:
                f_sfr.write(fmts[3].format(width) + ' ')
                if icalc <= 0:
                    f_sfr.write(fmts[4].format(depth) + ' ')
        else:
            pass
        f_sfr.write('\n')
range(2):\r\n for d in self.channel_geometry_data[i][nseg][\r\n k]:\r\n f_sfr.write('{:.2f} '.format(d))\r\n f_sfr.write('\\n')\r\n\r\n if icalc == 4:\r\n # nstrpts = self.segment_data[i][j][5]\r\n for k in range(3):\r\n for d in self.channel_flow_data[i][nseg][k]:\r\n f_sfr.write('{:.2f} '.format(d))\r\n f_sfr.write('\\n')\r\n if self.tabfiles and i == 0:\r\n for j in sorted(self.tabfiles_dict.keys()):\r\n f_sfr.write('{:.0f} {:.0f} {:.0f}\\n'.format(j,\r\n self.tabfiles_dict[\r\n j][\r\n 'numval'],\r\n self.tabfiles_dict[\r\n j][\r\n 'inuit']))\r\n else:\r\n continue\r\n f_sfr.close()\r\n\r\n def export(self, f, **kwargs):\r\n if isinstance(f, str) and f.lower().endswith(\".shp\"):\r\n from flopy.utils.geometry import Polygon\r\n from flopy.export.shapefile_utils import recarray2shp\r\n verts = self.parent.sr.get_vertices(self.reach_data.i,\r\n self.reach_data.j)\r\n geoms = [Polygon(v) for v in verts]\r\n recarray2shp(self.reach_data, geoms, shpname=f, **kwargs)\r\n else:\r\n from flopy import export\r\n return export.utils.package_export(f, self, **kwargs)\r\n\r\n def export_linkages(self, f, **kwargs):\r\n \"\"\"\r\n Export linework shapefile showing all routing connections between\r\n SFR reaches. 
    def export_outlets(self, f, **kwargs):
        """
        Export point shapefile showing locations where streamflow is leaving
        the model (outreach == 0).

        """
        from flopy.utils.geometry import Point
        from flopy.export.shapefile_utils import recarray2shp
        rd = self.reach_data
        # all outreach values identical — presumably they were never
        # computed; populate them first (TODO confirm)
        if np.min(rd.outreach) == np.max(rd.outreach):
            self.set_outreaches()
        # keep only terminal reaches (those that route to no other reach)
        rd = self.reach_data[self.reach_data.outreach == 0].copy()
        m = self.parent
        rd.sort(order=['iseg', 'ireach'])

        # get the cell centers for each reach
        mg = m.modelgrid
        x0 = mg.xcellcenters[rd.i, rd.j]
        y0 = mg.ycellcenters[rd.i, rd.j]
        geoms = [Point(x, y) for x, y in zip(x0, y0)]
        recarray2shp(rd, geoms, f, **kwargs)
    @staticmethod
    def ftype():
        """Return the package-type string ('SFR') used in MODFLOW name files."""
        return 'SFR'

    @staticmethod
    def defaultunit():
        """Return the default unit number (17) assigned to this package."""
        return 17
    def _boolean_compare(self, array, col1, col2,
                         level0txt='{} violations encountered.',
                         level1txt='Violations:',
                         sort_ascending=True, print_delimiter=' '):
        """
        Compare two columns in a record array. For each row,
        tests if value in col1 is greater than col2. If any values
        in col1 are > col2, subsets array to only include rows where
        col1 is greater. Creates another column with differences
        (col2 - col1; negative where col1 exceeds col2), and prints the
        array sorted by the differences column (diff).

        Parameters
        ----------
        array : record array
            Array with columns to compare.
        col1 : string
            Column name in array.
        col2 : string
            Column name in array.
        sort_ascending : T/F; default True
            If True, printed array will be sorted by differences in
            ascending order.
        print_delimiter : str
            Delimiter for printed array.

        Returns
        -------
        txt : str
            Error messages and printed array (if .level attribute of
            checker is set to 1). Returns an empty string if no
            values in col1 are greater than col2.

        Notes
        -----
        col1/col2 may also be passed as plain arrays or as
        (name, values) tuples; they are appended to the record array
        under temporary names so the comparison below is uniform.

        info about appending to record arrays (views vs. copies and upcoming
        changes to numpy):
        http://stackoverflow.com/questions/22865877/how-do-i-write-to-multiple-fields-of-a-structured-array
        """
        txt = ''
        # work on a recarray copy so appended columns don't touch the input
        array = array.view(np.recarray).copy()
        if isinstance(col1, np.ndarray):
            array = recfunctions.append_fields(array, names='tmp1', data=col1,
                                               asrecarray=True)
            col1 = 'tmp1'
        if isinstance(col2, np.ndarray):
            array = recfunctions.append_fields(array, names='tmp2', data=col2,
                                               asrecarray=True)
            col2 = 'tmp2'
        if isinstance(col1, tuple):
            array = recfunctions.append_fields(array, names=col1[0],
                                               data=col1[1],
                                               asrecarray=True)
            col1 = col1[0]
        if isinstance(col2, tuple):
            array = recfunctions.append_fields(array, names=col2[0],
                                               data=col2[1],
                                               asrecarray=True)
            col2 = col2[0]

        failed = array[col1] > array[col2]
        if np.any(failed):
            failed_info = np.array(array)[failed]
            txt += level0txt.format(len(failed_info)) + '\n'
            if self.level == 1:
                # note: diff is col2 - col1 (negative for each failed row)
                diff = failed_info[col2] - failed_info[col1]
                # drop all-zero columns, any stale 'diff', and the
                # temporary comparison columns from the printed output
                cols = [c for c in failed_info.dtype.names if
                        failed_info[c].sum() != 0
                        and c != 'diff'
                        and 'tmp' not in c]
                failed_info = recfunctions.append_fields(
                    failed_info[cols].copy(), names='diff', data=diff,
                    usemask=False, asrecarray=False)
                failed_info.sort(order='diff', axis=0)
                if not sort_ascending:
                    failed_info = failed_info[::-1]
                txt += level1txt + '\n'
                txt += _print_rec_array(failed_info, delimiter=print_delimiter)
            txt += '\n'
        return txt
sd['nseg'],\r\n level=self.level,\r\n datatype='segment')\r\n\r\n # check reach numbering\r\n for segment in np.arange(1, self.sfr.nss + 1):\r\n reaches = self.reach_data.ireach[self.reach_data.iseg == segment]\r\n t = _check_numbers(len(reaches),\r\n reaches,\r\n level=self.level,\r\n datatype='reach')\r\n if len(t) > 0:\r\n txt += 'Segment {} has {}'.format(segment, t)\r\n if txt == '':\r\n passed = True\r\n self._txt_footer(headertxt, txt,\r\n 'continuity in segment and reach numbering', passed,\r\n warning=False)\r\n\r\n headertxt = 'Checking for increasing segment numbers in downstream direction...\\n'\r\n txt = ''\r\n passed = False\r\n if self.verbose:\r\n print(headertxt.strip())\r\n # for per, segment_data in self.segment_data.items():\r\n\r\n inds = (sd.outseg < sd.nseg) & (sd.outseg != 0)\r\n\r\n if len(txt) == 0 and np.any(inds):\r\n decreases = np.array(sd[inds])[['nseg', 'outseg']]\r\n txt += 'Found {} segment numbers decreasing in the downstream direction.\\n'.format(\r\n len(decreases))\r\n txt += 'MODFLOW will run but convergence may be slowed:\\n'\r\n if self.level == 1:\r\n txt += 'nseg outseg\\n'\r\n t = ''\r\n for nseg, outseg in decreases:\r\n t += '{} {}\\n'.format(nseg, outseg)\r\n txt += t # '\\n'.join(textwrap.wrap(t, width=10))\r\n if len(t) == 0:\r\n passed = True\r\n self._txt_footer(headertxt, txt, 'segment numbering order', passed)\r\n\r\n def routing(self):\r\n \"\"\"\r\n Checks for breaks in routing and does comprehensive check for\r\n circular routing\r\n\r\n \"\"\"\r\n headertxt = 'Checking for circular routing...\\n'\r\n txt = ''\r\n if self.verbose:\r\n print(headertxt.strip())\r\n\r\n # txt += self.sfr.get_outlets(level=self.level, verbose=False) # will print twice if verbose=True\r\n # simpler check method using paths from routing graph\r\n circular_segs = [k for k, v in self.sfr.paths.items() if v is None]\r\n if len(circular_segs) > 0:\r\n txt += '{0} instances where an outlet was not found after {1} consecutive 
segments!\\n' \\\r\n .format(len(circular_segs), self.sfr.nss)\r\n if self.level == 1:\r\n txt += ' '.join(map(str, circular_segs)) + '\\n'\r\n else:\r\n f = os.path.join(self.sfr.parent._model_ws,\r\n 'circular_routing.chk.csv')\r\n np.savetxt(f, circular_segs, fmt='%d', delimiter=',',\r\n header=txt)\r\n txt += 'See {} for details.'.format(f)\r\n if self.verbose:\r\n print(txt)\r\n self._txt_footer(headertxt, txt, 'circular routing', warning=False)\r\n\r\n # check reach connections for proximity\r\n if self.mg is not None or self.mg is not None:\r\n rd = self.sfr.reach_data\r\n rd.sort(order=['reachID'])\r\n try:\r\n xcentergrid, ycentergrid, zc = self.mg.get_cellcenters()\r\n del zc\r\n except AttributeError:\r\n xcentergrid = self.mg.xcellcenters\r\n ycentergrid = self.mg.ycellcenters\r\n\r\n x0 = xcentergrid[rd.i, rd.j]\r\n y0 = ycentergrid[rd.i, rd.j]\r\n loc = dict(zip(rd.reachID, zip(x0, y0)))\r\n\r\n # compute distances between node centers of connected reaches\r\n headertxt = 'Checking reach connections for proximity...\\n'\r\n txt = ''\r\n if self.verbose:\r\n print(headertxt.strip())\r\n dist = []\r\n for r in rd.reachID:\r\n x0, y0 = loc[r]\r\n outreach = rd.outreach[r - 1]\r\n if outreach == 0:\r\n dist.append(0)\r\n else:\r\n x1, y1 = loc[outreach]\r\n dist.append(np.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2))\r\n dist = np.array(dist)\r\n\r\n # compute max width of reach nodes (hypotenuse for rectangular nodes)\r\n delr = self.mg.delr\r\n delc = self.mg.delc\r\n\r\n dx = delr[rd.j] # (delr * self.sr.length_multiplier)[rd.j]\r\n dy = delc[rd.i] # (delc * self.sr.length_multiplier)[rd.i]\r\n hyp = np.sqrt(dx ** 2 + dy ** 2)\r\n\r\n # breaks are when the connection distance is greater than\r\n # max node with * a tolerance\r\n # 1.25 * hyp is greater than distance of two diagonally adjacent nodes\r\n # where one is 1.5x larger than the other\r\n breaks = np.where(dist > hyp * 1.25)\r\n breaks_reach_data = rd[breaks]\r\n segments_with_breaks = 
set(breaks_reach_data.iseg)\r\n if len(breaks) > 0:\r\n txt += '{0} segments '.format(len(segments_with_breaks)) + \\\r\n 'with non-adjacent reaches found.\\n'\r\n if self.level == 1:\r\n txt += 'At segments:\\n'\r\n txt += ' '.join(map(str, segments_with_breaks)) + '\\n'\r\n else:\r\n f = os.path.join(self.sfr.parent._model_ws,\r\n 'reach_connection_gaps.chk.csv')\r\n rd.tofile(f, sep='\\t')\r\n txt += 'See {} for details.'.format(f)\r\n if self.verbose:\r\n print(txt)\r\n self._txt_footer(headertxt, txt, 'reach connections',\r\n warning=False)\r\n else:\r\n txt += 'No DIS package or SpatialReference object; cannot ' + \\\r\n 'check reach proximities.'\r\n self._txt_footer(headertxt, txt, '')\r\n\r\n def overlapping_conductance(self, tol=1e-6):\r\n \"\"\"\r\n Checks for multiple SFR reaches in one cell; and whether more than\r\n one reach has Cond > 0\r\n\r\n \"\"\"\r\n headertxt = 'Checking for model cells with multiple non-zero ' + \\\r\n 'SFR conductances...\\n'\r\n txt = ''\r\n if self.verbose:\r\n print(headertxt.strip())\r\n\r\n # make nreach vectors of each conductance parameter\r\n reach_data = np.array(self.reach_data)\r\n # if no dis file was supplied, can't compute node numbers\r\n # make nodes based on unique row, col pairs\r\n # if np.diff(reach_data.node).max() == 0:\r\n # always use unique rc, since flopy assigns nodes by k, i, j\r\n uniquerc = {}\r\n for i, (r, c) in enumerate(reach_data[['i', 'j']]):\r\n if (r, c) not in uniquerc:\r\n uniquerc[(r, c)] = i + 1\r\n reach_data['node'] = [uniquerc[(r, c)] for r, c in\r\n reach_data[['i', 'j']]]\r\n\r\n K = reach_data['strhc1']\r\n if K.max() == 0:\r\n K = self.sfr._interpolate_to_reaches('hcond1', 'hcond2')\r\n b = reach_data['strthick']\r\n if b.max() == 0:\r\n b = self.sfr._interpolate_to_reaches('thickm1', 'thickm2')\r\n L = reach_data['rchlen']\r\n w = self.sfr._interpolate_to_reaches('width1', 'width2')\r\n\r\n # Calculate SFR conductance for each reach\r\n binv = np.zeros(b.shape, 
dtype=b.dtype)\r\n idx = b > 0.\r\n binv[idx] = 1. / b[idx]\r\n Cond = K * w * L * binv\r\n\r\n shared_cells = _get_duplicates(reach_data['node'])\r\n\r\n nodes_with_multiple_conductance = set()\r\n for node in shared_cells:\r\n\r\n # select the collocated reaches for this cell\r\n conductances = Cond[reach_data['node'] == node].copy()\r\n conductances.sort()\r\n\r\n # list nodes with multiple non-zero SFR reach conductances\r\n if (conductances[-1] != 0.0 and\r\n (conductances[0] / conductances[-1] > tol)):\r\n nodes_with_multiple_conductance.update({node})\r\n\r\n if len(nodes_with_multiple_conductance) > 0:\r\n txt += '{} model cells with multiple non-zero SFR conductances found.\\n' \\\r\n 'This may lead to circular routing between collocated reaches.\\n' \\\r\n .format(len(nodes_with_multiple_conductance))\r\n if self.level == 1:\r\n txt += 'Nodes with overlapping conductances:\\n'\r\n\r\n reach_data['strthick'] = b\r\n reach_data['strhc1'] = K\r\n\r\n cols = [c for c in reach_data.dtype.names if c in \\\r\n ['k', 'i', 'j', 'iseg', 'ireach', 'rchlen', 'strthick',\r\n 'strhc1', 'width', 'conductance']]\r\n\r\n reach_data = recfunctions.append_fields(\r\n reach_data,\r\n names=['width', 'conductance'], data=[w, Cond],\r\n usemask=False, asrecarray=False)\r\n has_multiple = np.array(\r\n [True if n in nodes_with_multiple_conductance\r\n else False for n in reach_data['node']])\r\n reach_data = reach_data[has_multiple]\r\n reach_data = reach_data[cols]\r\n txt += _print_rec_array(reach_data, delimiter='\\t')\r\n\r\n self._txt_footer(headertxt, txt, 'overlapping conductance')\r\n\r\n def elevations(self, min_strtop=-10, max_strtop=15000):\r\n \"\"\"\r\n Checks streambed elevations for downstream rises and inconsistencies\r\n with model grid\r\n\r\n \"\"\"\r\n headertxt = 'Checking for streambed tops of less ' + \\\r\n 'than {}...\\n'.format(min_strtop)\r\n txt = ''\r\n if self.verbose:\r\n print(headertxt.strip())\r\n\r\n passed = False\r\n if self.sfr.isfropt in 
[1, 2, 3]:\r\n if np.diff(self.reach_data.strtop).max() == 0:\r\n txt += 'isfropt setting of 1,2 or 3 requires strtop information!\\n'\r\n else:\r\n is_less = self.reach_data.strtop < min_strtop\r\n if np.any(is_less):\r\n below_minimum = self.reach_data[is_less]\r\n txt += '{} instances of streambed top below minimum found.\\n'.format(\r\n len(below_minimum))\r\n if self.level == 1:\r\n txt += 'Reaches with low strtop:\\n'\r\n txt += _print_rec_array(below_minimum, delimiter='\\t')\r\n if len(txt) == 0:\r\n passed = True\r\n else:\r\n txt += 'strtop not specified for isfropt={}\\n'.format(\r\n self.sfr.isfropt)\r\n passed = True\r\n self._txt_footer(headertxt, txt, 'minimum streambed top', passed)\r\n\r\n headertxt = 'Checking for streambed tops of ' + \\\r\n 'greater than {}...\\n'.format(max_strtop)\r\n txt = ''\r\n if self.verbose:\r\n print(headertxt.strip())\r\n\r\n passed = False\r\n if self.sfr.isfropt in [1, 2, 3]:\r\n if np.diff(self.reach_data.strtop).max() == 0:\r\n txt += 'isfropt setting of 1,2 or 3 ' + \\\r\n 'requires strtop information!\\n'\r\n else:\r\n is_greater = self.reach_data.strtop > max_strtop\r\n if np.any(is_greater):\r\n above_max = self.reach_data[is_greater]\r\n txt += '{} instances '.format(len(above_max)) + \\\r\n 'of streambed top above the maximum found.\\n'\r\n if self.level == 1:\r\n txt += 'Reaches with high strtop:\\n'\r\n txt += _print_rec_array(above_max, delimiter='\\t')\r\n if len(txt) == 0:\r\n passed = True\r\n else:\r\n txt += 'strtop not specified for isfropt={}\\n'.format(\r\n self.sfr.isfropt)\r\n passed = True\r\n self._txt_footer(headertxt, txt, 'maximum streambed top', passed)\r\n\r\n headertxt = 'Checking segment_data for ' + \\\r\n 'downstream rises in streambed elevation...\\n'\r\n txt = ''\r\n if self.verbose:\r\n print(headertxt.strip())\r\n\r\n # decide whether to check elevup and elevdn from items 6b/c\r\n # (see online guide to SFR input; Data Set 6b description)\r\n passed = False\r\n if self.sfr.isfropt 
in [0, 4, 5]:\r\n pers = sorted(self.segment_data.keys())\r\n for per in pers:\r\n segment_data = self.segment_data[per][\r\n self.segment_data[per].elevup > -999999]\r\n\r\n # enforce consecutive increasing segment numbers (for indexing)\r\n segment_data.sort(order='nseg')\r\n t = _check_numbers(len(segment_data), segment_data.nseg,\r\n level=1, datatype='Segment')\r\n if len(t) > 0:\r\n txt += 'Elevation check requires ' + \\\r\n 'consecutive segment numbering.'\r\n self._txt_footer(headertxt, txt, '')\r\n return\r\n\r\n # first check for segments where elevdn > elevup\r\n d_elev = segment_data.elevdn - segment_data.elevup\r\n segment_data = recfunctions.append_fields(segment_data,\r\n names='d_elev',\r\n data=d_elev,\r\n asrecarray=True)\r\n txt += self._boolean_compare(\r\n np.array(segment_data)[['nseg', 'outseg', 'elevup',\r\n 'elevdn', 'd_elev']],\r\n col1='d_elev', col2=np.zeros(len(segment_data)),\r\n level0txt='Stress Period {}: '.format(per + 1) + \\\r\n '{} segments encountered with elevdn > elevup.',\r\n level1txt='Backwards segments:',\r\n )\r\n\r\n # next check for rises between segments\r\n non_outlets = segment_data.outseg > 0\r\n non_outlets_seg_data = segment_data[\r\n non_outlets] # lake outsegs are < 0\r\n outseg_elevup = np.array(\r\n [segment_data.elevup[o - 1] for o in segment_data.outseg if\r\n o > 0])\r\n d_elev2 = outseg_elevup - segment_data.elevdn[non_outlets]\r\n non_outlets_seg_data = recfunctions.append_fields(\r\n non_outlets_seg_data,\r\n names=['outseg_elevup', 'd_elev2'],\r\n data=[outseg_elevup, d_elev2],\r\n usemask=False, asrecarray=False)\r\n\r\n txt += self._boolean_compare(\r\n non_outlets_seg_data[['nseg', 'outseg', 'elevdn',\r\n 'outseg_elevup', 'd_elev2']],\r\n col1='d_elev2', col2=np.zeros(len(non_outlets_seg_data)),\r\n level0txt='Stress Period {}: '.format(per + 1) + \\\r\n '{} segments encountered with segments encountered ' \\\r\n 'with outseg elevup > elevdn.',\r\n level1txt='Backwards segment connections:',\r\n 
)\r\n\r\n if len(txt) == 0:\r\n passed = True\r\n else:\r\n txt += 'Segment elevup and elevdn not ' + \\\r\n 'specified for nstrm=' + \\\r\n '{} and isfropt={}\\n'.format(self.sfr.nstrm,\r\n self.sfr.isfropt)\r\n passed = True\r\n self._txt_footer(headertxt, txt, 'segment elevations', passed)\r\n\r\n headertxt = 'Checking reach_data for ' + \\\r\n 'downstream rises in streambed elevation...\\n'\r\n txt = ''\r\n if self.verbose:\r\n print(headertxt.strip())\r\n passed = False\r\n if self.sfr.nstrm < 0 or self.sfr.reachinput and self.sfr.isfropt in [\r\n 1, 2, 3]: # see SFR input instructions\r\n\r\n # compute outreaches if they aren't there already\r\n if np.diff(self.sfr.reach_data.outreach).max() == 0:\r\n self.sfr.set_outreaches()\r\n\r\n # compute changes in elevation\r\n rd = self.reach_data.copy()\r\n elev = dict(zip(rd.reachID, rd.strtop))\r\n dnelev = {rid: elev[rd.outreach[i]] if rd.outreach[i] != 0\r\n else -9999 for i, rid in enumerate(rd.reachID)}\r\n strtopdn = np.array([dnelev[r] for r in rd.reachID])\r\n diffs = np.array([(dnelev[i] - elev[i]) if dnelev[i] != -9999\r\n else -.001 for i in rd.reachID])\r\n\r\n reach_data = self.sfr.reach_data # inconsistent with other checks that work with\r\n # reach_data attribute of check class. Want to have get_outreaches as a method of sfr class\r\n # (for other uses). 
Not sure if other check methods should also copy reach_data directly from\r\n # SFR package instance for consistency.\r\n\r\n # use outreach values to get downstream elevations\r\n # non_outlets = reach_data[reach_data.outreach != 0]\r\n # outreach_elevdn = np.array([reach_data.strtop[o - 1] for o in reach_data.outreach])\r\n # d_strtop = outreach_elevdn[reach_data.outreach != 0] - non_outlets.strtop\r\n rd = recfunctions.append_fields(\r\n rd, names=['strtopdn', 'd_strtop'], data=[strtopdn, diffs],\r\n usemask=False, asrecarray=False)\r\n\r\n txt += self._boolean_compare(\r\n rd[['k', 'i', 'j', 'iseg', 'ireach', 'strtop', 'strtopdn',\r\n 'd_strtop', 'reachID']],\r\n col1='d_strtop', col2=np.zeros(len(rd)),\r\n level0txt='{} reaches encountered with strtop < strtop of downstream reach.',\r\n level1txt='Elevation rises:',\r\n )\r\n if len(txt) == 0:\r\n passed = True\r\n else:\r\n txt += 'Reach strtop not specified for nstrm={}, reachinput={} and isfropt={}\\n' \\\r\n .format(self.sfr.nstrm, self.sfr.reachinput, self.sfr.isfropt)\r\n passed = True\r\n self._txt_footer(headertxt, txt, 'reach elevations', passed)\r\n\r\n headertxt = 'Checking reach_data for inconsistencies between streambed elevations and the model grid...\\n'\r\n if self.verbose:\r\n print(headertxt.strip())\r\n txt = ''\r\n if self.sfr.parent.dis is None:\r\n txt += 'No DIS file supplied; cannot check SFR elevations against model grid.'\r\n self._txt_footer(headertxt, txt, '')\r\n return\r\n passed = False\r\n warning = True\r\n if (self.sfr.nstrm < 0 or self.sfr.reachinput and\r\n self.sfr.isfropt in [1, 2, 3]): # see SFR input instructions\r\n reach_data = np.array(self.reach_data)\r\n i, j, k = reach_data['i'], reach_data['j'], reach_data['k']\r\n\r\n # check streambed bottoms in relation to respective cell bottoms\r\n bots = self.sfr.parent.dis.botm.array[k, i, j]\r\n streambed_bots = reach_data['strtop'] - reach_data['strthick']\r\n reach_data = recfunctions.append_fields(\r\n reach_data, 
names=['layerbot', 'strbot'],\r\n data=[bots, streambed_bots], usemask=False, asrecarray=False)\r\n\r\n txt += self._boolean_compare(\r\n reach_data[['k', 'i', 'j', 'iseg', 'ireach', 'strtop',\r\n 'strthick', 'strbot', 'layerbot', 'reachID']],\r\n col1='layerbot', col2='strbot',\r\n level0txt='{} reaches encountered with streambed bottom below layer bottom.',\r\n level1txt='Layer bottom violations:',\r\n )\r\n if len(txt) > 0:\r\n warning = False # this constitutes an error (MODFLOW won't run)\r\n # check streambed elevations in relation to model top\r\n tops = self.sfr.parent.dis.top.array[i, j]\r\n reach_data = recfunctions.append_fields(\r\n reach_data, names='modeltop', data=tops,\r\n usemask=False, asrecarray=False)\r\n\r\n txt += self._boolean_compare(\r\n reach_data[['k', 'i', 'j', 'iseg', 'ireach',\r\n 'strtop', 'modeltop', 'strhc1', 'reachID']],\r\n col1='strtop', col2='modeltop',\r\n level0txt='{} reaches encountered with streambed above model top.',\r\n level1txt='Model top violations:',\r\n )\r\n\r\n if len(txt) == 0:\r\n passed = True\r\n else:\r\n txt += 'Reach strtop, strthick not specified for nstrm={}, reachinput={} and isfropt={}\\n' \\\r\n .format(self.sfr.nstrm, self.sfr.reachinput, self.sfr.isfropt)\r\n passed = True\r\n self._txt_footer(headertxt, txt,\r\n 'reach elevations vs. 
grid elevations', passed,\r\n warning=warning)\r\n\r\n # In cases where segment end elevations/thicknesses are used,\r\n # do these need to be checked for consistency with layer bottoms?\r\n\r\n headertxt = 'Checking segment_data for inconsistencies ' + \\\r\n 'between segment end elevations and the model grid...\\n'\r\n txt = ''\r\n if self.verbose:\r\n print(headertxt.strip())\r\n passed = False\r\n if self.sfr.isfropt in [0, 4, 5]:\r\n reach_data = self.reach_data\r\n pers = sorted(self.segment_data.keys())\r\n for per in pers:\r\n segment_data = self.segment_data[per][\r\n self.segment_data[per].elevup > -999999]\r\n\r\n # enforce consecutive increasing segment numbers (for indexing)\r\n segment_data.sort(order='nseg')\r\n t = _check_numbers(len(segment_data), segment_data.nseg,\r\n level=1, datatype='Segment')\r\n if len(t) > 0:\r\n raise Exception(\r\n 'Elevation check requires consecutive segment numbering.')\r\n\r\n first_reaches = reach_data[reach_data.ireach == 1].copy()\r\n last_reaches = reach_data[\r\n np.append((np.diff(reach_data.iseg) == 1), True)].copy()\r\n segment_ends = recfunctions.stack_arrays(\r\n [first_reaches, last_reaches],\r\n asrecarray=True, usemask=False)\r\n segment_ends['strtop'] = np.append(segment_data['elevup'],\r\n segment_data['elevdn'])\r\n i, j = segment_ends.i, segment_ends.j\r\n tops = self.sfr.parent.dis.top.array[i, j]\r\n diff = tops - segment_ends.strtop\r\n segment_ends = recfunctions.append_fields(\r\n segment_ends,\r\n names=['modeltop', 'diff'], data=[tops, diff],\r\n usemask=False, asrecarray=False)\r\n\r\n txt += self._boolean_compare(segment_ends[['k', 'i', 'j', 'iseg',\r\n 'strtop', 'modeltop',\r\n 'diff',\r\n 'reachID']].copy(),\r\n col1=np.zeros(len(segment_ends)),\r\n col2='diff',\r\n level0txt='{} reaches encountered with streambed above model top.',\r\n level1txt='Model top violations:',\r\n )\r\n\r\n if len(txt) == 0:\r\n passed = True\r\n else:\r\n txt += 'Segment elevup and elevdn not specified for 
nstrm={} and isfropt={}\\n' \\\r\n .format(self.sfr.nstrm, self.sfr.isfropt)\r\n passed = True\r\n self._txt_footer(headertxt, txt, 'segment elevations vs. model grid',\r\n passed)\r\n\r\n def slope(self, minimum_slope=1e-4, maximum_slope=1.0):\r\n \"\"\"Checks that streambed slopes are greater than or equal to a specified minimum value.\r\n Low slope values can cause \"backup\" or unrealistic stream stages with icalc options\r\n where stage is computed.\r\n \"\"\"\r\n headertxt = 'Checking for streambed slopes of less than {}...\\n'.format(\r\n minimum_slope)\r\n txt = ''\r\n if self.verbose:\r\n print(headertxt.strip())\r\n\r\n passed = False\r\n if self.sfr.isfropt in [1, 2, 3]:\r\n if np.diff(self.reach_data.slope).max() == 0:\r\n txt += 'isfropt setting of 1,2 or 3 requires slope information!\\n'\r\n else:\r\n is_less = self.reach_data.slope < minimum_slope\r\n if np.any(is_less):\r\n below_minimum = self.reach_data[is_less]\r\n txt += '{} instances of streambed slopes below minimum found.\\n'.format(\r\n len(below_minimum))\r\n if self.level == 1:\r\n txt += 'Reaches with low slopes:\\n'\r\n txt += _print_rec_array(below_minimum, delimiter='\\t')\r\n if len(txt) == 0:\r\n passed = True\r\n else:\r\n txt += 'slope not specified for isfropt={}\\n'.format(\r\n self.sfr.isfropt)\r\n passed = True\r\n self._txt_footer(headertxt, txt, 'minimum slope', passed)\r\n\r\n headertxt = 'Checking for streambed slopes of greater than {}...\\n'.format(\r\n maximum_slope)\r\n txt = ''\r\n if self.verbose:\r\n print(headertxt.strip())\r\n\r\n passed = False\r\n if self.sfr.isfropt in [1, 2, 3]:\r\n if np.diff(self.reach_data.slope).max() == 0:\r\n txt += 'isfropt setting of 1,2 or 3 requires slope information!\\n'\r\n else:\r\n is_greater = self.reach_data.slope > maximum_slope\r\n\r\n if np.any(is_greater):\r\n above_max = self.reach_data[is_greater]\r\n txt += '{} instances of streambed slopes above maximum found.\\n'.format(\r\n len(above_max))\r\n if self.level == 1:\r\n 
txt += 'Reaches with high slopes:\\n'\r\n txt += _print_rec_array(above_max, delimiter='\\t')\r\n if len(txt) == 0:\r\n passed = True\r\n else:\r\n txt += 'slope not specified for isfropt={}\\n'.format(\r\n self.sfr.isfropt)\r\n passed = True\r\n self._txt_footer(headertxt, txt, 'maximum slope', passed)\r\n\r\n\r\ndef _check_numbers(n, numbers, level=1, datatype='reach'):\r\n \"\"\"\r\n Check that a sequence of numbers is consecutive\r\n (that the sequence is equal to the range from 1 to n+1, where n is\r\n the expected length of the sequence).\r\n\r\n Parameters\r\n ----------\r\n n : int\r\n Expected length of the sequence (i.e. number of stream segments)\r\n numbers : array\r\n Sequence of numbers (i.e. 'nseg' column from the segment_data array)\r\n level : int\r\n Check method analysis level. If level=0, summary checks are\r\n performed. If level=1, full checks are performed.\r\n datatype : str, optional\r\n Only used for reporting.\r\n \"\"\"\r\n txt = ''\r\n num_range = np.arange(1, n + 1)\r\n if not np.array_equal(num_range, numbers):\r\n txt += 'Invalid {} numbering\\n'.format(datatype)\r\n if level == 1:\r\n # consistent dimension for boolean array\r\n non_consecutive = np.append(np.diff(numbers) != 1,\r\n False)\r\n gaps = num_range[non_consecutive] + 1\r\n if len(gaps) > 0:\r\n gapstr = ' '.join(map(str, gaps))\r\n txt += 'Gaps in numbering at positions {}\\n'.format(gapstr)\r\n return txt\r\n\r\n\r\ndef _isnumeric(s):\r\n try:\r\n float(s)\r\n return True\r\n except:\r\n return False\r\n\r\n\r\ndef _markitzero(recarray, inds):\r\n \"\"\"\r\n Subtracts 1 from columns specified in inds argument, to convert from\r\n 1 to 0-based indexing\r\n\r\n \"\"\"\r\n lnames = [n.lower() for n in recarray.dtype.names]\r\n for idx in inds:\r\n if (idx in lnames):\r\n recarray[idx] -= 1\r\n\r\n\r\ndef _pop_item(line):\r\n try:\r\n return float(line.pop(0))\r\n except:\r\n return 0.\r\n\r\n\r\ndef _get_dataset(line, dataset):\r\n # interpret number supplied with decimal 
points as floats, rest as ints\r\n # this could be a bad idea (vs. explicitly formatting values for each dataset)\r\n for i, s in enumerate(line_parse(line)):\r\n try:\r\n n = int(s)\r\n except:\r\n try:\r\n n = float(s)\r\n except:\r\n break\r\n dataset[i] = n\r\n return dataset\r\n\r\n\r\ndef _get_duplicates(a):\r\n \"\"\"\r\n Returns duplicate values in an array, similar to pandas .duplicated()\r\n method\r\n http://stackoverflow.com/questions/11528078/determining-duplicate-values-in-an-array\r\n \"\"\"\r\n s = np.sort(a, axis=None)\r\n equal_to_previous_item = np.append(s[1:] == s[:-1],\r\n False) # maintain same dimension for boolean array\r\n return np.unique(s[equal_to_previous_item])\r\n\r\n\r\ndef _get_item2_names(nstrm, reachinput, isfropt, structured=False):\r\n \"\"\"\r\n Determine which variables should be in item 2, based on model grid type,\r\n reachinput specification, and isfropt.\r\n\r\n Returns\r\n -------\r\n names : list of str\r\n List of names (same as variables in SFR Package input instructions) of\r\n columns to assign (upon load) or retain (upon write) in reach_data\r\n array.\r\n\r\n Notes\r\n -----\r\n Lowercase is used for all variable names.\r\n\r\n \"\"\"\r\n names = []\r\n if structured:\r\n names += ['k', 'i', 'j']\r\n else:\r\n names += ['node']\r\n names += ['iseg', 'ireach', 'rchlen']\r\n if nstrm < 0 or reachinput:\r\n if isfropt in [1, 2, 3]:\r\n names += ['strtop', 'slope', 'strthick', 'strhc1']\r\n if isfropt in [2, 3]:\r\n names += ['thts', 'thti', 'eps']\r\n if isfropt == 3:\r\n names += ['uhc']\r\n return names\r\n\r\n\r\ndef _fmt_string(array, float_format='{!s}'):\r\n fmt_string = ''\r\n for field in array.dtype.descr:\r\n vtype = field[1][1].lower()\r\n if vtype == 'v':\r\n continue\r\n if vtype == 'i':\r\n fmt_string += '{:.0f} '\r\n elif vtype == 'f':\r\n fmt_string += '{} '.format(float_format)\r\n elif vtype == 'o':\r\n fmt_string += '{} '\r\n elif vtype == 's':\r\n raise Exception(\"MfList error: '\\str\\' type 
found it dtype.\" + \\\r\n \" This gives unpredictable results when \" + \\\r\n \"recarray to file - change to \\'object\\' type\")\r\n else:\r\n raise Exception(\"MfList.fmt_string error: unknown vtype \" + \\\r\n \"in dtype:\" + vtype)\r\n return fmt_string\r\n\r\n\r\ndef _fmt_string_list(array, float_format='{!s}'):\r\n fmt_string = []\r\n for field in array.dtype.descr:\r\n vtype = field[1][1].lower()\r\n if vtype == 'v':\r\n continue\r\n if (vtype == 'i'):\r\n fmt_string += ['{:.0f}']\r\n elif (vtype == 'f'):\r\n fmt_string += [float_format]\r\n elif (vtype == 'o'):\r\n fmt_string += ['{}']\r\n elif (vtype == 's'):\r\n raise Exception(\"MfList error: '\\str\\' type found it dtype.\" + \\\r\n \" This gives unpredictable results when \" + \\\r\n \"recarray to file - change to \\'object\\' type\")\r\n else:\r\n raise Exception(\"MfList.fmt_string error: unknown vtype \" + \\\r\n \"in dtype:\" + vtype)\r\n return fmt_string\r\n\r\n\r\ndef _print_rec_array(array, cols=None, delimiter=' ', float_format='{:.6f}'):\r\n \"\"\"\r\n Print out a numpy record array to string, with column names.\r\n\r\n Parameters\r\n ----------\r\n cols : list of strings\r\n List of columns to print.\r\n delimiter : string\r\n Delimited to use.\r\n\r\n Returns\r\n -------\r\n txt : string\r\n Text string of array.\r\n\r\n \"\"\"\r\n txt = ''\r\n if cols is not None:\r\n cols = [c for c in array.dtype.names if c in cols]\r\n else:\r\n cols = list(array.dtype.names)\r\n # drop columns with no data\r\n if np.shape(array)[0] > 1:\r\n cols = [c for c in cols if array[c].min() > -999999]\r\n # add _fmt_string call here\r\n array = np.array(array)[cols]\r\n fmts = _fmt_string_list(array, float_format=float_format)\r\n txt += delimiter.join(cols) + '\\n'\r\n txt += '\\n'.join(\r\n [delimiter.join(fmts).format(*r) for r in array.tolist()])\r\n return txt\r\n\r\n\r\ndef _parse_1c(line, reachinput, transroute):\r\n \"\"\"\r\n Parse Data Set 1c for SFR2 package.\r\n See 
http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?sfr.htm for more info\r\n\r\n Parameters\r\n ----------\r\n line : str\r\n line read from SFR package input file\r\n\r\n Returns\r\n -------\r\n a list of length 13 containing all variables for Data Set 6a\r\n\r\n \"\"\"\r\n na = 0\r\n # line = _get_dataset(line, [0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 1, 30, 1, 2, 0.75, 0.0001, []])\r\n # line = line.strip().split()\r\n line = line_parse(line)\r\n\r\n nstrm = int(line.pop(0))\r\n nss = int(line.pop(0))\r\n nsfrpar = int(line.pop(0))\r\n nparseg = int(line.pop(0))\r\n const = float(line.pop(0))\r\n dleak = float(line.pop(0))\r\n ipakcb = int(line.pop(0))\r\n istcb2 = int(line.pop(0))\r\n\r\n isfropt, nstrail, isuzn, nsfrsets = na, na, na, na\r\n if reachinput:\r\n nstrm = abs(nstrm) # see explanation for dataset 1c in online guide\r\n isfropt = int(line.pop(0))\r\n if isfropt > 1:\r\n nstrail = int(line.pop(0))\r\n isuzn = int(line.pop(0))\r\n nsfrsets = int(line.pop(0))\r\n if nstrm < 0:\r\n isfropt = int(line.pop(0))\r\n if isfropt > 1:\r\n nstrail = int(line.pop(0))\r\n isuzn = int(line.pop(0))\r\n nsfrsets = int(line.pop(0))\r\n\r\n irtflg, numtim, weight, flwtol = na, na, na, na\r\n if nstrm < 0 or transroute:\r\n irtflg = int(_pop_item(line))\r\n if irtflg > 0:\r\n numtim = int(line.pop(0))\r\n weight = float(line.pop(0))\r\n flwtol = float(line.pop(0))\r\n\r\n # auxiliary variables (MODFLOW-LGR)\r\n option = [line[i] for i in np.arange(1, len(line)) if\r\n 'aux' in line[i - 1].lower()]\r\n\r\n return nstrm, nss, nsfrpar, nparseg, const, dleak, ipakcb, istcb2, \\\r\n isfropt, nstrail, isuzn, nsfrsets, irtflg, numtim, weight, flwtol, \\\r\n option\r\n\r\n\r\ndef _parse_6a(line, option):\r\n \"\"\"\r\n Parse Data Set 6a for SFR2 package.\r\n See http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?sfr.htm for more info\r\n\r\n Parameters\r\n ----------\r\n line : str\r\n line read from SFR package input file\r\n\r\n Returns\r\n -------\r\n a 
list of length 13 containing all variables for Data Set 6a\r\n \"\"\"\r\n # line = line.strip().split()\r\n line = line_parse(line)\r\n\r\n xyz = []\r\n # handle any aux variables at end of line\r\n for s in line:\r\n if s.lower() in option:\r\n xyz.append(s.lower())\r\n\r\n na = 0\r\n nseg = int(_pop_item(line))\r\n icalc = int(_pop_item(line))\r\n outseg = int(_pop_item(line))\r\n iupseg = int(_pop_item(line))\r\n iprior = na\r\n nstrpts = na\r\n\r\n if iupseg > 0:\r\n iprior = int(_pop_item(line))\r\n if icalc == 4:\r\n nstrpts = int(_pop_item(line))\r\n\r\n flow = _pop_item(line)\r\n runoff = _pop_item(line)\r\n etsw = _pop_item(line)\r\n pptsw = _pop_item(line)\r\n roughch = na\r\n roughbk = na\r\n\r\n if icalc in [1, 2]:\r\n roughch = _pop_item(line)\r\n if icalc == 2:\r\n roughbk = _pop_item(line)\r\n\r\n cdpth, fdpth, awdth, bwdth = na, na, na, na\r\n if icalc == 3:\r\n cdpth, fdpth, awdth, bwdth = map(float, line)\r\n return nseg, icalc, outseg, iupseg, iprior, nstrpts, flow, runoff, etsw, \\\r\n pptsw, roughch, roughbk, cdpth, fdpth, awdth, bwdth, xyz\r\n\r\n\r\ndef _parse_6bc(line, icalc, nstrm, isfropt, reachinput, per=0):\r\n \"\"\"\r\n Parse Data Set 6b for SFR2 package.\r\n See http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?sfr.htm for more info\r\n\r\n Parameters\r\n ----------\r\n line : str\r\n line read from SFR package input file\r\n\r\n Returns\r\n -------\r\n a list of length 9 containing all variables for Data Set 6b\r\n\r\n \"\"\"\r\n nvalues = sum([_isnumeric(s) for s in line_parse(line)])\r\n line = _get_dataset(line, [0] * nvalues)\r\n\r\n hcond, thickm, elevupdn, width, depth, thts, thti, eps, uhc = [0.0] * 9\r\n\r\n if isfropt in [0, 4, 5] and icalc <= 0:\r\n hcond = line.pop(0)\r\n thickm = line.pop(0)\r\n elevupdn = line.pop(0)\r\n width = line.pop(0)\r\n depth = line.pop(0)\r\n elif isfropt in [0, 4, 5] and icalc == 1:\r\n hcond = line.pop(0)\r\n if isfropt in [4, 5] and per > 0:\r\n pass\r\n else:\r\n thickm = 
line.pop(0)\r\n elevupdn = line.pop(0)\r\n width = line.pop(\r\n 0) # depth is not read if icalc == 1; see table in online guide\r\n thts = _pop_item(line)\r\n thti = _pop_item(line)\r\n eps = _pop_item(line)\r\n if isfropt == 5:\r\n uhc = line.pop(0)\r\n elif isfropt in [0, 4, 5] and icalc >= 2:\r\n hcond = line.pop(0)\r\n if isfropt in [4, 5] and per > 0 and icalc == 2:\r\n pass\r\n else:\r\n thickm = line.pop(0)\r\n elevupdn = line.pop(0)\r\n if isfropt in [4, 5] and per == 0:\r\n # table in online guide suggests that the following items should be present in this case\r\n # but in the example\r\n thts = _pop_item(line)\r\n thti = _pop_item(line)\r\n eps = _pop_item(line)\r\n if isfropt == 5:\r\n uhc = _pop_item(line)\r\n else:\r\n pass\r\n elif isfropt == 1 and icalc <= 1:\r\n width = line.pop(0)\r\n if icalc <= 0:\r\n depth = line.pop(0)\r\n elif isfropt in [2, 3] and icalc <= 1:\r\n if per > 0:\r\n pass\r\n else:\r\n width = line.pop(0)\r\n if icalc <= 0:\r\n depth = line.pop(0)\r\n else:\r\n pass\r\n return hcond, thickm, elevupdn, width, depth, thts, thti, eps, uhc\r\n\r\n\r\ndef find_path(graph, start, end=0, path=()):\r\n\r\n path = list(path) + [start]\r\n if start == end:\r\n return path\r\n if start not in graph:\r\n return None\r\n if not isinstance(graph[start], list):\r\n graph[start] = [graph[start]]\r\n for node in graph[start]:\r\n if node not in path:\r\n newpath = find_path(graph, node, end, path)\r\n if newpath: return newpath\r\n return None\r\n" ]
[ [ "numpy.array_equal", "numpy.min", "numpy.mean", "numpy.sign", "numpy.where", "numpy.sort", "numpy.cumsum", "numpy.dtype", "numpy.concatenate", "numpy.bincount", "numpy.max", "pandas.DataFrame", "numpy.interp", "matplotlib.pyplot.subplots", "numpy.arange", "numpy.sqrt", "numpy.append", "numpy.lib.recfunctions.stack_arrays", "numpy.in1d", "numpy.array", "numpy.savetxt", "numpy.zeros", "matplotlib.pyplot.autoscale", "numpy.lib.recfunctions.append_fields", "numpy.shape", "numpy.diff", "numpy.isscalar", "numpy.squeeze", "numpy.any", "numpy.atleast_1d", "numpy.abs", "numpy.lib.recfunctions.merge_arrays", "numpy.unique" ] ]
ted-hou/spike-sorting
[ "c71b05ce3a08c4136ea731a6d639c5b93cc7d35a" ]
[ "continuousdata.py" ]
[ "from __future__ import annotations # allows TreeItem type hint in its own constructor\nimport os\nimport warnings\nfrom dataclasses import dataclass\nfrom datetime import datetime, timezone, timedelta\nfrom typing import Sequence\nimport numpy as np\nimport numpy.random\n\n\nclass ContinuousData:\n @dataclass\n class ChannelInfo:\n electrode_id: int # Electrode number.\n label: str # Label or name of the electrode (e.g. “chan1”).\n analog_units: str # Units of the analog range values (“mV”, “μV”).\n high_freq_cutoff: float # High frequency cutoff (Hz) used in bandpass filter: Inf = None\n low_freq_cutoff: float # Low frequency cutoff (Hz) used in bandpass filter: 0 = None\n conversion_factor: float # Multiply by this value to convert data from int16 to float64\n\n def __init__(self, electrode_id: int, label: str = '', analog_units: str = 'μV', high_freq_cutoff: float = None, low_freq_cutoff: float = None, conversion_factor: float = 1.0):\n self.electrode_id = electrode_id\n self.label = label\n self.analog_units = analog_units\n self.high_freq_cutoff = high_freq_cutoff\n self.low_freq_cutoff = low_freq_cutoff\n self.conversion_factor = conversion_factor\n\n file: str # path to continuous data file\n data: np.ndarray # int16 representation of continuous data with shape (num_samples, num_channels). 
Needs to be multiplied with conversion_factor for actual voltage\n time_origin: datetime # UTC time at first sample in file\n channels: [int] # zero-based indices indicating which channels were read from file\n electrodes: [int] # electrode ids read from file (this is usually a 1-based index)\n sample_rate: float # sampling rate in Hz\n channels_info: [ChannelInfo]\n n_samples: int\n n_channels: int\n\n def filter(self, low_freq=250.0, high_freq=5000.0) -> np.ndarray:\n pass\n\n @staticmethod\n def generate(n_channels=32, min_spike_rate=10.0, max_spike_rate=60.0, sample_rate=30000, n_samples=300000, seed: int = None):\n \"\"\"Generate simulated data.\"\"\"\n cd = ContinuousData()\n cd.file = None\n cd.time_origin = datetime.now(timezone.utc)\n cd.channels = list(range(n_channels))\n cd.electrodes = list(range(1, n_channels + 1))\n cd.sample_rate = sample_rate\n cd.n_samples = n_samples\n cd.n_channels = n_channels\n cd.channels_info = [ContinuousData.ChannelInfo(i + 1, label=f\"chan{i + 1}\", conversion_factor=0.25) for i in range(n_channels)]\n\n # Generate spike train\n # Random firing rates for each channel\n rng = numpy.random.default_rng(seed)\n fr = (max_spike_rate - min_spike_rate) * rng.random(size=n_channels) + min_spike_rate\n\n # Firing probability for each discrete time point is (fr * dt), equivalent to (ft / sample_rate)\n spike_train = rng.random(size=(n_samples, n_channels)) * sample_rate < fr\n\n # Generate a standard waveform for each channel\n def generate_waveform(phase_durations=(.0002, .0003, .0005), depolarization_voltage=-200.0, hyperpolarization_voltage=50.0):\n # Generate discrete times and corresponding voltages (td, vd)\n dur = list(phase_durations)\n dur.insert(0, 0)\n dur.insert(1, dur[1] * 0.05)\n dur[2] *= 0.95\n dur.insert(4, dur[4] * 0.5)\n dur[5] *= 0.5\n td = np.asarray(dur).cumsum()\n vd = np.asarray([0, 0, depolarization_voltage, hyperpolarization_voltage, hyperpolarization_voltage*0.367879, 0])\n # Spline interpolation of td, 
vd -> ts, vs\n from scipy.interpolate import splrep, splev\n spl = splrep(td, vd, per=True)\n ts = np.arange(0, td[-1], 1/sample_rate)\n vs = splev(ts, spl)\n return vs\n\n phase_durations_mean = np.asarray((.0001, .00015, .00025))\n phase_durations_sd_scale = np.asarray(0.1)\n phase_durations = rng.normal(\n loc=phase_durations_mean,\n scale=phase_durations_sd_scale * phase_durations_mean,\n size=(n_channels, 3)\n )\n\n # Convolve spike-train with waveform shape\n data_f = np.empty((n_samples, n_channels), dtype=np.float64)\n for i_channel in range(n_channels):\n waveform = generate_waveform(phase_durations=phase_durations[i_channel, :],\n depolarization_voltage=rng.normal(loc=-300, scale=50),\n hyperpolarization_voltage=rng.normal(loc=100, scale=50))\n data_f[:, i_channel] = np.convolve(spike_train[:, i_channel], waveform, 'same')\n\n # Add noise and convert to int\n white_noise = 100.0 * rng.standard_normal(size=data_f.shape, dtype=np.float64)\n from scipy import signal\n sos = signal.butter(2, 7500, btype='lowpass', output='sos', fs=sample_rate)\n data_f += signal.sosfilt(sos, white_noise)\n cd.data = np.rint(data_f * 4).astype(np.int16)\n\n return cd\n\n\nclass BlackrockContinuousData(ContinuousData):\n packet_index: int\n packet_sample_offset: int\n\n @dataclass\n class ChannelInfo(ContinuousData.ChannelInfo):\n type: str # Always set to “CC” for “Continuous Channels”\n bank: int # Physical system connector or module connected to the electrode (e.g. Front-End Bank A, B, C,\n # D are 1, 2, 3, 4).\n pin: int # Physical system connector pin or channel connected to the electrode (e.g. 1-37 on bank A, B, C, D).\n min_digital_value: int # Minimum digital value of the signal (e.g. -8192).\n max_digital_value: int # Maximum digital value of the signal (e.g. 8192).\n min_analog_value: int # Minimum analog value of the signal (e.g. -5000 mV).\n max_analog_value: int # Maximum analog value of the signal (e.g. 
5000 mV).\n high_freq_order: int # Order of the filter used for high frequency cutoff: 0 = NONE\n high_filter_type: str # Type of filter used for high frequency cutoff: 0 = NONE, 1 = Butterworth\n low_freq_order: int # Order of the filter used for high frequency cutoff: 0 = NONE\n low_filter_type: str # Type of filter used for high frequency cutoff: 0 = NONE, 1 = Butterworth\n\n def __init__(self, data: bytes):\n self.type = data[0:2].decode()\n self.electrode_id = int.from_bytes(data[2:4], byteorder='little', signed=False)\n self.label = data[4:20].rstrip(b'\\x00').decode()\n self.bank = int(data[20])\n self.pin = int(data[21])\n self.min_digital_value = int.from_bytes(data[22:24], byteorder='little', signed=True)\n self.max_digital_value = int.from_bytes(data[24:26], byteorder='little', signed=True)\n self.min_analog_value = int.from_bytes(data[26:28], byteorder='little', signed=True)\n self.max_analog_value = int.from_bytes(data[28:30], byteorder='little', signed=True)\n self.conversion_factor = self.max_analog_value / self.max_digital_value\n self.analog_units = data[30:46].rstrip(b'\\x00').decode()\n self.high_freq_cutoff = int.from_bytes(data[46:50], byteorder='little', signed=False) / 1000\n self.high_freq_order = int.from_bytes(data[50:54], byteorder='little', signed=False)\n self.high_filter_type = 'Butterworth' if int.from_bytes(data[54:56], byteorder='little',\n signed=False) > 0 else 'None'\n self.low_freq_cutoff = int.from_bytes(data[56:60], byteorder='little', signed=False) / 1000\n self.low_freq_order = int.from_bytes(data[60:64], byteorder='little', signed=False)\n self.low_filter_type = 'Butterworth' if int.from_bytes(data[64:66], byteorder='little',\n signed=False) > 0 else 'None'\n\n def __init__(self, file: str, data: np.ndarray, sample_rate: float, channels: Sequence[int], electrodes: Sequence[int], channels_info: Sequence[ChannelInfo], time_origin: datetime, packet_index: int = 0, packet_sample_offset: int = 0):\n self.file = file\n self.data 
= data\n self.sample_rate = sample_rate\n self.channels = channels\n self.electrodes = electrodes\n self.channels_info = channels_info\n self.packet_index = packet_index\n self.packet_sample_offset = packet_sample_offset\n if packet_sample_offset > 0:\n self.time_origin = time_origin + timedelta(seconds=packet_sample_offset/sample_rate)\n else:\n self.time_origin = time_origin\n self.n_samples = data.shape[0]\n self.n_channels = data.shape[1]\n\n @staticmethod\n def fromfile(file: str, electrodes: Sequence[int] = None, channels: Sequence[int] = None, n_samples: int = 0xFFFFFFFF, packet_mode='last'):\n \"\"\"Read header + continuous data from NSx file.\n\n :param file: path to .NSx file\n :param electrodes: list of electrode IDs to read frsom. Used to search in __NSxHeader.channels_info.id (default None reads all electrodes). This takes priority over 'channels'\n :param channels: list of (0-based) channel indices to read from. Range from 0 to N-1, where N is number of recorded channels in file. (default None reads all channels)\n :param n_samples: number of samples to read. 
(default/max value: 0xFFFFFFFF)\n :param packet_mode: read 'first', 'last', or 'all', only used in case multiple data packets are in one NSx file\n :return: BlackrockContinuousData object, or list[BlackrockContinuousData] if packet_mode is 'all'\n \"\"\"\n if n_samples > 0xFFFFFFFF:\n warnings.warn(f\"Parameter n_samples ({n_samples:x}) is capped at {0xFFFFFFFF:x}\")\n n_samples = 0xFFFFFFFF\n\n file_type_id, bytes_in_header, sample_rate, time_origin, n_channels_in_file, channels_info = BlackrockContinuousData._read_header(file)\n\n channels, electrodes, channels_info = BlackrockContinuousData._validate_sel_channels(channels, electrodes, n_channels_in_file, channels_info)\n\n packet_index = -1\n packet_sample_offset = 0\n packet_n_samples = 0\n prev_packet_n_samples = 0\n packet_start_pos = 0\n\n # Read all packets\n if packet_mode == 'all':\n n_samples_read = 0\n is_eof = False\n packet_end_pos = bytes_in_header\n cd_list = []\n while not is_eof and n_samples_read < n_samples:\n # Scan size of next data packet\n packet_index += 1\n is_eof, packet_start_pos, packet_n_samples, packet_end_pos, packet_sample_offset = \\\n BlackrockContinuousData._scan_next_packet(file, file_type_id, n_channels_in_file, packet_end_pos)\n\n # Timing correction required\n if packet_index > 0 and packet_sample_offset == 0:\n time_origin += timedelta(seconds=prev_packet_n_samples/sample_rate)\n prev_packet_n_samples = packet_n_samples\n\n # Read next data packet\n n_samples_to_read_from_packet = min(n_samples - n_samples_read, packet_n_samples)\n data = BlackrockContinuousData._read_next_packet(file, packet_start_pos, channels,\n n_samples_to_read_from_packet, n_channels_in_file)\n n_samples_read -= n_samples_to_read_from_packet\n cd_list.append(BlackrockContinuousData(file, data, sample_rate, channels, electrodes, channels_info,\n time_origin, packet_index, packet_sample_offset))\n return cd_list\n # Only read first packet\n elif packet_mode == 'first':\n packet_index += 1\n _, 
packet_start_pos, packet_n_samples, _, packet_sample_offset = \\\n BlackrockContinuousData._scan_next_packet(file, file_type_id, n_channels_in_file, bytes_in_header)\n # Only read last packet (this is what makes sense for my 2-rig recording setup)\n elif packet_mode == 'last':\n is_eof = False\n packet_end_pos = bytes_in_header\n while not is_eof:\n packet_index += 1\n is_eof, packet_start_pos, packet_n_samples, packet_end_pos, packet_sample_offset = \\\n BlackrockContinuousData._scan_next_packet(file, file_type_id, n_channels_in_file, packet_end_pos)\n\n # Timing correction required\n if packet_index > 0 and packet_sample_offset == 0:\n time_origin += timedelta(seconds=prev_packet_n_samples/sample_rate)\n prev_packet_n_samples = packet_n_samples\n\n else:\n raise ValueError(f\"Unknown packet_mode '{packet_mode}', only 'first', 'last', and 'all' are supported.\")\n\n # Below: only executed when packet_mode is 'first' or 'last'\n if n_samples > packet_n_samples:\n warnings.warn(f\"Requested {n_samples} samples but the {packet_mode} data packet only contains {packet_n_samples}.\")\n n_samples = min(n_samples, packet_n_samples)\n data = BlackrockContinuousData._read_next_packet(file, packet_start_pos, channels, n_samples, n_channels_in_file)\n\n cd = BlackrockContinuousData(file, data, sample_rate, channels, electrodes, channels_info, time_origin, packet_index, packet_sample_offset)\n return cd\n\n @staticmethod\n def _read_header(file):\n \"\"\"\n :return:\n file_type_id: str, Always set to “BRSMPGRP” for “Neural Continuous Data”. Note: In prior versions of the file, this field was set to “NEURALSG” or “NEURALCD”.\n bytes_in_headers: int, The total number of bytes in both headers (Standard and Extended). This value can also be considered to be a zeroindexed pointer to the first data packet.\n label: str, Label of the sampling group e.g. “1 kS/s” or “LFP Low”. 
Must be ’0’ terminated.\n sample_rate: float, Sampling rate in Hz\n time_origin: datetime, Windows UTC time at start of acquisition, corresponds to timestamp = 0.\n n_channels: int, Number of channels per data point.\n channels_info = []\n \"\"\"\n with open(file, mode='rb') as file:\n file_type_id = file.read(8).decode()\n file.seek(2, os.SEEK_CUR) # file_spec = ord(file.read(1)) + 0.1 * ord(file.read(1))\n bytes_in_header = int.from_bytes(file.read(4), byteorder='little', signed=False)\n file.seek(16, os.SEEK_CUR) # label = file.read(16).rstrip(b'\\x00').decode()\n file.seek(256, os.SEEK_CUR) # Comments\n period = int.from_bytes(file.read(4), byteorder='little', signed=False)\n assert period == 1\n sample_rate = int.from_bytes(file.read(4), byteorder='little', signed=False) / period\n\n # Convert file-start windows-time (UTC) to datetime object (UTC)\n time_origin = file.read(16)\n yy = int.from_bytes(time_origin[0:2], byteorder='little', signed=False)\n mo = int.from_bytes(time_origin[2:4], byteorder='little', signed=False)\n dd = int.from_bytes(time_origin[6:8], byteorder='little', signed=False)\n hh = int.from_bytes(time_origin[8:10], byteorder='little', signed=False)\n mm = int.from_bytes(time_origin[10:12], byteorder='little', signed=False)\n ss = int.from_bytes(time_origin[12:14], byteorder='little', signed=False)\n us = int.from_bytes(time_origin[14:16], byteorder='little', signed=False) * 1000\n time_origin = datetime(yy, mo, dd, hh, mm, ss, us, tzinfo=timezone.utc)\n\n n_channels = int.from_bytes(file.read(4), byteorder='little', signed=False)\n channels_info = [BlackrockContinuousData.ChannelInfo(file.read(66)) for _ in range(0, n_channels)]\n if file_type_id == 'NEURALSG':\n raise ValueError(f\"Blackrock NSx file type '{file_type_id}' (2.1) is not supported. 
Must be 'NEURALCD' or 'BRSMPGRP'\")\n\n return file_type_id, bytes_in_header, sample_rate, time_origin, n_channels, channels_info\n\n @staticmethod\n def _scan_next_packet(file, file_type_id: str, n_channels: int, offset: int) -> (bool, int):\n \"\"\"\n :return:\n packet_end_is_eof: True if this packet ends at the end of file\n packet_end_pos: use this as base for scanning next packet\n packet_sample_offset: this / sample_rate + header.time_origin is packet start time\n \"\"\"\n with open(file, mode='rb') as file:\n file.seek(offset, os.SEEK_SET)\n\n # Each packet starts with a 0x01\n packet_header = file.read(1)\n if packet_header != b'\\x01':\n raise ValueError(f\"Blackrock data packets should always start with 0x01, got {packet_header.hex()} instead\")\n\n if file_type_id == 'NEURALCD':\n packet_sample_offset = int.from_bytes(file.read(4), byteorder='little', signed=False)\n elif file_type_id == 'BRSMPGRP':\n packet_sample_offset = int.from_bytes(file.read(8), byteorder='little', signed=False)\n else:\n raise ValueError(f\"Blackrock NSx file type '{file_type_id}' is not supported. 
Must be 'NEURALCD' or 'BRSMPGRP'\")\n\n packet_n_samples = int.from_bytes(file.read(4), byteorder='little', signed=False)\n packet_start_pos = file.tell()\n packet_end_pos = file.seek(2 * packet_n_samples * n_channels, os.SEEK_CUR)\n file_end_pos = file.seek(0, os.SEEK_END)\n packet_end_is_eof = packet_end_pos == file_end_pos\n\n return packet_end_is_eof, packet_start_pos, packet_n_samples, packet_end_pos, packet_sample_offset\n\n @staticmethod\n def _read_next_packet(file, packet_start, channels: Sequence[int], n_samples: int, n_channels_in_file: int):\n with open(file, mode='rb') as file:\n file.seek(packet_start, os.SEEK_SET)\n\n # Simple case, read all channels\n n_channels = len(channels)\n if n_channels == n_channels_in_file:\n data = np.fromfile(file, dtype=np.int16, count=n_channels * n_samples)\n data = np.reshape(data, (n_samples, n_channels), order='C')\n # Read subset of channels\n else:\n range_samples = range(n_samples)\n data = np.ndarray((n_samples, n_channels), dtype=np.int16)\n\n cur_chn = 0 # Absolute channel index from file\n for i_sample in range_samples:\n i = 0 # Consolidated channel index\n while i < len(channels):\n file.seek(2*(channels[i] - cur_chn), os.SEEK_CUR)\n data[i_sample, i] = np.frombuffer(file.read(2), dtype=np.int16) # Should default to little-endian order\n cur_chn = channels[i] + 1\n i += 1\n file.seek(2 * (n_channels_in_file - cur_chn), os.SEEK_CUR)\n cur_chn = 0\n return data\n\n @staticmethod\n def _validate_sel_channels(channels, electrodes, n_channels_in_file: int, channels_info: list[ChannelInfo]):\n # Determine which channels to read\n # Select via electrode ID\n if electrodes is not None and electrodes:\n sel_electrodes = [*electrodes].copy()\n electrodes_in_file = [ci.electrode_id for ci in channels_info]\n electrodes = [e for e in electrodes if e in electrodes_in_file]\n channels = [electrodes_in_file.index(e) for e in electrodes]\n if not channels:\n raise ValueError(f'None of the specified electrodes are in file 
{[*sel_electrodes]}.')\n # Select via channel index (0-based)\n elif channels is not None and channels:\n sel_channels = [*channels].copy()\n channels = [c for c in channels if c < n_channels_in_file]\n electrodes = [channels_info[c].electrode_id for c in channels]\n if not channels:\n raise ValueError(f'None of the specified channels are in file {[*sel_channels]}.')\n # Read all channels (no selection criteria provided)\n else:\n channels = [*range(n_channels_in_file)]\n electrodes = [channels_info[c].electrode_id for c in channels]\n\n trimmed_channels_info = [channels_info[c] for c in channels]\n\n return channels, electrodes, trimmed_channels_info\n" ]
[ [ "numpy.empty", "numpy.asarray", "scipy.signal.sosfilt", "numpy.reshape", "scipy.interpolate.splrep", "numpy.rint", "scipy.signal.butter", "numpy.ndarray", "numpy.arange", "numpy.fromfile", "scipy.interpolate.splev", "numpy.convolve" ] ]
JesseAllardice/Action-Recognition
[ "7b3149d5083ea655a71af62555dca8321b0d078e" ]
[ "predictors/posepredictor.py" ]
[ "\"\"\"\ninherients from predictor\n\"\"\"\n# Standard packages\nimport time\nfrom collections import deque\nimport numpy as np\nimport tensorflow as tf\nimport cv2\n\n# unique modules\nfrom predictors.predictor import Predictor\n\nclass PosePredictor(Predictor):\n \"\"\"\n Instantisation methods\n \"\"\"\n def __init__(self, num_threads=8):\n if tf.__version__ < '2.3.1':\n raise Exception(\"Tensorflow 2.3.1 or greater is needed for multi-tread\")\n self.model_path = 'models/posenet_mobilenet_v1_100_257x257_multi_kpt_stripped.tflite'\n self.output_stride = 32\n self.num_threads = num_threads\n # load TFLite model and allocate tensors.\n self.interpreter = tf.lite.Interpreter(\n model_path=self.model_path,\n num_threads=num_threads,\n )\n # allocate tensors\n self.interpreter.allocate_tensors()\n # Get input and output tensors.\n self.input_details = self.interpreter.get_input_details()\n self.output_details = self.interpreter.get_output_details()\n # check the type of the input tensor\n self.floating_model = self.input_details[0]['dtype'] == np.float32\n # NxHxWxC, H:1, W:2\n self.input_height = self.input_details[0]['shape'][1]\n self.input_width = self.input_details[0]['shape'][2]\n # inputs\n self.input_data = None\n # outputs\n self.output_data = None\n self.offset_data = None\n # image specfics\n self.image = None\n self.image_height = None\n self.image_width = None\n # initialise coordinates\n self.model_positions = None\n self.image_positions = None\n\n \"\"\"\n Inheriteted abstract methods\n \"\"\"\n def predict(self, data: deque) -> np.ndarray:\n # extract the image data and the input data\n self.set_image_and_input(data)\n # invoke model\n self.invoke_pose_prediction(self.input_data)\n # calculate the model and image positions\n self.calculate_coordinates()\n return self.image_positions\n\n # def transform(self): pass\n\n # def fit(self): pass\n\n # def fit_transform(self): pass\n\n \"\"\"\n Methods\n \"\"\"\n def set_image_and_input(self, data: 
deque):\n # use only the most recent image\n image = data[-1]\n # set the image parameters, image, h and w\n self.image = image\n self.image_height, self.image_width, _ = image.shape\n # extract the model's image size\n model_img_size = (self.input_height, self.input_width)\n # downsample the image to the size for the model input\n image = cv2.resize(image, model_img_size, interpolation=cv2.INTER_LINEAR)\n # add N dim\n input_data = np.expand_dims(image, axis=0)\n # rescale to [-1,1)\n if self.floating_model:\n input_data = (np.float32(input_data) - 127.5) / 127.5\n self.input_data = input_data\n\n def predict_on_random(self) -> np.ndarray:\n # set the image_shape\n self.image_height = self.input_details[0]['shape'][1]\n self.image_width = self.input_details[0]['shape'][2]\n # get the model's input shape\n input_shape = self.input_details[0][\"shape\"]\n # creat a matrix of the correct shape filled with random values\n self.image = np.array(np.random.random_sample(input_shape), dtype=np.float32)\n # creat a input matrix\n self.input_data = self.image # no reshaping required here\n # invoke model\n self.invoke_pose_prediction(self.input_data)\n # calculate the model and image positions\n self.calculate_coordinates()\n return self.image_positions\n\n def invoke_pose_prediction(self, input_data):\n # Set the tensors\n self.interpreter.set_tensor(self.input_details[0][\"index\"], input_data)\n # run inference\n #start_time = time.time()\n self.interpreter.invoke()\n #stop_time = time.time()\n #print('time: {:.3f}ms'.format((stop_time - start_time) * 1000))\n # the function 'get_tensor()' returns a copy of the tensor data.\n # whereas, use 'tensor()' in order to get a pointer to the tensor.\n self.output_data = self.interpreter.get_tensor(self.output_details[0][\"index\"])\n self.offset_data = self.interpreter.get_tensor(self.output_details[1]['index'])\n\n def calculate_coordinates(self):\n # remove the first dimension\n output_results = np.squeeze(self.output_data)\n 
offset_results = np.squeeze(self.offset_data)\n # set the stride value\n output_stride = self.output_stride\n # calculate the coordinates from the output and offset\n scores = self.sigmoid(output_results)\n num_keypoints = scores.shape[2]\n heatmap_positions = []\n offset_vectors = []\n confidences = []\n for ki in range(0, num_keypoints):\n x, y = np.unravel_index(np.argmax(scores[:, :, ki]), scores[:, :, ki].shape)\n confidences.append(scores[x, y, ki])\n offset_vector = (offset_results[x, y, ki], offset_results[x, y, num_keypoints + ki])\n heatmap_positions.append((x, y))\n offset_vectors.append(offset_vector)\n model_positions = np.add(np.array(heatmap_positions) * output_stride, offset_vectors)\n self.model_positions = model_positions\n self.image_positions = self.model_to_image_positions(model_positions)\n\n def model_to_image_positions(self, model_positions: np.ndarray) -> np.ndarray:\n scaling_x = self.image_height / self.input_height\n scaling_y = self.image_width / self.input_width\n scaling_matric = np.diag([scaling_x, scaling_y])\n image_positions = model_positions @ scaling_matric\n return image_positions\n\n \"\"\"\n Get Methods\n \"\"\"\n def get_model_positions(self):\n if self.model_positions is None:\n raise Exception(\"model_positions not calculated yet\")\n return self.model_positions\n\n def get_image_positions(self):\n if self.image_positions is None:\n raise Exception(\"image_positions not calculated yet\")\n return self.image_positions\n\n def get_input_data(self):\n if self.input_data is None:\n raise Exception(\"input_data not initialised yet\")\n return self.input_data\n\n \"\"\"\n Static Methods\n \"\"\"\n @staticmethod\n def sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\ndef main():\n posepredictor = PosePredictor()\n image_positions = posepredictor.predict_on_random()\n print(image_positions)\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.array", "numpy.squeeze", "tensorflow.lite.Interpreter", "numpy.exp", "numpy.random.random_sample", "numpy.float32", "numpy.argmax", "numpy.diag", "numpy.expand_dims" ] ]
jacke121/HRNet-Facial-Landmark-Detection
[ "6d29324ce8bf203518bb8e92d1df919145a7063c" ]
[ "test.py" ]
[ "# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Created by Tianheng Cheng([email protected])\n# ------------------------------------------------------------------------------\n\nimport os\nimport pprint\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.data import DataLoader\nimport sys\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))\nimport lib.models as models\nfrom lib.config import config, update_config\nfrom lib.utils import utils\nfrom lib.datasets import get_dataset\nfrom lib.core import function\n\n\ndef parse_args():\n\n parser = argparse.ArgumentParser(description='Train Face Alignment')\n\n parser.add_argument('--cfg', help='experiment configuration filename',\n default='./experiments/wflw/face_alignment_wflw_hrnet_w18.yaml', type=str)\n parser.add_argument('--model-file', help='model parameters', default=\"./HR18-WFLW.pth\", type=str)\n # parser.add_argument('--model-file', help='model parameters', default=\"../hrnetv2_w18_imagenet_pretrained.pth\", type=str)\n\n args = parser.parse_args()\n update_config(config, args)\n return args\n\n\ndef main():\n\n args = parse_args()\n\n logger, final_output_dir, tb_log_dir = \\\n utils.create_logger(config, args.cfg, 'test')\n\n logger.info(pprint.pformat(args))\n logger.info(pprint.pformat(config))\n\n cudnn.benchmark = config.CUDNN.BENCHMARK\n cudnn.determinstic = config.CUDNN.DETERMINISTIC\n cudnn.enabled = config.CUDNN.ENABLED\n\n config.defrost()\n config.MODEL.INIT_WEIGHTS = False\n config.freeze()\n model = models.get_face_alignment_net(config)\n\n gpus = list(config.GPUS)\n model = nn.DataParallel(model, device_ids=gpus).cuda()\n\n # load model\n state_dict = torch.load(args.model_file)\n if 'state_dict' in state_dict.keys():\n state_dict = state_dict['state_dict']\n model.module.load_state_dict(state_dict)\n\n dataset_type = 
get_dataset(config)\n\n test_loader = DataLoader(\n dataset=dataset_type(config,\n is_train=False),\n batch_size=1,\n shuffle=False,\n num_workers=config.WORKERS,\n pin_memory=config.PIN_MEMORY\n )\n\n nme, predictions = function.inference(config, test_loader, model)\n\n torch.save(predictions, os.path.join(final_output_dir, 'predictions.pth'))\n\n\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "torch.load", "torch.nn.DataParallel" ] ]
KiLJ4EdeN/torchxrayvision
[ "18985291b217d51bd7d46c8a0dc069a78a82755e" ]
[ "scripts/process_image.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\nimport os,sys\nsys.path.insert(0,\"..\")\nfrom glob import glob\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport skimage, skimage.io\nimport pprint\n\nimport torch\nimport torch.nn.functional as F\nimport torchvision, torchvision.transforms\n\nimport torchxrayvision as xrv\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-f', type=str, default=\"\", help='')\nparser.add_argument('img_path', type=str)\nparser.add_argument('-weights', type=str,default=\"all\")\nparser.add_argument('-feats', default=False, help='', action='store_true')\nparser.add_argument('-cuda', default=False, help='', action='store_true')\n\ncfg = parser.parse_args()\n\n\nimg = skimage.io.imread(cfg.img_path)\nimg = xrv.datasets.normalize(img, 255) \n\n# Check that images are 2D arrays\nif len(img.shape) > 2:\n img = img[:, :, 0]\nif len(img.shape) < 2:\n print(\"error, dimension lower than 2 for image\")\n\n# Add color channel\nimg = img[None, :, :]\n\ntransform = torchvision.transforms.Compose([xrv.datasets.XRayCenterCrop(),\n xrv.datasets.XRayResizer(224)])\n\nimg = transform(img)\n\n\nmodel = xrv.models.DenseNet(weights=cfg.weights)\n\noutput = {}\nwith torch.no_grad():\n img = torch.from_numpy(img).unsqueeze(0)\n if cfg.cuda:\n img = img.cuda()\n model = model.cuda()\n \n if cfg.feats:\n feats = model.features(img)\n feats = F.relu(feats, inplace=True)\n feats = F.adaptive_avg_pool2d(feats, (1, 1))\n output[\"feats\"] = list(feats.cpu().detach().numpy().reshape(-1))\n\n preds = model(img).cpu()\n output[\"preds\"] = dict(zip(xrv.datasets.default_pathologies,preds[0].detach().numpy()))\n \nif cfg.feats:\n print(output)\nelse:\n pprint.pprint(output)\n \n \n" ]
[ [ "torch.nn.functional.relu", "torch.no_grad", "torch.nn.functional.adaptive_avg_pool2d", "torch.from_numpy" ] ]
endrizzimarco/trading-bot
[ "3df3f32db5cd1ff23d9ad45a994fdc1e37f58c8b" ]
[ "src/plot.py" ]
[ "import matplotlib.pyplot as plt\nimport mplfinance as mpf\nfrom endpoints.instrument import Instrument\nfrom indicators import *\n\n\ndf = Instrument(\n \"EUR_USD\", params={\"from\": \"2020-9-10\", \"to\": \"2021-01-01\", \"granularity\": \"H4\"}\n).df\n\ndf[\"tenkanSen\"] = tenkanSen(df)\ndf[\"kijunSen\"] = kijunSen(df)\ndf[\"senkouSpanA\"] = senkouSpanA(df)\ndf[\"senkouSpanB\"] = senkouSpanB(df)\ndf[\"chikouSpan\"] = chikouSpan(df)\n\nindicator = df.drop([\"Volume\", \"Open\", \"Close\", \"High\", \"Volume\", \"Low\"], axis=1)\n\nmpf.plot(\n df,\n type=\"candle\",\n style=\"charles\",\n title=\"EURUSD STUFF\",\n ylabel=\"Price ($)\",\n addplot=mpf.make_addplot(indicator),\n)\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show" ] ]
vanshhhhh/federated
[ "20fdca66d01051c55413868310d60c068c84b35d" ]
[ "tensorflow_federated/python/tests/map_reduce_form_test.py" ]
[ "# Copyright 2019, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\n\ndef construct_example_training_comp():\n \"\"\"Constructs a `tff.templates.IterativeProcess` via the FL API.\"\"\"\n np.random.seed(0)\n\n input_spec = collections.OrderedDict(\n x=tf.TensorSpec(shape=[None, 2], dtype=tf.float32),\n y=tf.TensorSpec(shape=[None, 1], dtype=tf.int32))\n\n def model_fn():\n \"\"\"Constructs keras model.\"\"\"\n keras_model = tf.keras.models.Sequential([\n tf.keras.layers.Dense(\n 1,\n activation=tf.nn.softmax,\n kernel_initializer='zeros',\n input_shape=(2,))\n ])\n\n return tff.learning.from_keras_model(\n keras_model,\n input_spec=input_spec,\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])\n\n return tff.learning.build_federated_averaging_process(\n model_fn,\n client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.01))\n\n\nclass MapReduceFormTest(tff.test.TestCase):\n\n def test_map_reduce_form_with_learning_structure_contains_only_one_broadcast_and_one_aggregate(\n self):\n ip = construct_example_training_comp()\n\n cf = tff.backends.mapreduce.get_map_reduce_form_for_iterative_process(ip)\n\n # This type spec test actually carries the meaning that TFF's vanilla path\n # to canonical form will broadcast and aggregate exactly one copy of the\n # 
parameters. So the type test below in fact functions as a regression test\n # for the TFF compiler pipeline.\n # pyformat: disable\n self.assertEqual(\n '(<\\n'\n ' <\\n'\n ' x=float32[?,2],\\n'\n ' y=int32[?,1]\\n'\n ' >*,\\n'\n ' <\\n'\n ' <\\n'\n ' trainable=<\\n'\n ' float32[2,1],\\n'\n ' float32[1]\\n'\n ' >,\\n'\n ' non_trainable=<>\\n'\n ' >\\n'\n ' >\\n'\n '> -> <\\n'\n ' <\\n'\n ' <\\n'\n ' float32[2,1],\\n'\n ' float32[1]\\n'\n ' >,\\n'\n ' float32,\\n'\n ' <\\n'\n ' sparse_categorical_accuracy=<\\n'\n ' float32,\\n'\n ' float32\\n'\n ' >,\\n'\n ' loss=<\\n'\n ' float32,\\n'\n ' float32\\n'\n ' >\\n'\n ' >,\\n'\n ' <\\n'\n ' num_examples=int64\\n'\n ' >\\n'\n ' >,\\n'\n ' <>,\\n'\n ' <>,\\n'\n ' <>\\n'\n '>)',\n cf.work.type_signature.formatted_representation())\n # pyformat: enable\n\n def test_map_reduce_form_with_learning_structure_does_not_change_execution_of_iterative_process(\n self):\n if tf.config.list_logical_devices('GPU'):\n self.skipTest(\n 'b/137602785: bring GPU test back after the fix for `wrap_function`')\n ip_1 = construct_example_training_comp()\n # We disable Grappler to prevent a single TF function from being pulled into\n # the eager TF runtime with multiple definitions.\n grappler_config = tf.compat.v1.ConfigProto()\n grappler_config.graph_options.rewrite_options.disable_meta_optimizer = True\n cf = tff.backends.mapreduce.get_map_reduce_form_for_iterative_process(\n ip_1, grappler_config=grappler_config)\n ip_2 = tff.backends.mapreduce.get_iterative_process_for_map_reduce_form(cf)\n\n ip_1.initialize.type_signature.check_equivalent_to(\n ip_2.initialize.type_signature)\n # The next functions type_signatures may not be equal, since we may have\n # appended an empty tuple as client side-channel outputs if none existed.\n ip_1.next.type_signature.parameter.check_equivalent_to(\n ip_2.next.type_signature.parameter)\n ip_1.next.type_signature.result.check_equivalent_to(\n ip_2.next.type_signature.result)\n\n sample_batch = 
collections.OrderedDict(\n x=np.array([[1., 1.]], dtype=np.float32),\n y=np.array([[0]], dtype=np.int32),\n )\n client_data = [sample_batch]\n state_1 = ip_1.initialize()\n server_state_1, server_output_1 = ip_1.next(state_1, [client_data])\n server_state_1 = tff.structure.from_container(\n server_state_1, recursive=True)\n server_output_1 = tff.structure.from_container(\n server_output_1, recursive=True)\n server_state_1_arrays = tff.structure.flatten(server_state_1)\n server_output_1_arrays = tff.structure.flatten(server_output_1)\n state_2 = ip_2.initialize()\n server_state_2, server_output_2 = ip_2.next(state_2, [client_data])\n server_state_2_arrays = tff.structure.flatten(server_state_2)\n server_output_2_arrays = tff.structure.flatten(server_output_2)\n\n self.assertEmpty(server_state_1.model_broadcast_state)\n # Note that we cannot simply use assertEqual because the values may differ\n # due to floating point issues.\n self.assertTrue(\n tff.structure.is_same_structure(server_state_1, server_state_2))\n self.assertTrue(\n tff.structure.is_same_structure(server_output_1, server_output_2))\n self.assertAllClose(server_state_1_arrays, server_state_2_arrays)\n self.assertAllClose(server_output_1_arrays[:2], server_output_2_arrays[:2])\n\n\nif __name__ == '__main__':\n tff.backends.test.set_test_execution_context()\n tff.test.main()\n" ]
[ [ "tensorflow.TensorSpec", "tensorflow.keras.optimizers.SGD", "numpy.array", "numpy.random.seed", "tensorflow.compat.v1.ConfigProto", "tensorflow.config.list_logical_devices", "tensorflow.keras.layers.Dense", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.metrics.SparseCategoricalAccuracy" ] ]
ku2482/rltorch
[ "7819af49d95bfa268e00413a7606564b0e7286a7" ]
[ "rltorch/agent/sac_discrete/actor.py" ]
[ "import os\nfrom time import time\nimport numpy as np\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom .base import SacDiscreteAgent\nfrom rltorch.memory import DummyMultiStepMemory, DummyPrioritizedMemory\nfrom rltorch.policy import ConvCategoricalPolicy\nfrom rltorch.q_function import TwinedDiscreteConvQNetwork\nfrom rltorch.agent import to_batch, hard_update\n\n\nclass SacDiscreteActor(SacDiscreteAgent):\n space_size = 65\n\n def __init__(self, env, log_dir, shared_memory, shared_weights,\n actor_id, num_actors=1, memory_size=1e4, gamma=0.99,\n multi_step=3, per=True, alpha=0.6, beta=0.4,\n beta_annealing=0.001, start_steps=10000, log_interval=10,\n memory_save_interval=5, model_load_interval=5, cuda=True,\n seed=0):\n\n self.actor_id = actor_id\n self.env = env\n torch.manual_seed(seed)\n np.random.seed(seed)\n self.env.seed(seed)\n\n self.shared_memory = shared_memory\n self.shared_weights = shared_weights\n\n self.device = torch.device(\n \"cuda\" if cuda and torch.cuda.is_available() else \"cpu\")\n\n self.policy = ConvCategoricalPolicy(\n self.env.observation_space.shape[0],\n self.env.action_space.n).to(self.device).eval()\n self.critic = TwinedDiscreteConvQNetwork(\n self.env.observation_space.shape[0],\n self.env.action_space.n).to(self.device).eval()\n self.critic_target = TwinedDiscreteConvQNetwork(\n self.env.observation_space.shape[0],\n self.env.action_space.n).to(self.device).eval()\n hard_update(self.critic_target, self.critic)\n\n if per:\n self.memory = DummyPrioritizedMemory(\n memory_size, self.env.observation_space.shape,\n (1,), self.device, gamma, multi_step,\n alpha=alpha, beta=beta, beta_annealing=beta_annealing)\n else:\n self.memory = DummyMultiStepMemory(\n memory_size, self.env.observation_space.shape,\n (1,), self.device, gamma, multi_step)\n\n self.log_dir = log_dir\n self.summary_dir = os.path.join(\n log_dir, 'summary', f'actor-{self.actor_id}')\n if not os.path.exists(self.summary_dir):\n 
os.makedirs(self.summary_dir)\n self.writer = SummaryWriter(log_dir=self.summary_dir)\n\n self.episodes = 0\n self.steps = 0\n self.per = per\n self.multi_step = multi_step\n self.start_steps = start_steps\n self.gamma_n = gamma ** multi_step\n self.log_interval = log_interval\n self.memory_save_interval = memory_save_interval\n self.model_load_interval = model_load_interval\n\n load = False\n while load is False:\n load = self.load_weights()\n\n def run(self):\n self.time = time()\n while True:\n self.episodes += 1\n self.act_episode()\n self.interval()\n\n def act_episode(self):\n episode_reward = 0.\n episode_steps = 0\n done = False\n state = self.env.reset()\n\n while not done:\n action = self.act(state)\n next_state, reward, done, _ = self.env.step(action)\n self.steps += 1\n episode_steps += 1\n episode_reward += reward\n\n if episode_steps >= self.env._max_episode_steps:\n masked_done = False\n else:\n masked_done = done\n\n clipped_reward = max(min(reward, 1.0), -1.0)\n\n if self.per:\n batch = to_batch(\n state, action, clipped_reward,\n next_state, masked_done, self.device)\n with torch.no_grad():\n curr_q1, curr_q2 = self.calc_current_q(*batch)\n target_q = self.calc_target_q(*batch)\n error = torch.abs(curr_q1 - target_q).item()\n\n self.memory.append(\n state, action, clipped_reward, next_state,\n masked_done, error, episode_done=done)\n else:\n self.memory.append(\n state, action, clipped_reward, next_state, masked_done,\n episode_done=done)\n\n state = next_state\n\n if self.episodes % self.log_interval == 0:\n self.writer.add_scalar(\n 'reward/train', episode_reward, self.steps)\n\n now = time()\n print(' '*self.space_size,\n f'Actor {self.actor_id:<2} '\n f'episode: {self.episodes:<4} '\n f'episode steps: {episode_steps:<4} '\n f'reward: {episode_reward:<5.1f} '\n f'time: {now - self.time:3.3f}')\n self.time = now\n\n def interval(self):\n if self.episodes % self.model_load_interval == 0:\n self.load_weights()\n if self.episodes % 
self.memory_save_interval == 0:\n self.save_memory()\n" ]
[ [ "numpy.random.seed", "torch.no_grad", "torch.manual_seed", "torch.abs", "torch.cuda.is_available", "torch.utils.tensorboard.SummaryWriter" ] ]
Astech34/pymms
[ "165925d3641090c7a8487ec63ef9fc70d47cb6e9" ]
[ "pymms/sql/data_export_sql.py" ]
[ "import numpy as np\nimport datetime as dt\nimport spacepy\nfrom spacepy import pycdf\nimport pandas as pd\nfrom pandas import DataFrame, Series\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport os.path\nimport pymms\nfrom pymms import mms_utils\nimport pdb\n\n## Creating the pymms object\n# First, we create an instance of the object that communicates with the SDC. For the sake of this\n# example, we will start with data from `2015-10-16` because there are several magnetopause crossings\n# and a well-studied electron diffusion region event. Also, for simplicity we will work with data from\n# the MMS1 spacecraft. Data from other spacecraft can be loaded by changing the `sc` property\n# below to `'mms2'`, `'mms3'`, or `'mms4'`.\n\n# Create an instance of SDC object\nsdc = pymms.MrMMS_SDC_API()\n\ndef data_export(spacecraft, level, start_date, end_date):\n # Define the spacecraft. We will use the variable later when accessing the CDF files.\n sc = spacecraft\n #level = 'sitl' # 'l2' or 'sitl'\n level = level\n start_date = start_date\n end_date = end_date\n data_root = os.path.expanduser('/data/colin/mms/')\n # Specifying data_root=\"~/\" does not expand the tilde yet\n # However, if data_root=None, then ~/data is the default\n \n # Set attributes\n sdc.sc = sc\n sdc.start_date = start_date\n sdc.end_date = end_date\n sdc.data_root = data_root\n \n ## Working with CDF Files\n # CDF files are somewhat like netCDF or HDF5 files in that the contain data as well as metadata.\n # Data is associated with variable names and variable metadata, or variable attributes. The file itself\n # has metadata in the form of global attributes. For our purpose, we are interested in determining the\n # variable names, what they mean, then selecting the subset of variables that are relevant to us. 
To do that,\n # we will need to download an MMS CDF data file and make use of pycdf from the spacepy package.\n \n # Downloading an MMS CDF File\n # Here, we will give a brief example of how to download a CDF file using the `pymms` package. We pick a\n # file from the fluxgate magnetometer (FGM) team containing magnetic field data. For demonstration purposes,\n # we select a science-quality data file (`level='l2'`) when the instrument was sampling in survey\n # mode (`mode='srvy'`). [Since the Geocentric Solar Ecliptic](https://sscweb.gsfc.nasa.gov/users_guide/Appendix_C.html)\n # (GSE) coordinate system is the standard for MMS data, we will extract non-scalar data in this system.\n \n \n # First, define variables, as they will be used in creating variable names\n fgm_mode = 'srvy'\n fgm_coords = 'gse'\n fgm_instr = 'fgm'\n fgm_level = 'l2'\n \n # Set object properties and get data\n sdc.instr = fgm_instr\n sdc.mode = fgm_mode\n sdc.level = fgm_level\n files = sdc.Download()\n \n print('FGM Files:')\n print(*files, sep='\\n')\n \n \n ## CDF Attributes and Variables\n \n # In order to access data in a CDF file, it is necessary to know the names of the variables contained within. Often,\n # it is also important to know additional information about the file contents or variable data. This metadata\n # is contained in the global and variable attributes.\n # The most important variable attributes are CATDESC, which describes the variable, FILLVAL, which gives the\n # value used for bad or missing data, and DEPEND_[0-3], which list the dependent variables of a data variable.\n # Typically, the dimensions of CDF variables are ordered as [nRecs, nDep1, nDep2, nDep3], where nRecs is the total\n # number of records, each record having dimensions [nDep1, nDep2, nDep3]. The value of DEPEND_0 is typically 'Epoch',\n # indicating that the CDF variable 'Epoch' is a dependency. The 'Epoch' variable contains a CDF Epoch time stamp at\n # each of the nRecs number of records. 
Similarly DEPEND_[1-3] variables point to other variables in the CDF file\n # that act as dependencies. If you want to plot a variable, you will need to also extract its 'DEPEND_0' variables.\n \n # Variables and attributes are described in more detail in the ISTP CDF Guidelines. Below, we demonstrate\n # how to obtain attribute and variable names and values.\n \n \n # Open the file and pr\n cdf = pycdf.CDF(files[0])\n \n # Show global attribute\n print('Global Attributes:')\n for gAttrName in cdf.attrs:\n print('\\t' + gAttrName)\n \n # Show variable names\n print('\\nVariable Names:')\n for varName in cdf:\n print('\\t' + varName)\n \n # Select the magnetic field variable\n vname = '_'.join((sc, fgm_instr, 'b', fgm_coords, fgm_mode, fgm_level))\n \n # Show variable attributes for a particular variable\n print('\\nVariable Attributes for \"' + vname + '\":')\n for vAttrName in cdf[vname].attrs:\n print('\\t' + vAttrName)\n \n # Important variable attributes:\n print('\\nValues of Important Variable Attributes:')\n print('\\t', 'CATDESC: ', cdf[vname].attrs['CATDESC'])\n print('\\t', 'FILLVAL: ', cdf[vname].attrs['FILLVAL'])\n print('\\t', 'DEPEND_0: ', cdf[vname].attrs['DEPEND_0'])\n \n \n ## FGM\n # The FGM dataset contains magnetic field data from the fluxgate magnetometer (FGM).\n # [Since the Geocentric Solar Ecliptic](https://sscweb.gsfc.nasa.gov/users_guide/Appendix_C.html) (GSE)\n # coordinate system is the standard for MMS data, we will extract non-scalar data in this system.\n \n # Download & Read Data\n # Now we can read data and its corresponding time stamps for a CDF variable. 
We choose the `'mms1_fgm_b_gse_srvy_l2'`\n # variable because, as shown above, its `CATDESC` attribute describes it as the magnetic field in GSE coordinates.\n # In order to be completely general, I will build the variable names from the attributes we have already defined.\n # Variable names have the convention of `sc_instr_param_coords_optdesc_mode_level`, where `param` describes the\n # quantity and `coords` is the coordinate system whenever relevant. Other components are similar to the file name\n # conventions.\n \n # Update instrument-specific variables\n fgm_mode = 'srvy'\n if level == 'sitl':\n fgm_coords = 'dmpa'\n fgm_instr = 'dfg'\n fgm_level = 'ql'\n else:\n fgm_coords = 'gse'\n fgm_instr = 'fgm'\n fgm_level = 'l2'\n \n # Set object properties\n sdc.instr = fgm_instr\n sdc.mode = fgm_mode\n sdc.level = fgm_level\n \n # Download data\n files = sdc.Download()\n files = mms_utils.sort_files(files)[0]\n \n # Read the magnetic field and its time stamps\n if level == 'l2':\n b_vname = '_'.join((sc, fgm_instr, 'b', fgm_coords, fgm_mode, fgm_level))\n else:\n b_vname = '_'.join((sc, fgm_instr, fgm_mode, fgm_coords))\n \n fgm_t = []\n fgm_b = []\n \n print('FGM Files:')\n for file in files:\n # Open the file\n cdf = pycdf.CDF(file)\n \n # Read the data\n # - Convert numpy arrays to lists to make appending easier\n fgm_t += list(cdf[cdf[b_vname].attrs['DEPEND_0']][:])\n fgm_b += list(cdf[b_vname][:])\n \n # Close the file\n cdf.close()\n print(' ' + file)\n \n # Convert back to numpy arrays\n fgm_t = np.array(fgm_t)\n fgm_b = np.array(fgm_b)\n \n # Compute clock and normal angles\n fgm_ca = np.rad2deg(np.arctan2(fgm_b[:,1], fgm_b[:,2]))\n fgm_tbn = np.rad2deg(np.arctan2(fgm_b[:,0], fgm_b[:,2]))\n \n ## Data Frame\n \n # Create a dictionary\n fgm_data = {\n 'Time' : fgm_t,\n 'Bx' : fgm_b[:,0],\n 'By' : fgm_b[:,1],\n 'Bz' : fgm_b[:,2],\n 'Bmag' : fgm_b[:,3],\n 'clock_angle' : fgm_ca,\n 'normal_angle' : fgm_tbn\n }\n \n # Convert dictionary to data from\n fgm_data = 
pd.DataFrame(fgm_data, columns=fgm_data.keys())\n \n \n ## EDP\n # Now for electric field and spacecraft potential data from the Electric Field Double Pobles (EDP).\n \n # Download & Read\n \n # Update instrument-specific variables\n edp_instr = 'edp'\n edp_mode = 'fast'\n edp_level = level\n dce_optdesc = 'dce'\n scpot_optdesc = 'scpot'\n \n if level == 'l2':\n edp_coords = 'gse'\n else:\n edp_coords = 'dsl'\n \n # EDP variable names\n e_vname = '_'.join((sc, 'edp', dce_optdesc, edp_coords, edp_mode, edp_level))\n scpot_vname = '_'.join((sc, 'edp', scpot_optdesc, edp_mode, edp_level))\n \n # Download DCE files\n sdc.instr = edp_instr\n sdc.mode = edp_mode\n sdc.level = edp_level\n sdc.optdesc = dce_optdesc\n dce_files = sdc.Download()\n dce_files = mms_utils.sort_files(dce_files)[0]\n \n # Download SCPOT files\n sdc.optdesc = scpot_optdesc\n scpot_files = sdc.Download()\n scpot_files = mms_utils.sort_files(scpot_files)[0]\n \n # Read the data\n edp_t = []\n edp_e = []\n edp_v = []\n print('EDP Files:')\n for ifile, file in enumerate(dce_files):\n # Open the file\n dce_cdf = pycdf.CDF(dce_files[ifile])\n scpot_cdf = pycdf.CDF(scpot_files[ifile])\n \n # Read data and replace fill value with NaN\n e = dce_cdf[e_vname][:]\n v = scpot_cdf[scpot_vname][:]\n e[e == dce_cdf[e_vname].attrs['FILLVAL']] = np.nan\n v[v == scpot_cdf[scpot_vname].attrs['FILLVAL']] = np.nan\n \n # Read the data\n # - Convert numpy arrays to lists to make appending easier\n edp_t += list(dce_cdf[dce_cdf[e_vname].attrs['DEPEND_0']][:])\n edp_e += list(e)\n edp_v += list(v)\n \n # Close the file\n dce_cdf.close()\n scpot_cdf.close()\n print(' ' + dce_files[ifile])\n print(' ' + scpot_files[ifile])\n \n # Convert back to numpy arrays\n edp_t = np.array(edp_t)\n edp_e = np.array(edp_e)\n edp_v = np.array(edp_v)\n \n \n ## Data Frame\n \n # Create a dictionary\n edp_data = {\n 'Time' : edp_t,\n 'Ex' : edp_e[:,0],\n 'Ey' : edp_e[:,1],\n 'Ez' : edp_e[:,2],\n 'scpot' : edp_v\n }\n \n # Convert dictionary 
to data from\n edp_data = pd.DataFrame(edp_data, columns=edp_data.keys())\n \n ## FPI\n # Next, we will repeat the process for the Fast Plasma Instrument (FPI), which consists of the Dual Electron\n # Spectrometer (DES) and the Dual Ion Spectrometer (DIS). These measure characteristics of the electron and\n # ion plasmas, respectively. Here, we are interested in the density, velocity, and temperature.\n \n # Normally, survey mode files are a combination of fast and slow survey data and span an entire day. Because FPI\n # produces so much data, however, it is only operated in fast survey mode and its \"daily files\" are broken up\n # into several files of shorter time intervals.\n \n \n # DIS: Download and Read\n \n # Update instrument-specific variables\n dis_instr = 'fpi'\n dis_mode = 'fast'\n \n if level == 'sitl':\n dis_coords = 'dbcs'\n dis_level = 'ql'\n dis_optdesc = 'dis'\n else:\n dis_coords = 'gse'\n dis_level = level\n dis_optdesc = 'dis-moms'\n \n # Set attributes\n sdc.instr = dis_instr\n sdc.mode = dis_mode\n sdc.level = dis_level\n sdc.optdesc = dis_optdesc\n \n # DIS variable names\n n_vname = '_'.join((sc, 'dis', 'numberdensity', dis_mode))\n v_vname = '_'.join((sc, 'dis', 'bulkv', dis_coords, dis_mode))\n t_para_vname = '_'.join((sc, 'dis', 'temppara', dis_mode))\n t_perp_vname = '_'.join((sc, 'dis', 'tempperp', dis_mode))\n espec_vname = '_'.join((sc, 'dis', 'energyspectr', 'omni', dis_mode))\n \n # Open the file\n files = sdc.Download()\n files = mms_utils.sort_files(files)[0]\n \n # Read the data\n dis_t = []\n dis_n = []\n dis_v = []\n dis_temp_para = []\n dis_temp_perp = []\n dis_espec = []\n dis_e = []\n print('DIS Files:')\n for file in files:\n # Open the file\n cdf = pycdf.CDF(file)\n \n # Read timee and shift to center of interval\n # - There must be a bug in the CDF package because the Epoch_plus_var variables\n # are read as empty but really contain scalar values\n t = cdf[cdf[n_vname].attrs['DEPEND_0']][:]\n # dt_minus = 
t.attrs['DELTA_MINUS_VAR']\n # dt_plus = t.attrs['DELTA_PLUS_VAR']\n dt_minus = 0\n dt_plus = 4.5\n t += dt.timedelta(seconds=(dt_plus - dt_minus) / 2.0)\n \n # Read the data\n # - Convert numpy arrays to lists to make appending easier\n dis_t += list(t)\n dis_n += list(cdf[n_vname][:])\n dis_v += list(cdf[v_vname][:])\n dis_temp_para += list(cdf[t_para_vname][:])\n dis_temp_perp += list(cdf[t_perp_vname][:])\n dis_espec += list(cdf[espec_vname][:])\n dis_e += list(cdf[cdf[espec_vname].attrs['DEPEND_1']][:])\n \n # Close the file\n cdf.close()\n print(' ' + file)\n \n # Convert back to numpy arrays\n dis_t = np.array(dis_t)\n dis_n = np.array(dis_n)\n dis_v = np.array(dis_v)\n dis_temp_para = np.array(dis_temp_para)\n dis_temp_perp = np.array(dis_temp_perp)\n dis_espec = np.array(dis_espec)\n dis_e = np.array(dis_e)\n \n # Compute velocity magnitude\n dis_vmag = np.sqrt(dis_v[:,0]**2.0 + dis_v[:,1]**2.0 + dis_v[:,2]**2.0)\n \n # Compute scalar temperature\n dis_temp = 1.0/3.0 * (2.0*dis_temp_perp + dis_temp_para)\n \n ## Data Frame\n \n # Create a dictionary\n dis_data = pd.DataFrame()\n dis_data['Time'] = pd.Series(dis_t)\n dis_data['N'] = pd.Series(dis_t)\n dis_data['Vx'] = pd.Series(dis_t)\n dis_data['Vy'] = pd.Series(dis_t)\n dis_data['Vz'] = pd.Series(dis_t)\n dis_data['Vmag'] = pd.Series(dis_t)\n dis_data['Tpara'] = pd.Series(dis_t)\n dis_data['Tperp'] = pd.Series(dis_t)\n dis_data['T'] = pd.Series(dis_temp)\n dis_data = {\n 'Time' : dis_t,\n 'N' : dis_n,\n 'Vx' : dis_v[:,0],\n 'Vy' : dis_v[:,1],\n 'Vz' : dis_v[:,2],\n 'Vmag' : dis_vmag,\n 'Tpara' : dis_temp_para,\n 'Tperp' : dis_temp_perp,\n 'T' : dis_temp\n # 'ESpec': dis_espec,\n # 'Energy': dis_e,\n }\n \n # Convert dictionary to data from\n dis_data = pd.DataFrame(dis_data, columns=dis_data.keys())\n \n # Add dis_espec to dis_data\n for i in range(dis_espec.shape[1]):\n dis_data['ESpec_E{:02}'.format(i)] = Series(data=dis_espec[:,i])\n \n \n ## DES\n \n # Update instrument-specific variables\n des_instr 
= 'fpi'\n des_mode = 'fast'\n \n if level == 'sitl':\n des_coords = 'dbcs'\n des_level = 'ql'\n des_optdesc = 'des'\n else:\n des_coords = 'gse'\n des_level = level\n des_optdesc = 'des-moms'\n \n # Set attributes\n sdc.instr = des_instr\n sdc.mode = des_mode\n sdc.level = des_level\n sdc.optdesc = des_optdesc\n \n # DIS variable names\n n_vname = '_'.join((sc, 'des', 'numberdensity', des_mode))\n v_vname = '_'.join((sc, 'des', 'bulkv', des_coords, des_mode))\n t_para_vname = '_'.join((sc, 'des', 'temppara', des_mode))\n t_perp_vname = '_'.join((sc, 'des', 'tempperp', des_mode))\n espec_vname = '_'.join((sc, 'des', 'energyspectr', 'omni', des_mode))\n pad_low_vname = '_'.join((sc, 'des', 'pitchangdist', 'lowen', des_mode))\n pad_mid_vname = '_'.join((sc, 'des', 'pitchangdist', 'miden', des_mode))\n pad_high_vname = '_'.join((sc, 'des', 'pitchangdist', 'highen', des_mode))\n \n \n # Open the file\n files = sdc.Download()\n files = mms_utils.sort_files(files)[0]\n \n # Read the data\n des_t = []\n des_n = []\n des_v = []\n des_temp_para = []\n des_temp_perp = []\n des_espec = []\n des_energy = []\n des_pad_low = []\n des_pad_mid = []\n des_pad_high = []\n des_pa = []\n print('DES Files:')\n for file in files:\n # Open the file\n cdf = pycdf.CDF(file)\n \n # Read timee and shift to center of interval\n # - There must be a bug in the CDF package because the Epoch_plus_var variables\n # are read as empty but really contain scalar values\n t = cdf[cdf[n_vname].attrs['DEPEND_0']][:]\n # dt_minus = t.attrs['DELTA_MINUS_VAR']\n # dt_plus = t.attrs['DELTA_PLUS_VAR']\n dt_minus = 0\n dt_plus = 4.5\n t += dt.timedelta(seconds=(dt_plus - dt_minus) / 2.0)\n \n # Read the data\n des_t += list(t)\n des_n += list(cdf[n_vname][:])\n des_v += list(cdf[v_vname][:])\n des_temp_para += list(cdf[t_para_vname][:])\n des_temp_perp += list(cdf[t_perp_vname][:])\n des_espec += list(cdf[espec_vname][:])\n des_energy += list(cdf[cdf[espec_vname].attrs['DEPEND_1']][:])\n des_pad_low += 
list(cdf[pad_low_vname][:])\n des_pad_mid += list(cdf[pad_mid_vname][:])\n des_pad_high += list(cdf[pad_high_vname][:])\n des_pa += list(cdf[cdf[pad_low_vname].attrs['DEPEND_1']][:])\n \n # Close the file\n cdf.close()\n print(' ' + file)\n \n # Convert back to numpy arrays\n des_t = np.array(des_t)\n des_n = np.array(des_n)\n des_v = np.array(des_v)\n des_temp_para = np.array(des_temp_para)\n des_temp_perp = np.array(des_temp_perp)\n des_espec = np.array(des_espec)\n des_energy = np.array(des_energy)\n des_pad_low = np.array(des_pad_low)\n des_pad_mid = np.array(des_pad_mid)\n des_pad_high = np.array(des_pad_high)\n des_pa = np.array(des_pa)\n \n # Compute velocity magnitude\n des_vmag = np.sqrt(des_v[:,0]**2.0 + des_v[:,1]**2.0 + des_v[:,2]**2.0)\n \n # Compute scalar temperature\n des_temp = 1.0/3.0*(2.0*des_temp_perp + des_temp_para)\n \n # Compute pich angle distribution\n des_pad = (des_pad_low + des_pad_mid + des_pad_high) / 3.0\n \n # Create a dictionary\n des_data = {\n 'Time' : des_t,\n 'N' : des_n,\n 'Vx' : des_v[:,0],\n 'Vy' : des_v[:,1],\n 'Vz' : des_v[:,2],\n 'Vmag' : des_vmag,\n 'Tpara' : des_temp_para,\n 'Tperp' : des_temp_perp,\n 'T' : des_temp\n # 'ESpec': dis_espec,\n # 'Energy': dis_e,\n # 'PAD': des_pad,\n # 'PA': des_pa\n }\n \n # Convert dictionary to data from\n des_data = pd.DataFrame(des_data, columns=des_data.keys())\n \n # Add des_espec to des_data\n for i in range(des_espec.shape[1]):\n des_data['ESpec_E{:02}'.format(i)] = Series(data=des_espec[:,i])\n \n # EDI - Should work, but is disabled until verified\n \n # Update instrument-specific variables\n edi_instr = 'edi'\n edi_mode = 'srvy'\n edi_optdesc = None # Get whatever is available\n \n if level == 'sitl':\n edi_level = 'ql'\n else:\n edi_level = level\n \n # Set attributes\n sdc.instr = edi_instr\n sdc.mode = edi_mode\n sdc.level = edi_level\n sdc.optdesc = 'amb'\n \n # Figure out which data product is available\n files = sdc.FileNames()\n parts = mms_utils.parse_filename(files)\n 
edi_optdesc = [p[4] for p in parts]\n \n # EDI variable names\n cts1_0_vname = '_'.join((sc, edi_instr, 'flux1', '0', edi_mode, 'l2'))\n cts1_180_vname = '_'.join((sc, edi_instr, 'flux1', '180', edi_mode, 'l2'))\n \n # Open the file\n files = sdc.Download()\n files = mms_utils.sort_files(files)[0]\n \n # Read the data\n edi_t = []\n edi_cts1_0 = []\n edi_cts1_180 = []\n \n print('EDI Files:')\n for file in files:\n # Open the file\n cdf = pycdf.CDF(file)\n \n # Read the datafi\n edi_t += list(cdf[cdf[cts1_0_vname].attrs['DEPEND_0']][:])\n edi_cts1_0 += list(cdf[cts1_0_vname][:])\n edi_cts1_180 += list(cdf[cts1_180_vname][:])\n \n # Close the file\n cdf.close()\n print(' ' + file)\n \n # Convert back to numpy arrays\n edi_t = np.array(edi_t)\n edi_cts1_0 = np.array(edi_cts1_0)\n edi_cts1_180 = np.array(edi_cts1_180)\n \n #TODO: This is disabled for now, because the data is not a scalar\n \n # Create a dictionary\n edi_data = {\n 'Time' : edi_t,\n 'cts1_0' : edi_cts1_0,\n 'cts1_180' : edi_cts1_180\n }\n \n #Convert dictionary to data from\n print(type(edi_data))\n print(edi_data.keys())\n print(edi_data['Time'].shape)\n edi_data = pd.DataFrame(edi_data, columns=edi_data.keys())\n \n ## Interpolate All Values to `t_des`\n # In this step, we need to get all variables into the same time basis. 
We will interpolate data\n # from FGM and DIS onto the time tags of DES.\n \n # Convert datetime objects to floats\n des_t_stamp = [t.timestamp() for t in des_t]\n fgm_t_stamp = [t.timestamp() for t in fgm_t]\n dis_t_stamp = [t.timestamp() for t in dis_t]\n edp_t_stamp = [t.timestamp() for t in edp_t]\n edi_t_stamp = [t.timestamp() for t in edi_t]\n \n # Interpolate FGM data\n # - An Nx4 array, ordered as (Bx, By, Bz, |B|)\n nTimes = len(des_t_stamp)\n nComps = np.size(fgm_b, 1)\n fgm_b_interp = np.zeros([nTimes, nComps], dtype=float)\n for idx in range(nComps):\n fgm_b_interp[:,idx] = np.interp(des_t_stamp, fgm_t_stamp, fgm_b[:,idx])\n fgm_clock_angle_interp = np.interp(des_t_stamp, fgm_t_stamp, fgm_ca)\n fgm_normal_angle_interp = np.interp(des_t_stamp, fgm_t_stamp, fgm_tbn)\n \n # Interpolate DIS data\n dis_n_interp = np.interp(des_t_stamp, dis_t_stamp, dis_n)\n dis_temp_para_interp = np.interp(des_t_stamp, dis_t_stamp, dis_temp_para)\n dis_temp_perp_interp = np.interp(des_t_stamp, dis_t_stamp, dis_temp_perp)\n dis_temp_interp = np.interp(des_t_stamp, dis_t_stamp, dis_temp)\n # An Nx3 array, ordered as (Vx, Vy, Vz)\n nComps = np.size(dis_v, 1)\n dis_v_interp = np.zeros([nTimes, nComps])\n for idx in range(nComps):\n dis_v_interp[:,idx] = np.interp(des_t_stamp, dis_t_stamp, dis_v[:,idx])\n # An Nx32 array, ordered as (ESpec_00, ESpec_01, ... 
, ESpec_30, ESpec_31)\n nComps = np.size(dis_espec, 1)\n dis_espec_interp = np.zeros([nTimes, nComps])\n for idx in range(nComps):\n dis_espec_interp[:, idx] = np.interp(des_t_stamp, dis_t_stamp, dis_espec[:, idx])\n \n # Interpolate EDP data\n # An Nx3 array, ordered as (Ex, Ey, Ez)\n nComps = np.size(edp_e, 1)\n edp_e_interp = np.zeros([nTimes, nComps])\n for idx in range(nComps):\n edp_e_interp[:,idx] = np.interp(des_t_stamp, edp_t_stamp, edp_e[:,idx])\n edp_scpot_interp = np.interp(des_t_stamp, edp_t_stamp, edp_v)\n \n # Interpolate EDI data\n edi_cts1_0_interp = np.interp(des_t_stamp, edi_t_stamp, edi_cts1_0)\n edi_cts1_180_interp = np.interp(des_t_stamp, edi_t_stamp, edi_cts1_180)\n \n # Print results\n print('Time: ', np.shape(des_t), des_t.dtype)\n print('DES Density: ', np.shape(des_n), des_n.dtype)\n print('DES Velocity: ', np.shape(des_v), des_v.dtype)\n print('DES Temperature (para): ', np.shape(des_temp_para), des_temp_para.dtype)\n print('DES Temperature (perp): ', np.shape(des_temp_perp), des_temp_perp.dtype)\n print('FGM Magnetic Field: ', np.shape(fgm_b_interp), fgm_b_interp.dtype)\n print('DIS Density: ', np.shape(dis_n_interp), dis_n_interp.dtype)\n print('DIS Velocity: ', np.shape(dis_v_interp), dis_v_interp.dtype)\n print('DIS Temperature (para): ', np.shape(dis_temp_para_interp), dis_temp_para_interp.dtype)\n print('DIS Temperature (perp): ', np.shape(dis_temp_perp_interp), dis_temp_perp_interp.dtype)\n \n ## Write a CSV file\n # Open file and write data\n data = {\n 'Time' : des_t,\n 'DES N' : des_n,\n 'DES Vx' : des_v[:,0],\n 'DES Vy' : des_v[:,1],\n 'DES Vz' : des_v[:,2],\n 'DES T_para' : des_temp_para,\n 'DES T_perp' : des_temp_perp,\n 'FGM Bx' : fgm_b_interp[:,0],\n 'FGM By' : fgm_b_interp[:,1],\n 'FGM Bz' : fgm_b_interp[:,2],\n 'FGM Bt' : fgm_b_interp[:,3],\n 'FGM Clock_angle' : fgm_clock_angle_interp, # Needed?\n 'FGM Normal_angle' : fgm_normal_angle_interp, # Needed?\n 'DIS N' : dis_n_interp,\n 'DIS Vx' : dis_v_interp[:,0],\n 'DIS Vy' 
: dis_v_interp[:,1],\n 'DIS Vz' : dis_v_interp[:,2],\n 'DIS T_para' : dis_temp_para_interp,\n 'DIS T_perp' : dis_temp_perp_interp,\n 'DIS Temp' : dis_temp_interp, # Needed?\n 'EDP Ex' : edp_e_interp[:,0],\n 'EDP Ey' : edp_e_interp[:,1],\n 'EDP Ez' : edp_e_interp[:,2],\n 'EDP Scpot' : edp_scpot_interp, # Needed?\n 'EDI cts1_0' : edi_cts1_0_interp,\n 'EDI cts1_180' : edi_cts1_180_interp \n }\n \n # Add des_espec data en masse\n for col in range(np.size(des_espec, 1)):\n data['DES ESpec_{0:02d}'.format(col)] = des_espec[:,col]\n \n # Add dis_espec_interp data en masse\n for col in range(np.size(dis_espec_interp, 1)):\n data['DIS ESpec_{0:02d}'.format(col)] = dis_espec_interp[:,col]\n \n # Create a data frame\n data = pd.DataFrame(data, columns=data.keys())\n \n # Export each sc's data to CSVs\n file_name = '_'.join([spacecraft, level, start_date, 'to']) + end_date + '.csv'\n data.to_csv(\"/home/colin/pymms/sql/\" + file_name, index=False)\n \n# Instrument-specific output disabled for mass data download\n# des_data.to_csv(\"~/data/des_output.csv\", index=False)\n# dis_data.to_csv(\"~/data/dis_output.csv\", index=False)\n# fgm_data.to_csv(\"~/data/fgm_output.csv\", index=False)\n# edp_data.to_csv(\"~/data/edp_output.csv\", index=False)\n# edi_data.to_csv(\"~/data/edi_output.csv\", index=False)" ]
[ [ "numpy.array", "numpy.zeros", "pandas.DataFrame", "numpy.interp", "numpy.shape", "numpy.arctan2", "numpy.size", "numpy.sqrt", "pandas.Series" ] ]
kingqicai/myfast
[ "15c3233ba60a554fe5a8fd493048e9c36400ad18" ]
[ "decoding.py" ]
[ "\"\"\" decoding utilities\"\"\"\nimport json\nimport re\nimport os\nfrom os.path import join\nimport pickle as pkl\nfrom itertools import starmap\n\nfrom cytoolz import curry\n\nimport torch\n\nfrom utils import PAD, UNK, START, END\nfrom model.copy_summ import CopySumm\nfrom model.extract import ExtractSumm, PtrExtractSumm\nfrom model.rl import ActorCritic\nfrom data.batcher import conver2id, pad_batch_tensorize\nfrom data.data import CnnDmDataset\n\n\ntry:\n DATASET_DIR = os.environ['DATA']\nexcept KeyError:\n print('please use environment variable to specify data directories')\n\nclass DecodeDataset(CnnDmDataset):\n \"\"\" get the article sentences only (for decoding use)\"\"\"\n def __init__(self, split):\n assert split in ['val', 'test']\n super().__init__(split, DATASET_DIR)\n\n def __getitem__(self, i):\n js_data = super().__getitem__(i)\n art_sents = js_data['article']\n return art_sents\n\n\ndef make_html_safe(s):\n \"\"\"Rouge use html, has to make output html safe\"\"\"\n return s.replace(\"<\", \"&lt;\").replace(\">\", \"&gt;\")\n\n\ndef load_best_ckpt(model_dir, reverse=False):\n \"\"\" reverse=False->loss, reverse=True->reward/score\"\"\"\n ckpts = os.listdir(join(model_dir, 'ckpt'))\n ckpt_matcher = re.compile('^ckpt-.*-[0-9]*')\n ckpts = sorted([c for c in ckpts if ckpt_matcher.match(c)],\n key=lambda c: float(c.split('-')[1]), reverse=reverse)\n print('loading checkpoint {}...'.format(ckpts[0]))\n ckpt = torch.load(\n join(model_dir, 'ckpt/{}'.format(ckpts[0]))\n )['state_dict']\n return ckpt\n\n\nclass Abstractor(object):\n def __init__(self, abs_dir, max_len=30, cuda=True):\n abs_meta = json.load(open(join(abs_dir, 'meta.json')))\n assert abs_meta['net'] == 'base_abstractor'\n abs_args = abs_meta['net_args']\n abs_ckpt = load_best_ckpt(abs_dir)\n word2id = pkl.load(open(join(abs_dir, 'vocab.pkl'), 'rb'))\n abstractor = CopySumm(**abs_args)\n abstractor.load_state_dict(abs_ckpt,strict=False)\n self._device = torch.device('cuda' if cuda else 
'cpu')\n self._net = abstractor.to(self._device)\n self._word2id = word2id\n self._id2word = {i: w for w, i in word2id.items()}\n self._max_len = max_len\n\n def _prepro(self, raw_article_sents):\n ext_word2id = dict(self._word2id)\n ext_id2word = dict(self._id2word)\n for raw_words in raw_article_sents:\n for w in raw_words:\n if not w in ext_word2id:\n ext_word2id[w] = len(ext_word2id)\n ext_id2word[len(ext_id2word)] = w\n articles = conver2id(UNK, self._word2id, raw_article_sents)\n art_lens = [len(art) for art in articles]\n article = pad_batch_tensorize(articles, PAD, cuda=False\n ).to(self._device)\n extend_arts = conver2id(UNK, ext_word2id, raw_article_sents)\n extend_art = pad_batch_tensorize(extend_arts, PAD, cuda=False\n ).to(self._device)\n extend_vsize = len(ext_word2id)\n dec_args = (article, art_lens, extend_art, extend_vsize,\n START, END, UNK, self._max_len)\n return dec_args, ext_id2word\n\n def __call__(self, raw_article_sents):\n self._net.eval()\n dec_args, id2word = self._prepro(raw_article_sents)\n decs, attns = self._net.batch_decode(*dec_args)\n def argmax(arr, keys):\n return arr[max(range(len(arr)), key=lambda i: keys[i].item())]\n dec_sents = []\n for i, raw_words in enumerate(raw_article_sents):\n dec = []\n for id_, attn in zip(decs, attns):\n if id_[i] == END:\n break\n elif id_[i] == UNK:\n dec.append(argmax(raw_words, attn[i]))\n else:\n dec.append(id2word[id_[i].item()])\n dec_sents.append(dec)\n return dec_sents\n\n\nclass BeamAbstractor(Abstractor):\n def __call__(self, raw_article_sents, beam_size=5, diverse=1.0):\n self._net.eval()\n dec_args, id2word = self._prepro(raw_article_sents)\n dec_args = (*dec_args, beam_size, diverse)\n all_beams = self._net.batched_beamsearch(*dec_args)\n all_beams = list(starmap(_process_beam(id2word),\n zip(all_beams, raw_article_sents)))\n return all_beams\n\n@curry\ndef _process_beam(id2word, beam, art_sent):\n def process_hyp(hyp):\n seq = []\n for i, attn in zip(hyp.sequence[1:], 
hyp.attns[:-1]):\n if i == UNK:\n copy_word = art_sent[max(range(len(art_sent)),\n key=lambda j: attn[j].item())]\n seq.append(copy_word)\n else:\n seq.append(id2word[i])\n hyp.sequence = seq\n del hyp.hists\n del hyp.attns\n return hyp\n return list(map(process_hyp, beam))\n\n\nclass Extractor(object):\n def __init__(self, ext_dir, max_ext=5, cuda=True):\n ext_meta = json.load(open(join(ext_dir, 'meta.json')))\n if ext_meta['net'] == 'ml_ff_extractor':\n ext_cls = ExtractSumm\n elif ext_meta['net'] == 'ml_rnn_extractor':\n ext_cls = PtrExtractSumm\n else:\n raise ValueError()\n ext_ckpt = load_best_ckpt(ext_dir)\n ext_args = ext_meta['net_args']\n extractor = ext_cls(**ext_args)\n extractor.load_state_dict(ext_ckpt)\n word2id = pkl.load(open(join(ext_dir, 'vocab.pkl'), 'rb'))\n self._device = torch.device('cuda' if cuda else 'cpu')\n self._net = extractor.to(self._device)\n self._word2id = word2id\n self._id2word = {i: w for w, i in word2id.items()}\n self._max_ext = max_ext\n\n def __call__(self, raw_article_sents):\n self._net.eval()\n n_art = len(raw_article_sents)\n articles = conver2id(UNK, self._word2id, raw_article_sents)\n article = pad_batch_tensorize(articles, PAD, cuda=False\n ).to(self._device)\n indices = self._net.extract([article], k=min(n_art, self._max_ext))\n return indices\n\n\nclass ArticleBatcher(object):\n def __init__(self, word2id, cuda=True):\n self._device = torch.device('cuda' if cuda else 'cpu')\n self._word2id = word2id\n self._device = torch.device('cuda' if cuda else 'cpu')\n\n def __call__(self, raw_article_sents):\n articles = conver2id(UNK, self._word2id, raw_article_sents)\n article = pad_batch_tensorize(articles, PAD, cuda=False\n ).to(self._device)\n return article\n\nclass RLExtractor(object):\n def __init__(self, ext_dir, cuda=True):\n ext_meta = json.load(open(join(ext_dir, 'meta.json')))\n assert ext_meta['net'] == 'rnn-ext_abs_rl'\n ext_args = ext_meta['net_args']['extractor']['net_args']\n word2id = 
pkl.load(open(join(ext_dir, 'agent_vocab.pkl'), 'rb'))\n extractor = PtrExtractSumm(**ext_args)\n agent = ActorCritic(extractor._sent_enc,\n extractor._art_enc,\n extractor._extractor,\n ArticleBatcher(word2id, cuda))\n ext_ckpt = load_best_ckpt(ext_dir, reverse=True)\n agent.load_state_dict(ext_ckpt)\n self._device = torch.device('cuda' if cuda else 'cpu')\n self._net = agent.to(self._device)\n self._word2id = word2id\n self._id2word = {i: w for w, i in word2id.items()}\n\n def __call__(self, raw_article_sents):\n self._net.eval()\n indices = self._net(raw_article_sents)\n return indices\n" ]
[ [ "torch.device" ] ]
afarahi/tatter
[ "e206b761baec0deb79ddc692a25bfd063f8d5ce1" ]
[ "tatter/KL_estimator.py" ]
[ "from __future__ import absolute_import, division, print_function\nimport numpy as np\n\ndef KL_divergence_estimator(X, Y, k=1):\n \"\"\" Estimate symmetric version of KL divergence. \n The symmetric version is 0.5 * [ D(P|Q) + D(Q|P) ].\n \n Parameters\n ----------\n X, Y: numpy array\n 2-dimensional array where each row is a sample.\n k: int optional\n k-NN to be used. The default is k=1.\n \n return\n ------\n numpy array : estimated D(P|Q)\n \"\"\"\n if not (isinstance(k, int) or k is None):\n raise ValueError('k has incorrect type.')\n if k is not None and k <= 0:\n raise ValueError('k cannot be <= 0')\n\n kl1 = KL_divergence_estimator_sub(X, Y, k=k)\n kl2 = KL_divergence_estimator_sub(Y, X, k=k)\n \n return (kl1 + kl2) / 2.0\n\n\ndef KL_divergence_estimator_sub(X, Y, k=1):\n \"\"\" KL-Divergence estimator universal k-NN estimator.\n \n Parameters\n ----------\n X, Y: numpy array\n 2-dimensional array where each row is a sample.\n k: int, optional\n k-NN to be used. The default is k=1.\n\n return\n ------\n numpy array : estimated D(P|Q)\n \"\"\"\n n, m = len(X), len(Y)\n D = np.log(m / (n - 1))\n d = float(X.shape[1])\n\n for xi in X:\n nu = knn_distance(xi, Y, k-1)\n rho = knn_distance(xi, X, k)\n D += (d/n)*np.log(nu/rho)\n\n return D\n\n\ndef knn_distance(point, sample, k):\n \"\"\" Euclidean distance from `point` to it's `k`-Nearest\n Neighbour in `sample`.\n \n Parameters\n ----------\n point: float\n a data point from a sample.\n sample: numpy array\n 2-dimensional array where each row is a sample.\n k: int\n k-NN to be used.\n\n return\n ------\n numpy-array : `k`-Nearest Neighbour in `sample` \n \"\"\"\n norms = np.linalg.norm(sample-point, axis=1)\n return np.sort(norms)[k]\n\n" ]
[ [ "numpy.linalg.norm", "numpy.sort", "numpy.log" ] ]
franklintandy/MegazordPSC2
[ "11152c63f55e3d94be3e8aebe1c106bdd0365abb" ]
[ "src/zerg_rush.py" ]
[ "import sys, os\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../..\"))\n\nimport numpy as np\nfrom sc2.position import Point2, Point3\n\nimport sc2\nfrom sc2.data import Result\nfrom sc2 import Race, Difficulty\nfrom sc2.constants import *\nfrom sc2.player import Bot, Computer\nfrom sc2.unit import Unit\nfrom sc2.units import Units\n\n\nclass ZergRushBot(sc2.BotAI):\n async def on_start(self):\n self.client.game_step = 2\n\n async def on_step(self, iteration):\n if iteration == 0:\n await self.chat_send(\"(glhf)\")\n\n # Draw creep pixelmap for debugging\n # self.draw_creep_pixelmap()\n\n # If townhall no longer exists: attack move with all units to enemy start location\n if not self.townhalls:\n for unit in self.units.exclude_type({UnitTypeId.EGG, UnitTypeId.LARVA}):\n unit.attack(self.enemy_start_locations[0])\n return\n\n hatch: Unit = self.townhalls[0]\n\n # Pick a target location\n target: Point2 = self.enemy_structures.not_flying.random_or(self.enemy_start_locations[0]).position\n\n # Give all zerglings an attack command\n for zergling in self.units(UnitTypeId.ZERGLING):\n zergling.attack(target)\n\n # Inject hatchery if queen has more than 25 energy\n for queen in self.units(UnitTypeId.QUEEN):\n if queen.energy >= 25 and not hatch.has_buff(BuffId.QUEENSPAWNLARVATIMER):\n queen(AbilityId.EFFECT_INJECTLARVA, hatch)\n\n # Pull workers out of gas if we have almost enough gas mined, this will stop mining when we reached 100 gas mined\n if self.vespene >= 88 or self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED) > 0:\n gas_drones: Units = self.workers.filter(lambda w: w.is_carrying_vespene and len(w.orders) < 2)\n drone: Unit\n for drone in gas_drones:\n minerals: Units = self.mineral_field.closer_than(10, hatch)\n if minerals:\n mineral: Unit = minerals.closest_to(drone)\n drone.gather(mineral, queue=True)\n\n # If we have 100 vespene, this will try to research zergling speed once the spawning pool is at 100% completion\n if 
self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED) == 0 and self.can_afford(\n UpgradeId.ZERGLINGMOVEMENTSPEED\n ):\n spawning_pools_ready: Units = self.structures(UnitTypeId.SPAWNINGPOOL).ready\n if spawning_pools_ready:\n self.research(UpgradeId.ZERGLINGMOVEMENTSPEED)\n\n # If we have less than 2 supply left and no overlord is in the queue: train an overlord\n if self.supply_left < 2 and self.already_pending(UnitTypeId.OVERLORD) < 1:\n self.train(UnitTypeId.OVERLORD, 1)\n\n # While we have less than 88 vespene mined: send drones into extractor one frame at a time\n if (\n self.gas_buildings.ready\n and self.vespene < 88\n and self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED) == 0\n ):\n extractor: Unit = self.gas_buildings.first\n if extractor.surplus_harvesters < 0:\n self.workers.random.gather(extractor)\n\n # If we have lost of minerals, make a macro hatchery\n if self.minerals > 500:\n for d in range(4, 15):\n pos: Point2 = hatch.position.towards(self.game_info.map_center, d)\n if (await self.can_place(UnitTypeId.HATCHERY, [pos]))[0]:\n self.workers.random.build(UnitTypeId.HATCHERY, pos)\n break\n\n # While we have less than 16 drones, make more drones\n if self.can_afford(UnitTypeId.DRONE) and self.supply_workers < 16:\n self.train(UnitTypeId.DRONE)\n\n # If our spawningpool is completed, start making zerglings\n if self.structures(UnitTypeId.SPAWNINGPOOL).ready and self.larva and self.can_afford(UnitTypeId.ZERGLING):\n amount_trained: int = self.train(UnitTypeId.ZERGLING, self.larva.amount)\n\n # If we have no extractor, build extractor\n if (\n self.gas_buildings.amount + self.already_pending(UnitTypeId.EXTRACTOR) == 0\n and self.can_afford(UnitTypeId.EXTRACTOR)\n and self.workers\n ):\n drone: Unit = self.workers.random\n target: Unit = self.vespene_geyser.closest_to(drone)\n drone.build_gas(target)\n\n # If we have no spawning pool, try to build spawning pool\n elif self.structures(UnitTypeId.SPAWNINGPOOL).amount + 
self.already_pending(UnitTypeId.SPAWNINGPOOL) == 0:\n if self.can_afford(UnitTypeId.SPAWNINGPOOL):\n for d in range(4, 15):\n pos: Point2 = hatch.position.towards(self.game_info.map_center, d)\n if (await self.can_place(UnitTypeId.SPAWNINGPOOL, [pos]))[0]:\n drone: Unit = self.workers.closest_to(pos)\n drone.build(UnitTypeId.SPAWNINGPOOL, pos)\n\n # If we have no queen, try to build a queen if we have a spawning pool compelted\n elif (\n self.units(UnitTypeId.QUEEN).amount + self.already_pending(UnitTypeId.QUEEN) < self.townhalls.amount\n and self.structures(UnitTypeId.SPAWNINGPOOL).ready\n ):\n if self.can_afford(UnitTypeId.QUEEN):\n self.train(UnitTypeId.QUEEN)\n\n def draw_creep_pixelmap(self):\n for (y, x), value in np.ndenumerate(self.state.creep.data_numpy):\n p = Point2((x, y))\n h2 = self.get_terrain_z_height(p)\n pos = Point3((p.x, p.y, h2))\n # Red if there is no creep\n color = Point3((255, 0, 0))\n if value == 1:\n # Green if there is creep\n color = Point3((0, 255, 0))\n self._client.debug_box2_out(pos, half_vertex_length=0.25, color=color)\n\n async def on_end(self, game_result: Result):\n print(f\"{self.time_formatted} On end was called\")\n\n\ndef main():\n sc2.run_game(\n sc2.maps.get(\"WintersGateLE\"),\n [Bot(Race.Zerg, ZergRushBot()), Computer(Race.Protoss, Difficulty.Medium)],\n realtime=False,\n save_replay_as=\"ZvT.SC2Replay\",\n )\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.ndenumerate" ] ]
jd-aig/aves2_algorithm_components
[ "12e06717596b824f7b0db115b95856606b97cd82" ]
[ "src/ml/classification/gbdt/run.py" ]
[ "from sklearn.ensemble import GradientBoostingClassifier \nfrom sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score,confusion_matrix\nfrom sklearn.externals import joblib\nimport numpy as np\nimport pandas as pd\nimport argparse\nimport os\nimport json\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--data_dir\", type=str, default=\"../data/\")\nparser.add_argument(\"--output_path\", type=str, default=\"./output/\")\nparser.add_argument(\"--target\", type=str, default=\"virginica\")\nparser.add_argument(\"--loss\", type=str, default=\"deviance\")#deviance,exponential\nparser.add_argument(\"--n_estimators\", type=int, default=100)\nargs = parser.parse_args()\n\ntrain_dataset = os.path.join(args.data_dir,'train.csv')\ntrain_data = pd.read_csv(train_dataset)\n\nlst = train_data.columns.values.tolist()\nidx = lst.index(args.target)\ndel lst[idx]\n\ny_train = train_data.ix[:,args.target].values\nx_train = train_data.ix[:,lst].values\n\nmodel = GradientBoostingClassifier(loss=args.loss,n_estimators=args.n_estimators)\nmodel.fit(x_train,y_train)\n\nsave_path = os.path.join(args.output_path,'model.m')\njoblib.dump(model,save_path)\n" ]
[ [ "pandas.read_csv", "sklearn.externals.joblib.dump", "sklearn.ensemble.GradientBoostingClassifier" ] ]
a4aleem/GymROS-drones
[ "043eab8212d2670e543735294508462a1636256e" ]
[ "src/openai_ros/src/openai_ros/task_envs/wamv/wamv_nav_twosets_buoys.py" ]
[ "import rospy\nimport numpy\nfrom gym import spaces\nfrom openai_ros.robot_envs import wamv_env\nfrom gym.envs.registration import register\nfrom geometry_msgs.msg import Point\nfrom geometry_msgs.msg import Vector3\nfrom tf.transformations import euler_from_quaternion\nfrom openai_ros.task_envs.task_commons import LoadYamlFileParamsTest\nfrom openai_ros.openai_ros_common import ROSLauncher\nimport os\n\nclass WamvNavTwoSetsBuoysEnv(wamv_env.WamvEnv):\n def __init__(self):\n \"\"\"\n Make Wamv learn how to move straight from The starting point\n to a desired point inside the designed corridor.\n http://robotx.org/images/files/RobotX_2018_Task_Summary.pdf\n Demonstrate Navigation Control\n \"\"\"\n\n # This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there\n ros_ws_abspath = rospy.get_param(\"/wamv/ros_ws_abspath\", None)\n assert ros_ws_abspath is not None, \"You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \\'YOUR/SIM_WS/PATH\\'\"\n assert os.path.exists(ros_ws_abspath), \"The Simulation ROS Workspace path \" + ros_ws_abspath + \\\n \" DOESNT exist, execute: mkdir -p \" + ros_ws_abspath + \\\n \"/src;cd \" + ros_ws_abspath + \";catkin_make\"\n\n ROSLauncher(rospackage_name=\"robotx_gazebo\",\n launch_file_name=\"start_world.launch\",\n ros_ws_abspath=ros_ws_abspath)\n\n # Load Params from the desired Yaml file\n LoadYamlFileParamsTest(rospackage_name=\"openai_ros\",\n rel_path_from_package_to_file=\"src/openai_ros/task_envs/wamv/config\",\n yaml_file_name=\"wamv_nav_twosets_buoys.yaml\")\n\n # Here we will add any init functions prior to starting the MyRobotEnv\n super(WamvNavTwoSetsBuoysEnv, self).__init__(ros_ws_abspath)\n\n # Only variable needed to be set here\n\n rospy.logdebug(\"Start WamvNavTwoSetsBuoysEnv INIT...\")\n number_actions = rospy.get_param('/wamv/n_actions')\n self.action_space = spaces.Discrete(number_actions)\n \n # We set the reward range, which 
is not compulsory but here we do it.\n self.reward_range = (-numpy.inf, numpy.inf)\n \n \n # Actions and Observations\n self.propeller_high_speed = rospy.get_param('/wamv/propeller_high_speed')\n self.propeller_low_speed = rospy.get_param('/wamv/propeller_low_speed')\n self.max_angular_speed = rospy.get_param('/wamv/max_angular_speed')\n self.max_distance_from_des_point = rospy.get_param('/wamv/max_distance_from_des_point')\n \n # Get Desired Point to Get\n self.desired_point = Point()\n self.desired_point.x = rospy.get_param(\"/wamv/desired_point/x\")\n self.desired_point.y = rospy.get_param(\"/wamv/desired_point/y\")\n self.desired_point.z = rospy.get_param(\"/wamv/desired_point/z\")\n self.desired_point_epsilon = rospy.get_param(\"/wamv/desired_point_epsilon\")\n \n self.work_space_x_max = rospy.get_param(\"/wamv/work_space/x_max\")\n self.work_space_x_min = rospy.get_param(\"/wamv/work_space/x_min\")\n self.work_space_y_max = rospy.get_param(\"/wamv/work_space/y_max\")\n self.work_space_y_min = rospy.get_param(\"/wamv/work_space/y_min\")\n \n self.dec_obs = rospy.get_param(\"/wamv/number_decimals_precision_obs\")\n \n \n # We place the Maximum and minimum values of observations\n\n high = numpy.array([self.work_space_x_max,\n self.work_space_y_max,\n 1.57,\n 1.57,\n 3.14,\n self.propeller_high_speed,\n self.propeller_high_speed,\n self.max_angular_speed,\n self.max_distance_from_des_point\n ])\n \n low = numpy.array([ self.work_space_x_min,\n self.work_space_y_min,\n -1*1.57,\n -1*1.57,\n -1*3.14,\n -1*self.propeller_high_speed,\n -1*self.propeller_high_speed,\n -1*self.max_angular_speed,\n 0.0\n ])\n\n \n self.observation_space = spaces.Box(low, high)\n \n rospy.logdebug(\"ACTION SPACES TYPE===>\"+str(self.action_space))\n rospy.logdebug(\"OBSERVATION SPACES TYPE===>\"+str(self.observation_space))\n \n # Rewards\n \n self.done_reward =rospy.get_param(\"/wamv/done_reward\")\n self.closer_to_point_reward = rospy.get_param(\"/wamv/closer_to_point_reward\")\n\n 
self.cumulated_steps = 0.0\n \n rospy.logdebug(\"END WamvNavTwoSetsBuoysEnv INIT...\")\n\n def _set_init_pose(self):\n \"\"\"\n Sets the two proppelers speed to 0.0 and waits for the time_sleep\n to allow the action to be executed\n \"\"\"\n\n right_propeller_speed = 0.0\n left_propeller_speed = 0.0\n self.set_propellers_speed( right_propeller_speed,\n left_propeller_speed,\n time_sleep=1.0)\n\n return True\n\n\n def _init_env_variables(self):\n \"\"\"\n Inits variables needed to be initialised each time we reset at the start\n of an episode.\n :return:\n \"\"\"\n\n # For Info Purposes\n self.cumulated_reward = 0.0\n # We get the initial pose to mesure the distance from the desired point.\n odom = self.get_odom()\n current_position = Vector3()\n current_position.x = odom.pose.pose.position.x\n current_position.y = odom.pose.pose.position.y\n self.previous_distance_from_des_point = self.get_distance_from_desired_point(current_position)\n\n \n\n def _set_action(self, action):\n \"\"\"\n It sets the joints of wamv based on the action integer given\n based on the action number given.\n :param action: The action integer that sets what movement to do next.\n \"\"\"\n \n rospy.logdebug(\"Start Set Action ==>\"+str(action))\n \n \n right_propeller_speed = 0.0\n left_propeller_speed = 0.0\n \n if action == 0: # Go Forwards\n right_propeller_speed = self.propeller_high_speed\n left_propeller_speed = self.propeller_high_speed\n elif action == 1: # Go BackWards\n right_propeller_speed = -1*self.propeller_high_speed\n left_propeller_speed = -1*self.propeller_high_speed\n elif action == 2: # Turn Left\n right_propeller_speed = self.propeller_high_speed\n left_propeller_speed = -1*self.propeller_high_speed\n elif action == 3: # Turn Right\n right_propeller_speed = -1*self.propeller_high_speed\n left_propeller_speed = self.propeller_high_speed\n\n \n # We tell wamv the propeller speeds\n self.set_propellers_speed( right_propeller_speed,\n left_propeller_speed,\n time_sleep=1.0)\n 
\n rospy.logdebug(\"END Set Action ==>\"+str(action))\n\n def _get_obs(self):\n \"\"\"\n Here we define what sensor data defines our robots observations\n To know which Variables we have access to, we need to read the\n WamvEnv API DOCS.\n :return: observation\n \"\"\"\n rospy.logdebug(\"Start Get Observation ==>\")\n\n odom = self.get_odom()\n base_position = odom.pose.pose.position\n base_orientation_quat = odom.pose.pose.orientation\n base_roll, base_pitch, base_yaw = self.get_orientation_euler(base_orientation_quat)\n base_speed_linear = odom.twist.twist.linear\n base_speed_angular_yaw = odom.twist.twist.angular.z\n \n distance_from_desired_point = self.get_distance_from_desired_point(base_position)\n\n observation = []\n observation.append(round(base_position.x,self.dec_obs))\n observation.append(round(base_position.y,self.dec_obs))\n \n observation.append(round(base_roll,self.dec_obs))\n observation.append(round(base_pitch,self.dec_obs))\n observation.append(round(base_yaw,self.dec_obs))\n \n observation.append(round(base_speed_linear.x,self.dec_obs))\n observation.append(round(base_speed_linear.y,self.dec_obs))\n \n observation.append(round(base_speed_angular_yaw,self.dec_obs))\n \n observation.append(round(distance_from_desired_point,self.dec_obs))\n\n return observation\n \n\n def _is_done(self, observations):\n \"\"\"\n We consider the episode done if:\n 1) The wamvs is ouside the workspace\n 2) It got to the desired point\n \"\"\"\n distance_from_desired_point = observations[8]\n\n current_position = Vector3()\n current_position.x = observations[0]\n current_position.y = observations[1]\n \n is_inside_corridor = self.is_inside_workspace(current_position)\n has_reached_des_point = self.is_in_desired_position(current_position, self.desired_point_epsilon)\n \n done = not(is_inside_corridor) or has_reached_des_point\n \n return done\n\n def _compute_reward(self, observations, done):\n \"\"\"\n We Base the rewards in if its done or not and we base it on\n if 
the distance to the desired point has increased or not\n :return:\n \"\"\"\n\n # We only consider the plane, the fluctuation in z is due mainly to wave\n current_position = Point()\n current_position.x = observations[0]\n current_position.y = observations[1]\n \n distance_from_des_point = self.get_distance_from_desired_point(current_position)\n distance_difference = distance_from_des_point - self.previous_distance_from_des_point\n\n\n if not done:\n \n # If there has been a decrease in the distance to the desired point, we reward it\n if distance_difference < 0.0:\n rospy.logwarn(\"DECREASE IN DISTANCE GOOD\")\n reward = self.closer_to_point_reward\n else:\n rospy.logerr(\"ENCREASE IN DISTANCE BAD\")\n reward = -1*self.closer_to_point_reward\n\n else:\n \n if self.is_in_desired_position(current_position, self.desired_point_epsilon):\n reward = self.done_reward\n else:\n reward = -1*self.done_reward\n\n\n self.previous_distance_from_des_point = distance_from_des_point\n\n\n rospy.logdebug(\"reward=\" + str(reward))\n self.cumulated_reward += reward\n rospy.logdebug(\"Cumulated_reward=\" + str(self.cumulated_reward))\n self.cumulated_steps += 1\n rospy.logdebug(\"Cumulated_steps=\" + str(self.cumulated_steps))\n\n return reward\n\n\n # Internal TaskEnv Methods\n \n def is_in_desired_position(self,current_position, epsilon=0.05):\n \"\"\"\n It return True if the current position is similar to the desired poistion\n \"\"\"\n \n is_in_desired_pos = False\n \n \n x_pos_plus = self.desired_point.x + epsilon\n x_pos_minus = self.desired_point.x - epsilon\n y_pos_plus = self.desired_point.y + epsilon\n y_pos_minus = self.desired_point.y - epsilon\n \n x_current = current_position.x\n y_current = current_position.y\n \n x_pos_are_close = (x_current <= x_pos_plus) and (x_current > x_pos_minus)\n y_pos_are_close = (y_current <= y_pos_plus) and (y_current > y_pos_minus)\n \n is_in_desired_pos = x_pos_are_close and y_pos_are_close\n \n rospy.logdebug(\"###### IS DESIRED POS ? 
######\")\n rospy.logdebug(\"current_position\"+str(current_position))\n rospy.logdebug(\"x_pos_plus\"+str(x_pos_plus)+\",x_pos_minus=\"+str(x_pos_minus))\n rospy.logdebug(\"y_pos_plus\"+str(y_pos_plus)+\",y_pos_minus=\"+str(y_pos_minus))\n rospy.logdebug(\"x_pos_are_close\"+str(x_pos_are_close))\n rospy.logdebug(\"y_pos_are_close\"+str(y_pos_are_close))\n rospy.logdebug(\"is_in_desired_pos\"+str(is_in_desired_pos))\n rospy.logdebug(\"############\")\n \n return is_in_desired_pos\n \n def get_distance_from_desired_point(self, current_position):\n \"\"\"\n Calculates the distance from the current position to the desired point\n :param start_point:\n :return:\n \"\"\"\n distance = self.get_distance_from_point(current_position,\n self.desired_point)\n \n return distance\n \n def get_distance_from_point(self, pstart, p_end):\n \"\"\"\n Given a Vector3 Object, get distance from current position\n :param p_end:\n :return:\n \"\"\"\n a = numpy.array((pstart.x, pstart.y, pstart.z))\n b = numpy.array((p_end.x, p_end.y, p_end.z))\n \n distance = numpy.linalg.norm(a - b)\n \n return distance\n \n def get_orientation_euler(self, quaternion_vector):\n # We convert from quaternions to euler\n orientation_list = [quaternion_vector.x,\n quaternion_vector.y,\n quaternion_vector.z,\n quaternion_vector.w]\n \n roll, pitch, yaw = euler_from_quaternion(orientation_list)\n return roll, pitch, yaw\n \n def is_inside_workspace(self,current_position):\n \"\"\"\n Check if the Wamv is inside the Workspace defined\n \"\"\"\n is_inside = False\n\n rospy.logwarn(\"##### INSIDE WORK SPACE? 
#######\")\n rospy.logwarn(\"XYZ current_position\"+str(current_position))\n rospy.logwarn(\"work_space_x_max\"+str(self.work_space_x_max)+\",work_space_x_min=\"+str(self.work_space_x_min))\n rospy.logwarn(\"work_space_y_max\"+str(self.work_space_y_max)+\",work_space_y_min=\"+str(self.work_space_y_min))\n rospy.logwarn(\"############\")\n\n if current_position.x > self.work_space_x_min and current_position.x <= self.work_space_x_max:\n if current_position.y > self.work_space_y_min and current_position.y <= self.work_space_y_max:\n is_inside = True\n \n return is_inside\n \n \n\n" ]
[ [ "numpy.array", "numpy.linalg.norm" ] ]
BarbeauGroup/pygama
[ "5224ab6cacc9f354e49f3234fb32c09fcd99f760" ]
[ "pygama/dsp/_processors/multi_t_filter.py" ]
[ "import numpy as np\nfrom numba import guvectorize\nfrom pygama.dsp._processors.time_point_thresh import time_point_thresh\n\n \n@guvectorize([\"void(float32[:],float32[:],float32[:])\",\n \"void(float64[:],float64[:],float64[:])\"],\n \"(n),(n) -> (n)\", nopython=True, cache=True)\ndef remove_duplicates(t_in, vt_min_in, t_out):\n \"\"\" \n time_point_thresh has issues with afterpulsing in waveforms that causes \n an aferpulse peak's tp0 to be sent to 0 or the same index as the tp0 for the first pulse.\n This only happens when the relative minimum between the first pulse and \n the afterpulse is greater than the threshold. So, we sweep through the array again \n to ensure there are no duplicate indices. If there are duplicate indicies caused by a\n misidentified tp0 of an afterpulse, we replace its index by that of the corresponding minimum\n found using the get_multi_local_extrema function. It also checks to make sure that the maximum of a waveform\n isn't right at index 0.\n ----------\n t_in : array-like\n The array of indices that we want to remove duplicates from \n vt_min_in : array-like\n List of indicies of minima that we want to replace duplicates in t_out with\n t_out: array-like\n The array we want to return that will have no duplicate indices in it\n \"\"\"\n # initialize arrays\n t_out[:] = np.nan\n \n # checks\n if (np.isnan(t_in).all() and np.isnan(vt_min_in).all()): # we pad these with NaNs, so only return if there is nothing to analyze\n return\n \n # check if any later indexed values are equal to the earliest instance\n k=0\n for index1 in range(len(t_in)):\n for index2 in range(len(t_in[index1+1:])):\n if t_in[index1] == t_in[index2+index1+1]: \n t_out[index2+index1+1] = vt_min_in[k]\n k+=1 # this makes sure that the index of the misidentified afterpulse tp0 is replaced with the correct corresponding minimum\n \n # Fill up the output with the rest of the values from the input that weren't repeats \n for index in range(len(t_in)): \n if 
(np.isnan(t_out[index]) and not np.isnan(t_in[index])):\n t_out[index] = t_in[index]\n \n # makes sure that the first maximum found isn't the start of the waveform\n if not np.isnan(t_out[0]):\n if int(t_out[0]) == 0: \n t_out[:] = np.append(t_out[1:],np.nan)\n\n\n\n@guvectorize([\"void(float32[:], float32[:], float32[:], float32[:], float32[:])\",\n \"void(float64[:], float64[:], float64[:], float64[:], float64[:])\"],\n \"(n),(),(m),(m),(m)\", forceobj=True, cache=True)\ndef multi_t_filter(w_in, a_threshold_in, vt_max_in, vt_min_in, t_out):\n \"\"\"\n Gets list of indices of the start of leading edges of multiple peaks within a waveform.\n Is built to handle afterpulses/delayed cross talk and trains of pulses.\n The multi_t_filter works by calling the vectorized functions \n \"get_multi_local_extrema\" which returns a list of the maxima and minima in a waveform,\n and then the list of maxima is fed into \"time_point_thresh\" which returns \n the final times that waveform is less than a specified threshold. 
\n Parameters\n ----------\n w_in : array-like\n The array of data within which the list of tp0s will be found\n a_threshold_in: scalar \n Threshold to search for using time_point_thresh\n vt_maxs_in : array-like\n The array of max positions for each wf\n vt_mins_in : array-like\n The array of min positions for each wf\n Returns\n -------\n t_out : array-like\n Array of fixed length (padded with nans) that hold the indices of\n the identified initial rise times of peaks in the signal\n \n \"\"\"\n \n # initialize arrays, padded with the elements we want\n t_out[:] = np.nan \n \n # checks \n if (np.isnan(w_in).any() or np.isnan(a_threshold_in)):\n return\n if (np.isnan(vt_max_in).all() and np.isnan(vt_min_in).all()):\n return \n if (not len(t_out)<=len(w_in)):\n raise DSPFatal('The length of your return array must be smaller than the length of your waveform')\n\n # Initialize an intermediate array to hold the tp0 values before we remove duplicates from it\n intermediate_t_out = np.full_like(t_out, np.nan, dtype=np.float32)\n \n # Go through the list of maxima, calling time_point_thresh (the refactored version ignores the nan padding)\n time_point_thresh(w_in, a_threshold_in, vt_max_in, 0, intermediate_t_out)\n\n # Remove duplicates from the t_out list\n remove_duplicates(intermediate_t_out, vt_min_in, t_out)" ]
[ [ "numpy.append", "numpy.isnan", "numpy.full_like" ] ]
cesar-rocha/HorizontalConvection
[ "73773607291be2e5d963cb5513c39981b3606080" ]
[ "Code/Figure6.py" ]
[ "\n\"\"\"\n Script for 'The heat flux of horizontal convection: \n definition of the Nusselt number and scaling second paper,' \n by C.B. Rocha, T. Bossy, N.C. Constantinou, S.G. Llewellyn Smith \n & W.R. Young, submitted to JFM.\n\n Figure6.py: log vs. log Ra-Nu diagram.\n\n Cesar Rocha et al.\n WHOI, Spring 2019\n \n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\n\nplt.close('all')\n\n# Load data\ndata = np.load(\"../Data/NuVsRa.npz\")\n\n# Plotting\nfig = plt.figure(figsize=(8.5,6.5))\n\nax = fig.add_subplot(111)\n\nplt.loglog(data['Ra_2D_FS'],data['Nu_2D_FS'],'b.',markersize=10,\n markerfacecolor='none')\nplt.loglog(data['Ra_2D_NS'],data['Nu_2D_NS'],'rs',markersize=4,\n markerfacecolor='none')\nplt.loglog(data['Ra_3D_NS'],data['Nu_3D_NS'],'rs',markersize=4)\nplt.loglog(data['Ra_3D_FS'],data['Nu_3D_FS'],'bo',markersize=5)\n\nRas = 8*np.array([4e5,1e11])\nplt.loglog(Ras,.13*(Ras**(1/5)),'k',linewidth=1)\nRas = 8*np.array([1e11,1e13])\nplt.loglog(Ras,.033*(Ras**(1/4)),'k',linewidth=1)\n\nplt.text(6.4e9,10,r'Ra$^{1/5}$')\nplt.text(1.2e13,52,r'Ra$^{1/4}$')\n\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\n\nRas = np.linspace(1e-1, 5e3, 100)\nRas1 = np.linspace(1e-1, 2e4, 100)\nplt.plot(8*Ras, np.ones(np.size(Ras)) + (Ras/2695.9)**2,'k--',linewidth=1)\nplt.plot(8*Ras1, np.ones(np.size(Ras1)) + (Ras1/10973.7)**2,'k--',linewidth=1)\n\nplt.plot(.4e10,2.4,'rs')\nplt.plot(.7e10,2.4,'bo')\nplt.text(1.275e10,2.325,'3D')\n\nplt.plot(.4e10,2.0,'rs',markerfacecolor='none')\nplt.plot(.7e10,2.0,'bo',markerfacecolor='none')\nplt.text(1.275e10,1.925,'2D')\n\nplt.text(2.5e9,1.45,r'free-slip',color='b')\nplt.text(2.5e9,1.15,r'no-slip',color='r')\n\nplt.xlim(1,1e14)\nplt.yticks([1,2,5,10,20,40,80,160],[\"1\",\"2\",\"5\",\"10\",\"20\",\"40\",\"80\",\"160\"])\n\nplt.ylabel('Nu')\nplt.xlabel(r'Ra')\n\nRas = np.linspace(1e-1, 1e4, 100)\nsub_axes = plt.axes([.2, .525, .25, 
.25])\nsub_axes.plot(data['Ra_2D_FS'],data['Nu_2D_FS'],'b.',markersize=10,\n markerfacecolor='none')\nsub_axes.plot(data['Ra_2D_NS'],data['Nu_2D_NS'],'rs',markersize=4,\n markerfacecolor='none')\nsub_axes.plot(8*Ras, np.ones(np.size(Ras)) + (Ras/2695.9)**2,'k--',linewidth=1)\nsub_axes.plot(8*Ras, np.ones(np.size(Ras)) + (Ras/10973.7)**2,'k--',linewidth=1)\nsub_axes.spines['top'].set_visible(False)\nsub_axes.spines['right'].set_visible(False)\nsub_axes.set_yticks([1,1.1])\nsub_axes.set_xlabel(r'Ra$\\times 10^{-4}$')\nsub_axes.set_ylabel('Nu')\nsub_axes.set_xlim(0,4e4)\nsub_axes.set_ylim(0.99,1.15)\nsub_axes.yaxis.set_label_coords(-0.09,0.45)\nsub_axes.xaxis.set_label_coords(0.5,-0.175)\n\nplt.xticks([0,1e4,2e4,3e4,4e4],[\"0\",\"1\",\"2\",\"3\",\"4\"])\n\nplt.savefig(\"../Figz/Figure6.png\",dpi=800)\nplt.savefig(\"../Figz/Figure6.eps\")\n" ]
[ [ "matplotlib.pyplot.text", "numpy.array", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "numpy.load", "matplotlib.pyplot.close", "matplotlib.pyplot.yticks", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "numpy.size", "numpy.linspace", "matplotlib.pyplot.axes", "matplotlib.pyplot.loglog", "matplotlib.pyplot.xticks" ] ]
emperorjnx/EmotionRecognition
[ "db808135ebc1aa07f8de4f9c0253afd68b561213" ]
[ "train_model.py" ]
[ "from __future__ import print_function\nimport keras\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization\nfrom keras.layers import Conv2D, MaxPooling2D\nimport tensorflow as tf\nfrom tensorflow.keras import regularizers\nimport matplotlib.pyplot as plt\nimport numpy\n\nimport os\n\n\nnum_classes = 5 # we have 5 kinds of emotions\nimg_rows, img_cols = 48, 48\nbatch_size = 32\n\n# Dataset Pathb\ntrain_data_dir = os.path.join(\"data\",\"train\")\nvalidation_data_dir = os.path.join(\"data\",\"validation\")\n\n\n# gen of images from one image\ntrain_datagen = ImageDataGenerator(\n\n\t\t\t\t\trescale = 1./255,\n\t\t\t\t\trotation_range = 30,\n\t\t\t\t\tshear_range = 0.3,\n\t\t\t\t\tzoom_range = 0.3,\n\t\t\t\t\twidth_shift_range = 0.4,\n\t\t\t\t\theight_shift_range = 0.4,\n\t\t\t\t\thorizontal_flip = True,\n\t\t\t\t\tfill_mode = 'nearest'\n\t\t\t\t\t\t\t\t)\n\n# gen of validation images by rescaling\nvalidation_datagen = ImageDataGenerator(rescale = 1./255)\n\n# \ntrain_generator = train_datagen.flow_from_directory(\n\n\t\t\t\t\t\ttrain_data_dir,\n\t\t\t\t\t\tcolor_mode = 'grayscale',\n\t\t\t\t\t\ttarget_size = (img_rows, img_cols),\n\t\t\t\t\t\tbatch_size = batch_size,\n\t\t\t\t\t\tclass_mode = 'categorical',\n\t\t\t\t\t\tshuffle = True\n\t\t\t\t\t\t\t\t\t)\n\nvalidation_generator = validation_datagen.flow_from_directory(\n\n\t\t\t\t\t\t\tvalidation_data_dir,\n\t\t\t\t\t\t\tcolor_mode = 'grayscale',\n\t\t\t\t\t\t\ttarget_size = (img_rows, img_cols),\n\t\t\t\t\t\t\tbatch_size = batch_size,\n\t\t\t\t\t\t\tclass_mode = 'categorical',\n\t\t\t\t\t\t\tshuffle = True\n\t\t\t\t\t\t\t\t\t)\n\n# Now we define our CNN\n\nmodel = Sequential()\n\n# Block 1 of our CNN\n\nmodel.add(Conv2D(32,(3,3), padding = 'same', kernel_initializer='he_normal', input_shape=(img_rows,img_cols,1) ))\n\n# 
model.add(Activation('elu'))\nmodel.add(Activation('relu'))\n\nmodel.add(BatchNormalization())\nmodel.add(Conv2D(32,(3,3), padding = 'same', kernel_initializer='he_normal', input_shape=(img_rows,img_cols,1) ))\n\nmodel.add(Activation('elu'))\n# model.add(Activation('relu'))\n\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.2))\n\n# Block 2 of our CNN\nmodel.add(Conv2D(64,(3,3), padding = 'same', kernel_initializer='he_normal' ))\n\nmodel.add(Activation('elu'))\n# model.add(Activation('relu'))\n\nmodel.add(BatchNormalization())\nmodel.add(Conv2D(64,(3,3), padding = 'same', kernel_initializer='he_normal' ))\nmodel.add(Activation('elu'))\n# model.add(Activation('relu'))\n\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.2))\n\n# Block 3 of our CNN\nmodel.add(Conv2D(128,(3,3), padding = 'same', kernel_initializer='he_normal' ))\nmodel.add(Activation('elu'))\n# model.add(Activation('relu'))\n\nmodel.add(BatchNormalization())\nmodel.add(Conv2D(128,(3,3), padding = 'same', kernel_initializer='he_normal' ))\nmodel.add(Activation('elu'))\n# model.add(Activation('relu'))\n\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.2))\n\n# Block 4 of our CNN\nmodel.add(Conv2D(256,(3,3), padding = 'same', kernel_initializer='he_normal' ))\n\nmodel.add(Activation('elu'))\n# model.add(Activation('relu'))\n\nmodel.add(BatchNormalization())\nmodel.add(Conv2D(256,(3,3), padding = 'same', kernel_initializer='he_normal' ))\n\nmodel.add(Activation('elu'))\n# model.add(Activation('relu'))\n\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.2))\n\n# Block 5 >>> CNN is completed now flattening will start\nmodel.add(Flatten())\nmodel.add(Dense(64, kernel_initializer='he_normal'))\nmodel.add(Activation('elu'))\n# model.add(Activation('relu'))\n\nmodel.add(BatchNormalization())\nmodel.add(Dropout(0.3))\n\n# 
Block 6\nmodel.add(Dense(64, kernel_initializer='he_normal'))\nmodel.add(Activation('elu'))\n# model.add(Activation('relu'))\n\nmodel.add(BatchNormalization())\nmodel.add(Dropout(0.4))\n\n# Block 7\nmodel.add(Dense(num_classes,kernel_initializer='he_normal'))\nmodel.add(Activation('softmax'))\n\n\n\nprint(model.summary())\n\n\n# Abhi training krenge\nfrom keras.optimizers import RMSprop, SGD, Adam\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n\n\nname_of_model = os.path.join(\"model\",\"mach_six.h5\")\n\n\ncheckpoint = ModelCheckpoint(\n\t\t\t\t\tname_of_model,\n\t\t\t\t\t# monitor = 'val_loss',\n\t\t\t\t\tmonitor = 'val_accuracy',\n\t\t\t\t\tmode = 'max',\n\t\t\t\t\tsave_best_only = True,\n\t\t\t\t\tverbose = 1\n\t\t\t\t\t\t)\n\nearlystop = EarlyStopping(\n\n\t\t\t\tmonitor = 'val_accuracy',\n\t\t\t\tmin_delta = 0,\n\t\t\t\tpatience = 7,\n\t\t\t\tverbose = 1,\n\t\t\t\trestore_best_weights = True\n\t\t\t\t\t\t)\n\nreduce_lr = ReduceLROnPlateau(\n\t\t\t\tmonitor = 'val_accuracy',\n\t\t\t\tfactor = 0.8,\n\t\t\t\tpatience = 2,\n\t\t\t\tverbose = 1,\n\t\t\t\tmin_delta = 0.0001\n\t\t\t\t\t\t)\n\ncallbacks = [earlystop, checkpoint, reduce_lr ]\n# callbacks = [checkpoint, reduce_lr]\n\n\n\n\nmodel.compile(loss='categorical_crossentropy',\n\t\t\t\toptimizer = Adam(lr=0.0025),\n\t\t\t\tmetrics = ['accuracy']\n\t\t\t\t\t)\n\nnb_train_samples = 18907\nnb_validation_samples = 6791\nepochs = 32\n\n\nhistory = model.fit_generator(\n\t\t\ttrain_generator,\n\t\t\tsteps_per_epoch = nb_train_samples//batch_size,\n\t\t\tepochs = epochs,\n\t\t\tcallbacks = callbacks,\n\t\t\tvalidation_data = validation_generator,\n\t\t\tvalidation_steps = nb_validation_samples//batch_size\n\t\t\t\t)\n\n\n# list all data in history\nprint(history.history.keys())\n\n# summarize history for accuracy\nplt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\n# 
plt.plot(history.history['lr'])\n\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\n\nplt.legend(['train acc', 'val acc', 'train loss', 'val loss'], loc='upper right')\nplt.show()\n\n\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
rbtsbg/captum
[ "b9d88a9f0b65c25f55337ab8b6617abffe87acc7" ]
[ "captum/attr/_utils/common.py" ]
[ "#!/usr/bin/env python3\nimport typing\nfrom inspect import signature\nfrom typing import TYPE_CHECKING, Any, Callable, List, Tuple, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom ..._utils.common import _format_baseline, _format_input, _format_output\nfrom ..._utils.common import _validate_input as _validate_input_basic\nfrom ..._utils.typing import (\n BaselineType,\n Literal,\n TargetType,\n TensorOrTupleOfTensorsGeneric,\n)\nfrom .approximation_methods import SUPPORTED_METHODS\n\nif TYPE_CHECKING:\n from .attribution import GradientAttribution\n\n\ndef _validate_target(num_samples: int, target: TargetType) -> None:\n if isinstance(target, list) or (\n isinstance(target, torch.Tensor) and torch.numel(target) > 1\n ):\n assert num_samples == len(target), (\n \"The number of samples provied in the\"\n \"input {} does not match with the number of targets. {}\".format(\n num_samples, len(target)\n )\n )\n\n\ndef _validate_input(\n inputs: Tuple[Tensor, ...],\n baselines: Tuple[Union[Tensor, int, float], ...],\n n_steps: int = 50,\n method: str = \"riemann_trapezoid\",\n draw_baseline_from_distrib: bool = False,\n) -> None:\n _validate_input_basic(inputs, baselines, draw_baseline_from_distrib)\n assert (\n n_steps >= 0\n ), \"The number of steps must be a positive integer. \" \"Given: {}\".format(n_steps)\n\n assert (\n method in SUPPORTED_METHODS\n ), \"Approximation method must be one for the following {}. \" \"Given {}\".format(\n SUPPORTED_METHODS, method\n )\n\n\ndef _validate_noise_tunnel_type(\n nt_type: str, supported_noise_tunnel_types: List[str]\n) -> None:\n assert nt_type in supported_noise_tunnel_types, (\n \"Noise types must be either `smoothgrad`, `smoothgrad_sq` or `vargrad`. 
\"\n \"Given {}\".format(nt_type)\n )\n\n\[email protected]\ndef _format_input_baseline(\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n baselines: Union[Tensor, Tuple[Tensor, ...]],\n) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:\n ...\n\n\[email protected]\ndef _format_input_baseline(\n inputs: Union[Tensor, Tuple[Tensor, ...]], baselines: BaselineType\n) -> Tuple[Tuple[Tensor, ...], Tuple[Union[Tensor, int, float], ...]]:\n ...\n\n\ndef _format_input_baseline(\n inputs: Union[Tensor, Tuple[Tensor, ...]], baselines: BaselineType\n) -> Tuple[Tuple[Tensor, ...], Tuple[Union[Tensor, int, float], ...]]:\n inputs = _format_input(inputs)\n baselines = _format_baseline(baselines, inputs)\n return inputs, baselines\n\n\n# This function can potentially be merged with the `format_baseline` function\n# however, since currently not all algorithms support baselines of type\n# callable this will be kept in a separate function.\[email protected]\ndef _format_callable_baseline(\n baselines: Union[\n None,\n Callable[..., Union[Tensor, Tuple[Tensor, ...]]],\n Tensor,\n Tuple[Tensor, ...],\n ],\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n) -> Tuple[Tensor, ...]:\n ...\n\n\[email protected]\ndef _format_callable_baseline(\n baselines: Union[\n None,\n Callable[..., Union[Tensor, Tuple[Tensor, ...]]],\n Tensor,\n int,\n float,\n Tuple[Union[Tensor, int, float], ...],\n ],\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n) -> Tuple[Union[Tensor, int, float], ...]:\n ...\n\n\ndef _format_callable_baseline(\n baselines: Union[\n None,\n Callable[..., Union[Tensor, Tuple[Tensor, ...]]],\n Tensor,\n int,\n float,\n Tuple[Union[Tensor, int, float], ...],\n ],\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n) -> Tuple[Union[Tensor, int, float], ...]:\n if callable(baselines):\n # Note: this assumes that if baselines is a function and if it takes\n # arguments, then the first argument is the `inputs`.\n # This can be expanded in the future with better type checks\n baseline_parameters = 
signature(baselines).parameters\n if len(baseline_parameters) == 0:\n baselines = baselines()\n else:\n baselines = baselines(inputs)\n return _format_baseline(baselines, _format_input(inputs))\n\n\ndef _format_and_verify_strides(\n strides: Union[None, int, Tuple[int, ...], Tuple[Union[int, Tuple[int, ...]], ...]],\n inputs: Tuple[Tensor, ...],\n) -> Tuple[Union[int, Tuple[int, ...]], ...]:\n # Formats strides, which are necessary for occlusion\n # Assumes inputs are already formatted (in tuple)\n if strides is None:\n strides = tuple(1 for input in inputs)\n if len(inputs) == 1 and not (isinstance(strides, tuple) and len(strides) == 1):\n strides = (strides,) # type: ignore\n assert isinstance(strides, tuple) and len(strides) == len(\n inputs\n ), \"Strides must be provided for each input tensor.\"\n for i in range(len(inputs)):\n assert isinstance(strides[i], int) or (\n isinstance(strides[i], tuple)\n and len(strides[i]) == len(inputs[i].shape) - 1 # type: ignore\n ), (\n \"Stride for input index {} is {}, which is invalid for input with \"\n \"shape {}. 
It must be either an int or a tuple with length equal to \"\n \"len(input_shape) - 1.\"\n ).format(\n i, strides[i], inputs[i].shape\n )\n\n return strides\n\n\ndef _format_and_verify_sliding_window_shapes(\n sliding_window_shapes: Union[Tuple[int, ...], Tuple[Tuple[int, ...], ...]],\n inputs: Tuple[Tensor, ...],\n) -> Tuple[Tuple[int, ...], ...]:\n # Formats shapes of sliding windows, which is necessary for occlusion\n # Assumes inputs is already formatted (in tuple)\n if isinstance(sliding_window_shapes[0], int):\n sliding_window_shapes = (sliding_window_shapes,) # type: ignore\n sliding_window_shapes: Tuple[Tuple[int, ...], ...]\n assert len(sliding_window_shapes) == len(\n inputs\n ), \"Must provide sliding window dimensions for each input tensor.\"\n for i in range(len(inputs)):\n assert (\n isinstance(sliding_window_shapes[i], tuple)\n and len(sliding_window_shapes[i]) == len(inputs[i].shape) - 1\n ), (\n \"Occlusion shape for input index {} is {} but should be a tuple with \"\n \"{} dimensions.\"\n ).format(\n i, sliding_window_shapes[i], len(inputs[i].shape) - 1\n )\n return sliding_window_shapes\n\n\[email protected]\ndef _compute_conv_delta_and_format_attrs(\n attr_algo: \"GradientAttribution\",\n return_convergence_delta: bool,\n attributions: Tuple[Tensor, ...],\n start_point: Union[int, float, Tensor, Tuple[Union[int, float, Tensor], ...]],\n end_point: Union[Tensor, Tuple[Tensor, ...]],\n additional_forward_args: Any,\n target: TargetType,\n is_inputs_tuple: Literal[False] = False,\n) -> Union[Tensor, Tuple[Tensor, Tensor]]:\n ...\n\n\[email protected]\ndef _compute_conv_delta_and_format_attrs(\n attr_algo: \"GradientAttribution\",\n return_convergence_delta: bool,\n attributions: Tuple[Tensor, ...],\n start_point: Union[int, float, Tensor, Tuple[Union[int, float, Tensor], ...]],\n end_point: Union[Tensor, Tuple[Tensor, ...]],\n additional_forward_args: Any,\n target: TargetType,\n is_inputs_tuple: Literal[True],\n) -> Union[Tuple[Tensor, ...], 
Tuple[Tuple[Tensor, ...], Tensor]]:\n ...\n\n\n# FIXME: GradientAttribution is provided as a string due to a circular import.\n# This should be fixed when common is refactored into separate files.\ndef _compute_conv_delta_and_format_attrs(\n attr_algo: \"GradientAttribution\",\n return_convergence_delta: bool,\n attributions: Tuple[Tensor, ...],\n start_point: Union[int, float, Tensor, Tuple[Union[int, float, Tensor], ...]],\n end_point: Union[Tensor, Tuple[Tensor, ...]],\n additional_forward_args: Any,\n target: TargetType,\n is_inputs_tuple: bool = False,\n) -> Union[\n Tensor, Tuple[Tensor, ...], Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]\n]:\n if return_convergence_delta:\n # computes convergence error\n delta = attr_algo.compute_convergence_delta(\n attributions,\n start_point,\n end_point,\n additional_forward_args=additional_forward_args,\n target=target,\n )\n return _format_output(is_inputs_tuple, attributions), delta\n else:\n return _format_output(is_inputs_tuple, attributions)\n\n\ndef _tensorize_baseline(\n inputs: Tuple[Tensor, ...], baselines: Tuple[Union[int, float, Tensor], ...]\n) -> Tuple[Tensor, ...]:\n def _tensorize_single_baseline(baseline, input):\n if isinstance(baseline, (int, float)):\n return torch.full_like(input, baseline)\n if input.shape[0] > baseline.shape[0] and baseline.shape[0] == 1:\n return torch.cat([baseline] * input.shape[0])\n return baseline\n\n assert isinstance(inputs, tuple) and isinstance(baselines, tuple), (\n \"inputs and baselines must\"\n \"have tuple type but found baselines: {} and inputs: {}\".format(\n type(baselines), type(inputs)\n )\n )\n return tuple(\n _tensorize_single_baseline(baseline, input)\n for baseline, input in zip(baselines, inputs)\n )\n\n\ndef _reshape_and_sum(\n tensor_input: Tensor, num_steps: int, num_examples: int, layer_size: Tuple[int, ...]\n) -> Tensor:\n # Used for attribution methods which perform integration\n # Sums across integration steps by reshaping tensor to\n # 
(num_steps, num_examples, (layer_size)) and summing over\n # dimension 0. Returns a tensor of size (num_examples, (layer_size))\n return torch.sum(\n tensor_input.reshape((num_steps, num_examples) + layer_size), dim=0\n )\n\n\ndef _call_custom_attribution_func(\n custom_attribution_func: Callable[..., Tuple[Tensor, ...]],\n multipliers: Tuple[Tensor, ...],\n inputs: Tuple[Tensor, ...],\n baselines: Tuple[Tensor, ...],\n) -> Tuple[Tensor, ...]:\n assert callable(custom_attribution_func), (\n \"`custom_attribution_func`\"\n \" must be a callable function but {} provided\".format(\n type(custom_attribution_func)\n )\n )\n custom_attr_func_params = signature(custom_attribution_func).parameters\n\n if len(custom_attr_func_params) == 1:\n return custom_attribution_func(multipliers)\n elif len(custom_attr_func_params) == 2:\n return custom_attribution_func(multipliers, inputs)\n elif len(custom_attr_func_params) == 3:\n return custom_attribution_func(multipliers, inputs, baselines)\n else:\n raise AssertionError(\n \"`custom_attribution_func` must take at least one and at most 3 arguments.\"\n )\n\n\ndef _find_output_mode_and_verify(\n initial_eval: Union[int, float, Tensor],\n num_examples: int,\n perturbations_per_eval: int,\n feature_mask: Union[None, TensorOrTupleOfTensorsGeneric],\n) -> bool:\n \"\"\"\n This method identifies whether the model outputs a single output for a batch\n (agg_output_mode = True) or whether it outputs a single output per example\n (agg_output_mode = False) and returns agg_output_mode. 
The method also\n verifies that perturbations_per_eval is 1 in the case that agg_output_mode is True\n and also verifies that the first dimension of each feature mask if the model\n returns a single output for a batch.\n \"\"\"\n if isinstance(initial_eval, (int, float)) or (\n isinstance(initial_eval, torch.Tensor)\n and (\n len(initial_eval.shape) == 0\n or (num_examples > 1 and initial_eval.numel() == 1)\n )\n ):\n agg_output_mode = True\n assert (\n perturbations_per_eval == 1\n ), \"Cannot have perturbations_per_eval > 1 when function returns scalar.\"\n if feature_mask is not None:\n for single_mask in feature_mask:\n assert single_mask.shape[0] == 1, (\n \"Cannot provide different masks for each example when function \"\n \"returns a scalar.\"\n )\n else:\n agg_output_mode = False\n assert (\n isinstance(initial_eval, torch.Tensor) and initial_eval[0].numel() == 1\n ), \"Target should identify a single element in the model output.\"\n return agg_output_mode\n" ]
[ [ "torch.cat", "torch.numel", "torch.full_like" ] ]
GreenWaves-Technologies/nncf
[ "5cdcb3bdd9b22e3666ff979ff7df3070e6182be8" ]
[ "examples/tensorflow/segmentation/evaluation.py" ]
[ "\"\"\"\n Copyright (c) 2022 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport sys\n\nimport tensorflow as tf\n\nfrom examples.tensorflow.common.utils import close_strategy_threadpool\nfrom nncf.tensorflow import create_compressed_model\nfrom nncf.tensorflow import register_default_init_args\nfrom nncf.tensorflow.helpers.model_manager import TFModelManager\nfrom nncf.tensorflow.utils.state import TFCompressionState\nfrom nncf.tensorflow.utils.state import TFCompressionStateLoader\n\nfrom examples.tensorflow.common.argparser import get_common_argument_parser\nfrom examples.tensorflow.common.distributed import get_distribution_strategy\nfrom examples.tensorflow.common.logger import logger\nfrom examples.tensorflow.common.object_detection.datasets.builder import COCODatasetBuilder\nfrom examples.tensorflow.common.object_detection.checkpoint_utils import get_variables\nfrom examples.tensorflow.common.sample_config import create_sample_config\nfrom examples.tensorflow.common.sample_config import SampleConfig\nfrom examples.tensorflow.common.utils import configure_paths\nfrom examples.tensorflow.common.utils import get_saving_parameters\nfrom examples.tensorflow.common.utils import print_args\nfrom examples.tensorflow.common.utils import SummaryWriter\nfrom examples.tensorflow.common.utils import write_metrics\nfrom examples.tensorflow.common.utils import Timer\nfrom examples.tensorflow.segmentation.models.model_selector import 
get_predefined_config\nfrom examples.tensorflow.segmentation.models.model_selector import get_model_builder\n\n\ndef get_argument_parser():\n parser = get_common_argument_parser(mode=False,\n weights=False,\n epochs=False,\n precision=False,\n save_checkpoint_freq=False,\n to_h5=False,\n dataset_type=False)\n\n parser.add_argument(\n '--mode',\n '-m',\n nargs='+',\n choices=['train', 'test', 'export'],\n default='train',\n help='train: performs validation of a checkpoint that was saved during training '\n '(use --checkpoint-save-dir to specify a path to the train-time checkpoint directory) ;'\n ' test: tests the model checkpoint (use --resume to specify the checkpoint file itself);'\n ' export: exports the model.')\n\n parser.add_argument(\n '--eval-timeout',\n default=None,\n type=int,\n help='The maximum number of seconds to wait between checkpoints. '\n 'If left as None, then the process will wait indefinitely.'\n )\n\n parser.add_argument(\n '--weights',\n default=None,\n type=str,\n help='Path to pretrained weights in ckpt format.'\n )\n\n return parser\n\n\ndef get_config_from_argv(argv, parser):\n args = parser.parse_args(args=argv)\n\n sample_config = SampleConfig(\n {'dataset_type': 'tfrecords'}\n )\n\n config_from_json = create_sample_config(args, parser)\n predefined_config = get_predefined_config(config_from_json.model)\n\n sample_config.update(predefined_config)\n sample_config.update(config_from_json)\n configure_paths(sample_config)\n\n return sample_config\n\n\ndef get_dataset_builders(config, num_devices):\n val_builder = COCODatasetBuilder(config=config,\n is_train=False,\n num_devices=num_devices)\n config_ = config.deepcopy()\n config_.batch_size = val_builder.batch_size\n calibration_builder = COCODatasetBuilder(config=config_,\n is_train=True,\n num_devices=1)\n return val_builder, calibration_builder\n\n\ndef load_checkpoint(checkpoint, ckpt_path):\n logger.info('Load from checkpoint is enabled')\n if tf.io.gfile.isdir(ckpt_path):\n 
path_to_checkpoint = tf.train.latest_checkpoint(ckpt_path)\n logger.info('Latest checkpoint: {}'.format(path_to_checkpoint))\n else:\n path_to_checkpoint = ckpt_path if tf.io.gfile.exists(ckpt_path + '.index') else None\n logger.info('Provided checkpoint: {}'.format(path_to_checkpoint))\n\n if not path_to_checkpoint:\n logger.info('No checkpoint detected')\n return 0\n\n logger.info('Checkpoint file {} found and restoring from checkpoint'.format(path_to_checkpoint))\n status = checkpoint.restore(path_to_checkpoint)\n status.expect_partial()\n logger.info('Completed loading from checkpoint')\n\n return None\n\n\ndef load_compression_state(ckpt_path: str):\n checkpoint = tf.train.Checkpoint(compression_state=TFCompressionStateLoader())\n load_checkpoint(checkpoint, ckpt_path)\n return checkpoint.compression_state.state\n\n\ndef evaluate(test_step, metric, test_dist_dataset, num_batches, print_freq):\n \"\"\"Runs evaluation steps and aggregate metrics\"\"\"\n timer = Timer()\n timer.tic()\n\n logger.info('Testing...')\n for batch_idx, x in enumerate(test_dist_dataset):\n labels, outputs = test_step(x)\n metric.update_state(labels, outputs)\n\n if batch_idx % print_freq == 0:\n time = timer.toc(average=False)\n logger.info('Predict for batch: {}/{} Time: {:.3f} sec'.format(batch_idx, num_batches, time))\n timer.tic()\n\n logger.info('Total time: {:.3f} sec'.format(timer.total_time))\n\n timer.reset()\n\n logger.info('Evaluating predictions...')\n timer.tic()\n result = metric.result()\n timer.toc(average=False)\n logger.info('Total time: {:.3f} sec'.format(timer.total_time))\n\n return result\n\n\ndef create_test_step_fn(strategy, model, predict_post_process_fn):\n \"\"\"Creates a distributed test step\"\"\"\n\n def _test_step_fn(inputs):\n inputs, labels = inputs\n model_outputs = model(inputs, training=False)\n labels, prediction_outputs = predict_post_process_fn(labels, model_outputs)\n\n return labels, prediction_outputs\n\n @tf.function\n def 
test_step(dataset_inputs):\n labels, outputs = strategy.run(_test_step_fn, args=(dataset_inputs,))\n outputs = tf.nest.map_structure(strategy.experimental_local_results, outputs)\n labels = tf.nest.map_structure(strategy.experimental_local_results, labels)\n\n return labels, outputs\n\n return test_step\n\n\ndef restore_compressed_model(config, strategy, model_builder, ckpt_path = None):\n compression_state = None\n if ckpt_path:\n compression_state = load_compression_state(ckpt_path)\n\n with TFModelManager(model_builder.build_model,\n config.nncf_config,\n weights=config.get('weights', None),\n is_training=False) as model:\n with strategy.scope():\n compression_ctrl, compress_model = create_compressed_model(model,\n config.nncf_config,\n compression_state)\n\n variables = get_variables(compress_model)\n checkpoint = tf.train.Checkpoint(variables=variables,\n compression_state=TFCompressionState(compression_ctrl),\n step=tf.Variable(0))\n if ckpt_path:\n load_checkpoint(checkpoint, config.ckpt_path)\n\n return compression_ctrl, compress_model, checkpoint\n\n\ndef run_evaluation(config, eval_timeout=None):\n \"\"\"Runs evaluation on checkpoint save directory\"\"\"\n strategy = get_distribution_strategy(config)\n if config.metrics_dump is not None:\n write_metrics(0, config.metrics_dump)\n\n validation_builder, calibration_builder = get_dataset_builders(config, strategy.num_replicas_in_sync)\n calibration_dataset = calibration_builder.build()\n val_dataset = validation_builder.build()\n num_batches = validation_builder.steps_per_epoch\n test_dist_dataset = strategy.experimental_distribute_dataset(val_dataset)\n\n config.nncf_config = register_default_init_args(nncf_config=config.nncf_config,\n data_loader=calibration_dataset,\n batch_size=validation_builder.global_batch_size)\n\n # We use `model_batch_size` to create input layer for model\n config.model_batch_size = validation_builder.batch_size\n\n model_builder = get_model_builder(config)\n eval_metric = 
model_builder.eval_metrics()\n predict_post_process_fn = model_builder.post_processing\n\n if 'test' in config.mode:\n compression_ctrl, compress_model, _ = restore_compressed_model(config, strategy, model_builder,\n config.ckpt_path)\n test_step = create_test_step_fn(strategy, compress_model, predict_post_process_fn)\n\n statistics = compression_ctrl.statistics()\n logger.info(statistics.to_str())\n metric_result = evaluate(test_step, eval_metric, test_dist_dataset, num_batches, config.print_freq)\n eval_metric.reset_states()\n logger.info('Test metric = {}'.format(metric_result))\n\n if 'export' in config.mode:\n save_path, save_format = get_saving_parameters(config)\n compression_ctrl.export_model(save_path, save_format)\n logger.info(\"Saved to {}\".format(save_path))\n\n elif 'train' in config.mode:\n validation_summary_writer = SummaryWriter(config.log_dir, 'validation')\n\n is_first_checkpoint = True\n for checkpoint_path in tf.train.checkpoints_iterator(config.checkpoint_save_dir, config.eval_timeout):\n if is_first_checkpoint:\n is_first_checkpoint = False\n _, compress_model, checkpoint = restore_compressed_model(config, strategy, model_builder,\n checkpoint_path)\n test_step = create_test_step_fn(strategy, compress_model, predict_post_process_fn)\n else:\n checkpoint.restore(checkpoint_path).expect_partial()\n\n logger.info('Checkpoint file {} found and restoring from checkpoint'.format(checkpoint_path))\n logger.info('Checkpoint step: {}'.format(checkpoint.step.numpy()))\n metric_result = evaluate(test_step, eval_metric, test_dist_dataset, num_batches, config.print_freq)\n\n current_step = checkpoint.step.numpy()\n validation_summary_writer(metrics=metric_result, step=current_step)\n\n eval_metric.reset_states()\n logger.info('Validation metric = {}'.format(metric_result))\n\n validation_summary_writer.close()\n\n if config.metrics_dump is not None:\n write_metrics(metric_result['AP'], config.metrics_dump)\n\n 
close_strategy_threadpool(strategy)\n\n\ndef export(config):\n model_builder = get_model_builder(config)\n\n strategy = tf.distribute.get_strategy()\n compression_ctrl, _, _ = restore_compressed_model(config, strategy, model_builder, config.ckpt_path)\n\n save_path, save_format = get_saving_parameters(config)\n compression_ctrl.export_model(save_path, save_format)\n logger.info(\"Saved to {}\".format(save_path))\n\n\ndef main(argv):\n tf.get_logger().setLevel('INFO')\n parser = get_argument_parser()\n config = get_config_from_argv(argv, parser)\n print_args(config)\n\n if config.dataset_type != 'tfrecords':\n raise RuntimeError('The train.py does not support TensorFlow Datasets (TFDS). '\n 'Please use TFRecords.')\n\n if 'train' in config.mode or 'test' in config.mode:\n run_evaluation(config)\n elif 'export' in config.mode:\n export(config)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n" ]
[ [ "tensorflow.train.latest_checkpoint", "tensorflow.distribute.get_strategy", "tensorflow.Variable", "tensorflow.io.gfile.isdir", "tensorflow.nest.map_structure", "tensorflow.io.gfile.exists", "tensorflow.train.checkpoints_iterator", "tensorflow.get_logger" ] ]
shuu-tatsu/qagan
[ "15c76655cfecba4f6073940728d930b58a305eec" ]
[ "src/discriminator.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nタスク:2つのセンテンス間の関連度を算出する.\n用途:QAとCDからの引用.\n'''\n\nimport sys\nsys.path.append('./')\nimport utils\nimport load\nimport index\nimport glove_pre_trained_vectors\nfrom torch.nn.parameter import Parameter\nfrom io import open\nimport unicodedata\nimport string\nimport re\nimport random\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nimport torch.nn.functional as F\nimport time\nimport config\nimport os\nimport pickle\n\n\nclass EncoderRNN(nn.Module):\n\n def __init__(self, vocab, hidden_size, glove_file, embedding_dim):\n super(EncoderRNN, self).__init__()\n self.hidden_size = hidden_size\n self.gru = nn.GRUCell(hidden_size, hidden_size)\n self.vocab = vocab\n vocab_size = vocab.n_words\n # one_hot と embeds の内積を取って,センテンス中の各単語をベクトル化し,\n # それらを concat したセンテンスベクトルを取得\n self.embedding = nn.Embedding(vocab_size, hidden_size)\n\n # weight の glove による初期化\n self.embedding.weight = self.glove_init(glove_file, vocab, embedding_dim)\n\n def forward(self, input, hidden):\n embedded = self.embedding(input)\n output = self.gru(embedded, hidden)\n return output\n\n def initHidden(self):\n return torch.zeros(1, self.hidden_size, device=config.device)\n\n def glove_init(self, glove_file, vocab, embedding_dim):\n if os.path.exists(config.glove_pre_trained_pickled_file):\n print('Glove_pre_trained_pickled_file is already exists. Loading now ...')\n else:\n print('Glove_pre_trained_pickled_file is not exists. 
Getting weight now ...')\n glove_loader = glove_pre_trained_vectors.GloVeLoader(glove_file, vocab, embedding_dim)\n glove_loader.get_weight()\n\n with open(config.glove_pre_trained_pickled_file, 'rb') as file_path:\n weight = pickle.load(file_path)\n return Parameter(weight)\n\n\nclass Classifier(nn.Module):\n\n def __init__(self, encoder, hidden_size):\n super().__init__()\n self.encoder = encoder\n self.linear = nn.Linear(encoder.hidden_size * 2, hidden_size)\n self.linear2 = nn.Linear(hidden_size, 2)\n\n def forward(self, answer_tensor, question_tensor):\n '''\n 回答センテンス→固定長ベクトルA\n 質問センテンス→固定長ベクトルQ\n 固定長ベクトルAと固定長ベクトルQのconcatをLogSoftmaxに通し,2次元テンソルで出力\n '''\n # Feed answer into GRU and get fixed answer vector\n answer_fixed_tensor = self.get_fiexed_vector(answer_tensor)\n\n # Feed question into GRU and get fixed question vector\n question_fixed_tensor = self.get_fiexed_vector(question_tensor)\n\n vectors_cat_tensor = torch.cat((question_fixed_tensor, answer_fixed_tensor), dim=1)\n\n # LogSoftmax() whose output dimention size is 2\n sequence_fixed_tensor = F.log_softmax(self.linear2(F.relu(self.linear(vectors_cat_tensor))), dim=1)\n\n return sequence_fixed_tensor\n\n def get_fiexed_vector(self, sequence_tensor):\n encoder_hidden = self.encoder.initHidden()\n answer_length = sequence_tensor.size(0)\n # encoder_outputs = []\n for ei in range(answer_length):\n encoder_hidden = self.encoder.forward(sequence_tensor[ei], encoder_hidden)\n # encoder_outputs.append(encoder_output)\n # return torch.mean(torch.cat(encoder_outputs, dim=0), dim=0, keepdim=True)\n return encoder_hidden\n\n\ndef tensor_from_sentence(sentence):\n return torch.tensor(sentence, dtype=torch.long, device=config.device).view(-1, 1)\n\n\ndef tensors_from_pair(pair):\n # answerを入力,questionを出力\n # pair[1]: answer_index_list as input\n # pair[0]: question_index_list as target\n # pair[2]: label\n input_tensor = tensor_from_sentence(pair[1])\n target_tensor = tensor_from_sentence(pair[0])\n return 
(input_tensor, target_tensor, pair[2])\n\n\ndef random_choice_pair_from_pairs(pairs):\n num_pairs = len(pairs[0])\n i = random.randrange(num_pairs)\n return (pairs[0][i], pairs[1][i], pairs[2][i])\n\n\ndef get_label_tensor(label_int):\n if label_int == 1: # positive\n return torch.LongTensor([1])\n elif label_int == 0: # negative\n return torch.LongTensor([0])\n else:\n print('Labeling Error')\n\n\ndef train_iters(train_pairs, dev_pairs, model, n_iters, batch_size,\n print_every, learning_rate=config.args.learning_rate):\n # pairs[0]: questions_index_list\n # pairs[1]: answers_index_list\n # pairs[2]: label 0 or 1\n #\n # pairs: ([[q1_index_list], [q2_index_list], ..., [qn_index_list]],\n # [[a1_index_list], [a2_index_list], ..., [an_index_list]],\n # [[l1_int], [l2_int], ..., [ln_int]])\n start = time.time()\n print_loss_total = 0 # Reset every print_every\n\n # optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)\n # print(list(model.parameters()))\n optimizer = optim.Adam(model.parameters(), lr=0.001)\n\n training_pairs_list = [tensors_from_pair(random_choice_pair_from_pairs(train_pairs))\n for i in range(n_iters)]\n criterion = nn.NLLLoss()\n batch_loss = 0.0\n\n # 1 iter につき,1つのQAペア\n for iter in range(1, n_iters + 1):\n training_pair = training_pairs_list[iter - 1]\n answer_tensor = training_pair[0]\n question_tensor = training_pair[1]\n label_int = training_pair[2]\n\n '''\n answer_tensor:\n tensor([[ 6],\n [ 8],\n [ 7],\n [ 2],\n [ 1]])\n\n question_tensor:\n tensor([[ 56],\n [ 26621],\n [ 5],\n [ 6440],\n [ 4177],\n [ 1797],\n [ 1]])\n\n label_int:\n 1\n '''\n # loss: NLLLoss\n y = model.forward(answer_tensor, question_tensor)\n label_tensor = get_label_tensor(label_int).to(config.device)\n loss = criterion(y, label_tensor)\n\n print_loss_total += loss.item()\n\n batch_loss += loss\n if iter % batch_size == 0:\n # print(model.encoder.gru.)\n optimizer.zero_grad()\n batch_loss.backward()\n optimizer.step()\n batch_loss = 0.0\n\n if iter % 
print_every == 0:\n print_loss_avg = print_loss_total / print_every\n print_loss_total = 0\n # 訓練データでの評価\n train_score = evaluate_randomly(train_pairs, model, n_iters=100)\n # 開発データでの評価\n dev_score = evaluate_randomly(dev_pairs, model, n_iters=100)\n print('Time:%s (%d %d%%) Loss:%.4f Accuracy(train data):%s Accuracy(dev data):%s' % (utils.time_since(start, iter / n_iters),\n iter, iter / n_iters * 100, print_loss_avg, train_score, dev_score))\n\n\ndef evaluate_randomly(pairs, model, n_iters):\n count = 0\n for i in range(n_iters):\n # pairsの中からランダムに1組のpairを取得し,tensor型に変換\n training_pair_list = tensors_from_pair(random_choice_pair_from_pairs(pairs))\n answer_tensor = training_pair_list[0]\n question_tensor = training_pair_list[1]\n label_int = training_pair_list[2]\n with torch.no_grad():\n predicted_label_tensor = model.forward(answer_tensor, question_tensor)\n negative_probability = predicted_label_tensor[0][0].item()\n positive_probability = predicted_label_tensor[0][1].item()\n if positive_probability >= negative_probability:\n predicted_label = 1\n else:\n predicted_label = 0\n\n if predicted_label == label_int:\n count += 1\n accuracy = count / n_iters\n return accuracy\n\n\ndef labeling(sequences_index_tuple, label):\n sequences_size = len(sequences_index_tuple[0])\n sequences_label_list = [label for i in range(sequences_size)]\n sequences_index_tuple_labeled = (sequences_index_tuple[0],\n sequences_index_tuple[1],\n sequences_label_list)\n return sequences_index_tuple_labeled\n\n\ndef merge_posi_nega_data(positive_tuple, negative_tuple):\n questions = positive_tuple[0] + negative_tuple[0]\n answers = positive_tuple[1] + negative_tuple[1]\n labels = positive_tuple[2] + negative_tuple[2]\n pairs = (questions, answers, labels)\n return pairs\n\n\ndef get_pairs(data_file, type):\n vocab, positive_index_tuple, negative_index_tuple = utils.load_data(data_file, type)\n positive_index_tuple_labeled = labeling(positive_index_tuple, label=1)\n 
negative_index_tuple_labeled = labeling(negative_index_tuple, label=0)\n pairs = merge_posi_nega_data(positive_index_tuple_labeled, negative_index_tuple_labeled)\n return vocab, pairs\n\ndef get_positive_pairs(data_file, type, input_lang):\n vocab, positive_index_tuple, negative_index_tuple = utils.load_data_with_input_lang(data_file, type, input_lang)\n positive_index_tuple_labeled = labeling(positive_index_tuple, label=1)\n pairs = positive_index_tuple_labeled\n return vocab, pairs\n\ndef main():\n torch.manual_seed(1)\n\n # loading training data as train_pairs\n # pairs の前半分はポジティブデータ、後半分はネガティブデータ\n vocab, train_pairs = get_pairs(data_file=config.quora_train_file, type='train')\n # loading eval data as eval_pairs\n _, dev_pairs = get_pairs(data_file=config.quora_dev_file, type='dev')\n # pairs[0]: questions_index_list\n # pairs[1]: answers_index_list\n # pairs[2]: label 0 or 1\n #\n # pairs: ([[q1_index_list], [q2_index_list], ..., [qn_index_list]],\n # [[a1_index_list], [a2_index_list], ..., [an_index_list]],\n # [[l1_int], [l2_int], ..., [ln_int]])\n print('QUORA Train size:{} Dev size:{} Vocab size:{}'.format(len(train_pairs[0]), len(dev_pairs[0]), len(vocab)))\n use_toy = True\n if use_toy:\n train_size = 5000\n dev_size = 500\n train_pairs = ((train_pairs[0][:train_size // 2] + train_pairs[0][-train_size // 2:]),\n (train_pairs[1][:train_size // 2] + train_pairs[1][-train_size // 2:]),\n (train_pairs[2][:train_size // 2] + train_pairs[2][-train_size // 2:]))\n dev_pairs = ((dev_pairs[0][:dev_size // 2] + dev_pairs[0][-dev_size // 2:]),\n (dev_pairs[1][:dev_size // 2] + dev_pairs[1][-dev_size // 2:]),\n (dev_pairs[2][:dev_size // 2] + dev_pairs[2][-dev_size // 2:]))\n\n else:\n train_size = len(train_pairs[0])\n hidden_size = config.args.embedding_dim\n encoder = EncoderRNN(vocab,\n hidden_size,\n glove_file=config.glove_file,\n embedding_dim=config.args.embedding_dim).to(config.device)\n model = Classifier(encoder, encoder.hidden_size).to(config.device)\n 
print('Using Train size:{} Dev size:{} Vocab size:{}'.format(len(train_pairs[0]), len(dev_pairs[0]), len(vocab)))\n train_iters(train_pairs, dev_pairs, model, n_iters=10 * train_size, batch_size=32, print_every=32)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.NLLLoss", "torch.zeros", "torch.nn.Linear", "torch.cat", "torch.nn.parameter.Parameter", "torch.no_grad", "torch.manual_seed", "torch.LongTensor", "torch.tensor", "torch.nn.Embedding", "torch.nn.GRUCell" ] ]